fsaudm committed on
Commit c8f24dd · verified · 1 Parent(s): e89c595

Update app.py

Files changed (1)
  1. app.py +26 -19
app.py CHANGED
@@ -1,4 +1,4 @@
-from smolagents import CodeAgent, tool, load_tool
+from smolagents import CodeAgent,DuckDuckGoSearchTool, HfApiModel,load_tool,tool
 import datetime
 import requests
 import pytz
@@ -15,10 +15,10 @@ def text_to_flowchart(steps_text: str) -> str:
     """
     Generates a flowchart diagram from pre-processed text that lists sequential process steps.
     The input should be a text with each step on a new line (optionally prefixed by bullet markers).
-
+
     Args:
         steps_text: A string containing the process steps.
-
+
     Returns:
         A data URL for a PNG image of the generated flowchart.
     """
@@ -56,46 +56,53 @@ def text_to_flowchart(steps_text: str) -> str:

     return data_url

+
 @tool
 def get_current_time_in_timezone(timezone: str) -> str:
-    """
-    Fetches the current local time for a specified timezone.
-
+    """A tool that fetches the current local time in a specified timezone.
     Args:
-        timezone: A valid timezone string (e.g., 'America/New_York').
-
-    Returns:
-        A string with the current local time or an error message.
+        timezone: A string representing a valid timezone (e.g., 'America/New_York').
     """
     try:
+        # Create timezone object
         tz = pytz.timezone(timezone)
+        # Get current time in that timezone
         local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
         return f"The current local time in {timezone} is: {local_time}"
     except Exception as e:
         return f"Error fetching time for timezone '{timezone}': {str(e)}"

+
 final_answer = FinalAnswerTool()

-model = load_tool("HfApiModel", trust_remote_code=True)(
-    max_tokens=2096,
-    temperature=0.5,
-    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
-    custom_role_conversions=None,
+# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
+# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
+
+model = HfApiModel(
+    max_tokens=2096,
+    temperature=0.5,
+    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
+    custom_role_conversions=None,
 )

+
+# Import tool from Hub
+image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
+
 with open("prompts.yaml", 'r') as stream:
     prompt_templates = yaml.safe_load(stream)

 agent = CodeAgent(
     model=model,
-    tools=[text_to_flowchart, final_answer],
+    tools=[final_answer], ## add your tools here (don't remove final answer)
     max_steps=6,
     verbosity_level=1,
     grammar=None,
     planning_interval=None,
-    name="Flowchart Generator",
-    description="Generates a flowchart diagram from sequential steps using SchemDraw's flow module.",
+    name=None,
+    description=None,
     prompt_templates=prompt_templates
 )

-GradioUI(agent).launch()
+
+GradioUI(agent).launch()
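
The comment added above the HfApiModel(...) call suggests switching model_id to a shared Hugging Face endpoint when the public Qwen2.5 Coder model is overloaded. A minimal sketch of that fallback, not part of the commit, assuming the rest of the call stays exactly as committed:

from smolagents import HfApiModel

model = HfApiModel(
    max_tokens=2096,
    temperature=0.5,
    # Fallback endpoint taken from the comment in the diff above; it also serves Qwen2.5 Coder
    model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud',
    custom_role_conversions=None,
)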
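
The commit also leaves tools=[final_answer] with the note "add your tools here (don't remove final answer)", while get_current_time_in_timezone and image_generation_tool are defined but never registered. A sketch of how they could be wired into the agent, reusing objects already created earlier in app.py (model, final_answer, prompt_templates); the DuckDuckGoSearchTool() instance and the name/description values are hypothetical additions, not part of the commit:

from smolagents import CodeAgent, DuckDuckGoSearchTool

web_search = DuckDuckGoSearchTool()    # imported in the new header line but never instantiated in the commit

agent = CodeAgent(
    model=model,
    tools=[
        final_answer,                  # keep, per the "don't remove final answer" note
        get_current_time_in_timezone,  # @tool-decorated function defined earlier in app.py
        image_generation_tool,         # loaded from the Hub via load_tool(...)
        web_search,                    # optional extra tool; drop if web search is not wanted
    ],
    max_steps=6,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    name="time_and_image_agent",       # hypothetical; the commit sets name=None
    description="Tells the local time in a timezone and generates images.",  # hypothetical; the commit sets description=None
    prompt_templates=prompt_templates,
)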