rawc0der commited on
Commit
0ee8033
·
verified ·
1 Parent(s): c34e859

Update Gradio_UI.py

Browse files

Fix the streaming error

Files changed (1) hide show
  1. Gradio_UI.py +56 -1
Gradio_UI.py CHANGED
@@ -122,7 +122,6 @@ def pull_messages_from_step(
122
  yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
123
  yield gr.ChatMessage(role="assistant", content="-----")
124
 
125
-
126
  def stream_to_gradio(
127
  agent,
128
  task: str,
@@ -139,6 +138,62 @@ def stream_to_gradio(
139
  total_input_tokens = 0
140
  total_output_tokens = 0
141
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
142
  for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
143
  # Track tokens if model provides them
144
  if hasattr(agent.model, "last_input_token_count"):
 
122
  yield gr.ChatMessage(role="assistant", content=f"{step_footnote}")
123
  yield gr.ChatMessage(role="assistant", content="-----")
124
 
 
125
  def stream_to_gradio(
126
  agent,
127
  task: str,
 
138
  total_input_tokens = 0
139
  total_output_tokens = 0
140
 
141
+ for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
142
+ # Track tokens if model provides them
143
+ if hasattr(agent.model, "last_input_token_count"):
144
+ # Check if token counts are not None before adding
145
+ if agent.model.last_input_token_count is not None:
146
+ total_input_tokens += agent.model.last_input_token_count
147
+ if isinstance(step_log, ActionStep):
148
+ step_log.input_token_count = agent.model.last_input_token_count
149
+
150
+ if hasattr(agent.model, "last_output_token_count") and agent.model.last_output_token_count is not None:
151
+ total_output_tokens += agent.model.last_output_token_count
152
+ if isinstance(step_log, ActionStep):
153
+ step_log.output_token_count = agent.model.last_output_token_count
154
+
155
+ for message in pull_messages_from_step(
156
+ step_log,
157
+ ):
158
+ yield message
159
+
160
+ final_answer = step_log # Last log is the run's final_answer
161
+ final_answer = handle_agent_output_types(final_answer)
162
+
163
+ if isinstance(final_answer, AgentText):
164
+ yield gr.ChatMessage(
165
+ role="assistant",
166
+ content=f"**Final answer:**\n{final_answer.to_string()}\n",
167
+ )
168
+ elif isinstance(final_answer, AgentImage):
169
+ yield gr.ChatMessage(
170
+ role="assistant",
171
+ content={"path": final_answer.to_string(), "mime_type": "image/png"},
172
+ )
173
+ elif isinstance(final_answer, AgentAudio):
174
+ yield gr.ChatMessage(
175
+ role="assistant",
176
+ content={"path": final_answer.to_string(), "mime_type": "audio/wav"},
177
+ )
178
+ else:
179
+ yield gr.ChatMessage(role="assistant", content=f"**Final answer:** {str(final_answer)}")
180
+
181
+ def stream_to_gradio_bk(
182
+ agent,
183
+ task: str,
184
+ reset_agent_memory: bool = False,
185
+ additional_args: Optional[dict] = None,
186
+ ):
187
+ """Runs an agent with the given task and streams the messages from the agent as gradio ChatMessages."""
188
+ if not _is_package_available("gradio"):
189
+ raise ModuleNotFoundError(
190
+ "Please install 'gradio' extra to use the GradioUI: `pip install 'smolagents[gradio]'`"
191
+ )
192
+ import gradio as gr
193
+
194
+ total_input_tokens = 0
195
+ total_output_tokens = 0
196
+
197
  for step_log in agent.run(task, stream=True, reset=reset_agent_memory, additional_args=additional_args):
198
  # Track tokens if model provides them
199
  if hasattr(agent.model, "last_input_token_count"):