Dango233 committed on
Commit 86662f6 · 1 Parent(s): ffc5b57
Files changed (2)
  1. app.py +171 -91
  2. lang.py +13 -7
app.py CHANGED
@@ -11,17 +11,23 @@ load_dotenv(override=True)
 required_env_vars = ["API_KEY", "API_URL", "API_MODEL"]
 missing_vars = [var for var in required_env_vars if not os.getenv(var)]
 if missing_vars:
-    raise EnvironmentError(f"Missing required environment variables: {', '.join(missing_vars)}")
+    raise EnvironmentError(
+        f"Missing required environment variables: {', '.join(missing_vars)}"
+    )
 
 
 class AppConfig:
     DEFAULT_THROUGHPUT = 10
     SYNC_THRESHOLD_DEFAULT = 0
     API_TIMEOUT = 20
-    LOADING_DEFAULT = "✅ Ready! <br> Think together with AI. Use Shift+Enter to toggle generation"
+    LOADING_DEFAULT = (
+        "✅ Ready! <br> Think together with AI. Use Shift+Enter to toggle generation"
+    )
+
 
 class DynamicState:
     """动态UI状态"""
+
     def __init__(self):
         self.should_stream = False
         self.stream_completed = False
@@ -36,35 +42,45 @@ class DynamicState:
         self.stream_completed = False
         self.should_stream = True
         return self.ui_state_controller()
-
+
     def ui_state_controller(self):
         """生成动态UI组件状态"""
         print("UPDATE UI!!")
         # [control_button, status_indicator, thought_editor, reset_button]
         lang_data = LANGUAGE_CONFIG[self.current_language]
-        control_value = lang_data["pause_btn"] if self.should_stream else lang_data["generate_btn"]
+        control_value = (
+            lang_data["pause_btn"] if self.should_stream else lang_data["generate_btn"]
+        )
         control_variant = "secondary" if self.should_stream else "primary"
-        status_value = lang_data["completed"] if self.stream_completed else lang_data["interrupted"]
+        status_value = (
+            lang_data["completed"]
+            if self.stream_completed
+            else lang_data["interrupted"]
+        )
         return (
-            gr.update(
-                value=control_value,
-                variant=control_variant
-            ),
+            gr.update(value=control_value, variant=control_variant),
             gr.update(
                 value=status_value,
             ),
             gr.update(),
-            gr.update(interactive = not self.should_stream)
+            gr.update(interactive=not self.should_stream),
         )
+
     def reset_workspace(self):
         """重置工作区状态"""
         self.stream_completed = False
         self.should_stream = False
         self.in_cot = True
-        return self.ui_state_controller() + ("", "", LANGUAGE_CONFIG["en"]["bot_default"])
-
+        return self.ui_state_controller() + (
+            "",
+            "",
+            LANGUAGE_CONFIG["en"]["bot_default"],
+        )
+
+
 class CoordinationManager:
     """管理人类与AI的协同节奏"""
+
     def __init__(self, paragraph_threshold, initial_content):
         self.paragraph_threshold = paragraph_threshold
         self.initial_paragraph_count = initial_content.count("\n\n")
@@ -75,7 +91,10 @@ class CoordinationManager:
             return False
 
         current_paragraphs = current_content.count("\n\n")
-        if current_paragraphs - self.initial_paragraph_count >= self.paragraph_threshold:
+        if (
+            current_paragraphs - self.initial_paragraph_count
+            >= self.paragraph_threshold
+        ):
             self.triggered = True
             return True
         return False
@@ -83,6 +102,7 @@
 
 class ConvoState:
     """State of current ROUND of convo"""
+
     def __init__(self):
         self.throughput = AppConfig.DEFAULT_THROUGHPUT
         self.sync_threshold = AppConfig.SYNC_THRESHOLD_DEFAULT
@@ -97,14 +117,19 @@ class ConvoState:
         self.current["result"] = ""
         self.convo.append(self.current)
 
-
     def flatten_output(self):
         output = []
         for round in self.convo:
             output.append({"role": "user", "content": round["user"]})
-            if len(round["cot"])>0:
-                output.append({"role": "assistant", "content": round["cot"], "metadata":{"title": f"Chain of Thought"}})
-            if len(round["result"])>0:
+            if len(round["cot"]) > 0:
+                output.append(
+                    {
+                        "role": "assistant",
+                        "content": round["cot"],
+                        "metadata": {"title": f"Chain of Thought"},
+                    }
+                )
+            if len(round["result"]) > 0:
                 output.append({"role": "assistant", "content": round["result"]})
         return output
 
@@ -115,21 +140,25 @@ class ConvoState:
         api_client = OpenAI(
             api_key=os.getenv("API_KEY"),
             base_url=os.getenv("API_URL"),
-            timeout=AppConfig.API_TIMEOUT
+            timeout=AppConfig.API_TIMEOUT,
         )
         coordinator = CoordinationManager(self.sync_threshold, current_content)
 
         try:
             messages = [
                 {"role": "user", "content": user_prompt},
-                {"role": "assistant", "content": f"<think>\n{current_content}", "prefix": True}
+                {
+                    "role": "assistant",
+                    "content": f"<think>\n{current_content}",
+                    "prefix": True,
+                },
             ]
             self.current["user"] = user_prompt
             response_stream = api_client.chat.completions.create(
                 model=os.getenv("API_MODEL"),
                 messages=messages,
                 stream=True,
-                timeout=AppConfig.API_TIMEOUT
+                timeout=AppConfig.API_TIMEOUT,
             )
             for chunk in response_stream:
                 chunk_content = chunk.choices[0].delta.content
@@ -137,34 +166,51 @@ class ConvoState:
                     dynamic_state.should_stream = False
                 if not dynamic_state.should_stream:
                     break
-
+
                 if chunk_content:
                     full_response += chunk_content
                     # Update Convo State
                     think_complete = "</think>" in full_response
                     dynamic_state.in_cot = not think_complete
                     if think_complete:
-                        self.current["cot"], self.current["result"] = full_response.split("</think>")
+                        self.current["cot"], self.current["result"] = (
+                            full_response.split("</think>")
+                        )
                     else:
-                        self.current["cot"], self.current["result"] = (full_response, "")
-                    status = lang_data["loading_thinking"] if dynamic_state.in_cot else lang_data["loading_output"]
+                        self.current["cot"], self.current["result"] = (
+                            full_response,
+                            "",
+                        )
+                    status = (
+                        lang_data["loading_thinking"]
+                        if dynamic_state.in_cot
+                        else lang_data["loading_output"]
+                    )
                     yield full_response, status, self.flatten_output()
 
                     interval = 1.0 / self.throughput
                     start_time = time.time()
-                    while (time.time() - start_time) < interval and dynamic_state.should_stream:
+                    while (
+                        time.time() - start_time
+                    ) < interval and dynamic_state.should_stream:
                         time.sleep(0.005)
 
         except Exception as e:
             error_msg = LANGUAGE_CONFIG[self.current_language].get("error", "Error")
             full_response += f"\n\n[{error_msg}: {str(e)}]"
-            yield full_response, error_msg, status, self.flatten_output() + [{"role":"assistant","content": error_msg, "metadata":{"title": f"❌Error"}}]
+            yield full_response, error_msg, status, self.flatten_output() + [
+                {
+                    "role": "assistant",
+                    "content": error_msg,
+                    "metadata": {"title": f"❌Error"},
+                }
+            ]
 
         finally:
             dynamic_state.should_stream = False
             if "status" not in locals():
                 status = "Whoops... ERROR"
-            if 'response_stream' in locals():
+            if "response_stream" in locals():
                 response_stream.close()
             yield full_response, status, self.flatten_output()
 
@@ -176,66 +222,57 @@ def update_interface_language(selected_lang, convo_state, dynamic_state):
     lang_data = LANGUAGE_CONFIG[selected_lang]
     return [
         gr.update(value=f"{lang_data['title']}"),
-        gr.update(label=lang_data["prompt_label"], placeholder=lang_data["prompt_placeholder"]),
-        gr.update(label=lang_data["editor_label"], placeholder=lang_data["editor_placeholder"]),
-        gr.update(label=lang_data["sync_threshold_label"], info=lang_data["sync_threshold_info"]),
-        gr.update(label=lang_data["throughput_label"], info=lang_data["throughput_info"]),
+        gr.update(
+            label=lang_data["prompt_label"], placeholder=lang_data["prompt_placeholder"]
+        ),
+        gr.update(
+            label=lang_data["editor_label"], placeholder=lang_data["editor_placeholder"]
+        ),
+        gr.update(
+            label=lang_data["sync_threshold_label"],
+            info=lang_data["sync_threshold_info"],
+        ),
+        gr.update(
+            label=lang_data["throughput_label"], info=lang_data["throughput_info"]
+        ),
         gr.update(
-            value=lang_data["pause_btn"] if dynamic_state.should_stream else lang_data["generate_btn"],
-            variant="secondary" if dynamic_state.should_stream else "primary"
+            value=(
+                lang_data["pause_btn"]
+                if dynamic_state.should_stream
+                else lang_data["generate_btn"]
+            ),
+            variant="secondary" if dynamic_state.should_stream else "primary",
         ),
         gr.update(label=lang_data["language_label"]),
-        gr.update(value=lang_data["clear_btn"], interactive = not dynamic_state.should_stream),
+        gr.update(
+            value=lang_data["clear_btn"], interactive=not dynamic_state.should_stream
+        ),
        gr.update(value=lang_data["introduction"]),
        gr.update(value=lang_data["bot_default"], label=lang_data["bot_label"]),
    ]
 
 
-
 theme = gr.themes.Base(font="system-ui", primary_hue="stone")
 
 with gr.Blocks(theme=theme, css_paths="styles.css") as demo:
     convo_state = gr.State(ConvoState)
-    dynamic_state = gr.State(DynamicState) # DynamicState is now a separate state
+    dynamic_state = gr.State(DynamicState)  # DynamicState is now a separate state
 
     with gr.Row(variant=""):
-        title_md = gr.Markdown(f"## {LANGUAGE_CONFIG['en']['title']} \n GitHub: https://github.com/Intelligent-Internet/CoT-Lab-Demo", container=False)
+        title_md = gr.Markdown(
+            f"## {LANGUAGE_CONFIG['en']['title']} \n GitHub: https://github.com/Intelligent-Internet/CoT-Lab-Demo",
+            container=False,
+        )
         lang_selector = gr.Dropdown(
-            choices=["en", "zh"],
+            choices=["en", "zh"],
             value="en",
             elem_id="compact_lang_selector",
             scale=0,
-            container=False
+            container=False,
         )
-
+
     with gr.Row(equal_height=True):
-        # 对话面板
-        with gr.Column(scale=1, min_width=500):
-            chatbot = gr.Chatbot(type="messages", height=300,
-                value=LANGUAGE_CONFIG['en']['bot_default'],
-                group_consecutive_messages=False,
-                show_copy_all_button=True,
-                show_share_button=True,
-                label=LANGUAGE_CONFIG['en']['bot_label']
-            )
-            prompt_input = gr.Textbox(
-                label=LANGUAGE_CONFIG["en"]["prompt_label"],
-                lines=2,
-                placeholder=LANGUAGE_CONFIG["en"]["prompt_placeholder"],
-                max_lines=5,
-            )
-            with gr.Row():
-                control_button = gr.Button(
-                    value=LANGUAGE_CONFIG["en"]["generate_btn"],
-                    variant="primary"
-                )
-                next_turn_btn = gr.Button(
-                    value=LANGUAGE_CONFIG["en"]["clear_btn"],
-                    interactive=True
-                )
-            status_indicator = gr.Markdown(AppConfig.LOADING_DEFAULT)
-            intro_md = gr.Markdown(LANGUAGE_CONFIG["en"]["introduction"], visible=False)
-
+
         # 思考编辑面板
         with gr.Column(scale=1, min_width=400):
             thought_editor = gr.Textbox(
@@ -243,7 +280,7 @@ with gr.Blocks(theme=theme, css_paths="styles.css") as demo:
                 lines=16,
                 placeholder=LANGUAGE_CONFIG["en"]["editor_placeholder"],
                 autofocus=True,
-                elem_id="editor"
+                elem_id="editor",
             )
             with gr.Row():
                 sync_threshold_slider = gr.Slider(
@@ -252,7 +289,7 @@ with gr.Blocks(theme=theme, css_paths="styles.css") as demo:
                     value=AppConfig.SYNC_THRESHOLD_DEFAULT,
                     step=1,
                     label=LANGUAGE_CONFIG["en"]["sync_threshold_label"],
-                    info=LANGUAGE_CONFIG["en"]["sync_threshold_info"]
+                    info=LANGUAGE_CONFIG["en"]["sync_threshold_info"],
                 )
                 throughput_control = gr.Slider(
                     minimum=1,
@@ -260,64 +297,107 @@ with gr.Blocks(theme=theme, css_paths="styles.css") as demo:
                     value=AppConfig.DEFAULT_THROUGHPUT,
                     step=1,
                     label=LANGUAGE_CONFIG["en"]["throughput_label"],
-                    info=LANGUAGE_CONFIG["en"]["throughput_info"]
+                    info=LANGUAGE_CONFIG["en"]["throughput_info"],
+                )
+
+        # 对话面板
+        with gr.Column(scale=1, min_width=500):
+            chatbot = gr.Chatbot(
+                type="messages",
+                height=300,
+                value=LANGUAGE_CONFIG["en"]["bot_default"],
+                group_consecutive_messages=False,
+                show_copy_all_button=True,
+                show_share_button=True,
+                label=LANGUAGE_CONFIG["en"]["bot_label"],
+            )
+            prompt_input = gr.Textbox(
+                label=LANGUAGE_CONFIG["en"]["prompt_label"],
+                lines=2,
+                placeholder=LANGUAGE_CONFIG["en"]["prompt_placeholder"],
+                max_lines=5,
+            )
+            with gr.Row():
+                control_button = gr.Button(
+                    value=LANGUAGE_CONFIG["en"]["generate_btn"], variant="primary"
+                )
+                next_turn_btn = gr.Button(
+                    value=LANGUAGE_CONFIG["en"]["clear_btn"], interactive=True
                 )
+            status_indicator = gr.Markdown(AppConfig.LOADING_DEFAULT)
+            intro_md = gr.Markdown(LANGUAGE_CONFIG["en"]["introduction"], visible=False)
 
     # 交互逻辑
 
-    stateful_ui = (control_button, status_indicator, thought_editor, next_turn_btn)
+    stateful_ui = (control_button, status_indicator, thought_editor, next_turn_btn)
 
     throughput_control.change(
         lambda val, s: setattr(s, "throughput", val),
         [throughput_control, convo_state],
         None,
-        queue=False
-    )
+        queue=False,
+    )
 
     sync_threshold_slider.change(
         lambda val, s: setattr(s, "sync_threshold", val),
         [sync_threshold_slider, convo_state],
         None,
-        queue=False
+        queue=False,
     )
 
-    def wrap_stream_generator(convo_state, dynamic_state, prompt, content): # Pass dynamic_state here
-        for response in convo_state.generate_ai_response(prompt, content, dynamic_state): # Pass dynamic_state to generate_ai_response
+    def wrap_stream_generator(
+        convo_state, dynamic_state, prompt, content
+    ):  # Pass dynamic_state here
+        for response in convo_state.generate_ai_response(
+            prompt, content, dynamic_state
+        ):  # Pass dynamic_state to generate_ai_response
             yield response
 
-    gr.on( #主按钮trigger
+    gr.on(  # 主按钮trigger
         [control_button.click, prompt_input.submit, thought_editor.submit],
-        lambda d: d.control_button_handler(), # Pass dynamic_state to control_button_handler
+        lambda d: d.control_button_handler(),  # Pass dynamic_state to control_button_handler
         [dynamic_state],
         stateful_ui,
-        show_progress=False
-    ).then( #生成事件
-        wrap_stream_generator, # Pass both states
+        show_progress=False,
+    ).then(  # 生成事件
+        wrap_stream_generator,  # Pass both states
         [convo_state, dynamic_state, prompt_input, thought_editor],
         [thought_editor, status_indicator, chatbot],
-        concurrency_limit=100
-    ).then( #生成终止后UI状态判断
-        lambda d: d.ui_state_controller(), # Pass dynamic_state to ui_state_controller
+        concurrency_limit=100,
+    ).then(  # 生成终止后UI状态判断
+        lambda d: d.ui_state_controller(),  # Pass dynamic_state to ui_state_controller
         [dynamic_state],
         stateful_ui,
         show_progress=False,
     )
 
     next_turn_btn.click(
-        lambda d: d.reset_workspace(), # Pass dynamic_state to reset_workspace
+        lambda d: d.reset_workspace(),  # Pass dynamic_state to reset_workspace
         [dynamic_state],
         stateful_ui + (thought_editor, prompt_input, chatbot),
-        queue=False
+        queue=False,
     )
 
     lang_selector.change(
-        lambda lang, s, d: update_interface_language(lang, s, d), # Pass dynamic_state to update_interface_language
+        lambda lang, s, d: update_interface_language(
+            lang, s, d
+        ),  # Pass dynamic_state to update_interface_language
         [lang_selector, convo_state, dynamic_state],
-        [title_md, prompt_input, thought_editor, sync_threshold_slider,
-        throughput_control, control_button, lang_selector, next_turn_btn, intro_md, chatbot],
-        queue=False
+        [
+            title_md,
+            prompt_input,
+            thought_editor,
+            sync_threshold_slider,
+            throughput_control,
+            control_button,
+            lang_selector,
+            next_turn_btn,
+            intro_md,
+            chatbot,
+        ],
+        queue=False,
     )
 
 if __name__ == "__main__":
     demo.queue(default_concurrency_limit=10000)
-    demo.launch()
+    demo.launch()
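For readers of this diff: the core pattern app.py relies on — resuming a human-edited chain of thought by resending it as an assistant-prefix message and splitting the stream on `</think>` — can be exercised on its own. Below is a minimal sketch, not part of this commit. It assumes an OpenAI-compatible endpoint that honors the `"prefix": True` continuation field used in app.py, reuses the API_KEY / API_URL / API_MODEL environment variables from the diff, and everything else (function name, example prompt) is illustrative only.

# Minimal sketch (not part of this commit): resume a partially written chain of
# thought through an OpenAI-compatible API. Assumes the backend honors the
# assistant-prefix continuation field ("prefix": True) exactly as app.py does.
import os
from openai import OpenAI

client = OpenAI(
    api_key=os.getenv("API_KEY"),   # env names taken from the diff above
    base_url=os.getenv("API_URL"),
    timeout=20,
)

def continue_thought(user_prompt: str, edited_cot: str):
    """Stream the completion, yielding (chain_of_thought, result) as it grows."""
    messages = [
        {"role": "user", "content": user_prompt},
        # Ask the model to keep writing from the human-edited <think> block.
        {"role": "assistant", "content": f"<think>\n{edited_cot}", "prefix": True},
    ]
    stream = client.chat.completions.create(
        model=os.getenv("API_MODEL"), messages=messages, stream=True
    )
    full = edited_cot
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if not delta:
            continue
        full += delta
        # Text before </think> is reasoning; text after it is the final answer.
        cot, _, result = full.partition("</think>")
        yield cot, result

# Hypothetical usage: print only the answer once the reasoning is complete.
# for cot, result in continue_thought("Why is the sky blue?", "The question is about scattering."):
#     if result:
#         print(result, end="", flush=True)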
lang.py CHANGED
@@ -27,8 +27,11 @@ LANGUAGE_CONFIG = {
         "introduction": "Think together with AI. Use `Shift+Enter` to toggle generation <br>You can modify the thinking process when AI pauses",
         "bot_label": "Conversation Overview",
         "bot_default": [
-            {"role":"assistant","content":"Welcome to our co-thinking space! Ready to synchronize our cognitive rhythms? \n Shall we start by adjusting the throughput slider to match your reading pace? \n Enter your task below, edit my thinking process when I pause, and let's begin weaving thoughts together →"},
-        ]
+            {
+                "role": "assistant",
+                "content": "Welcome to our co-thinking space! Ready to synchronize our cognitive rhythms? \n Shall we start by adjusting the throughput slider to match your reading pace? \n Enter your task below, edit my thinking process when I pause, and let's begin weaving thoughts together →",
+            },
+        ],
     },
     "zh": {
         "title": "CoT-Lab: 人机协同思维实验室\n在一轮对话中跟随、学习、迭代思维链",
@@ -57,8 +60,11 @@ LANGUAGE_CONFIG = {
         "introduction": "和AI一起思考,Shift+Enter切换生成状态<br>AI暂停的时候你可以编辑思维过程",
         "bot_label": "对话一览",
         "bot_default": [
-            {"role":"assistant","content":"欢迎来到协同思考空间!准备好同步我们的认知节奏了吗?\n 建议先调整右侧的'同步思考速度'滑块,让它匹配你的阅读速度 \n 在下方输入任务描述,在我暂停时修改我的思维,让我们开始编织思维链条 →"},
-            {"role":"assistant","content":"**Shift+Enter** 可以暂停/继续AI生成"},
-        ]
-    }
-    }
+            {
+                "role": "assistant",
+                "content": "欢迎来到协同思考空间!准备好同步我们的认知节奏了吗?\n 建议先调整右侧的'同步思考速度'滑块,让它匹配你的阅读速度 \n 在下方输入任务描述,在我暂停时修改我的思维,让我们开始编织思维链条 →",
+            },
+            {"role": "assistant", "content": "**Shift+Enter** 可以暂停/继续AI生成"},
+        ],
+    },
+}