Update app.py
app.py CHANGED
@@ -34,7 +34,7 @@ with gr.Blocks(fill_height=True) as demo:
             src=sambanova_gradio.registry,
             multimodal=True,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250)
         )
 
         def update_llama_model(new_model):
@@ -43,7 +43,7 @@ with gr.Blocks(fill_height=True) as demo:
             src=sambanova_gradio.registry,
             multimodal=True,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250)
         )
 
         llama_model.change(
@@ -71,7 +71,7 @@ with gr.Blocks(fill_height=True) as demo:
             name=gemini_model.value,
             src=gemini_gradio.registry,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250, type="messages")
         )
 
         def update_gemini_model(new_model):
@@ -79,7 +79,7 @@ with gr.Blocks(fill_height=True) as demo:
             name=new_model,
             src=gemini_gradio.registry,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250, type="messages")
         )
 
         gemini_model.change(
@@ -119,7 +119,7 @@ with gr.Blocks(fill_height=True) as demo:
             src=openai_gradio.registry,
             accept_token=True,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250)
         )
 
         def update_model(new_model):
@@ -128,7 +128,7 @@ with gr.Blocks(fill_height=True) as demo:
             src=openai_gradio.registry,
             accept_token=True,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250)
         )
 
         model_choice.change(
@@ -156,7 +156,7 @@ with gr.Blocks(fill_height=True) as demo:
             src=anthropic_gradio.registry,
             accept_token=True,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250)
         )
 
         def update_claude_model(new_model):
@@ -165,7 +165,7 @@ with gr.Blocks(fill_height=True) as demo:
             src=anthropic_gradio.registry,
             accept_token=True,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250)
         )
 
         claude_model.change(
@@ -179,7 +179,7 @@ with gr.Blocks(fill_height=True) as demo:
             src=xai_gradio.registry,
             accept_token=True,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250)
         )
     with gr.Tab("Qwen"):
         with gr.Row():
@@ -197,7 +197,7 @@ with gr.Blocks(fill_height=True) as demo:
             name=qwen_model.value,
             src=hyperbolic_gradio.registry,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250)
         )
 
         def update_qwen_model(new_model):
@@ -205,7 +205,7 @@ with gr.Blocks(fill_height=True) as demo:
             name=new_model,
             src=hyperbolic_gradio.registry,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250)
         )
 
         qwen_model.change(
@@ -246,7 +246,7 @@ with gr.Blocks(fill_height=True) as demo:
             src=perplexity_gradio.registry,
             accept_token=True,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250)
         )
 
         def update_perplexity_model(new_model):
@@ -255,7 +255,7 @@ with gr.Blocks(fill_height=True) as demo:
             src=perplexity_gradio.registry,
             accept_token=True,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250)
         )
 
         perplexity_model.change(
@@ -277,7 +277,7 @@ with gr.Blocks(fill_height=True) as demo:
             name='deepseek-ai/DeepSeek-V2.5',
             src=hyperbolic_gradio.registry,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250)
         )
         gr.Markdown("""
         <div>
@@ -313,7 +313,7 @@ with gr.Blocks(fill_height=True) as demo:
             name=mistral_model.value,
             src=mistral_gradio.registry,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250)
         )
 
         def update_mistral_model(new_model):
@@ -321,7 +321,7 @@ with gr.Blocks(fill_height=True) as demo:
             name=new_model,
             src=mistral_gradio.registry,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250)
         )
 
         mistral_model.change(
@@ -355,7 +355,7 @@ with gr.Blocks(fill_height=True) as demo:
             name=fireworks_model.value,
             src=fireworks_gradio.registry,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250)
         )
 
         def update_fireworks_model(new_model):
@@ -363,7 +363,7 @@ with gr.Blocks(fill_height=True) as demo:
             name=new_model,
             src=fireworks_gradio.registry,
             fill_height=True,
-            chatbot=gr.Chatbot(height=
+            chatbot=gr.Chatbot(height=250)
        )
 
         fireworks_model.change(
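The repeated change above pins the chat display on every provider tab to a fixed 250 px height. As a rough sketch of how one of these calls sits in the app, the snippet below assumes (as the openai-gradio registry pattern in the diff suggests) that extra keyword arguments passed to gr.load are forwarded to the underlying chat interface; the tab label and default model name are illustrative, not taken from app.py.

# Minimal sketch of one provider tab after this change. The tab label and
# default model name are assumptions; the registry/kwargs pattern and the
# chatbot=gr.Chatbot(height=250) argument come from the diff above.
import gradio as gr
import openai_gradio

with gr.Blocks(fill_height=True) as demo:
    with gr.Tab("ChatGPT"):
        gr.load(
            name="gpt-4o",                    # hypothetical default model
            src=openai_gradio.registry,
            accept_token=True,
            fill_height=True,
            chatbot=gr.Chatbot(height=250),   # fixed height set by this commit
        )

demo.launch()

Note that the Gemini hunks additionally pass type="messages" to gr.Chatbot, which stores the conversation as role/content message dicts rather than the legacy list-of-pairs format.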