Update app.py
app.py CHANGED
@@ -1,6 +1,6 @@
 # app.py
 import streamlit as st
-from models import
+from models import demo_distill, demo_r1, demo_zero
 
 # Page configuration
 st.set_page_config(
@@ -18,15 +18,17 @@ with st.sidebar:
     st.header("Model Configuration")
 
     # Model selection
-    selected_model = st.selectbox(
+    model_mapping = {
+        "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B": demo_distill,
+        "deepseek-ai/DeepSeek-R1": demo_r1,
+        "deepseek-ai/DeepSeek-R1-Zero": demo_zero
+    }
+    selected_model_name = st.selectbox(
         "Choose Model",
-        [
-            "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
-            "deepseek-ai/DeepSeek-R1",
-            "deepseek-ai/DeepSeek-R1-Zero"
-        ],
+        list(model_mapping.keys()),
         index=0
     )
+    selected_demo = model_mapping[selected_model_name]
 
     # System message
     system_message = st.text_area(
@@ -37,7 +39,7 @@ with st.sidebar:
 
     # Generation parameters
     max_tokens = st.slider(
-        "Max
+        "Max Tokens",
         min_value=1,
         max_value=4000,
         value=512,
@@ -82,9 +84,9 @@ if prompt := st.chat_input("Type your message..."):
         # Generate response using selected model
         with st.spinner("Generating response..."):
             # The model expects parameters as separate arguments
-            response =
-                f"{system_message}\n\nUser: {prompt}\nAssistant:",
-                max_new_tokens=max_tokens,
+            response = selected_demo.fn(
+                prompt=f"{system_message}\n\nUser: {prompt}\nAssistant:",
+                max_length=max_tokens,  # Updated from max_new_tokens to max_length
                 temperature=temperature,
                 top_p=top_p
             )
@@ -97,4 +99,4 @@ if prompt := st.chat_input("Type your message..."):
         st.session_state.messages.append({"role": "assistant", "content": response})
 
     except Exception as e:
-        st.error(f"
+        st.error(f"An error occurred: {str(e)}")
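For reference, the new import expects a companion models.py that exposes demo_distill, demo_r1, and demo_zero. That file is not part of this commit, so the following is only a minimal sketch of what it might contain, assuming the three demos are Gradio interfaces loaded from the Hugging Face Hub with gr.load; that assumption would also explain why app.py can call selected_demo.fn(...) directly, since a loaded interface exposes its prediction function as .fn.

# models.py -- hypothetical sketch, not part of this commit.
# Assumes each demo is a Gradio interface loaded from the Hub;
# the repo IDs below are the ones listed in the diff.
import gradio as gr

demo_distill = gr.load("models/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B")
demo_r1 = gr.load("models/deepseek-ai/DeepSeek-R1")
demo_zero = gr.load("models/deepseek-ai/DeepSeek-R1-Zero")

Whatever models.py actually does, the model_mapping dict introduced in this commit keeps each display name next to its callable, so adding a fourth model is a one-line change and the selectbox options stay in sync with the dispatch table.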
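A note on the inline comment "Updated from max_new_tokens to max_length": the keyword names that demo.fn accepts are set by the underlying endpoint, not by app.py, so a mismatch only surfaces at call time as the except branch's error message. Under the same Gradio assumption as the models.py sketch above, one quick way to confirm the expected names before wiring the sliders through:

# Hypothetical check, e.g. in a REPL; assumes the models.py sketched above.
import inspect
from models import demo_r1

# Shows the parameter names the prediction function accepts; the keywords
# used in app.py (prompt, max_length, temperature, top_p) must match.
print(inspect.signature(demo_r1.fn))

If the printed signature still advertises max_new_tokens for one of the three models, the rename in this commit would break that model while fixing the others, so it is worth checking per model rather than once.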