# app.py
import streamlit as st
from models import demo
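# NOTE (assumption): `models.demo` is expected to expose the text-generation
# callable as `demo.fn(prompt, max_length=..., temperature=..., top_p=...)`,
# e.g. a Gradio Interface wrapping the model. A hypothetical sketch of such a
# models.py is given at the end of this file.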
# Page configuration
st.set_page_config(
    page_title="DeepSeek Chatbot - ruslanmv.com",
    page_icon="🤖",
    layout="centered"
)
# Initialize session state for chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
# Sidebar for model selection and parameters
with st.sidebar:
    st.header("Model Configuration")

    # Model selection
    selected_model = st.selectbox(
        "Choose Model",
        [
            "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
            "deepseek-ai/DeepSeek-R1",
            "deepseek-ai/DeepSeek-R1-Zero"
        ],
        index=0
    )

    # System message
    system_message = st.text_area(
        "System Message",
        value="You are a friendly Chatbot created by ruslanmv.com",
        height=100
    )

    # Generation parameters
    max_tokens = st.slider(
        "Max Tokens",
        min_value=1,
        max_value=4000,
        value=512,
        step=10
    )
    temperature = st.slider(
        "Temperature",
        min_value=0.1,
        max_value=4.0,
        value=0.7,
        step=0.1
    )
    top_p = st.slider(
        "Top-p (nucleus sampling)",
        min_value=0.1,
        max_value=1.0,
        value=0.9,
        step=0.1
    )
# Main chat interface
st.title("🤖 DeepSeek Chatbot")
st.caption("Powered by ruslanmv.com - Choose your model and parameters in the sidebar")
# Display chat messages
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Chat input
if prompt := st.chat_input("Type your message..."):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Display user message
    with st.chat_message("user"):
        st.markdown(prompt)

    # Prepare full prompt with system message
    full_prompt = f"{system_message}\n\nUser: {prompt}\nAssistant:"
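    # For example, with the default system message and the user typing "Hello",
    # full_prompt becomes:
    # "You are a friendly Chatbot created by ruslanmv.com\n\nUser: Hello\nAssistant:"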
    try:
        # Generate response using selected model
        with st.spinner("Generating response..."):
            # Updated parameter names to match model expectations
            response = demo.fn(
                full_prompt,
                max_length=max_tokens,  # Changed from max_new_tokens
                temperature=temperature,
                top_p=top_p
            )

        # Display assistant response
        with st.chat_message("assistant"):
            st.markdown(response)

        # Add assistant response to chat history
        st.session_state.messages.append({"role": "assistant", "content": response})
    except Exception as e:
        st.error(f"Error generating response: {str(e)}")
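
# models.py — hypothetical sketch (assumption: this module is not shown above).
# app.py only relies on `demo.fn` accepting a prompt plus max_length,
# temperature and top_p; one plausible implementation wraps the Hugging Face
# InferenceClient in a Gradio Interface. The model name, defaults and the
# `generate` helper below are illustrative, not the author's actual code.
import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient("deepseek-ai/DeepSeek-R1-Distill-Qwen-32B")

def generate(prompt, max_length=512, temperature=0.7, top_p=0.9):
    # Forward the prompt to the hosted model and return the generated text.
    return client.text_generation(
        prompt,
        max_new_tokens=max_length,
        temperature=temperature,
        top_p=top_p,
    )

# Wrapping the function in a Gradio Interface gives app.py its `demo.fn` handle.
demo = gr.Interface(fn=generate, inputs="text", outputs="text")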