# Xylaria 1.4 Senoa — Gradio chat app (Hugging Face Space).
import gradio as gr
import os
from huggingface_hub import InferenceClient
# API token is read from the environment variable "hf_token"
# (set as a secret on the hosting platform); None if unset.
hf_token = os.getenv("hf_token")
# Shared inference client used by get_response() below.
client = InferenceClient(api_key=hf_token)
def get_response(user_input):
    """Stream a chat completion for *user_input* and return the full reply.

    Parameters
    ----------
    user_input : str
        The user's message, sent as a single-turn prompt (no history).

    Returns
    -------
    str
        The concatenated assistant response text.
    """
    messages = [
        { "role": "system", "content": "you are xylaria 1.4 senoa, developed by sk md saad amin. You give links for images if the user asks for images." },
        { "role": "user", "content": user_input },
    ]
    stream = client.chat.completions.create(
        model="Qwen/QwQ-32B-Preview",
        messages=messages,
        temperature=0.5,
        max_tokens=10240,
        top_p=0.7,
        stream=True,
    )
    # Accumulate chunk texts and join once at the end: delta.content can be
    # None on terminal chunks of a stream, which would make a bare
    # `response += ...` raise TypeError; the join also avoids quadratic
    # string concatenation.
    parts = []
    for chunk in stream:
        content = chunk.choices[0].delta.content
        if content:
            parts.append(content)
    return "".join(parts)
def chat_interface():
    """Build and return the Gradio Blocks UI for the Xylaria chatbot.

    Layout: a row holding the message textbox and a Send button, with the
    chat transcript rendered below them. Both pressing Enter in the textbox
    and clicking Send submit the message.
    """
    with gr.Blocks() as demo:
        with gr.Row():
            # NOTE(review): fractional `scale` values are accepted by older
            # Gradio releases; newer ones expect integers — confirm version.
            with gr.Column(scale=0.8):
                message_box = gr.Textbox(
                    label="Type your message",
                    placeholder="Type to Xylaria...",
                    lines=1,
                    max_lines=3,
                    interactive=True,
                    elem_id="user-input",
                    show_label=False,
                )
            with gr.Column(scale=0.2):
                send_btn = gr.Button("Send", elem_id="send-btn")
        transcript = gr.Chatbot(
            elem_id="chat-box",
            label="Xylaria 1.4 Senoa Chatbot",
            show_label=False,
        )

        def _on_submit(message, history):
            # Query the model, record the (user, bot) turn, clear the input.
            reply = get_response(message)
            history.append((message, reply))
            return "", history

        inputs_outputs = [message_box, transcript]
        message_box.submit(_on_submit, inputs_outputs, inputs_outputs)
        send_btn.click(_on_submit, inputs_outputs, inputs_outputs)
    return demo
# Build the UI and start the server. (A stray " |" extraction artifact
# after launch() was removed — it was a syntax error.)
demo = chat_interface()
demo.launch()