#!/usr/bin/env python
import os
from collections.abc import Iterator
from threading import Thread

import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
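
# Streaming chat demo for rinna's DeepSeek-R1-distilled Qwen2.5 "bakeneko" AWQ model:
# tokenize the conversation, run model.generate in a background thread, and
# stream partial text back to the Gradio UI as it is produced.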
DESCRIPTION = "# rinna/deepseek-r1-distill-qwen2.5-bakeneko-32b-awq"

MAX_MAX_NEW_TOKENS = 4096
DEFAULT_MAX_NEW_TOKENS = 4096
MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

model_id = "rinna/deepseek-r1-distill-qwen2.5-bakeneko-32b-awq"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# device_map="auto" lets accelerate place the AWQ-quantized weights, so the
# `device` variable above is not actually used for model placement.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",
    torch_dtype=torch.bfloat16,
)
model.eval()
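
# This Space runs on ZeroGPU ("Running on Zero"): a GPU is attached only while
# a @spaces.GPU-decorated function executes, which is why `generate` below
# carries the decorator.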
@spaces.GPU  # without this, the imported `spaces` module is unused and no GPU is attached on ZeroGPU
def generate(
    message: str,
    chat_history: list[dict],
    max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS,
    temperature: float = 0.6,
    top_p: float = 0.95,
) -> Iterator[str]:
    messages = [*chat_history, {"role": "user", "content": message}]
    input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        # Keep only the most recent tokens so the prompt fits the context budget.
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
        gr.Warning(f"Trimmed input from conversation as it was longer than {MAX_INPUT_TOKEN_LENGTH} tokens.")
    input_ids = input_ids.to(model.device)

    streamer = TextIteratorStreamer(tokenizer, timeout=30.0, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        top_p=top_p,
        temperature=temperature,
    )
    # Run generation in a background thread; the streamer yields decoded text
    # chunks as they are produced, so partial responses reach the UI immediately.
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
demo = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Slider(
            label="Max new tokens",
            minimum=1,
            maximum=MAX_MAX_NEW_TOKENS,
            step=1,
            value=DEFAULT_MAX_NEW_TOKENS,
        ),
        gr.Slider(
            label="Temperature",
            minimum=0.1,
            maximum=4.0,
            step=0.1,
            value=0.6,
        ),
        gr.Slider(
            label="Top-p (nucleus sampling)",
            minimum=0.05,
            maximum=1.0,
            step=0.05,
            value=0.95,
        ),
    ],
    stop_btn=None,
    examples=[
        # "Create a simple word problem about differentiation, then solve it."
        ["微分に関する簡単な文章問題を作成し、その問題を解いてください。"],
    ],
    cache_examples=False,
    type="messages",
    description=DESCRIPTION,
    css_paths="style.css",
    fill_height=True,
    chatbot=gr.Chatbot(type="messages", scale=1, allow_tags=["think"]),
)

if __name__ == "__main__":
    demo.launch()
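
# For reference, a minimal sketch of querying the running demo programmatically
# with gradio_client (assumption: the app is served locally on Gradio's default
# port; ChatInterface exposes its endpoint under api_name="/chat"):
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860/")
#   reply = client.predict(message="Hello", api_name="/chat")
#   print(reply)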