Spaces:
Running
Running
revert back changes
Browse files
app.py
CHANGED
@@ -4,35 +4,9 @@ import requests
|
|
4 |
import pytz
|
5 |
import yaml
|
6 |
from tools.final_answer import FinalAnswerTool
|
7 |
-
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
8 |
|
9 |
from Gradio_UI import GradioUI
|
10 |
|
11 |
-
# Load the model locally
|
12 |
-
model_id = "Qwen/Qwen2-7B" # Replace with any local model
|
13 |
-
|
14 |
-
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
15 |
-
model = AutoModelForCausalLM.from_pretrained(model_id)
|
16 |
-
|
17 |
-
# Create a local pipeline
|
18 |
-
llm_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
19 |
-
|
20 |
-
class LocalModel:
|
21 |
-
def __init__(self, pipeline, max_tokens=2096, temperature=0.5):
|
22 |
-
self.pipeline = pipeline
|
23 |
-
self.max_tokens = max_tokens
|
24 |
-
self.temperature = temperature
|
25 |
-
|
26 |
-
def chat(self, prompt: str):
|
27 |
-
response = self.pipeline(
|
28 |
-
prompt,
|
29 |
-
max_new_tokens=self.max_tokens,
|
30 |
-
temperature=self.temperature,
|
31 |
-
do_sample=True
|
32 |
-
)
|
33 |
-
return response[0]["generated_text"]
|
34 |
-
|
35 |
-
|
36 |
# Below is an example of a tool that does nothing. Amaze us with your creativity !
|
37 |
@tool
|
38 |
def my_custom_tool(arg1:str, arg2:int)-> str: #it's important to specify the return type
|
@@ -65,14 +39,12 @@ final_answer = FinalAnswerTool()
|
|
65 |
# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
|
66 |
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
|
67 |
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
model = LocalModel(llm_pipeline)
|
76 |
|
77 |
|
78 |
# Import tool from Hub
|
|
|
4 |
import pytz
|
5 |
import yaml
|
6 |
from tools.final_answer import FinalAnswerTool
|
|
|
7 |
|
8 |
from Gradio_UI import GradioUI
|
9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
# Below is an example of a tool that does nothing. Amaze us with your creativity !
|
11 |
@tool
|
12 |
def my_custom_tool(arg1:str, arg2:int)-> str: #it's important to specify the return type
|
|
|
39 |
# If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
|
40 |
# model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
|
41 |
|
42 |
+
model = HfApiModel(
|
43 |
+
max_tokens=2096,
|
44 |
+
temperature=0.5,
|
45 |
+
model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
|
46 |
+
custom_role_conversions=None,
|
47 |
+
)
|
|
|
|
|
48 |
|
49 |
|
50 |
# Import tool from Hub
|