Update README.md
README.md CHANGED
@@ -50,7 +50,21 @@ This is the model card of a 🤗 transformers model that has been pushed on the
 
 ## Uses
 
-
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
+
+model_name = "ritvik77/FineTune_LoRA__AgentToolCall_Mistral-7B_Transformer"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
+
+input_text = """User: What's the weather like in New York?
+Agent: <function_call> {"tool": "get_weather", "parameters": {"location": "New York"}}"""
+
+inputs = tokenizer(input_text, return_tensors="pt").to("cuda" if torch.cuda.is_available() else "cpu")
+outputs = model.generate(**inputs, max_new_tokens=50, do_sample=True, temperature=0.5, top_p=0.9)
+
+generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+print("Generated Output:\n", generated_text)
 
 ### Direct Use
 
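The snippet added above prints the raw decoded text, so a caller still has to extract the tool invocation from it. Below is a minimal post-processing sketch, not part of the commit: the parse_function_call helper is hypothetical, and the <function_call> marker plus the {"tool": ..., "parameters": ...} JSON shape are assumptions taken from the prompt format shown in the diff.

import json

def parse_function_call(generated_text: str):
    # Split on the last "<function_call>" marker (assumed format) and parse the
    # JSON payload that follows; return (tool, parameters) or None on failure.
    marker = "<function_call>"
    if marker not in generated_text:
        return None
    payload = generated_text.rsplit(marker, 1)[1].strip()
    try:
        call = json.loads(payload)
        return call["tool"], call.get("parameters", {})
    except (json.JSONDecodeError, KeyError, TypeError):
        return None

# Example with the call format used in the README's prompt:
demo = 'Agent: <function_call> {"tool": "get_weather", "parameters": {"location": "New York"}}'
print(parse_function_call(demo))  # ('get_weather', {'location': 'New York'})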