Upload folder using huggingface_hub
app.py CHANGED
@@ -13,7 +13,6 @@ from fastrtc import (
     AdditionalOutputs,
     ReplyOnStopWords,
     Stream,
-    WebRTCError,
     get_stt_model,
     get_twilio_turn_credentials,
 )
@@ -39,30 +38,23 @@ def response(
 ):
     gradio_chatbot = gradio_chatbot or []
     conversation_state = conversation_state or []
-    try:
-        text = model.stt(audio)
-        print("STT in handler", text)
-        sample_rate, array = audio
-        gradio_chatbot.append(
-            {"role": "user", "content": gr.Audio((sample_rate, array.squeeze()))}
-        )
-        yield AdditionalOutputs(gradio_chatbot, conversation_state)
-
-        conversation_state.append({"role": "user", "content": text})
-
-        request = client.chat.completions.create(
-            model="Meta-Llama-3.2-3B-Instruct",
-            messages=conversation_state,  # type: ignore
-            temperature=0.1,
-            top_p=0.1,
-        )
-        response = {"role": "assistant", "content": request.choices[0].message.content}
-
-    except Exception as e:
-        import traceback
-
-        traceback.print_exc()
-        raise WebRTCError(str(e) + "\n" + traceback.format_exc())
+    text = model.stt(audio)
+    print("STT in handler", text)
+    sample_rate, array = audio
+    gradio_chatbot.append(
+        {"role": "user", "content": gr.Audio((sample_rate, array.squeeze()))}
+    )
+    yield AdditionalOutputs(gradio_chatbot, conversation_state)
+
+    conversation_state.append({"role": "user", "content": text})
+
+    request = client.chat.completions.create(
+        model="Meta-Llama-3.2-3B-Instruct",
+        messages=conversation_state,  # type: ignore
+        temperature=0.1,
+        top_p=0.1,
+    )
+    response = {"role": "assistant", "content": request.choices[0].message.content}

     conversation_state.append(response)
     gradio_chatbot.append(response)
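
The handler above references `client`, `model`, and the Gradio chatbot/state values without defining them; they come from parts of app.py that this diff does not touch. Below is a minimal sketch of how those names could be set up, assuming an OpenAI-compatible client and fastrtc's bundled speech-to-text helper; the endpoint URL and environment variable are placeholders, not the Space's actual configuration.

# Hypothetical setup for names used by the handler but not defined in this diff.
import os

import gradio as gr
from fastrtc import get_stt_model
from openai import OpenAI

# Placeholder OpenAI-compatible client; the real Space points this at whichever
# provider serves Meta-Llama-3.2-3B-Instruct.
client = OpenAI(
    api_key=os.environ["LLM_API_KEY"],      # placeholder environment variable
    base_url="https://api.example.com/v1",  # placeholder endpoint
)

model = get_stt_model()  # speech-to-text model behind model.stt(audio)

chatbot = gr.Chatbot(type="messages")  # UI component that receives gradio_chatbot
state = gr.State(value=[])             # holds conversation_state across turns

In the full app, `response` is presumably wrapped in `ReplyOnStopWords` and registered on a fastrtc `Stream` (which is why those names remain in the import block), with the chatbot and conversation state wired up as additional inputs and outputs; that wiring sits outside this diff.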