chibop commited on
Commit
ae3adbe
·
verified ·
1 Parent(s): 121c756

Upload folder using huggingface_hub

Browse files
Files changed (3) hide show
  1. chat.py +7 -9
  2. play.js +11 -4
  3. requirements.txt +0 -1
chat.py CHANGED
@@ -5,9 +5,6 @@ import gradio as gr
5
  import codecs
6
  import base64
7
  import json
8
- import soundfile as sf
9
- from io import BytesIO
10
- import numpy as np
11
 
12
  def login(username, password):
13
  users = json.loads(os.environ.get('users'))
@@ -64,10 +61,11 @@ def transcribe(file):
64
  def speak(history):
65
  text = history[-1][1]
66
  print("Assistant:", text)
67
- speech = client.audio.speech.create(model="tts-1", voice="alloy", input=text)
68
- data, sr = sf.read(BytesIO(speech.read()), dtype='int16')
69
- return sr, data
70
-
 
71
  def vote(data: gr.LikeData):
72
  if data.liked:
73
  print("You upvoted this response: " + data.value)
@@ -97,10 +95,10 @@ def main():
97
  speech.click(None, js=toggle_js)
98
  msg = gr.Textbox(label="Say something.", elem_id="textbox")
99
  mic = gr.Microphone(type="filepath", format="mp3", editable=False, waveform_options={"show_controls": False}, visible=False, elem_id="recorder")
100
- player = gr.Audio(editable=False,elem_id="player", visible=False)
101
  msg.submit(user, [msg, mic, chatbot, thread], [msg, chatbot]).then(
102
  bot, [chatbot, thread], chatbot).then(
103
- speak, chatbot, player
104
  )
105
  mic.stop_recording(user, [msg, mic, chatbot, thread], [msg, chatbot]).then(
106
  lambda:None, None, mic).then(
 
5
  import codecs
6
  import base64
7
  import json
 
 
 
8
 
9
  def login(username, password):
10
  users = json.loads(os.environ.get('users'))
 
def speak(history):
    """Convert the assistant's most recent reply to speech and return it
    as an inline HTML ``<audio>`` element (base64-encoded MP3 data URI).

    Parameters
    ----------
    history : list of [user, assistant] message pairs; only the last
        assistant message is synthesized.

    Returns
    -------
    str: an ``<audio>`` tag whose ``src`` embeds the TTS audio, suitable
    for a gr.HTML component (autoplay is triggered by play.js).
    """
    reply = history[-1][1]
    print("Assistant:", reply)
    # NOTE(review): `client` is the module-level OpenAI client defined
    # elsewhere in this file; tts-1/alloy returns MP3 bytes by default.
    mp3_bytes = client.audio.speech.create(
        model="tts-1", voice="alloy", input=reply
    ).read()
    encoded = base64.b64encode(mp3_bytes).decode("utf-8")
    return f'<audio src="data:audio/mpeg;base64,{encoded}"></audio>'
69
  def vote(data: gr.LikeData):
70
  if data.liked:
71
  print("You upvoted this response: " + data.value)
 
95
  speech.click(None, js=toggle_js)
96
  msg = gr.Textbox(label="Say something.", elem_id="textbox")
97
  mic = gr.Microphone(type="filepath", format="mp3", editable=False, waveform_options={"show_controls": False}, visible=False, elem_id="recorder")
98
+ player = gr.HTML(elem_id="player", visible=False)
99
  msg.submit(user, [msg, mic, chatbot, thread], [msg, chatbot]).then(
100
  bot, [chatbot, thread], chatbot).then(
101
+ speak, chatbot, player, js=play_js
102
  )
103
  mic.stop_recording(user, [msg, mic, chatbot, thread], [msg, chatbot]).then(
104
  lambda:None, None, mic).then(
play.js CHANGED
@@ -1,6 +1,12 @@
1
  function () {
2
 
3
  function waitForElementToAppear(selector, callback) {
 
 
 
 
 
 
4
  const targetNode = document.body;
5
  const config = { childList: true, subtree: true };
6
 
@@ -11,13 +17,14 @@ function () {
11
  callback(element);
12
  }
13
  });
14
-
15
  observer.observe(targetNode, config);
16
  }
17
 
18
- waitForElementToAppear("#player button.play-pause-button", (button) => {
19
- alert("Play!");
20
- button.click();
 
21
  });
22
 
23
  }
 
1
  function () {
2
 
3
  function waitForElementToAppear(selector, callback) {
4
+ const element = document.querySelector(selector);
5
+ if (element) {
6
+ callback(element);
7
+ return;
8
+ }
9
+
10
  const targetNode = document.body;
11
  const config = { childList: true, subtree: true };
12
 
 
17
  callback(element);
18
  }
19
  });
20
+
21
  observer.observe(targetNode, config);
22
  }
23
 
24
+ console.log("Start");
25
+ waitForElementToAppear("#player audio", (button) => {
26
+ console.log("Play!");
27
+ button.play();
28
  });
29
 
30
  }
requirements.txt CHANGED
@@ -1,2 +1 @@
1
  openai
2
- soundfile
 
1
  openai