# mental-health/app.py
import gradio as gr
from groq import Groq
import os
import time
import speech_recognition as sr
from gtts import gTTS
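# Assumed dependencies for this Space: gradio, groq, SpeechRecognition, gTTS.
# Both the Google speech recognizer and gTTS need internet access at runtime.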
api_key = os.getenv('GROQ_API_KEY')
# Initialize Groq client
client = Groq(api_key=api_key)
# Function to convert audio to text
def audio_to_text(audio_file):
    recognizer = sr.Recognizer()
    with sr.AudioFile(audio_file) as source:
        audio_data = recognizer.record(source)
    try:
        text = recognizer.recognize_google(audio_data)
    except (sr.UnknownValueError, sr.RequestError) as e:
        # Return an empty string so the caller falls back to its
        # "Please provide a valid input." path instead of crashing.
        print(f"Speech recognition failed: {e}")
        text = ""
    return text
# Function to convert text to audio
def text_to_audio(text):
    tts = gTTS(text)
    audio_file = "output_audio.mp3"
    tts.save(audio_file)
    return audio_file
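# Note: text_to_audio writes to a fixed path, so each new response overwrites
# the previous output_audio.mp3 rather than accumulating files.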
# Function to generate responses with error handling
def generate_response(user_input, chat_history: list):
    try:
        # Start with the system prompt, then replay the chat history
        messages = [{"role": "system", "content": "You are a mental health assistant. Your responses should be empathetic, non-judgmental, and provide helpful advice based on mental health principles. Always encourage seeking professional help when needed. Your responses should also sound natural and human."}]
        # Add prior user and assistant messages from the chat history
        for message in chat_history:
            # Keep only well-formed messages with 'role' and 'content' keys
            if 'role' in message and 'content' in message:
                messages.append({"role": message["role"], "content": message["content"]})
            else:
                print(f"Skipping invalid message: {message}")
        # Add the current user message
        messages.append({"role": "user", "content": user_input})
        # Call the Groq API to get a completion from the LLaMA model
        chat_completion = client.chat.completions.create(
            messages=messages,
            model='llama-3.1-70b-versatile'
        )
        # Extract the assistant's reply
        response = chat_completion.choices[0].message.content
        return response, chat_history
    except Exception as e:
        print(f"Error occurred: {e}")  # Log the error for debugging
        return "An error occurred while generating the response. Please try again.", chat_history
# Define Gradio interface
def gradio_interface():
    with gr.Blocks() as demo:
        gr.Markdown("## LoserHero - A Mental Health Chatbot")
        # The Chatbot component itself holds the running message history
        chatbot = gr.Chatbot(type="messages")
        # sources=["microphone"] assumes Gradio 4+ (older releases used source="microphone")
        audio_input = gr.Audio(sources=["microphone"], type="filepath", label="Speak your message")
        msg = gr.Textbox(placeholder="Type your message here...")
        text_output = gr.Textbox(label="Response (Text)")
        audio_output = gr.Audio(label="Response (Audio)")
        clear = gr.Button("Clear")

        def process_input(user_message, audio_file, history: list):
            # A recorded clip takes precedence over typed text
            if audio_file:
                user_message = audio_to_text(audio_file)
            if user_message:
                history.append({"role": "user", "content": user_message})
                response, updated_history = generate_response(user_message, history)
                history = updated_history
                history.append({"role": "assistant", "content": response})
                response_audio = text_to_audio(response)
                return "", None, response, response_audio, history
            return "", None, "Please provide a valid input.", None, history

        msg.submit(process_input, [msg, audio_input, chatbot], [msg, audio_input, text_output, audio_output, chatbot])
        # stop_recording fires once when recording ends; change would re-fire
        # when the audio component is reset to None after each turn
        audio_input.stop_recording(process_input, [msg, audio_input, chatbot], [msg, audio_input, text_output, audio_output, chatbot])
        clear.click(lambda: ("", None, "", None, []), None, [msg, audio_input, text_output, audio_output, chatbot])
    demo.launch()

# Run the interface
gradio_interface()
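# Example local run (assumed setup, not part of the original Space config):
#   export GROQ_API_KEY=your_key_here
#   python app.py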