import gradio as gr
from transformers import pipeline
import requests
import json
import edge_tts
from edge_tts import VoicesManager
import asyncio
import random
import tempfile
import os
import inflect
from huggingface_hub import InferenceClient
import re
import time
from streaming_stt_nemo import Model
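
# Human-readable voice labels mapped to edge-tts voice identifiers (female English voices only).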
Female_language_dict = {
    'English-Jenny (Female)': 'en-US-JennyNeural',
    'English-Ana (Female)': 'en-US-AnaNeural',
    'English-Aria (Female)': 'en-US-AriaNeural',
    'English-Michelle (Female)': 'en-US-MichelleNeural',
    'English (Australia)-Natasha- (Female)': 'en-AU-NatashaNeural',
    'English (Canada)-Clara- (Female)': 'en-CA-ClaraNeural',
    'English (UK)-Libby- (Female)': 'en-GB-LibbyNeural',
    'English (UK)-Maisie- (Female)': 'en-GB-MaisieNeural',
    'English (UK)-Sonia- (Female)': 'en-GB-SoniaNeural',
    'English (Ireland)-Emily- (Female)': 'en-IE-EmilyNeural',
}
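
# Cache one streaming STT model per language code; only English is initialised here.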
default_lang = "en"
engines = { default_lang: Model(default_lang) }
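
# Chat model served through the Hugging Face Inference API. Alternative models tried
# during development are kept commented out below; the active client is the last assignment.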
#client1 = InferenceClient("mistralai/Mistral-7B-Instruct-v0.3")
#client1 = InferenceClient("Qwen/QwQ-32B")
#client1 = InferenceClient("mistralai/Mistral-Small-24B-Instruct-2501")
#client1 = InferenceClient("distil-whisper/distil-large-v3")
#client1 = InferenceClient("deepseek-ai/DeepSeek-R1")
client1 = InferenceClient("Qwen/QwQ-32B")
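
# Persona and plain-text formatting rules prepended to every prompt before generation.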
system_instructions_M = "[SYSTEM] You must output only plain text; the use of Markdown syntax, emojis, or asterisks (**) is strictly forbidden. You are a technical writing expert. Provide detailed, high-quality responses in plain text without any markdown elements. Do not use **bold**, *italic*, ### headings, numbered markdown formatting, or any other markdown-specific formatting in content, titles, or subtitles. Ensure the responses remain clear and complete. Respond as Sonia, the Executive Assistant at RJP Development Studio, keeping responses concise, clear, and friendly while embodying her intelligence, resourcefulness, and slightly bossy yet friendly demeanor. Speak in the first person as Sonia: I hold a PhD from Oxford University with majors in Aerospace Engineering, Law, Chartered Accountancy, Architecture, Chemistry, Medicine, Pharmacy, Psychology, Statistics, Nursing, Physics, Astrophysics, Biomedical Engineering, Astronomy, and Dentistry, which equips me to assist you effectively. I bring a touch of London charm, with a hint of slang, to our conversations to keep them engaging and approachable. I am here to provide accurate information, answer your questions, and offer guidance in a warm and professional tone, breaking complex topics down into easy-to-understand explanations. If I am unsure about something, I will say so and seek the necessary information rather than guess. I use unbiased and diplomatic language to ensure clarity and respect. Our conversations will be concise, action-oriented, and free of grammatical errors. I look forward to assisting you, darling. "
def transcribe(audio):
    # Speech-to-text: run the cached NeMo streaming model on the recorded audio file.
    lang = "en"
    model = engines[lang]
    text = model.stt_file(audio)[0]
    return text

def model(text):
    # Sampling settings for the voice path. The original min_p and dry_multiplier
    # options are llama.cpp-style samplers that InferenceClient.text_generation()
    # does not accept, so they are omitted here.
    generate_kwargs = dict(
        temperature=0.6,
        max_new_tokens=512,
        top_k=40,
        top_p=0.95,
        repetition_penalty=1.1,
        do_sample=True,
        seed=42,
    )
    formatted_prompt = system_instructions_M + text + "[Sonia]"
    stream = client1.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    # Accumulate the streamed tokens into the final reply string.
    for response in stream:
        if response.token.text:
            output += response.token.text
    return output

async def respond(audio):
    # Voice pipeline: transcribe the user's speech, generate Sonia's reply,
    # then synthesise it with edge-tts using the UK "Sonia" voice.
    user = transcribe(audio)
    reply = model(user)
    voice = Female_language_dict.get("English (UK)-Sonia- (Female)", "default_voice")
    communicate = edge_tts.Communicate(reply, voice)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
        tmp_path = tmp_file.name
        await communicate.save(tmp_path)
    yield tmp_path

async def generate1(prompt):
    # Text path: greedy decoding (do_sample=False), with the reply spoken in the same voice.
    generate_kwargs = dict(
        temperature=0.6,
        max_new_tokens=512,
        top_p=0.95,
        repetition_penalty=1,
        do_sample=False,
    )
    formatted_prompt = system_instructions_M + prompt + "[Sonia]"
    # return_full_text=False keeps the prompt out of the generated text, matching model() above.
    stream = client1.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    for response in stream:
        if response.token.text:
            output += response.token.text
    voice = Female_language_dict.get("English (UK)-Sonia- (Female)", "default_voice")
    communicate = edge_tts.Communicate(output, voice)
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
        tmp_path = tmp_file.name
        await communicate.save(tmp_path)
    yield tmp_path

with gr.Blocks(gr.themes.Glass()) as demo:
gr.HTML(""" """
"""