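"""Gradio demo: caption an uploaded image with BLIP, then read the caption
aloud with SpeechT5 text-to-speech."""
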
import gradio as gr
from transformers import pipeline
from datasets import load_dataset
import soundfile as sf
import torch
import requests

# Image-to-text: caption images via the hosted Inference API for BLIP
API_URL = "https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-large"
headers = {}  # Replace with your actual key, e.g. {"Authorization": "Bearer <your-hf-token>"}


def image_to_text(image_path):
    """Send the raw image bytes to the Inference API and return the generated caption."""
    with open(image_path, "rb") as f:
        data = f.read()
    response = requests.post(API_URL, headers=headers, data=data)
    response.raise_for_status()  # surface API errors (e.g. missing/invalid token) early
    return response.json()[0]["generated_text"]
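
# Alternative if you prefer local inference over the API (assumes the BLIP
# checkpoint fits in memory; no token needed):
#   captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")
#   caption = captioner(image_path)[0]["generated_text"]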

# Text-to-speech: SpeechT5 requires a speaker embedding (x-vector) to select a voice
text_to_audio_pipe = pipeline("text-to-speech", model="microsoft/speecht5_tts")
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embedding = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)

def text_to_audio(text):
    """Synthesize speech for the given text and return the path to the WAV file."""
    speech = text_to_audio_pipe(text, forward_params={"speaker_embeddings": speaker_embedding})
    sf.write("speech.wav", speech["audio"], samplerate=speech["sampling_rate"])
    return "speech.wav"

# Gradio interface: image in, spoken caption out
def image_to_audio(image_path):
    return text_to_audio(image_to_text(image_path))

iface = gr.Interface(
    fn=image_to_audio,
    inputs=gr.Image(type="filepath"),
    outputs=gr.Audio(label="Generated Audio"),
    title="Image to Audio",
    description="Upload an image to generate a spoken description of it.",
)

if __name__ == "__main__":
    iface.launch()