import gradio as gr
from transformers import pipeline
from datasets import load_dataset
import soundfile as sf
import torch
import requests

# Image-to-text: caption the image with BLIP via the hosted Inference API
API_URL = "https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-large"
headers = {}  # e.g. {"Authorization": "Bearer hf_..."}; replace with your actual token
def image_to_text(image_path):
    with open(image_path, "rb") as f:
        data = f.read()
    response = requests.post(API_URL, headers=headers, data=data)
    response.raise_for_status()  # surface API errors instead of failing on indexing
    return response.json()[0]["generated_text"]
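
# Optional local fallback (a sketch, not wired into the app below): run the same
# BLIP captioner in-process via the transformers pipeline instead of the hosted
# API. No token needed, but the full model is downloaded on first call.
_local_captioner = None

def image_to_text_local(image_path):
    global _local_captioner
    if _local_captioner is None:
        _local_captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-large")
    # The pipeline returns a list of dicts like [{"generated_text": "..."}]
    return _local_captioner(image_path)[0]["generated_text"]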

# Text-to-speech: SpeechT5, with a speaker x-vector selecting the voice
text_to_audio_pipe = pipeline("text-to-speech", model="microsoft/speecht5_tts")
embeddings_dataset = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
speaker_embedding = torch.tensor(embeddings_dataset[7306]["xvector"]).unsqueeze(0)
def text_to_audio(text):
    speech = text_to_audio_pipe(text, forward_params={"speaker_embeddings": speaker_embedding})
    sf.write("speech.wav", speech["audio"], samplerate=speech["sampling_rate"])
    return "speech.wav"

# Gradio interface: caption the uploaded image, then speak the caption
iface = gr.Interface(
    fn=lambda image: text_to_audio(image_to_text(image)),
    inputs=gr.Image(type="filepath"),
    outputs=gr.Audio(label="Generated Audio"),
    title="Image to Audio",
    description="Upload an image to generate a spoken description of it.",
)
iface.launch()
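# Note: on a Hugging Face Space, launch() needs no arguments; for local testing,
# iface.launch(share=True) would additionally create a temporary public link.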