import streamlit as st
import torch
from transformers import WhisperForConditionalGeneration, WhisperProcessor
from peft import PeftModel, PeftConfig
import librosa
# Model settings
peft_model_id = "Elyordev/fine_tune_whisper_uzbek"
language = "Uzbek"
task = "transcribe"
# Load the PEFT adapter configuration
peft_config = PeftConfig.from_pretrained(peft_model_id)
# Load the base Whisper model on CPU
model = WhisperForConditionalGeneration.from_pretrained(
    peft_config.base_model_name_or_path,
    device_map="cpu"
)
# Attach the fine-tuned PEFT adapter weights on top of the base model
model = PeftModel.from_pretrained(model, peft_model_id)
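# A minimal inference-time tweak (assumption: this Space is used for inference only):
# put the model in eval mode to disable dropout. The adapter could also be merged into
# the base weights via PEFT's merge_and_unload() to speed up CPU inference.
model.eval()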
# Set up the tokenizer and processor
processor = WhisperProcessor.from_pretrained(peft_config.base_model_name_or_path, language=language, task=task)
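# The processor bundles the feature extractor (raw audio -> log-mel input features) and
# the tokenizer used later to decode generated token ids back into text.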
# Streamlit interface
st.title("Uzbek Whisper STT Hugging Face Spaces App")
st.write("Fine-tuned Whisper model for Uzbek speech recognition. Upload your audio to get the transcription.")
# Audio file upload
uploaded_file = st.file_uploader("Ovozli fayl yuklang", type=["wav", "mp3", "m4a"])
def transcribe(audio_file):
    # Load and resample the uploaded audio to 16 kHz, the rate Whisper expects
    audio, sr = librosa.load(audio_file, sr=16000)
    # Convert the waveform to log-mel input features
    inputs = processor(audio, sampling_rate=16000, return_tensors="pt").input_features
    # Generate token ids, forcing Uzbek transcription via the decoder prompt
    predicted_ids = model.generate(inputs, forced_decoder_ids=processor.get_decoder_prompt_ids(language="uz", task="transcribe"))
    # Decode the ids back to text, dropping special tokens
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
    return transcription
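# Hypothetical standalone usage (outside Streamlit), e.g. for a quick local check:
#   text = transcribe("sample_uzbek.wav")  # "sample_uzbek.wav" is an assumed local file
#   print(text)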
if uploaded_file:
    # Play back the uploaded audio and show the transcription result
    st.audio(uploaded_file, format="audio/wav")
    st.write("**Transkripsiya natijasi:**")
    transcription = transcribe(uploaded_file)
    st.success(transcription)