# app.py

import subprocess

# Install dependencies at startup (packages listed in requirements.txt)
subprocess.run(["pip", "install", "-r", "requirements.txt"])

import gradio as gr
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the model from the Hugging Face Model Hub
model_name = "SamLowe/roberta-base-go_emotions"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name)

# Emotion labels used by the model (GoEmotions taxonomy)
emotion_labels = [
    "admiration", "amusement", "anger", "annoyance", "approval", "caring",
    "confusion", "curiosity", "desire", "disappointment", "disapproval",
    "disgust", "embarrassment", "excitement", "fear", "gratitude", "grief",
    "joy", "love", "nervousness", "optimism", "pride", "realization",
    "relief", "remorse", "sadness", "surprise", "neutral",
]


def predict_emotion(text):
    # Tokenize the input, run the model, and pick the highest-scoring class
    inputs = tokenizer(text, return_tensors="pt")
    outputs = model(**inputs)
    logits = outputs.logits
    predicted_class = logits.argmax().item()
    predicted_emotion = emotion_labels[predicted_class]
    return predicted_emotion  # Return the predicted emotion directly


iface = gr.Interface(
    fn=predict_emotion,
    inputs=gr.Textbox(),
    outputs="text",
    live=True,
    title="Emotion Prediction",
    description="Enter a sentence for emotion prediction.",
)

iface.launch()
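
# ---------------------------------------------------------------------------
# Optional sketch (not used above): the label list could also be read from the
# checkpoint's config, and per-emotion scores obtained with a sigmoid, since
# go_emotions-style models are typically trained as multi-label classifiers.
# This assumes the config ships an id2label mapping, which Hugging Face
# sequence-classification checkpoints normally do; treat it as an illustrative
# alternative rather than a required change. The function name
# predict_emotion_scores is made up for this sketch.
#
# import torch
#
# def predict_emotion_scores(text):
#     inputs = tokenizer(text, return_tensors="pt")
#     with torch.no_grad():                    # no gradients needed at inference
#         logits = model(**inputs).logits
#     probs = torch.sigmoid(logits)[0]         # independent per-label scores
#     id2label = model.config.id2label         # labels stored in the config
#     return {id2label[i]: float(p) for i, p in enumerate(probs)}
# ---------------------------------------------------------------------------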