import os

import streamlit as st
import torch
from langchain.chains import LLMChain
from langchain.prompts import ChatPromptTemplate
from langchain_huggingface import HuggingFaceEndpoint

# Additional imports for AnimateDiff text-to-video generation
from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_gif
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
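# NOTE: these imports assume streamlit, torch, langchain, langchain-huggingface,
# diffusers, safetensors, and huggingface_hub are available (e.g. installed from
# a requirements.txt in the Space).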
def create_conversation_prompt(name1: str, name2: str, persona_style: str):
    """
    Create a prompt that instructs the model to produce exactly 15 messages
    of conversation, alternating between name1 and name2, starting with name1.
    """
    prompt_template_str = f"""
You are simulating a conversation of exactly 15 messages between two people: {name1} and {name2}.
{name1} speaks first (message 1), then {name2} (message 2), then {name1} (message 3), and so forth,
alternating until all 15 messages are complete. The 15th message is by {name1}.

Requirements:
- Output exactly 15 lines, no more, no less.
- Each line must be a single message in the format:
  {name1}: <message> or {name2}: <message>
- Do not add any headings, numbers, sample outputs, or explanations.
- Do not mention code, programming, or instructions.
- Each message should be 1-2 short sentences, friendly, and natural, reflecting the style: {persona_style}.
- Use everyday language; messages can ask questions and express opinions.
- Use emojis sparingly if it fits the style (no more than 1-2 total).
- No repeated lines; each message should follow logically from the previous one.
- Do not produce anything after the 15th message. No extra lines or text.

Produce all 15 messages now:
"""
    return ChatPromptTemplate.from_template(prompt_template_str)
def create_summary_prompt(name1: str, name2: str, conversation: str):
    """Prompt for generating a title and summary of the finished conversation."""
    # Escape any literal braces in the conversation so ChatPromptTemplate
    # does not mistake them for template variables.
    conversation = conversation.replace("{", "{{").replace("}", "}}")
    summary_prompt_str = f"""
Below is a completed 15-message conversation between {name1} and {name2}:

{conversation}

Please provide:
Title: <A short descriptive title of the conversation>
Summary: <A few short sentences highlighting the main points, tone, and conclusion>

Do not continue the conversation, do not repeat it, and do not add extra formatting beyond two lines:
- One line starting with "Title:"
- One line starting with "Summary:"
"""
    return ChatPromptTemplate.from_template(summary_prompt_str)
def main():
    st.title("LLM Conversation Simulation + AnimateDiff Video")

    model_names = [
        "meta-llama/Llama-3.3-70B-Instruct",
        "mistralai/Mistral-7B-v0.1",
        "tiiuae/falcon-7b"
    ]
    selected_model = st.selectbox("Select a model:", model_names)

    name1 = st.text_input("Enter the first user's name:", value="Alice")
    name2 = st.text_input("Enter the second user's name:", value="Bob")
    persona_style = st.text_area("Enter the persona style characteristics:",
                                 value="friendly, curious, and a bit sarcastic")
    if st.button("Start Conversation Simulation"):
        st.write("**Loading model...**")
        print("Loading model...")
        with st.spinner("Starting simulation..."):
            endpoint_url = f"https://api-inference.huggingface.co/models/{selected_model}"
            try:
                llm = HuggingFaceEndpoint(
                    endpoint_url=endpoint_url,
                    huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
                    task="text-generation",
                    temperature=0.7,
                    max_new_tokens=512
                )
                st.write("**Model loaded successfully!**")
                print("Model loaded successfully!")
            except Exception as e:
                st.error(f"Error initializing HuggingFaceEndpoint: {e}")
                print(f"Error initializing HuggingFaceEndpoint: {e}")
                return
            conversation_prompt = create_conversation_prompt(name1, name2, persona_style)
            conversation_chain = LLMChain(llm=llm, prompt=conversation_prompt)

            st.write("**Generating the full 15-message conversation...**")
            print("Generating the full 15-message conversation...")
            try:
                # Generate all 15 messages in one go. The prompt has no input
                # variables (the f-string is already filled in), so the dummy
                # kwargs below are ignored by LLMChain.
                conversation = conversation_chain.run(chat_history="", input="").strip()
                st.subheader("Final Conversation:")
                st.text(conversation)
                print("Conversation Generation Complete.\n")
                print("Full Conversation:\n", conversation)

                # Summarize the conversation
                summary_prompt = create_summary_prompt(name1, name2, conversation)
                summary_chain = LLMChain(llm=llm, prompt=summary_prompt)
                st.subheader("Summary and Title:")
                st.write("**Summarizing the conversation...**")
                print("Summarizing the conversation...")
                summary = summary_chain.run(chat_history="", input="")
                st.write(summary)
                print("Summary:\n", summary)
                # Extract the "Summary:" line; fall back to a generic scene
                # description if the model did not follow the format.
                summary_line = ""
                for line in summary.split("\n"):
                    stripped = line.strip()
                    if stripped.lower().startswith("summary:"):
                        summary_line = stripped[len("summary:"):].strip()
                        break
                if not summary_line:
                    summary_line = "A friendly scene reflecting the conversation."
                # Now integrate AnimateDiff-Lightning for text-to-video generation
                st.write("**Generating animation from summary using ByteDance/AnimateDiff-Lightning...**")
                print("Generating animation from summary using ByteDance/AnimateDiff-Lightning...")

                device = "cuda" if torch.cuda.is_available() else "cpu"
                dtype = torch.float16 if torch.cuda.is_available() else torch.float32

                step = 4  # Lightning distillation steps; the repo also ships 1-, 2-, and 8-step checkpoints
                repo = "ByteDance/AnimateDiff-Lightning"
                ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
                base = "emilianJR/epiCRealism"  # SD 1.5 base checkpoint used in the AnimateDiff-Lightning examples

                # Load and configure the AnimateDiff pipeline
                adapter = MotionAdapter().to(device, dtype)
                adapter.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device))
                pipe = AnimateDiffPipeline.from_pretrained(
                    base,
                    motion_adapter=adapter,
                    torch_dtype=dtype
                ).to(device)
                # The AnimateDiff-Lightning model card recommends this scheduler configuration
                pipe.scheduler = EulerDiscreteScheduler.from_config(
                    pipe.scheduler.config,
                    timestep_spacing="trailing",
                    beta_schedule="linear"
                )
                # Generate the animation
                output = pipe(prompt=summary_line, guidance_scale=1.0, num_inference_steps=step)

                # Save as GIF. output.frames is a list of frame sequences
                # (one per prompt), so take the first sequence of PIL images.
                st.write("**Exporting animation to GIF...**")
                print("Exporting animation to GIF...")
                export_to_gif(output.frames[0], "animation.gif")

                st.subheader("Generated Animation:")
                st.image("animation.gif", caption="Generated by AnimateDiff using the summary prompt")
            except Exception as e:
                st.error(f"Error generating conversation or summary: {e}")
                print(f"Error generating conversation or summary: {e}")

if __name__ == "__main__":
    main()
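# To run locally (assuming this file is saved as app.py and the packages listed
# at the top are installed):
#   export HUGGINGFACEHUB_API_TOKEN=<your Hugging Face token>
#   streamlit run app.py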