RakeshUtekar committed
Update app.py
app.py
CHANGED
@@ -1,128 +1,3 @@
-# import os
-# import streamlit as st
-# import torch
-# from langchain.chains import LLMChain
-# from langchain.prompts import ChatPromptTemplate
-# from langchain_huggingface import HuggingFaceEndpoint
-
-# def create_conversation_prompt(name1: str, name2: str, persona_style: str):
-#     """
-#     Create a prompt that instructs the model to produce exactly 15 messages
-#     of conversation, alternating between name1 and name2, starting with name1.
-
-#     We will be very explicit and not allow any formatting except the required lines.
-#     """
-#     prompt_template_str = f"""
-# You are simulating a conversation of exactly 15 messages between two people: {name1} and {name2}.
-# {name1} speaks first (message 1), then {name2} (message 2), then {name1} (message 3), and so forth,
-# alternating until all 15 messages are complete. The 15th message is by {name1}.
-
-# Requirements:
-# - Output exactly 15 lines, no more, no less.
-# - Each line must be a single message in the format:
-#   {name1}: <message> or {name2}: <message>
-# - Do not add any headings, numbers, sample outputs, or explanations.
-# - Do not mention code, programming, or instructions.
-# - Each message should be 1-2 short sentences, friendly, natural, reflecting the style: {persona_style}.
-# - Use everyday language, can ask questions, show opinions.
-# - Use emojis sparingly if it fits the style (no more than 1-2 total).
-# - No repeated lines, each message should logically follow from the previous one.
-# - Do not produce anything after the 15th message. No extra lines or text.
-
-# Produce all 15 messages now:
-# """
-#     return ChatPromptTemplate.from_template(prompt_template_str)
-
-# def create_summary_prompt(name1: str, name2: str, conversation: str):
-#     """Prompt for generating a title and summary."""
-#     summary_prompt_str = f"""
-# Below is a completed 15-message conversation between {name1} and {name2}:
-
-# {conversation}
-
-# Please provide:
-# Title: <A short descriptive title of the conversation>
-# Summary: <A few short sentences highlighting the main points, tone, and conclusion>
-
-# Do not continue the conversation, do not repeat it, and do not add extra formatting beyond the two lines:
-# - One line starting with "Title:"
-# - One line starting with "Summary:"
-# """
-#     return ChatPromptTemplate.from_template(summary_prompt_str)
-
-# def main():
-#     st.title("LLM Conversation Simulation")
-
-#     model_names = [
-#         "meta-llama/Llama-3.3-70B-Instruct",
-#         "mistralai/Mistral-7B-v0.1",
-#         "tiiuae/falcon-7b"
-
-#     ]
-#     selected_model = st.selectbox("Select a model:", model_names)
-
-#     name1 = st.text_input("Enter the first user's name:", value="Alice")
-#     name2 = st.text_input("Enter the second user's name:", value="Bob")
-#     persona_style = st.text_area("Enter the persona style characteristics:",
-#                                  value="friendly, curious, and a bit sarcastic")
-
-#     if st.button("Start Conversation Simulation"):
-#         st.write("**Loading model...**")
-#         print("Loading model...")
-
-#         with st.spinner("Starting simulation..."):
-#             endpoint_url = f"https://api-inference.huggingface.co/models/{selected_model}"
-
-#             try:
-#                 llm = HuggingFaceEndpoint(
-#                     endpoint_url=endpoint_url,
-#                     huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
-#                     task="text-generation",
-#                     temperature=0.7,
-#                     max_new_tokens=512
-#                 )
-#                 st.write("**Model loaded successfully!**")
-#                 print("Model loaded successfully!")
-#             except Exception as e:
-#                 st.error(f"Error initializing HuggingFaceEndpoint: {e}")
-#                 print(f"Error initializing HuggingFaceEndpoint: {e}")
-#                 return
-
-#             conversation_prompt = create_conversation_prompt(name1, name2, persona_style)
-#             conversation_chain = LLMChain(llm=llm, prompt=conversation_prompt)
-
-#             st.write("**Generating the full 15-message conversation...**")
-#             print("Generating the full 15-message conversation...")
-
-#             try:
-#                 # Generate all 15 messages in one go
-#                 conversation = conversation_chain.run(chat_history="", input="").strip()
-
-#                 st.subheader("Final Conversation:")
-#                 st.text(conversation)
-#                 print("Conversation Generation Complete.\n")
-#                 print("Full Conversation:\n", conversation)
-
-#                 # Summarize the conversation
-#                 summary_prompt = create_summary_prompt(name1, name2, conversation)
-#                 summary_chain = LLMChain(llm=llm, prompt=summary_prompt)
-
-#                 st.subheader("Summary and Title:")
-#                 st.write("**Summarizing the conversation...**")
-#                 print("Summarizing the conversation...")
-
-#                 summary = summary_chain.run(chat_history="", input="")
-#                 st.write(summary)
-#                 print("Summary:\n", summary)
-
-#             except Exception as e:
-#                 st.error(f"Error generating conversation: {e}")
-#                 print(f"Error generating conversation: {e}")
-
-# if __name__ == "__main__":
-#     main()
-
-
 import os
 import streamlit as st
 import torch
@@ -130,21 +5,18 @@ from langchain.chains import LLMChain
 from langchain.prompts import ChatPromptTemplate
 from langchain_huggingface import HuggingFaceEndpoint
 
-# Additional imports for AnimateDiff
-from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
-from diffusers.utils import export_to_gif
-from huggingface_hub import hf_hub_download
-from safetensors.torch import load_file
-
 def create_conversation_prompt(name1: str, name2: str, persona_style: str):
     """
     Create a prompt that instructs the model to produce exactly 15 messages
     of conversation, alternating between name1 and name2, starting with name1.
+
+    We will be very explicit and not allow any formatting except the required lines.
     """
     prompt_template_str = f"""
 You are simulating a conversation of exactly 15 messages between two people: {name1} and {name2}.
 {name1} speaks first (message 1), then {name2} (message 2), then {name1} (message 3), and so forth,
 alternating until all 15 messages are complete. The 15th message is by {name1}.
+
 Requirements:
 - Output exactly 15 lines, no more, no less.
 - Each line must be a single message in the format:
@@ -156,6 +28,7 @@ def create_conversation_prompt(name1: str, name2: str, persona_style: str):
 - Use emojis sparingly if it fits the style (no more than 1-2 total).
 - No repeated lines, each message should logically follow from the previous one.
 - Do not produce anything after the 15th message. No extra lines or text.
+
 Produce all 15 messages now:
 """
     return ChatPromptTemplate.from_template(prompt_template_str)
@@ -164,10 +37,13 @@ def create_summary_prompt(name1: str, name2: str, conversation: str):
     """Prompt for generating a title and summary."""
     summary_prompt_str = f"""
 Below is a completed 15-message conversation between {name1} and {name2}:
+
 {conversation}
+
 Please provide:
 Title: <A short descriptive title of the conversation>
 Summary: <A few short sentences highlighting the main points, tone, and conclusion>
+
 Do not continue the conversation, do not repeat it, and do not add extra formatting beyond the two lines:
 - One line starting with "Title:"
 - One line starting with "Summary:"
@@ -175,12 +51,14 @@
     return ChatPromptTemplate.from_template(summary_prompt_str)
 
 def main():
-    st.title("LLM Conversation Simulation
+    st.title("LLM Conversation Simulation")
 
     model_names = [
         "meta-llama/Llama-3.3-70B-Instruct",
        "mistralai/Mistral-7B-v0.1",
-        "tiiuae/falcon-7b"
+        "tiiuae/falcon-7b",
+        "EleutherAI/gpt-neox-20b"
+
     ]
     selected_model = st.selectbox("Select a model:", model_names)
 
@@ -238,59 +116,9 @@ def main():
                 st.write(summary)
                 print("Summary:\n", summary)
 
-                # Extract the summary line from the summary text
-                lines = summary.split("\n")
-                summary_line = ""
-                for line in lines:
-                    if line.strip().lower().startswith("summary:"):
-                        summary_line = line.split("Summary:", 1)[-1].strip()
-                        break
-                if not summary_line:
-                    summary_line = "A friendly scene reflecting the conversation."
-
-                # Now integrate AnimateDiff for text-to-video generation
-                st.write("**Generating animation from summary using ByteDance/AnimateDiff-Lightning...**")
-                print("Generating animation from summary using ByteDance/AnimateDiff-Lightning...")
-
-                device = "cuda" if torch.cuda.is_available() else "cpu"
-                dtype = torch.float16 if torch.cuda.is_available() else torch.float32
-
-                step = 4  # Adjust if needed
-                repo = "ByteDance/AnimateDiff-Lightning"
-                ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
-                base = "emilianJR/epiCRealism"  # Check if this model exists or choose a known base model
-
-                # Load and configure AnimateDiff pipeline
-                adapter = MotionAdapter().to(device, dtype)
-                adapter.load_state_dict(load_file(hf_hub_download(repo ,ckpt), device=device))
-
-                pipe = AnimateDiffPipeline.from_pretrained(
-                    base,
-                    motion_adapter=adapter,
-                    torch_dtype=dtype
-                ).to(device)
-
-                pipe.scheduler = EulerDiscreteScheduler.from_config(
-                    pipe.scheduler.config,
-                    timestep_spacing="trailing",
-                    beta_schedule="linear"
-                )
-
-                # Generate the animation
-                output = pipe(prompt=summary_line, guidance_scale=1.0, num_inference_steps=step)
-
-                # Save as GIF
-                # output.frames is a list of frames (PIL images)
-                st.write("**Exporting animation to GIF...**")
-                print("Exporting animation to GIF...")
-                export_to_gif(output.frames, "animation.gif")
-
-                st.subheader("Generated Animation:")
-                st.image("animation.gif", caption="Generated by AnimateDiff using summary prompt")
-
             except Exception as e:
-                st.error(f"Error generating conversation
-                print(f"Error generating conversation
+                st.error(f"Error generating conversation: {e}")
+                print(f"Error generating conversation: {e}")
 
 if __name__ == "__main__":
     main()
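Aside for future maintenance: `LLMChain` and its `run()` method, which the kept code still uses, are deprecated in recent LangChain releases in favor of composing the prompt and model with the `|` operator and calling `invoke()`. Below is a minimal sketch of the equivalent pattern, assuming current `langchain` and `langchain-huggingface` packages; it reuses the endpoint settings from `app.py`, but the one-variable prompt is a made-up stand-in, not the app's prompt.

import os

from langchain.prompts import ChatPromptTemplate
from langchain_huggingface import HuggingFaceEndpoint

# Same endpoint configuration that app.py builds from the selected model.
llm = HuggingFaceEndpoint(
    endpoint_url="https://api-inference.huggingface.co/models/mistralai/Mistral-7B-v0.1",
    huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
    task="text-generation",
    temperature=0.7,
    max_new_tokens=512,
)

# Hypothetical one-variable template for illustration. app.py bakes its values
# into the template with an f-string, so its chains run with no template variables.
prompt = ChatPromptTemplate.from_template("Write one friendly greeting from {name}.")

chain = prompt | llm                       # replaces LLMChain(llm=llm, prompt=prompt)
result = chain.invoke({"name": "Alice"})   # replaces chain.run(...)
print(result)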
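For reference, the AnimateDiff-Lightning block this commit deletes can be exercised as a standalone script. The sketch below is assembled from the deleted lines and assumes a CUDA machine with recent diffusers, safetensors, and huggingface_hub installs; `emilianJR/epiCRealism` is the base checkpoint the deleted code pointed at. One deliberate change: `output.frames[0]` is exported, matching current diffusers examples in which frames come back as one list per prompt, where the deleted code passed `output.frames` directly.

import torch
from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
from diffusers.utils import export_to_gif
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if torch.cuda.is_available() else torch.float32

step = 4  # the 4-step Lightning checkpoint, as in the deleted code
repo = "ByteDance/AnimateDiff-Lightning"
ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
base = "emilianJR/epiCRealism"

# Download the motion-adapter weights and attach them to the base pipeline.
adapter = MotionAdapter().to(device, dtype)
adapter.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device))
pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
pipe.scheduler = EulerDiscreteScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear"
)

# Generate a short clip from a text prompt and save it as a GIF.
output = pipe(prompt="Two friends chatting in a cafe", guidance_scale=1.0, num_inference_steps=step)
export_to_gif(output.frames[0], "animation.gif")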