Update app.py
app.py CHANGED
@@ -129,13 +129,17 @@ import torch
 from langchain.chains import LLMChain
 from langchain.prompts import ChatPromptTemplate
 from langchain_huggingface import HuggingFaceEndpoint
 
+# Additional imports for AnimateDiff
+from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
+from diffusers.utils import export_to_gif
+from huggingface_hub import hf_hub_download
+from safetensors.torch import load_file
 
 def create_conversation_prompt(name1: str, name2: str, persona_style: str):
     """
     Create a prompt that instructs the model to produce exactly 15 messages
     of conversation, alternating between name1 and name2, starting with name1.
-    We will be very explicit and not allow any formatting except the required lines.
     """
     prompt_template_str = f"""
 You are simulating a conversation of exactly 15 messages between two people: {name1} and {name2}.
@@ -171,7 +175,7 @@ def create_summary_prompt(name1: str, name2: str, conversation: str):
     return ChatPromptTemplate.from_template(summary_prompt_str)
 
 def main():
-    st.title("LLM Conversation Simulation")
+    st.title("LLM Conversation Simulation + AnimateDiff Video")
 
     model_names = [
         "meta-llama/Llama-3.3-70B-Instruct",
@@ -234,40 +238,55 @@ def main():
             st.write(summary)
             print("Summary:\n", summary)
 
-            #
-            st.write("**Generating image from summary using ByteDance/AnimateDiff-Lightning...**")
-            print("Generating image from summary using ByteDance/AnimateDiff-Lightning...")
-
-            # Load the text-to-image pipeline
-            # Adjust this code as needed depending on GPU/CPU availability and model requirements.
-            device = "cuda" if torch.cuda.is_available() else "cpu"
-            pipe = DiffusionPipeline.from_pretrained(
-                "ByteDance/AnimateDiff-Lightning",
-                torch_dtype=torch.float16 if "cuda" in device else torch.float32
-            ).to(device)
-
-            # The summary prompt might contain both Title and Summary lines. Let's just use the Summary line as the prompt.
-            # Extract the Summary line
-            # Format:
-            # Title: ...
-            # Summary: ...
-            # We'll parse the summary text to get the summary line alone
+            # Extract the summary line from the summary text
             lines = summary.split("\n")
             summary_line = ""
             for line in lines:
                 if line.strip().lower().startswith("summary:"):
-                    # Extract the text after "Summary:"
                     summary_line = line.split("Summary:", 1)[-1].strip()
                     break
-
             if not summary_line:
                 summary_line = "A friendly scene reflecting the conversation."
 
-            #
-            image = pipe(summary_line).images[0]
-
-            st.subheader("Generated Image:")
-            st.image(image)
+            # Now integrate AnimateDiff for text-to-video generation
+            st.write("**Generating animation from summary using ByteDance/AnimateDiff-Lightning...**")
+            print("Generating animation from summary using ByteDance/AnimateDiff-Lightning...")
+
+            device = "cuda" if torch.cuda.is_available() else "cpu"
+            dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+
+            step = 4  # Adjust if needed
+            repo = "ByteDance/AnimateDiff-Lightning"
+            ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
+            base = "emilianJR/epiCRealism"  # Check if this model exists or choose a known base model
+
+            # Load and configure AnimateDiff pipeline
+            adapter = MotionAdapter().to(device, dtype)
+            adapter.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device))
+
+            pipe = AnimateDiffPipeline.from_pretrained(
+                base,
+                motion_adapter=adapter,
+                torch_dtype=dtype
+            ).to(device)
+
+            pipe.scheduler = EulerDiscreteScheduler.from_config(
+                pipe.scheduler.config,
+                timestep_spacing="trailing",
+                beta_schedule="linear"
+            )
+
+            # Generate the animation
+            output = pipe(prompt=summary_line, guidance_scale=1.0, num_inference_steps=step)
+
+            # Save as GIF
+            # output.frames[0] is the list of PIL frames for the first prompt
+            st.write("**Exporting animation to GIF...**")
+            print("Exporting animation to GIF...")
+            export_to_gif(output.frames[0], "animation.gif")
 
+            st.subheader("Generated Animation:")
+            st.image("animation.gif", caption="Generated by AnimateDiff using summary prompt")
 
         except Exception as e:
             st.error(f"Error generating conversation or summary: {e}")
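Note: the Summary: extraction in the last hunk can be sanity-checked on its own. A minimal sketch, where sample is a hypothetical model output (the real text comes from the summary chain):

    # Hypothetical model output; the app gets this from the summary chain.
    sample = "Title: A Walk in the Park\nSummary: Two friends plan a weekend hike."

    summary_line = ""
    for line in sample.split("\n"):
        if line.strip().lower().startswith("summary:"):
            # Keep only the text after the "Summary:" tag
            summary_line = line.split("Summary:", 1)[-1].strip()
            break
    if not summary_line:
        summary_line = "A friendly scene reflecting the conversation."

    print(summary_line)  # -> Two friends plan a weekend hike.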
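Note: the AnimateDiff-Lightning code added above mirrors the usage example on the ByteDance/AnimateDiff-Lightning model card: a distilled motion adapter loaded from a safetensors checkpoint on top of an SD1.5 base, an Euler scheduler with trailing timesteps, and guidance_scale=1.0. For reference, a minimal standalone sketch of the same flow (the prompt string here is a stand-in for the app's summary_line):

    import torch
    from diffusers import AnimateDiffPipeline, MotionAdapter, EulerDiscreteScheduler
    from diffusers.utils import export_to_gif
    from huggingface_hub import hf_hub_download
    from safetensors.torch import load_file

    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32

    step = 4  # Lightning checkpoints are distilled for 1, 2, 4, or 8 steps
    repo = "ByteDance/AnimateDiff-Lightning"
    ckpt = f"animatediff_lightning_{step}step_diffusers.safetensors"
    base = "emilianJR/epiCRealism"  # SD1.5 base model used in the model card example

    # Load the distilled motion adapter onto the chosen device/dtype
    adapter = MotionAdapter().to(device, dtype)
    adapter.load_state_dict(load_file(hf_hub_download(repo, ckpt), device=device))

    pipe = AnimateDiffPipeline.from_pretrained(base, motion_adapter=adapter, torch_dtype=dtype).to(device)
    pipe.scheduler = EulerDiscreteScheduler.from_config(
        pipe.scheduler.config, timestep_spacing="trailing", beta_schedule="linear"
    )

    # guidance_scale=1.0: the distilled model is meant to run without classifier-free guidance
    output = pipe(prompt="Two friends chatting on a park bench", guidance_scale=1.0, num_inference_steps=step)
    export_to_gif(output.frames[0], "animation.gif")  # frames[0] holds the PIL frames for the first prompt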