RakeshUtekar committed on
Commit
7869791
·
verified ·
1 Parent(s): fff4a1c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +164 -9
app.py CHANGED
@@ -1,22 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
  import streamlit as st
3
  import torch
4
  from langchain.chains import LLMChain
5
  from langchain.prompts import ChatPromptTemplate
6
  from langchain_huggingface import HuggingFaceEndpoint
 
7
 
8
  def create_conversation_prompt(name1: str, name2: str, persona_style: str):
9
  """
10
  Create a prompt that instructs the model to produce exactly 15 messages
11
  of conversation, alternating between name1 and name2, starting with name1.
12
-
13
  We will be very explicit and not allow any formatting except the required lines.
14
  """
15
  prompt_template_str = f"""
16
  You are simulating a conversation of exactly 15 messages between two people: {name1} and {name2}.
17
  {name1} speaks first (message 1), then {name2} (message 2), then {name1} (message 3), and so forth,
18
  alternating until all 15 messages are complete. The 15th message is by {name1}.
19
-
20
  Requirements:
21
  - Output exactly 15 lines, no more, no less.
22
  - Each line must be a single message in the format:
@@ -28,7 +152,6 @@ def create_conversation_prompt(name1: str, name2: str, persona_style: str):
28
  - Use emojis sparingly if it fits the style (no more than 1-2 total).
29
  - No repeated lines, each message should logically follow from the previous one.
30
  - Do not produce anything after the 15th message. No extra lines or text.
31
-
32
  Produce all 15 messages now:
33
  """
34
  return ChatPromptTemplate.from_template(prompt_template_str)
@@ -37,13 +160,10 @@ def create_summary_prompt(name1: str, name2: str, conversation: str):
37
  """Prompt for generating a title and summary."""
38
  summary_prompt_str = f"""
39
  Below is a completed 15-message conversation between {name1} and {name2}:
40
-
41
  {conversation}
42
-
43
  Please provide:
44
  Title: <A short descriptive title of the conversation>
45
  Summary: <A few short sentences highlighting the main points, tone, and conclusion>
46
-
47
  Do not continue the conversation, do not repeat it, and do not add extra formatting beyond the two lines:
48
  - One line starting with "Title:"
49
  - One line starting with "Summary:"
@@ -57,7 +177,6 @@ def main():
57
  "meta-llama/Llama-3.3-70B-Instruct",
58
  "mistralai/Mistral-7B-v0.1",
59
  "tiiuae/falcon-7b"
60
-
61
  ]
62
  selected_model = st.selectbox("Select a model:", model_names)
63
 
@@ -115,9 +234,45 @@ def main():
115
  st.write(summary)
116
  print("Summary:\n", summary)
117
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
118
  except Exception as e:
119
- st.error(f"Error generating conversation: {e}")
120
- print(f"Error generating conversation: {e}")
121
 
122
  if __name__ == "__main__":
123
  main()
 
 
1
+ # import os
2
+ # import streamlit as st
3
+ # import torch
4
+ # from langchain.chains import LLMChain
5
+ # from langchain.prompts import ChatPromptTemplate
6
+ # from langchain_huggingface import HuggingFaceEndpoint
7
+
8
+ # def create_conversation_prompt(name1: str, name2: str, persona_style: str):
9
+ # """
10
+ # Create a prompt that instructs the model to produce exactly 15 messages
11
+ # of conversation, alternating between name1 and name2, starting with name1.
12
+
13
+ # We will be very explicit and not allow any formatting except the required lines.
14
+ # """
15
+ # prompt_template_str = f"""
16
+ # You are simulating a conversation of exactly 15 messages between two people: {name1} and {name2}.
17
+ # {name1} speaks first (message 1), then {name2} (message 2), then {name1} (message 3), and so forth,
18
+ # alternating until all 15 messages are complete. The 15th message is by {name1}.
19
+
20
+ # Requirements:
21
+ # - Output exactly 15 lines, no more, no less.
22
+ # - Each line must be a single message in the format:
23
+ # {name1}: <message> or {name2}: <message>
24
+ # - Do not add any headings, numbers, sample outputs, or explanations.
25
+ # - Do not mention code, programming, or instructions.
26
+ # - Each message should be 1-2 short sentences, friendly, natural, reflecting the style: {persona_style}.
27
+ # - Use everyday language, can ask questions, show opinions.
28
+ # - Use emojis sparingly if it fits the style (no more than 1-2 total).
29
+ # - No repeated lines, each message should logically follow from the previous one.
30
+ # - Do not produce anything after the 15th message. No extra lines or text.
31
+
32
+ # Produce all 15 messages now:
33
+ # """
34
+ # return ChatPromptTemplate.from_template(prompt_template_str)
35
+
36
+ # def create_summary_prompt(name1: str, name2: str, conversation: str):
37
+ # """Prompt for generating a title and summary."""
38
+ # summary_prompt_str = f"""
39
+ # Below is a completed 15-message conversation between {name1} and {name2}:
40
+
41
+ # {conversation}
42
+
43
+ # Please provide:
44
+ # Title: <A short descriptive title of the conversation>
45
+ # Summary: <A few short sentences highlighting the main points, tone, and conclusion>
46
+
47
+ # Do not continue the conversation, do not repeat it, and do not add extra formatting beyond the two lines:
48
+ # - One line starting with "Title:"
49
+ # - One line starting with "Summary:"
50
+ # """
51
+ # return ChatPromptTemplate.from_template(summary_prompt_str)
52
+
53
+ # def main():
54
+ # st.title("LLM Conversation Simulation")
55
+
56
+ # model_names = [
57
+ # "meta-llama/Llama-3.3-70B-Instruct",
58
+ # "mistralai/Mistral-7B-v0.1",
59
+ # "tiiuae/falcon-7b"
60
+
61
+ # ]
62
+ # selected_model = st.selectbox("Select a model:", model_names)
63
+
64
+ # name1 = st.text_input("Enter the first user's name:", value="Alice")
65
+ # name2 = st.text_input("Enter the second user's name:", value="Bob")
66
+ # persona_style = st.text_area("Enter the persona style characteristics:",
67
+ # value="friendly, curious, and a bit sarcastic")
68
+
69
+ # if st.button("Start Conversation Simulation"):
70
+ # st.write("**Loading model...**")
71
+ # print("Loading model...")
72
+
73
+ # with st.spinner("Starting simulation..."):
74
+ # endpoint_url = f"https://api-inference.huggingface.co/models/{selected_model}"
75
+
76
+ # try:
77
+ # llm = HuggingFaceEndpoint(
78
+ # endpoint_url=endpoint_url,
79
+ # huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
80
+ # task="text-generation",
81
+ # temperature=0.7,
82
+ # max_new_tokens=512
83
+ # )
84
+ # st.write("**Model loaded successfully!**")
85
+ # print("Model loaded successfully!")
86
+ # except Exception as e:
87
+ # st.error(f"Error initializing HuggingFaceEndpoint: {e}")
88
+ # print(f"Error initializing HuggingFaceEndpoint: {e}")
89
+ # return
90
+
91
+ # conversation_prompt = create_conversation_prompt(name1, name2, persona_style)
92
+ # conversation_chain = LLMChain(llm=llm, prompt=conversation_prompt)
93
+
94
+ # st.write("**Generating the full 15-message conversation...**")
95
+ # print("Generating the full 15-message conversation...")
96
+
97
+ # try:
98
+ # # Generate all 15 messages in one go
99
+ # conversation = conversation_chain.run(chat_history="", input="").strip()
100
+
101
+ # st.subheader("Final Conversation:")
102
+ # st.text(conversation)
103
+ # print("Conversation Generation Complete.\n")
104
+ # print("Full Conversation:\n", conversation)
105
+
106
+ # # Summarize the conversation
107
+ # summary_prompt = create_summary_prompt(name1, name2, conversation)
108
+ # summary_chain = LLMChain(llm=llm, prompt=summary_prompt)
109
+
110
+ # st.subheader("Summary and Title:")
111
+ # st.write("**Summarizing the conversation...**")
112
+ # print("Summarizing the conversation...")
113
+
114
+ # summary = summary_chain.run(chat_history="", input="")
115
+ # st.write(summary)
116
+ # print("Summary:\n", summary)
117
+
118
+ # except Exception as e:
119
+ # st.error(f"Error generating conversation: {e}")
120
+ # print(f"Error generating conversation: {e}")
121
+
122
+ # if __name__ == "__main__":
123
+ # main()
124
+
125
+
126
  import os
127
  import streamlit as st
128
  import torch
129
  from langchain.chains import LLMChain
130
  from langchain.prompts import ChatPromptTemplate
131
  from langchain_huggingface import HuggingFaceEndpoint
132
+ from diffusers import DiffusionPipeline
133
 
134
def create_conversation_prompt(name1: str, name2: str, persona_style: str):
    """Create a prompt that instructs the model to produce exactly 15 messages
    of conversation, alternating between name1 and name2, starting with name1.

    We are very explicit and do not allow any formatting except the required
    lines.

    Args:
        name1: First speaker's name (user input; sends messages 1, 3, ... 15).
        name2: Second speaker's name (user input).
        persona_style: Free-text style description (user input).

    Returns:
        A ChatPromptTemplate with the names/style already interpolated.
    """
    # The interpolated values come from Streamlit text inputs. Any literal
    # '{' or '}' in them would be parsed by ChatPromptTemplate.from_template
    # as a template variable and raise at format time, so double them first.
    name1 = name1.replace("{", "{{").replace("}", "}}")
    name2 = name2.replace("{", "{{").replace("}", "}}")
    persona_style = persona_style.replace("{", "{{").replace("}", "}}")
    prompt_template_str = f"""
You are simulating a conversation of exactly 15 messages between two people: {name1} and {name2}.
{name1} speaks first (message 1), then {name2} (message 2), then {name1} (message 3), and so forth,
alternating until all 15 messages are complete. The 15th message is by {name1}.
Requirements:
- Output exactly 15 lines, no more, no less.
- Each line must be a single message in the format:
  {name1}: <message> or {name2}: <message>
- Do not add any headings, numbers, sample outputs, or explanations.
- Do not mention code, programming, or instructions.
- Each message should be 1-2 short sentences, friendly, natural, reflecting the style: {persona_style}.
- Use everyday language, can ask questions, show opinions.
- Use emojis sparingly if it fits the style (no more than 1-2 total).
- No repeated lines, each message should logically follow from the previous one.
- Do not produce anything after the 15th message. No extra lines or text.
Produce all 15 messages now:
"""
    return ChatPromptTemplate.from_template(prompt_template_str)
 
160
  """Prompt for generating a title and summary."""
161
  summary_prompt_str = f"""
162
  Below is a completed 15-message conversation between {name1} and {name2}:
 
163
  {conversation}
 
164
  Please provide:
165
  Title: <A short descriptive title of the conversation>
166
  Summary: <A few short sentences highlighting the main points, tone, and conclusion>
 
167
  Do not continue the conversation, do not repeat it, and do not add extra formatting beyond the two lines:
168
  - One line starting with "Title:"
169
  - One line starting with "Summary:"
 
177
  "meta-llama/Llama-3.3-70B-Instruct",
178
  "mistralai/Mistral-7B-v0.1",
179
  "tiiuae/falcon-7b"
 
180
  ]
181
  selected_model = st.selectbox("Select a model:", model_names)
182
 
 
234
  st.write(summary)
235
  print("Summary:\n", summary)
236
 
237
# Now use the summary as a prompt for the text-to-image model
st.write("**Generating image from summary using ByteDance/AnimateDiff-Lightning...**")
print("Generating image from summary using ByteDance/AnimateDiff-Lightning...")

# NOTE(review): ByteDance/AnimateDiff-Lightning is distributed as motion-module
# weights for *video* generation; loading it directly with DiffusionPipeline and
# reading `.images[0]` may not behave as a plain text-to-image model — confirm
# the intended pipeline class/checkpoint.
# fp16 only makes sense on GPU; use fp32 on CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = DiffusionPipeline.from_pretrained(
    "ByteDance/AnimateDiff-Lightning",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32
).to(device)

# The summary text contains both a "Title:" and a "Summary:" line; use only
# the text after "Summary:" as the image prompt. Match case-insensitively and
# slice by prefix length so a lowercase "summary:" line is handled too (the
# previous split on the literal "Summary:" left the prefix in place for it).
summary_line = ""
for line in summary.split("\n"):
    stripped = line.strip()
    if stripped.lower().startswith("summary:"):
        summary_line = stripped[len("summary:"):].strip()
        break

if not summary_line:
    # Fallback prompt when the model did not emit a "Summary:" line.
    summary_line = "A friendly scene reflecting the conversation."

# Generate image using the summary line as prompt
image = pipe(summary_line).images[0]

st.subheader("Generated Image:")
st.image(image, caption="Generated by ByteDance/AnimateDiff-Lightning")
  except Exception as e:
273
+ st.error(f"Error generating conversation or summary: {e}")
274
+ print(f"Error generating conversation or summary: {e}")
275
 
276
  if __name__ == "__main__":
277
  main()
278
+