RakeshUtekar committed on
Commit 2407bf5 · verified · 1 Parent(s): bc09c89

Update app.py

Files changed (1)
app.py +70 -250
app.py CHANGED
@@ -1,208 +1,16 @@
- # import os
- # import streamlit as st
- # import torch
- # from langchain.chains import LLMChain
- # from langchain.prompts import ChatPromptTemplate
- # from langchain_huggingface import HuggingFaceEndpoint
-
- # def create_conversation_prompt(name1: str, name2: str, persona_style: str):
- #     """
- #     Create a prompt that instructs the model to produce exactly 15 messages
- #     of conversation, alternating between name1 and name2, starting with name1.
-
- #     We will be very explicit and not allow any formatting except the required lines.
- #     """
- #     prompt_template_str = f"""
- #     You are simulating a conversation of exactly 15 messages between two people: {name1} and {name2}.
- #     {name1} speaks first (message 1), then {name2} (message 2), then {name1} (message 3), and so forth,
- #     alternating until all 15 messages are complete. The 15th message is by {name1}.
-
- #     Requirements:
- #     - Output exactly 15 lines, no more, no less.
- #     - Each line must be a single message in the format:
- #       {name1}: <message> or {name2}: <message>
- #     - Do not add any headings, numbers, sample outputs, or explanations.
- #     - Do not mention code, programming, or instructions.
- #     - Each message should be 1-2 short sentences, friendly, natural, reflecting the style: {persona_style}.
- #     - Use everyday language, can ask questions, show opinions.
- #     - Use emojis sparingly if it fits the style (no more than 1-2 total).
- #     - No repeated lines, each message should logically follow from the previous one.
- #     - Do not produce anything after the 15th message. No extra lines or text.
-
- #     Produce all 15 messages now:
- #     """
- #     return ChatPromptTemplate.from_template(prompt_template_str)
-
- # def create_summary_prompt(name1: str, name2: str, conversation: str):
- #     """Prompt for generating a title and summary."""
- #     summary_prompt_str = f"""
- #     Below is a completed 15-message conversation between {name1} and {name2}:
-
- #     {conversation}
-
- #     Please provide:
- #     Title: <A short descriptive title of the conversation>
- #     Summary: <A few short sentences highlighting the main points, tone, and conclusion>
-
- #     Do not continue the conversation, do not repeat it, and do not add extra formatting beyond the two lines:
- #     - One line starting with "Title:"
- #     - One line starting with "Summary:"
- #     """
- #     return ChatPromptTemplate.from_template(summary_prompt_str)
-
- # def main():
- #     st.title("LLM Conversation Simulation")
-
- #     model_names = [
- #         "meta-llama/Llama-3.3-70B-Instruct",
- #         "meta-llama/Llama-3.1-405B-Instruct",
- #         "Qwen/Qwen2.5-72B-Instruct",
- #         "deepseek-ai/DeepSeek-V3",
- #         "deepseek-ai/DeepSeek-V2.5"
-
- #     ]
- #     selected_model = st.selectbox("Select a model:", model_names)
-
- #     name1 = st.text_input("Enter the first user's name:", value="Alice")
- #     name2 = st.text_input("Enter the second user's name:", value="Bob")
- #     persona_style = st.text_area("Enter the persona style characteristics:",
- #                                  value="friendly, curious, and a bit sarcastic")
-
- #     if st.button("Start Conversation Simulation"):
- #         st.write("**Loading model...**")
- #         print("Loading model...")
-
- #         with st.spinner("Starting simulation..."):
- #             endpoint_url = f"https://api-inference.huggingface.co/models/{selected_model}"
-
- #             try:
- #                 llm = HuggingFaceEndpoint(
- #                     endpoint_url=endpoint_url,
- #                     huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
- #                     task="text-generation",
- #                     temperature=0.7,
- #                     max_new_tokens=512
- #                 )
- #                 st.write("**Model loaded successfully!**")
- #                 print("Model loaded successfully!")
- #             except Exception as e:
- #                 st.error(f"Error initializing HuggingFaceEndpoint: {e}")
- #                 print(f"Error initializing HuggingFaceEndpoint: {e}")
- #                 return
-
- #             conversation_prompt = create_conversation_prompt(name1, name2, persona_style)
- #             conversation_chain = LLMChain(llm=llm, prompt=conversation_prompt)
-
- #             st.write("**Generating the full 15-message conversation...**")
- #             print("Generating the full 15-message conversation...")
-
- #             try:
- #                 # Generate all 15 messages in one go
- #                 conversation = conversation_chain.run(chat_history="", input="").strip()
-
- #                 st.subheader("Final Conversation:")
- #                 st.text(conversation)
- #                 print("Conversation Generation Complete.\n")
- #                 print("Full Conversation:\n", conversation)
-
- #                 # Summarize the conversation
- #                 summary_prompt = create_summary_prompt(name1, name2, conversation)
- #                 summary_chain = LLMChain(llm=llm, prompt=summary_prompt)
-
- #                 st.subheader("Summary and Title:")
- #                 st.write("**Summarizing the conversation...**")
- #                 print("Summarizing the conversation...")
-
- #                 summary = summary_chain.run(chat_history="", input="")
- #                 st.write(summary)
- #                 print("Summary:\n", summary)
-
- #             except Exception as e:
- #                 st.error(f"Error generating conversation: {e}")
- #                 print(f"Error generating conversation: {e}")
-
- # if __name__ == "__main__":
- #     main()
-
-
  import os
  import streamlit as st
- import google.cloud.aiplatform as aiplatform
+ import torch
  from langchain.chains import LLMChain
  from langchain.prompts import ChatPromptTemplate
- from langchain.llms.base import LLM
- from pydantic import BaseModel
- from typing import Optional, List, Mapping, Any
+ from langchain_huggingface import HuggingFaceEndpoint

- ###############################################################################
- # 1. Create a Custom LLM class for LangChain to call your Vertex AI endpoint.
- ###############################################################################
- class VertexAICustomModel(LLM, BaseModel):
-     project_id: str
-     location: str
-     endpoint_id: str
-     temperature: float = 0.7
-     max_new_tokens: int = 512
-
-     @property
-     def _llm_type(self) -> str:
-         return "vertex_ai_custom"
-
-     def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
-         # Initialize Vertex AI with your project/region
-         aiplatform.init(project=self.project_id, location=self.location)
-         endpoint = aiplatform.Endpoint(
-             endpoint_name=f"projects/{self.project_id}/locations/{self.location}/endpoints/{self.endpoint_id}"
-         )
-
-         # Construct the instance for prediction.
-         # NOTE: Adjust 'prompt', 'temperature', etc. if your model expects different parameters.
-         instance = {
-             "prompt": prompt,
-             "temperature": self.temperature,
-             "max_new_tokens": self.max_new_tokens
-         }
-
-         # Call the endpoint
-         response = endpoint.predict(instances=[instance])
-
-         # Extract the text from the response.
-         # This will vary depending on how your model returns predictions.
-         # A common approach is response.predictions[0]["generated_text"],
-         # but confirm your model's actual JSON structure.
-         predictions = response.predictions
-         if not predictions or "generated_text" not in predictions[0]:
-             raise ValueError(
-                 f"Unexpected response structure from Vertex AI endpoint: {response}"
-             )
-
-         text = predictions[0]["generated_text"]
-
-         # Optionally apply 'stop' tokens
-         if stop:
-             for s in stop:
-                 if s in text:
-                     text = text.split(s)[0]
-         return text
-
-     @property
-     def _identifying_params(self) -> Mapping[str, Any]:
-         """Return any identifying parameters of this LLM."""
-         return {
-             "endpoint_id": self.endpoint_id,
-             "project_id": self.project_id,
-             "location": self.location,
-             "temperature": self.temperature,
-             "max_new_tokens": self.max_new_tokens,
-         }
-
- ###############################################################################
- # 2. Create your conversation and summary prompt templates (unchanged).
- ###############################################################################
  def create_conversation_prompt(name1: str, name2: str, persona_style: str):
      """
      Create a prompt that instructs the model to produce exactly 15 messages
      of conversation, alternating between name1 and name2, starting with name1.
+
+     We will be very explicit and not allow any formatting except the required lines.
      """
      prompt_template_str = f"""
      You are simulating a conversation of exactly 15 messages between two people: {name1} and {name2}.
@@ -229,77 +37,89 @@ def create_summary_prompt(name1: str, name2: str, conversation: str):
      """Prompt for generating a title and summary."""
      summary_prompt_str = f"""
      Below is a completed 15-message conversation between {name1} and {name2}:
+
      {conversation}
+
      Please provide:
      Title: <A short descriptive title of the conversation>
      Summary: <A few short sentences highlighting the main points, tone, and conclusion>
+
      Do not continue the conversation, do not repeat it, and do not add extra formatting beyond the two lines:
      - One line starting with "Title:"
      - One line starting with "Summary:"
      """
      return ChatPromptTemplate.from_template(summary_prompt_str)

- ###############################################################################
- # 3. Main Streamlit app with Vertex AI usage.
- ###############################################################################
  def main():
-     st.title("LLM Conversation Simulation (GCP Vertex AI)")
-
-     # We can remove model selection if we are always using your deployed model:
-     # st.selectbox(... ) # => Removed
-
-     # Hardcode or load your Vertex AI endpoint details here
-     project_id = "282802344966"
-     location = "us-west1"
-     endpoint_id = "1106913540054188032"
+     st.title("LLM Conversation Simulation")
+
+     model_names = [
+         "meta-llama/Llama-3.3-70B-Instruct",
+         "meta-llama/Llama-3.1-405B-Instruct",
+         "Qwen/Qwen2.5-72B-Instruct",
+         "deepseek-ai/DeepSeek-V3",
+         "deepseek-ai/DeepSeek-V2.5"
+
+     ]
+     selected_model = st.selectbox("Select a model:", model_names)

-     # Input fields for conversation
      name1 = st.text_input("Enter the first user's name:", value="Alice")
      name2 = st.text_input("Enter the second user's name:", value="Bob")
-     persona_style = st.text_area("Enter the persona style characteristics:",
+     persona_style = st.text_area("Enter the persona style characteristics:",
                                   value="friendly, curious, and a bit sarcastic")

      if st.button("Start Conversation Simulation"):
-         st.write("**Initializing Vertex AI endpoint...**")
-         st.spinner("Starting simulation...")
-
-         # Create your custom LLM that calls Vertex AI
-         llm = VertexAICustomModel(
-             project_id=project_id,
-             location=location,
-             endpoint_id=endpoint_id,
-             temperature=0.7,
-             max_new_tokens=512
-         )
-
-         st.write("**Vertex AI endpoint loaded successfully!**")
-
-         # Build the conversation chain
-         conversation_prompt = create_conversation_prompt(name1, name2, persona_style)
-         conversation_chain = LLMChain(llm=llm, prompt=conversation_prompt)
-
-         st.write("**Generating the full 15-message conversation...**")
-
-         try:
-             # Generate all 15 messages in one go
-             conversation = conversation_chain.run(chat_history="", input="").strip()
-
-             st.subheader("Final Conversation:")
-             st.text(conversation)
-
-             # Summarize the conversation
-             summary_prompt = create_summary_prompt(name1, name2, conversation)
-             summary_chain = LLMChain(llm=llm, prompt=summary_prompt)
-
-             st.subheader("Summary and Title:")
-             st.write("**Summarizing the conversation...**")
-
-             summary = summary_chain.run(chat_history="", input="")
-             st.write(summary)
-
-         except Exception as e:
-             st.error(f"Error generating conversation: {e}")
-             print(f"Error generating conversation: {e}")
+         st.write("**Loading model...**")
+         print("Loading model...")
+
+         with st.spinner("Starting simulation..."):
+             endpoint_url = f"https://api-inference.huggingface.co/models/{selected_model}"
+
+             try:
+                 llm = HuggingFaceEndpoint(
+                     endpoint_url=endpoint_url,
+                     huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
+                     task="text-generation",
+                     temperature=0.7,
+                     max_new_tokens=512
+                 )
+                 st.write("**Model loaded successfully!**")
+                 print("Model loaded successfully!")
+             except Exception as e:
+                 st.error(f"Error initializing HuggingFaceEndpoint: {e}")
+                 print(f"Error initializing HuggingFaceEndpoint: {e}")
+                 return
+
+             conversation_prompt = create_conversation_prompt(name1, name2, persona_style)
+             conversation_chain = LLMChain(llm=llm, prompt=conversation_prompt)
+
+             st.write("**Generating the full 15-message conversation...**")
+             print("Generating the full 15-message conversation...")
+
+             try:
+                 # Generate all 15 messages in one go
+                 conversation = conversation_chain.run(chat_history="", input="").strip()
+
+                 st.subheader("Final Conversation:")
+                 st.text(conversation)
+                 print("Conversation Generation Complete.\n")
+                 print("Full Conversation:\n", conversation)
+
+                 # Summarize the conversation
+                 summary_prompt = create_summary_prompt(name1, name2, conversation)
+                 summary_chain = LLMChain(llm=llm, prompt=summary_prompt)
+
+                 st.subheader("Summary and Title:")
+                 st.write("**Summarizing the conversation...**")
+                 print("Summarizing the conversation...")
+
+                 summary = summary_chain.run(chat_history="", input="")
+                 st.write(summary)
+                 print("Summary:\n", summary)
+
+             except Exception as e:
+                 st.error(f"Error generating conversation: {e}")
+                 print(f"Error generating conversation: {e}")

  if __name__ == "__main__":
      main()
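
Note for reviewers: the deleted VertexAICustomModel is an instance of LangChain's custom-LLM pattern, i.e. subclass LLM and implement _llm_type and _call. Below is a minimal, self-contained sketch of that pattern, not the author's code: EchoLLM and its echo backend are hypothetical stand-ins for the project-specific endpoint.predict(...) call, and only the stop-sequence handling mirrors the removed _call.

# Minimal sketch of the custom-LLM pattern the commit removes. EchoLLM is a
# hypothetical stand-in; the real class forwarded the prompt to a Vertex AI
# endpoint instead of echoing it back.
from typing import Any, List, Optional

from langchain.llms.base import LLM  # same import path the removed code used


class EchoLLM(LLM):
    @property
    def _llm_type(self) -> str:
        return "echo"

    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
        text = f"echo: {prompt}"
        # Truncate at the first stop sequence, as the removed _call did.
        if stop:
            for s in stop:
                if s in text:
                    text = text.split(s)[0]
        return text


print(EchoLLM().invoke("hello"))  # -> "echo: hello"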
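
A second note: the restored code still uses LLMChain and Chain.run, which newer LangChain releases deprecate in favor of runnable composition with the pipe operator. A minimal sketch of the same chain construction in that style, assuming langchain_huggingface is installed as this commit requires; the model id is one of the choices from the diff and the template text here is illustrative only:

# LCEL-style equivalent of LLMChain(llm=llm, prompt=prompt) (a sketch, not
# part of the commit).
import os
from langchain.prompts import ChatPromptTemplate
from langchain_huggingface import HuggingFaceEndpoint

llm = HuggingFaceEndpoint(
    endpoint_url="https://api-inference.huggingface.co/models/Qwen/Qwen2.5-72B-Instruct",
    huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
    task="text-generation",
    temperature=0.7,
    max_new_tokens=512,
)

# The app's templates are fully interpolated f-strings, so they declare no
# input variables and an empty dict is all invoke() needs.
prompt = ChatPromptTemplate.from_template("Write a two-line friendly greeting.")
chain = prompt | llm  # replaces LLMChain(llm=llm, prompt=prompt)
print(chain.invoke({}).strip())

The summary chain composes the same way, so both .run(chat_history="", input="") calls could become .invoke({}) without other changes.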