RakeshUtekar committed on
Commit 4ebf50c · verified · 1 parent: d083506

Update app.py

Files changed (1): app.py (+7, -32)
app.py CHANGED
@@ -6,30 +6,19 @@ from langchain.prompts import ChatPromptTemplate
from langchain_huggingface import HuggingFaceEndpoint

def create_prompt(name1: str, name2: str, persona_style: str):
-    """Create the chat prompt template for a two-person conversation."""
-    # We'll define that name1 (e.g., Alice) starts the conversation.
-    # The conversation is recorded as:
-    # Alice: {input or response}
-    # Bob: {response}
-    # and so on...
-
    prompt_template_str = f"""
-    You are simulating a conversation between two people: {name1} and {name2}. Both are human individuals. The conversation should embody the style and characteristics defined as {persona_style}. They are talking to each other naturally. There are no 'Human' or 'AI' roles here, just {name1} and {name2} speaking alternately. {name1} starts the conversation. Each message should be in the format:
-    {name1}: <message>
-    {name2}: <message>
+    You are simulating a conversation between two people: {name1} and {name2}. Both are human individuals. The conversation should embody the style and characteristics defined as {persona_style}. They are talking to each other naturally, taking turns speaking.

    Characteristics and style:
    - Both {name1} and {name2} communicate in a friendly, human-like manner.
    - They can be curious, ask questions, share opinions.
    - Their responses should be brief and natural, like texting a friend.
-    - They can use everyday language, show feelings and opinions.
+    - Use everyday language, show feelings and opinions.
    - Keep each response to about 1-2 short sentences.
    - Use emojis sparingly and naturally if it fits the persona_style.
    - Avoid overusing emojis (1-2 max per message).

-    Make sure that each turn is clearly designated as {name1} or {name2}. The conversation should continue for a total of 15 messages. Start with {name1} speaking first. Alternate between {name1} and {name2}.
-
-    Once the 15th message is given (by {name1}, since the conversation starts with {name1}), the conversation ends. After that, produce a summary and a title of the conversation separately.
+    The conversation should last exactly 15 messages total, starting with {name1} and then alternating. After the 15th message, stop.

    Current partial conversation (if any):
    {{chat_history}}
@@ -38,33 +27,21 @@ def create_prompt(name1: str, name2: str, persona_style: str):
    return ChatPromptTemplate.from_template(prompt_template_str)

def simulate_conversation(chain: LLMChain, name1: str, name2: str, total_messages: int = 15):
-    """
-    Simulate a conversation of exactly total_messages turns.
-    name1 starts the conversation (message 1), then name2 (message 2), etc., alternating.
-    """
    conversation_lines = []
    st.write("**Starting conversation simulation...**")
    print("Starting conversation simulation...")

    try:
        for i in range(total_messages):
-            # Build truncated conversation (if needed, though we may not need truncation with only 15 messages)
            truncated_history = "\n".join(conversation_lines)
-
-            # Determine whose turn it is:
-            # i=0 (first message), i even => name1 speaks, i odd => name2 speaks
            current_speaker = name1 if i % 2 == 0 else name2
            st.write(f"**[Message {i+1}/{total_messages}] {current_speaker} is speaking...**")
            print(f"[Message {i+1}/{total_messages}] {current_speaker} is speaking...")

-            # We ask the model for the next line in the conversation
-            # The model should produce something like: "Alice: ...message..."
            response = chain.run(chat_history=truncated_history, input="Continue the conversation.")
            response = response.strip()

-            # We only keep the line that pertains to the current message
-            # If the model generates both speakers, we may need to parse carefully.
-            # Ideally, the model will produce only one line. If multiple lines appear, we'll take the first line that starts with current_speaker.
+            # Extract the line for the current speaker
            lines = response.split("\n")
            chosen_line = None
            for line in lines:
@@ -72,9 +49,7 @@ def simulate_conversation(chain: LLMChain, name1: str, name2: str, total_message
                if line.startswith(f"{current_speaker}:"):
                    chosen_line = line
                    break
-
            if not chosen_line:
-                # Fallback: If not found, just use the first line
                chosen_line = lines[0] if lines else f"{current_speaker}: (No response)"

            st.write(chosen_line)
@@ -90,7 +65,6 @@ def simulate_conversation(chain: LLMChain, name1: str, name2: str, total_message
        return None

def summarize_conversation(chain: LLMChain, conversation: str, name1: str, name2: str):
-    """Use the LLM to summarize the completed conversation and provide a title."""
    st.write("**Summarizing the conversation...**")
    print("Summarizing the conversation...")

@@ -99,10 +73,12 @@ def summarize_conversation(chain: LLMChain, conversation: str, name1: str, name2
    {conversation}

    Provide a short descriptive title for their conversation and then summarize it in a few short sentences highlighting the main points, tone, and conclusion.
+
    Format your answer as:
    Title: <your conversation title>
    Summary: <your summary here>
    """
+
    try:
        response = chain.run(chat_history="", input=summary_prompt)
        return response.strip()
@@ -140,7 +116,7 @@ def main():
        huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
        task="text-generation",
        temperature=0.7,
-        max_new_tokens=512
+        max_new_tokens=256  # Reduced for speed
    )
    st.write("**Model loaded successfully!**")
    print("Model loaded successfully!")
@@ -155,7 +131,6 @@ def main():
    st.write("**Simulating the conversation...**")
    print("Simulating the conversation...")

-    # Total messages = 15
    conversation = simulate_conversation(chain, name1, name2, total_messages=15)
    if conversation:
        st.subheader("Final Conversation:")