RakeshUtekar committed on
Commit
15844c4
·
verified ·
1 Parent(s): 81bdb0c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -16
app.py CHANGED
@@ -6,27 +6,36 @@ from langchain.prompts import ChatPromptTemplate
6
  from langchain_huggingface import HuggingFaceEndpoint
7
 
8
  def create_prompt(name1: str, name2: str, persona_style: str):
 
9
  prompt_template_str = f"""
10
- You are simulating a conversation between two people: {name1} and {name2}. Both are human individuals. The conversation should embody the style and characteristics defined as {persona_style}. They are talking to each other naturally, taking turns speaking.
 
 
 
 
 
11
 
12
  Characteristics and style:
13
  - Both {name1} and {name2} communicate in a friendly, human-like manner.
14
  - They can be curious, ask questions, share opinions.
15
- - Their responses should be brief and natural, like texting a friend.
16
  - Use everyday language, show feelings and opinions.
17
  - Keep each response to about 1-2 short sentences.
18
- - Use emojis sparingly and naturally if it fits the persona_style.
19
  - Avoid overusing emojis (1-2 max per message).
20
 
21
- The conversation should last exactly 15 messages total, starting with {name1} and then alternating. After the 15th message, stop.
22
-
23
- Current partial conversation (if any):
24
- {{chat_history}}
25
- Next message:
26
  """
27
  return ChatPromptTemplate.from_template(prompt_template_str)
28
 
29
  def simulate_conversation(chain: LLMChain, name1: str, name2: str, total_messages: int = 15):
 
 
 
 
30
  conversation_lines = []
31
  st.write("**Starting conversation simulation...**")
32
  print("Starting conversation simulation...")
@@ -34,10 +43,12 @@ def simulate_conversation(chain: LLMChain, name1: str, name2: str, total_message
34
  try:
35
  for i in range(total_messages):
36
  truncated_history = "\n".join(conversation_lines)
 
37
  current_speaker = name1 if i % 2 == 0 else name2
38
  st.write(f"**[Message {i+1}/{total_messages}] {current_speaker} is speaking...**")
39
  print(f"[Message {i+1}/{total_messages}] {current_speaker} is speaking...")
40
 
 
41
  response = chain.run(chat_history=truncated_history, input="Continue the conversation.")
42
  response = response.strip()
43
 
@@ -49,8 +60,10 @@ def simulate_conversation(chain: LLMChain, name1: str, name2: str, total_message
49
  if line.startswith(f"{current_speaker}:"):
50
  chosen_line = line
51
  break
 
52
  if not chosen_line:
53
- chosen_line = lines[0] if lines else f"{current_speaker}: (No response)"
 
54
 
55
  st.write(chosen_line)
56
  print(chosen_line)
@@ -69,20 +82,20 @@ def summarize_conversation(chain: LLMChain, conversation: str, name1: str, name2
69
  st.write("**Summarizing the conversation...**")
70
  print("Summarizing the conversation...")
71
 
72
- # Updated prompt to clearly request a title and summary in the desired format
73
  summary_prompt = f"""
74
- The following is a conversation between {name1} and {name2}:
75
  {conversation}
76
 
77
- Please provide:
78
- - A short and descriptive title of the conversation on one line, starting with "Title:".
79
- - A summary of the conversation on the next line, starting with "Summary:".
80
  The summary should be a few short sentences highlighting the main points, tone, and conclusion.
 
81
  """
82
 
83
  try:
84
  response = chain.run(chat_history="", input=summary_prompt)
85
- # Just return the response as-is. The model should follow the instructions given.
86
  return response.strip()
87
  except Exception as e:
88
  st.error(f"Error summarizing conversation: {e}")
@@ -118,7 +131,7 @@ def main():
118
  huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
119
  task="text-generation",
120
  temperature=0.7,
121
- max_new_tokens=256 # Reduced for speed
122
  )
123
  st.write("**Model loaded successfully!**")
124
  print("Model loaded successfully!")
 
6
  from langchain_huggingface import HuggingFaceEndpoint
7
 
8
  def create_prompt(name1: str, name2: str, persona_style: str):
9
+ """Create the chat prompt template for a two-person conversation."""
10
  prompt_template_str = f"""
11
+ You are simulating a conversation between two people: {name1} and {name2}.
12
+ Both are human individuals. The conversation should embody the style and characteristics defined as {persona_style}.
13
+ They are talking to each other naturally. There are no 'Human' or 'AI' roles here, just {name1} and {name2} speaking alternately.
14
+ {name1} starts the conversation. Each message should be in the format:
15
+ {name1}: <message>
16
+ {name2}: <message>
17
 
18
  Characteristics and style:
19
  - Both {name1} and {name2} communicate in a friendly, human-like manner.
20
  - They can be curious, ask questions, share opinions.
21
+ - Responses should be brief and natural, like texting a friend.
22
  - Use everyday language, show feelings and opinions.
23
  - Keep each response to about 1-2 short sentences.
24
+ - Use emojis sparingly and only if it fits the persona_style.
25
  - Avoid overusing emojis (1-2 max per message).
26
 
27
+ Make sure that each turn is clearly designated as {name1} or {name2}.
28
+ The conversation should continue for a total of 15 messages. Start with {name1} speaking first. Alternate between {name1} and {name2}.
29
+ Once the 15th message (by {name1}) is given, the conversation ends.
30
+ Do not continue the conversation after 15 messages.
 
31
  """
32
  return ChatPromptTemplate.from_template(prompt_template_str)
33
 
34
  def simulate_conversation(chain: LLMChain, name1: str, name2: str, total_messages: int = 15):
35
+ """
36
+ Simulate a conversation of exactly total_messages turns.
37
+ name1 starts the conversation (message 1), then name2 (message 2), etc., alternating.
38
+ """
39
  conversation_lines = []
40
  st.write("**Starting conversation simulation...**")
41
  print("Starting conversation simulation...")
 
43
  try:
44
  for i in range(total_messages):
45
  truncated_history = "\n".join(conversation_lines)
46
+ # Determine whose turn it is:
47
  current_speaker = name1 if i % 2 == 0 else name2
48
  st.write(f"**[Message {i+1}/{total_messages}] {current_speaker} is speaking...**")
49
  print(f"[Message {i+1}/{total_messages}] {current_speaker} is speaking...")
50
 
51
+ # Prompt the chain with a generic "continue" instruction, the chain uses the template + history to produce the next message
52
  response = chain.run(chat_history=truncated_history, input="Continue the conversation.")
53
  response = response.strip()
54
 
 
60
  if line.startswith(f"{current_speaker}:"):
61
  chosen_line = line
62
  break
63
+
64
  if not chosen_line:
65
+ # Fallback: If the model didn't format properly, try first non-empty line
66
+ chosen_line = next((l for l in lines if l.strip()), f"{current_speaker}: (No response)")
67
 
68
  st.write(chosen_line)
69
  print(chosen_line)
 
82
  st.write("**Summarizing the conversation...**")
83
  print("Summarizing the conversation...")
84
 
85
+ # Updated prompt: no continuation instructions, just a direct request.
86
  summary_prompt = f"""
87
+ Below is a completed conversation between {name1} and {name2}:
88
  {conversation}
89
 
90
+ Now that the conversation has ended, please provide:
91
+ 1. A short and descriptive title of the conversation on one line, starting with "Title:".
92
+ 2. A summary of the conversation on the next line, starting with "Summary:".
93
  The summary should be a few short sentences highlighting the main points, tone, and conclusion.
94
+ Do not continue the conversation. Do not add extra lines beyond the title and summary.
95
  """
96
 
97
  try:
98
  response = chain.run(chat_history="", input=summary_prompt)
 
99
  return response.strip()
100
  except Exception as e:
101
  st.error(f"Error summarizing conversation: {e}")
 
131
  huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
132
  task="text-generation",
133
  temperature=0.7,
134
+ max_new_tokens=256 # Reduced tokens for faster response
135
  )
136
  st.write("**Model loaded successfully!**")
137
  print("Model loaded successfully!")