RakeshUtekar committed on
Commit
38b6ee6
·
verified ·
1 Parent(s): 25007bd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -10
app.py CHANGED
@@ -1,11 +1,12 @@
 
1
  import streamlit as st
2
  import torch
3
  from langchain.chains import LLMChain
4
  from langchain.prompts import ChatPromptTemplate
5
- from langchain_community.llms import HuggingFaceHub # Using HuggingFaceHub now
6
 
7
  def create_prompt(name: str, persona_style: str):
8
- """Create the chat prompt template as described."""
9
  prompt_template_str = f"""
10
  In this interaction, your role is to embody the personality of a human named {name}, with characteristics defined as {persona_style}. Engage in a natural, human-like conversation, responding to each query or comment as if you were {name} with the specific traits and style of {persona_style}. Remember, your responses should reflect a human perspective, complete with emotions, opinions, and personal insights that align with {persona_style}. ////
11
  //////////
@@ -31,9 +32,9 @@ def create_prompt(name: str, persona_style: str):
31
  """
32
  return ChatPromptTemplate.from_template(prompt_template_str)
33
 
34
- def simulate_conversation(chain: LLMChain, turns: int = 15):
35
- """Simulate a conversation for a given number of turns."""
36
- chat_history = ""
37
  human_messages = [
38
  "Hey, what's up?",
39
  "That's interesting, tell me more!",
@@ -55,9 +56,19 @@ def simulate_conversation(chain: LLMChain, turns: int = 15):
55
  try:
56
  for i in range(turns):
57
  human_input = human_messages[i % len(human_messages)]
58
- response = chain.run(chat_history=chat_history, input=human_input)
59
- chat_history += f"Human: {human_input}\nAI: {response}\n"
60
- return chat_history
 
 
 
 
 
 
 
 
 
 
61
  except Exception as e:
62
  st.error(f"Error during conversation simulation: {e}")
63
  return None
@@ -75,6 +86,7 @@ def summarize_conversation(chain: LLMChain, conversation: str):
75
  def main():
76
  st.title("LLM Conversation Simulation")
77
 
 
78
  model_names = [
79
  "meta-llama/Llama-3.3-70B-Instruct",
80
  "meta-llama/Llama-3.1-405B-Instruct",
@@ -82,6 +94,7 @@ def main():
82
  ]
83
  selected_model = st.selectbox("Select a model:", model_names)
84
 
 
85
  name = st.text_input("Enter the persona's name:", value="Alex")
86
  persona_style = st.text_area("Enter the persona style characteristics:",
87
  value="friendly, curious, and a bit sarcastic")
@@ -89,7 +102,8 @@ def main():
89
  if st.button("Start Conversation Simulation"):
90
  with st.spinner("Starting simulation..."):
91
  try:
92
- # Using HuggingFaceHub for remote model inference
 
93
  llm = HuggingFaceHub(
94
  repo_id=selected_model,
95
  model_kwargs={
@@ -106,7 +120,7 @@ def main():
106
  chain = LLMChain(llm=llm, prompt=prompt)
107
 
108
  # Simulate conversation
109
- conversation = simulate_conversation(chain, turns=15)
110
  if conversation:
111
  st.subheader("Conversation:")
112
  st.text(conversation)
 
1
+ import os
2
  import streamlit as st
3
  import torch
4
  from langchain.chains import LLMChain
5
  from langchain.prompts import ChatPromptTemplate
6
+ from langchain_community.llms import HuggingFaceHub
7
 
8
  def create_prompt(name: str, persona_style: str):
9
+ """Create the chat prompt template."""
10
  prompt_template_str = f"""
11
  In this interaction, your role is to embody the personality of a human named {name}, with characteristics defined as {persona_style}. Engage in a natural, human-like conversation, responding to each query or comment as if you were {name} with the specific traits and style of {persona_style}. Remember, your responses should reflect a human perspective, complete with emotions, opinions, and personal insights that align with {persona_style}. ////
12
  //////////
 
32
  """
33
  return ChatPromptTemplate.from_template(prompt_template_str)
34
 
35
+ def simulate_conversation(chain: LLMChain, turns: int = 15, max_history_rounds=3):
36
+ """Simulate a conversation for a given number of turns, limiting chat history."""
37
+ chat_history_list = []
38
  human_messages = [
39
  "Hey, what's up?",
40
  "That's interesting, tell me more!",
 
56
  try:
57
  for i in range(turns):
58
  human_input = human_messages[i % len(human_messages)]
59
+
60
+ # Build truncated chat_history for prompt
61
+ # Keep only the last max_history_rounds * 2 lines (Human + AI pairs)
62
+ truncated_history_lines = chat_history_list[-(max_history_rounds*2):]
63
+ truncated_history = "\n".join(truncated_history_lines)
64
+
65
+ response = chain.run(chat_history=truncated_history, input=human_input)
66
+ # Update chat history
67
+ chat_history_list.append(f"Human: {human_input}")
68
+ chat_history_list.append(f"AI: {response}")
69
+
70
+ final_conversation = "\n".join(chat_history_list)
71
+ return final_conversation
72
  except Exception as e:
73
  st.error(f"Error during conversation simulation: {e}")
74
  return None
 
86
  def main():
87
  st.title("LLM Conversation Simulation")
88
 
89
+ # Model selection
90
  model_names = [
91
  "meta-llama/Llama-3.3-70B-Instruct",
92
  "meta-llama/Llama-3.1-405B-Instruct",
 
94
  ]
95
  selected_model = st.selectbox("Select a model:", model_names)
96
 
97
+ # Persona Inputs
98
  name = st.text_input("Enter the persona's name:", value="Alex")
99
  persona_style = st.text_area("Enter the persona style characteristics:",
100
  value="friendly, curious, and a bit sarcastic")
 
102
  if st.button("Start Conversation Simulation"):
103
  with st.spinner("Starting simulation..."):
104
  try:
105
+ # Use HuggingFaceHub as LLM
106
+ # Make sure you have a valid HUGGINGFACEHUB_API_TOKEN set
107
  llm = HuggingFaceHub(
108
  repo_id=selected_model,
109
  model_kwargs={
 
120
  chain = LLMChain(llm=llm, prompt=prompt)
121
 
122
  # Simulate conversation
123
+ conversation = simulate_conversation(chain, turns=15, max_history_rounds=3)
124
  if conversation:
125
  st.subheader("Conversation:")
126
  st.text(conversation)