Update app.py
app.py CHANGED
```diff
@@ -3,8 +3,6 @@ import streamlit as st
 import torch
 from langchain.chains import LLMChain
 from langchain.prompts import ChatPromptTemplate
-
-# Use the new package for HuggingFaceEndpoint
 from langchain_huggingface import HuggingFaceEndpoint
 
 def create_prompt(name: str, persona_style: str):
@@ -55,6 +53,9 @@ def simulate_conversation(chain: LLMChain, turns: int = 15, max_history_rounds=3
         "So what’s your conclusion?"
     ]
 
+    st.write("**Starting conversation simulation...**")
+    print("Starting conversation simulation...")
+
     try:
         for i in range(turns):
             human_input = human_messages[i % len(human_messages)]
@@ -62,8 +63,15 @@ def simulate_conversation(chain: LLMChain, turns: int = 15, max_history_rounds=3
             # Build truncated chat_history for prompt
             truncated_history_lines = chat_history_list[-(max_history_rounds*2):]
             truncated_history = "\n".join(truncated_history_lines)
+
+            st.write(f"**[Turn {i+1}/{turns}] Human:** {human_input}")
+            print(f"[Turn {i+1}/{turns}] Human: {human_input}")
 
             response = chain.run(chat_history=truncated_history, input=human_input)
+
+            st.write(f"**AI:** {response}")
+            print(f"AI: {response}")
+
             chat_history_list.append(f"Human: {human_input}")
             chat_history_list.append(f"AI: {response}")
 
@@ -71,16 +79,21 @@ def simulate_conversation(chain: LLMChain, turns: int = 15, max_history_rounds=3
         return final_conversation
     except Exception as e:
         st.error(f"Error during conversation simulation: {e}")
+        print(f"Error during conversation simulation: {e}")
         return None
 
 def summarize_conversation(chain: LLMChain, conversation: str):
     """Use the LLM to summarize the completed conversation."""
     summary_prompt = f"Summarize the following conversation in a few short sentences highlighting the main points, tone, and conclusion:\n\n{conversation}\nSummary:"
+    st.write("**Summarizing the conversation...**")
+    print("Summarizing the conversation...")
+
     try:
         response = chain.run(chat_history="", input=summary_prompt)
         return response.strip()
     except Exception as e:
         st.error(f"Error summarizing conversation: {e}")
+        print(f"Error summarizing conversation: {e}")
         return "No summary available due to error."
 
 def main():
@@ -98,13 +111,14 @@ def main():
                                value="friendly, curious, and a bit sarcastic")
 
     if st.button("Start Conversation Simulation"):
+        st.write("**Loading model...**")
+        print("Loading model...")
+
         with st.spinner("Starting simulation..."):
             # Construct the endpoint URL for the selected model
             endpoint_url = f"https://api-inference.huggingface.co/models/{selected_model}"
 
             try:
-                # Use HuggingFaceEndpoint instead of HuggingFaceHub
-                # Specify temperature and max_new_tokens as top-level arguments
                 llm = HuggingFaceEndpoint(
                     endpoint_url=endpoint_url,
                     huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
@@ -112,21 +126,31 @@ def main():
                     temperature=0.7,
                     max_new_tokens=512
                 )
+                st.write("**Model loaded successfully!**")
+                print("Model loaded successfully!")
             except Exception as e:
                 st.error(f"Error initializing HuggingFaceEndpoint: {e}")
+                print(f"Error initializing HuggingFaceEndpoint: {e}")
                 return
 
             prompt = create_prompt(name, persona_style)
             chain = LLMChain(llm=llm, prompt=prompt)
 
+            st.write("**Simulating the conversation...**")
+            print("Simulating the conversation...")
+
             conversation = simulate_conversation(chain, turns=15, max_history_rounds=3)
             if conversation:
                 st.subheader("Conversation:")
                 st.text(conversation)
+                print("Conversation Simulation Complete.\n")
+                print("Full Conversation:\n", conversation)
 
+                # Summarize conversation
                 st.subheader("Summary:")
                 summary = summarize_conversation(chain, conversation)
                 st.write(summary)
+                print("Summary:\n", summary)
 
 if __name__ == "__main__":
     main()
```
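For context outside the diff: the pattern this app relies on (`HuggingFaceEndpoint` from the `langchain_huggingface` package, with `temperature` and `max_new_tokens` passed as top-level arguments, wrapped in an `LLMChain`) can be exercised standalone. The sketch below is illustrative, not the app itself: the model name and the prompt template are assumptions, since the app derives the endpoint URL from a user-selected model and builds its prompt in `create_prompt`, whose body this diff does not show.

```python
import os

from langchain.chains import LLMChain
from langchain.prompts import ChatPromptTemplate
from langchain_huggingface import HuggingFaceEndpoint

# Illustrative model choice; the app substitutes a user-selected model here.
endpoint_url = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"

llm = HuggingFaceEndpoint(
    endpoint_url=endpoint_url,
    huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
    temperature=0.7,      # top-level argument, not nested in model_kwargs
    max_new_tokens=512,
)

# Assumed prompt shape: the real template lives in create_prompt().
prompt = ChatPromptTemplate.from_template("{chat_history}\nHuman: {input}\nAI:")
chain = LLMChain(llm=llm, prompt=prompt)

print(chain.run(chat_history="", input="Hello there!"))
```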
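The truncation step visible in the simulation loop is what keeps prompts from growing without bound: each round appends two lines ("Human: ..." and "AI: ..."), so slicing off the last `max_history_rounds * 2` entries retains exactly the most recent rounds. A minimal sketch of that logic in isolation (the `truncate_history` helper name is hypothetical; the app does this inline):

```python
def truncate_history(chat_history_list: list[str], max_history_rounds: int = 3) -> str:
    # Each conversational round adds two entries, one "Human:" and one "AI:",
    # so the last max_history_rounds * 2 entries are the newest complete rounds.
    recent = chat_history_list[-(max_history_rounds * 2):]
    return "\n".join(recent)

history = [
    "Human: Hi there", "AI: Hello!",
    "Human: How's the weather?", "AI: Sunny, apparently.",
    "Human: Tell me a joke", "AI: Why did the chicken cross the road?",
    "Human: Why?", "AI: To get to the other side.",
]
print(truncate_history(history, max_history_rounds=2))
# Only the last two rounds (the joke and its punchline) survive the cut.
```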
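Most of the added lines follow a single pattern: every status message goes both to the Streamlit page via `st.write` and to the server console via `print`, so progress is visible in the UI and in the Space logs at the same time. If the duplication bothers you, the pair factors into a small helper; `log_status` below is a hypothetical refactor, not something this commit introduces.

```python
import streamlit as st

def log_status(message: str) -> None:
    # Bold status line in the Streamlit UI, plain echo in the server logs.
    st.write(f"**{message}**")
    print(message)

log_status("Loading model...")
log_status("Simulating the conversation...")
```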