import streamlit as st
import torch
from langchain.chains import LLMChain
from langchain.prompts import ChatPromptTemplate
from langchain_community.llms import HuggingFacePipeline # Updated import
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
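
# NOTE: This script assumes a LangChain release in which LLMChain and the
# langchain_community HuggingFacePipeline wrapper are still available; both
# are deprecated in newer releases in favor of LCEL-style chains and the
# langchain_huggingface package.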
# ---------------------------------------
# Utility functions
# ---------------------------------------
def load_model_pipeline(model_name: str):
    """Load a Hugging Face text-generation pipeline for a given model name.

    For large models, ensure you have the model downloaded and configured.
    Adjust device_map, load_in_8bit, etc., as needed for your hardware.
    """
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            device_map="auto",  # Adjust as needed (e.g. "cpu", "cuda:0")
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
            trust_remote_code=True,
        )
        # Wrap the model and tokenizer in a text-generation pipeline
        pipe = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            max_length=512,
            temperature=0.7,
            top_p=0.9,
            repetition_penalty=1.1,
            pad_token_id=tokenizer.eos_token_id,
        )
        return pipe
    except Exception as e:
        st.error(f"Error loading model {model_name}: {e}")
        return None
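
# A minimal sketch of the 8-bit loading variant mentioned in the docstring
# above. It assumes the optional `bitsandbytes` package is installed and is
# untested here, so treat it as a starting point rather than a drop-in:
#
#   from transformers import BitsAndBytesConfig
#   model = AutoModelForCausalLM.from_pretrained(
#       model_name,
#       device_map="auto",
#       quantization_config=BitsAndBytesConfig(load_in_8bit=True),
#       trust_remote_code=True,
#   )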


def create_prompt(name: str, persona_style: str):
    """Create the chat prompt template for the persona conversation."""
    # Combine both snippet versions from the original prompt into a single
    # template: persona setup, style guidelines, then the conversation scaffold.
    prompt_template_str = f"""
In this interaction, your role is to embody the personality of a human named {name}, with characteristics defined as {persona_style}. Engage in a natural, human-like conversation, responding to each query or comment as if you were {name} with the specific traits and style of {persona_style}. Remember, your responses should reflect a human perspective, complete with emotions, opinions, and personal insights that align with {persona_style}.

You are {name}, communicating with a personality that reflects {persona_style}. Keep these guidelines in mind:
- Be brief and natural, like texting a friend
- Share genuine thoughts and feelings ({persona_style} style)
- Use everyday language
- Keep responses to 1-2 short sentences
- Show interest in others through questions
- React naturally to what others say
- Use emojis sparingly and naturally when they fit your {persona_style}
- Don't overuse emojis (1-2 max per message)

Current conversation:
{{chat_history}}
Human: {{input}}
AI:
"""
    return ChatPromptTemplate.from_template(prompt_template_str)
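
# Usage sketch (hypothetical values) showing how the template renders before
# it reaches the model:
#
#   prompt = create_prompt("Alex", "friendly and curious")
#   text = prompt.format(chat_history="", input="Hey, what's up?")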


def simulate_conversation(chain: LLMChain, turns: int = 15):
    """Simulate a conversation for a given number of turns.

    The "Human" side cycles through a fixed set of generic messages, and we
    collect the AI's reply to each one while accumulating the chat history.
    After `turns` AI responses, the full conversation is returned.
    """
    chat_history = ""
    # Rotating set of simple human messages to drive the conversation
    human_messages = [
        "Hey, what's up?",
        "That's interesting, tell me more!",
        "Really? How does that make you feel?",
        "What do you think about that?",
        "Haha, that’s funny. Why do you say that?",
        "Hmm, I see. Can you elaborate?",
        "What would you do in that situation?",
        "Any personal experience with that?",
        "Oh, I didn’t know that. Explain more.",
        "Do you have any other thoughts?",
        "That's a unique perspective. Why?",
        "How would you handle it differently?",
        "Can you share an example?",
        "That sounds complicated. Are you sure?",
        "So what’s your conclusion?",
    ]
    try:
        for i in range(turns):
            human_input = human_messages[i % len(human_messages)]
            # Generate the AI's response for this turn
            response = chain.run(chat_history=chat_history, input=human_input)
            # Append the exchange to the running history
            chat_history += f"Human: {human_input}\nAI: {response}\n"
        return chat_history
    except Exception as e:
        st.error(f"Error during conversation simulation: {e}")
        return None
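
# Hypothetical helper (not wired in above) for keeping the accumulated
# history within the pipeline's max_length budget; it assumes each Human/AI
# exchange occupies exactly two lines, which holds only for single-line
# responses:
#
#   def trim_history(chat_history: str, max_exchanges: int = 6) -> str:
#       lines = chat_history.strip().splitlines()
#       return "\n".join(lines[-2 * max_exchanges:]) + "\n" if lines else ""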


def summarize_conversation(chain: LLMChain, conversation: str):
    """Use the LLM to summarize the completed conversation."""
    summary_prompt = (
        "Summarize the following conversation in a few short sentences, "
        "highlighting the main points, tone, and conclusion:\n\n"
        f"{conversation}\nSummary:"
    )
    try:
        response = chain.run(chat_history="", input=summary_prompt)
        return response.strip()
    except Exception as e:
        st.error(f"Error summarizing conversation: {e}")
        return "No summary available due to error."


# ---------------------------------------
# Streamlit App
# ---------------------------------------
def main():
    st.title("LLM Conversation Simulation")

    # Model selection
    model_names = [
        "meta-llama/Llama-3.3-70B-Instruct",
        "meta-llama/Llama-3.1-405B-Instruct",
        "lmsys/vicuna-13b-v1.5",
    ]
    selected_model = st.selectbox("Select a model:", model_names)

    # Persona inputs
    name = st.text_input("Enter the persona's name:", value="Alex")
    persona_style = st.text_area(
        "Enter the persona style characteristics:",
        value="friendly, curious, and a bit sarcastic",
    )

    # Button to start the simulation
    if st.button("Start Conversation Simulation"):
        with st.spinner("Loading model and starting simulation..."):
            pipe = load_model_pipeline(selected_model)
            if pipe is not None:
                # Wrap the pipeline as a LangChain LLM
                llm = HuggingFacePipeline(pipeline=pipe)
                # Build the persona prompt and chain
                prompt = create_prompt(name, persona_style)
                chain = LLMChain(llm=llm, prompt=prompt)
                # Run the simulated conversation
                conversation = simulate_conversation(chain, turns=15)
                if conversation:
                    st.subheader("Conversation:")
                    st.text(conversation)
                    # Summarize the conversation
                    st.subheader("Summary:")
                    summary = summarize_conversation(chain, conversation)
                    st.write(summary)


if __name__ == "__main__":
    main()