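"""Streamlit app that simulates a short two-person conversation between two
named personas using a Hugging Face Inference API model through LangChain,
then asks the same model for a title and summary of the conversation."""
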
import os
import streamlit as st
import torch
from langchain.chains import LLMChain
from langchain.prompts import ChatPromptTemplate
from langchain_huggingface import HuggingFaceEndpoint
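
# NOTE: this file uses the classic LangChain chain API (LLMChain / chain.run).
# Newer LangChain releases deprecate LLMChain and Chain.run in favor of the
# Runnable interface (prompt | llm).
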
def create_prompt(name1: str, name2: str, persona_style: str):
    """Build the chat prompt template that drives both simulated speakers."""
    prompt_template_str = f"""
You are simulating a conversation between two people: {name1} and {name2}. Both are human individuals. The conversation should embody the style and characteristics defined as {persona_style}. They are talking to each other naturally, taking turns speaking.

Characteristics and style:
- Both {name1} and {name2} communicate in a friendly, human-like manner.
- They can be curious, ask questions, and share opinions.
- Their responses should be brief and natural, like texting a friend.
- Use everyday language, and show feelings and opinions.
- Keep each response to about 1-2 short sentences.
- Use emojis sparingly and naturally if they fit the persona_style.
- Avoid overusing emojis (1-2 max per message).

The conversation should last exactly 15 messages total, starting with {name1} and then alternating. After the 15th message, stop.

Current partial conversation (if any):
{{chat_history}}

Next message:
"""
    return ChatPromptTemplate.from_template(prompt_template_str)

def simulate_conversation(chain: LLMChain, name1: str, name2: str, total_messages: int = 15):
    """Alternate between the two speakers, collecting one line per turn."""
    conversation_lines = []
    st.write("**Starting conversation simulation...**")
    print("Starting conversation simulation...")

    try:
        for i in range(total_messages):
            truncated_history = "\n".join(conversation_lines)
            current_speaker = name1 if i % 2 == 0 else name2
            st.write(f"**[Message {i+1}/{total_messages}] {current_speaker} is speaking...**")
            print(f"[Message {i+1}/{total_messages}] {current_speaker} is speaking...")

            # Only chat_history is consumed by the prompt template.
            response = chain.run(chat_history=truncated_history, input="Continue the conversation.")
            response = response.strip()

            # Extract the line for the current speaker
            lines = response.split("\n")
            chosen_line = None
            for line in lines:
                line = line.strip()
                if line.startswith(f"{current_speaker}:"):
                    chosen_line = line
                    break
            if not chosen_line:
                chosen_line = lines[0] if lines else f"{current_speaker}: (No response)"

            st.write(chosen_line)
            print(chosen_line)
            conversation_lines.append(chosen_line)

        final_conversation = "\n".join(conversation_lines)
        return final_conversation
    except Exception as e:
        st.error(f"Error during conversation simulation: {e}")
        print(f"Error during conversation simulation: {e}")
        return None

def summarize_conversation(chain: LLMChain, conversation: str, name1: str, name2: str):
    """Ask the model for a title and a short summary of the finished conversation."""
    st.write("**Summarizing the conversation...**")
    print("Summarizing the conversation...")

    summary_prompt = f"""
The following is a conversation between {name1} and {name2}:

{conversation}

Provide a short descriptive title for their conversation and then summarize it in a few short sentences highlighting the main points, tone, and conclusion.
Format your answer as:
Title: <your conversation title>
Summary: <your summary here>
"""
    try:
        # The shared chain's template only consumes {chat_history}, so the
        # summary request is passed through that slot.
        response = chain.run(chat_history=summary_prompt)
        return response.strip()
    except Exception as e:
        st.error(f"Error summarizing conversation: {e}")
        print(f"Error summarizing conversation: {e}")
        return "No summary available due to error."

def main():
    st.title("LLM Conversation Simulation")

    model_names = [
        "meta-llama/Llama-3.3-70B-Instruct",
        "meta-llama/Llama-3.1-405B-Instruct",
        "lmsys/vicuna-13b-v1.5"
    ]
    selected_model = st.selectbox("Select a model:", model_names)

    # Two user names
    name1 = st.text_input("Enter the first user's name:", value="Alice")
    name2 = st.text_input("Enter the second user's name:", value="Bob")
    persona_style = st.text_area("Enter the persona style characteristics:",
                                 value="friendly, curious, and a bit sarcastic")

    if st.button("Start Conversation Simulation"):
        st.write("**Loading model...**")
        print("Loading model...")

        with st.spinner("Starting simulation..."):
            endpoint_url = f"https://api-inference.huggingface.co/models/{selected_model}"
            try:
                llm = HuggingFaceEndpoint(
                    endpoint_url=endpoint_url,
                    huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
                    task="text-generation",
                    temperature=0.7,
                    max_new_tokens=256  # Reduced for speed
                )
                st.write("**Model loaded successfully!**")
                print("Model loaded successfully!")
            except Exception as e:
                st.error(f"Error initializing HuggingFaceEndpoint: {e}")
                print(f"Error initializing HuggingFaceEndpoint: {e}")
                return

            prompt = create_prompt(name1, name2, persona_style)
            chain = LLMChain(llm=llm, prompt=prompt)

            st.write("**Simulating the conversation...**")
            print("Simulating the conversation...")

            conversation = simulate_conversation(chain, name1, name2, total_messages=15)
            if conversation:
                st.subheader("Final Conversation:")
                st.text(conversation)
                print("Conversation Simulation Complete.\n")
                print("Full Conversation:\n", conversation)

                # Summarize conversation
                st.subheader("Summary and Title:")
                summary = summarize_conversation(chain, conversation, name1, name2)
                st.write(summary)
                print("Summary:\n", summary)
if __name__ == "__main__":
    main()