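"""Streamlit app that simulates a two-person conversation with an LLM.

Two named personas (e.g., Alice and Bob) alternate for 15 messages using a
Hugging Face Inference API model driven through LangChain; the model is then
asked for a title and a short summary of the finished conversation.
"""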
import os

import streamlit as st
from langchain.chains import LLMChain
from langchain.prompts import ChatPromptTemplate
from langchain_huggingface import HuggingFaceEndpoint

def create_prompt(name1: str, name2: str, persona_style: str):
    """Create the chat prompt template for a two-person conversation."""
    # name1 (e.g., Alice) starts the conversation. The conversation is
    # recorded as:
    #   Alice: <message>
    #   Bob: <message>
    # and so on...
    prompt_template_str = f"""
You are simulating a conversation between two people: {name1} and {name2}. Both are human individuals. The conversation should embody the style and characteristics defined as {persona_style}. They are talking to each other naturally. There are no 'Human' or 'AI' roles here, just {name1} and {name2} speaking alternately. {name1} starts the conversation. Each message should be in the format:
{name1}: <message>
{name2}: <message>

Characteristics and style:
- Both {name1} and {name2} communicate in a friendly, human-like manner.
- They can be curious, ask questions, share opinions.
- Their responses should be brief and natural, like texting a friend.
- They can use everyday language, show feelings and opinions.
- Keep each response to about 1-2 short sentences.
- Use emojis sparingly and naturally if it fits the persona style.
- Avoid overusing emojis (1-2 max per message).

Make sure that each turn is clearly designated as {name1} or {name2}. The conversation should continue for a total of 15 messages. Start with {name1} speaking first. Alternate between {name1} and {name2}. Once the 15th message is given (by {name1}, since the conversation starts with {name1}), the conversation ends. After that, produce a summary and a title of the conversation separately.

Current partial conversation (if any):
{{chat_history}}

Next message:
"""
    return ChatPromptTemplate.from_template(prompt_template_str)

def simulate_conversation(chain: LLMChain, name1: str, name2: str, total_messages: int = 15):
    """
    Simulate a conversation of exactly total_messages turns.
    name1 starts the conversation (message 1), then name2 (message 2), etc., alternating.
    """
    conversation_lines = []
    st.write("**Starting conversation simulation...**")
    print("Starting conversation simulation...")
    try:
        for i in range(total_messages):
            # Build the conversation history so far (15 short messages fit
            # comfortably in the context window, so no truncation is needed).
            history = "\n".join(conversation_lines)
            # Determine whose turn it is:
            # i=0 (first message), i even => name1 speaks, i odd => name2 speaks
            current_speaker = name1 if i % 2 == 0 else name2
            st.write(f"**[Message {i+1}/{total_messages}] {current_speaker} is speaking...**")
            print(f"[Message {i+1}/{total_messages}] {current_speaker} is speaking...")
            # Ask the model for the next line in the conversation; it should
            # produce something like "Alice: ...message...".
            response = chain.run(chat_history=history)
            response = response.strip()
            # Keep only the line for the current message. Ideally the model
            # produces a single line; if it generates both speakers, take the
            # first line that starts with the current speaker's name.
            lines = response.split("\n")
            chosen_line = None
            for line in lines:
                line = line.strip()
                if line.startswith(f"{current_speaker}:"):
                    chosen_line = line
                    break
            if not chosen_line:
                # Fallback: if no matching line is found, just use the first line
                chosen_line = lines[0] if lines else f"{current_speaker}: (No response)"
            st.write(chosen_line)
            print(chosen_line)
            conversation_lines.append(chosen_line)
        final_conversation = "\n".join(conversation_lines)
        return final_conversation
    except Exception as e:
        st.error(f"Error during conversation simulation: {e}")
        print(f"Error during conversation simulation: {e}")
        return None

def summarize_conversation(chain: LLMChain, conversation: str, name1: str, name2: str):
    """Use the LLM to summarize the completed conversation and provide a title."""
    st.write("**Summarizing the conversation...**")
    print("Summarizing the conversation...")
    summary_prompt = f"""
The following is a conversation between {name1} and {name2}:

{conversation}

Provide a short descriptive title for their conversation and then summarize it in a few short sentences highlighting the main points, tone, and conclusion.
Format your answer as:
Title: <your conversation title>
Summary: <your summary here>
"""
    try:
        # Call the underlying LLM directly: the conversation chain's prompt
        # only has a chat_history variable, so passing this text through
        # chain.run() would silently drop it and the model would never see
        # the summary request.
        response = chain.llm.invoke(summary_prompt)
        return response.strip()
    except Exception as e:
        st.error(f"Error summarizing conversation: {e}")
        print(f"Error summarizing conversation: {e}")
        return "No summary available due to error."

def main():
    st.title("LLM Conversation Simulation")

    model_names = [
        "meta-llama/Llama-3.3-70B-Instruct",
        "meta-llama/Llama-3.1-405B-Instruct",
        "lmsys/vicuna-13b-v1.5"
    ]
    selected_model = st.selectbox("Select a model:", model_names)

    # Names of the two simulated speakers
    name1 = st.text_input("Enter the first user's name:", value="Alice")
    name2 = st.text_input("Enter the second user's name:", value="Bob")
    persona_style = st.text_area("Enter the persona style characteristics:",
                                 value="friendly, curious, and a bit sarcastic")

    if st.button("Start Conversation Simulation"):
        # Fail fast with a clear message if the API token is missing.
        hf_token = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
        if not hf_token:
            st.error("HUGGINGFACEHUB_API_TOKEN environment variable is not set.")
            return

        st.write("**Loading model...**")
        print("Loading model...")
        with st.spinner("Starting simulation..."):
            endpoint_url = f"https://api-inference.huggingface.co/models/{selected_model}"
            try:
                llm = HuggingFaceEndpoint(
                    endpoint_url=endpoint_url,
                    huggingfacehub_api_token=hf_token,
                    task="text-generation",
                    temperature=0.7,
                    max_new_tokens=512
                )
                st.write("**Model loaded successfully!**")
                print("Model loaded successfully!")
            except Exception as e:
                st.error(f"Error initializing HuggingFaceEndpoint: {e}")
                print(f"Error initializing HuggingFaceEndpoint: {e}")
                return

            prompt = create_prompt(name1, name2, persona_style)
            chain = LLMChain(llm=llm, prompt=prompt)

            st.write("**Simulating the conversation...**")
            print("Simulating the conversation...")
            conversation = simulate_conversation(chain, name1, name2, total_messages=15)
            if conversation:
                st.subheader("Final Conversation:")
                st.text(conversation)
                print("Conversation Simulation Complete.\n")
                print("Full Conversation:\n", conversation)

                # Summarize conversation
                st.subheader("Summary and Title:")
                summary = summarize_conversation(chain, conversation, name1, name2)
                st.write(summary)
                print("Summary:\n", summary)


if __name__ == "__main__":
    main()
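# A minimal sketch of how to run this app locally, assuming the file is
# saved as app.py (the usual entry point for a Streamlit Space) and that a
# valid Hugging Face API token is available:
#
#   pip install streamlit langchain langchain-huggingface
#   export HUGGINGFACEHUB_API_TOKEN=<your token>
#   streamlit run app.py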