File size: 6,124 Bytes
38b6ee6
15773f6
 
215e74e
15773f6
887b1f9
15773f6
d083506
15773f6
4ebf50c
d083506
 
 
 
 
4ebf50c
d083506
 
 
 
4ebf50c
d083506
 
15773f6
d083506
15773f6
 
 
d083506
 
61f4130
 
 
15773f6
d083506
 
 
 
 
 
 
 
 
4ebf50c
d083506
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
38b6ee6
15773f6
 
61f4130
15773f6
 
d083506
61f4130
 
 
d083506
 
0724d85
d083506
 
4ebf50c
d083506
 
 
 
4ebf50c
15773f6
215e74e
15773f6
 
 
61f4130
15773f6
 
 
 
 
 
 
 
 
 
 
 
d083506
 
 
15773f6
 
 
 
61f4130
 
 
25007bd
887b1f9
215e74e
25007bd
887b1f9
 
215e74e
887b1f9
215e74e
4ebf50c
25007bd
61f4130
 
25007bd
887b1f9
61f4130
25007bd
 
d083506
215e74e
25007bd
61f4130
 
 
d083506
25007bd
d083506
25007bd
61f4130
 
25007bd
61f4130
d083506
 
25007bd
61f4130
15773f6
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
import os
import streamlit as st
import torch
from langchain.chains import LLMChain
from langchain.prompts import ChatPromptTemplate
from langchain_huggingface import HuggingFaceEndpoint

def create_prompt(name1: str, name2: str, persona_style: str):
    """Build the ChatPromptTemplate driving a two-person simulated chat.

    The two persona names and the style description are baked into the
    template text via the f-string; the only runtime template variable is
    ``{chat_history}``, filled in on each chain invocation.

    Args:
        name1: Name of the first speaker (also opens the conversation).
        name2: Name of the second speaker.
        persona_style: Free-text description of the personas' tone/style.

    Returns:
        A ChatPromptTemplate expecting a ``chat_history`` input.
    """
    # NOTE: the template text below is sent verbatim to the model — do not
    # reword it casually.
    return ChatPromptTemplate.from_template(f"""
    You are simulating a conversation between two people: {name1} and {name2}. Both are human individuals. The conversation should embody the style and characteristics defined as {persona_style}. They are talking to each other naturally, taking turns speaking.

    Characteristics and style:
    - Both {name1} and {name2} communicate in a friendly, human-like manner.
    - They can be curious, ask questions, share opinions.
    - Their responses should be brief and natural, like texting a friend.
    - Use everyday language, show feelings and opinions.
    - Keep each response to about 1-2 short sentences.
    - Use emojis sparingly and naturally if it fits the persona_style.
    - Avoid overusing emojis (1-2 max per message).

    The conversation should last exactly 15 messages total, starting with {name1} and then alternating. After the 15th message, stop.

    Current partial conversation (if any):
    {{chat_history}}
    Next message:
    """)

def simulate_conversation(chain: LLMChain, name1: str, name2: str, total_messages: int = 15):
    """Drive the chain one turn at a time and return the full transcript.

    Speakers alternate starting with ``name1``. Each turn feeds the
    accumulated transcript back to the chain as ``chat_history`` and keeps
    the first response line attributed to the current speaker.

    Args:
        chain: LLMChain built from :func:`create_prompt`.
        name1: Speaker of the odd-numbered (1st, 3rd, ...) messages.
        name2: Speaker of the even-numbered messages.
        total_messages: Number of turns to generate.

    Returns:
        The transcript as a newline-joined string, or ``None`` if any
        turn raised an exception (the error is shown in the UI and logged).
    """
    transcript = []
    st.write("**Starting conversation simulation...**")
    print("Starting conversation simulation...")

    try:
        for turn in range(total_messages):
            speaker = name1 if turn % 2 == 0 else name2
            history_so_far = "\n".join(transcript)
            st.write(f"**[Message {turn+1}/{total_messages}] {speaker} is speaking...**")
            print(f"[Message {turn+1}/{total_messages}] {speaker} is speaking...")

            raw = chain.run(chat_history=history_so_far, input="Continue the conversation.").strip()

            # Prefer the first response line explicitly tagged with the
            # current speaker's name; otherwise fall back to the first raw line.
            raw_lines = raw.split("\n")
            message = next(
                (ln.strip() for ln in raw_lines if ln.strip().startswith(f"{speaker}:")),
                None,
            )
            if not message:
                message = raw_lines[0] if raw_lines else f"{speaker}: (No response)"

            st.write(message)
            print(message)
            transcript.append(message)

        return "\n".join(transcript)
    except Exception as e:
        st.error(f"Error during conversation simulation: {e}")
        print(f"Error during conversation simulation: {e}")
        return None

def summarize_conversation(chain: LLMChain, conversation: str, name1: str, name2: str):
    """Ask the chain for a title and short summary of a finished conversation.

    Args:
        chain: The same LLMChain used for the simulation; invoked with an
            empty ``chat_history`` and the summary request as ``input``.
        conversation: Full transcript produced by :func:`simulate_conversation`.
        name1: First speaker's name (used in the summary prompt).
        name2: Second speaker's name (used in the summary prompt).

    Returns:
        The model's "Title: .../Summary: ..." text, stripped, or a fallback
        message if the chain call raised.
    """
    st.write("**Summarizing the conversation...**")
    print("Summarizing the conversation...")

    # BUG FIX: the original interpolated the undefined name
    # `final_conversation`, which raised NameError on every call; the
    # parameter is `conversation`.
    summary_prompt = f"""
    The following is a conversation between {name1} and {name2}:
    {conversation}

    Provide a short descriptive title for their conversation and then summarize it in a few short sentences highlighting the main points, tone, and conclusion.
    
    Format your answer as:
    Title: <your conversation title>
    Summary: <your summary here>
    """

    try:
        response = chain.run(chat_history="", input=summary_prompt)
        return response.strip()
    except Exception as e:
        st.error(f"Error summarizing conversation: {e}")
        print(f"Error summarizing conversation: {e}")
        return "No summary available due to error."

def main():
    """Streamlit entry point: collect inputs, run the simulation, summarize.

    Flow: pick a Hugging Face model, name the two personas, describe their
    style, then (on button press) build an endpoint-backed LLMChain, run a
    15-message simulated conversation, and render transcript + summary.
    """
    st.title("LLM Conversation Simulation")

    # Model selection — inference goes through the HF hosted endpoint.
    available_models = [
        "meta-llama/Llama-3.3-70B-Instruct",
        "meta-llama/Llama-3.1-405B-Instruct",
        "lmsys/vicuna-13b-v1.5"
    ]
    model_id = st.selectbox("Select a model:", available_models)

    # Persona inputs for the two simulated speakers.
    name1 = st.text_input("Enter the first user's name:", value="Alice")
    name2 = st.text_input("Enter the second user's name:", value="Bob")
    persona_style = st.text_area("Enter the persona style characteristics:", 
                                 value="friendly, curious, and a bit sarcastic")

    if not st.button("Start Conversation Simulation"):
        return

    st.write("**Loading model...**")
    print("Loading model...")

    with st.spinner("Starting simulation..."):
        api_url = f"https://api-inference.huggingface.co/models/{model_id}"

        try:
            language_model = HuggingFaceEndpoint(
                endpoint_url=api_url,
                huggingfacehub_api_token=os.environ.get("HUGGINGFACEHUB_API_TOKEN"),
                task="text-generation",
                temperature=0.7,
                max_new_tokens=256  # Reduced for speed
            )
            st.write("**Model loaded successfully!**")
            print("Model loaded successfully!")
        except Exception as e:
            st.error(f"Error initializing HuggingFaceEndpoint: {e}")
            print(f"Error initializing HuggingFaceEndpoint: {e}")
            return

        conversation_chain = LLMChain(
            llm=language_model,
            prompt=create_prompt(name1, name2, persona_style),
        )

        st.write("**Simulating the conversation...**")
        print("Simulating the conversation...")

        transcript = simulate_conversation(conversation_chain, name1, name2, total_messages=15)
        if transcript:
            st.subheader("Final Conversation:")
            st.text(transcript)
            print("Conversation Simulation Complete.\n")
            print("Full Conversation:\n", transcript)

            # Summarize conversation
            st.subheader("Summary and Title:")
            summary_text = summarize_conversation(conversation_chain, transcript, name1, name2)
            st.write(summary_text)
            print("Summary:\n", summary_text)

if __name__ == "__main__":
    main()