File size: 4,572 Bytes
9a00962
 
 
 
b01bda4
9a00962
 
 
 
 
 
 
 
b01bda4
9a00962
 
 
 
 
 
 
 
 
 
e284564
9a00962
e284564
 
 
 
 
b01bda4
e284564
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b01bda4
 
 
 
 
 
 
 
 
 
 
 
9a00962
b01bda4
 
 
 
 
9a00962
 
 
 
b01bda4
9a00962
 
b01bda4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9a00962
 
 
 
 
 
 
b01bda4
9a00962
b01bda4
 
 
 
9a00962
 
b01bda4
 
9a00962
b01bda4
 
 
 
 
 
 
 
 
 
 
 
 
9a00962
 
 
 
 
 
b01bda4
 
9a00962
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
import os
from dotenv import load_dotenv
import chainlit as cl
from openai import AsyncOpenAI
import time

# Load environment variables from .env file (expects OPENAI_API_KEY there
# or in the process environment).
load_dotenv()

# Default model settings passed straight through to
# client.chat.completions.create(**settings) — keys must match the
# OpenAI Chat Completions API parameter names.
DEFAULT_SETTINGS = {
    "model": "gpt-3.5-turbo",
    "temperature": 0.7,
    "max_tokens": 1000,
    "top_p": 1,
    "frequency_penalty": 0,
    "presence_penalty": 0,
}

# System prompt seeded as the first entry of every session's message history.
SYSTEM_PROMPT = "You are a helpful, friendly AI assistant. Provide clear and concise responses."

@cl.on_chat_start
async def start():
    """
    Initialize the chat session.

    Creates an AsyncOpenAI client, seeds the message history with the
    system prompt, stores a per-session copy of the model settings, and
    sends a greeting. On failure, sends a user-facing error message
    instead of raising.
    """
    try:
        api_key = os.environ.get("OPENAI_API_KEY")
        if not api_key:
            raise ValueError("OPENAI_API_KEY environment variable is not set")
        
        # Initialize OpenAI client (without test request)
        client = AsyncOpenAI(api_key=api_key)
        cl.user_session.set("client", client)
        
        # Initialize message history with system prompt
        message_history = [{"role": "system", "content": SYSTEM_PROMPT}]
        cl.user_session.set("message_history", message_history)
        
        # Store a per-session COPY of the defaults so later per-session
        # mutation cannot leak into the shared module-level dict.
        cl.user_session.set("settings", dict(DEFAULT_SETTINGS))
        
        await cl.Message(
            content="Hello! I'm your AI assistant powered by OpenAI. How can I help you today?"
        ).send()
        
    except ValueError as e:
        await cl.Message(
            content=f"⚠️ Configuration Error: {str(e)}\nPlease make sure OPENAI_API_KEY is set in the environment variables."
        ).send()
    except Exception as e:
        error_msg = f"⚠️ Error: {str(e)}"
        if "session" in str(e).lower():
            error_msg = "⚠️ Session error. Please refresh the page and try again."
        await cl.Message(content=error_msg).send()

@cl.on_stop
async def on_stop():
    """Best-effort cleanup of per-session state when the chat stops."""
    try:
        cl.user_session.clear()
    except Exception:
        # The session may already be torn down; nothing useful to report.
        pass

async def handle_error(error: Exception) -> str:
    """Translate an exception into a user-facing warning string.

    Session-related errors get a refresh-the-page hint; everything else
    is reported verbatim.
    """
    detail = str(error)
    if "session" in detail.lower():
        return "⚠️ Session error occurred. Please refresh the page and try again."
    return f"⚠️ An error occurred: {detail}"

@cl.on_message
async def main(user_message: cl.Message):
    """
    Process a user message and stream back the AI response.

    Retrieves the per-session client/history/settings, appends the user
    turn, streams the completion into the UI in buffered chunks (every
    50 chars or 100 ms), then records the assistant turn in the history.
    Errors are reported to the user via handle_error().
    """
    # Created only after session data is validated; tracked here so the
    # except block can tell whether a UI message exists yet.
    response_message = None
    try:
        # Retrieve session data
        client = cl.user_session.get("client")
        message_history = cl.user_session.get("message_history")
        settings = cl.user_session.get("settings")
        
        if not client or not message_history or not settings:
            raise ValueError("Session data not found. Please refresh the page.")
        
        # Add user message to history
        message_history.append({"role": "user", "content": user_message.content})
        
        # Prepare response message with loading state
        response_message = cl.Message(content="")
        await response_message.send()
        
        # Call OpenAI API to get a streamed response
        stream = await client.chat.completions.create(
            messages=message_history,
            stream=True,
            **settings
        )
        
        # Stream the response with buffering to limit UI update frequency
        full_response = ""
        buffer = ""
        update_interval = 0.1  # flush to UI at most every 100ms
        last_update_time = 0.0
        
        async for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta:
                buffer += delta
                current_time = time.time()
                
                # Flush when buffer is large enough or enough time has passed
                if len(buffer) >= 50 or (current_time - last_update_time) >= update_interval:
                    full_response += buffer
                    response_message.content = full_response
                    await response_message.update()
                    buffer = ""  # Clear buffer
                    last_update_time = current_time
        
        # Send any remaining buffer content
        if buffer:
            full_response += buffer
            response_message.content = full_response
            await response_message.update()
        
        # Add AI response to message history
        message_history.append({"role": "assistant", "content": full_response})
        cl.user_session.set("message_history", message_history)
        
    except Exception as e:
        error_message = await handle_error(e)
        # Bug fix: the failure may have happened before response_message
        # was created (e.g. missing session data) — previously this
        # raised a NameError that masked the real error.
        if response_message is None:
            await cl.Message(content=error_message).send()
        else:
            response_message.content = error_message
            await response_message.update()