import os
import datetime
import logging
from threading import Lock

import gradio as gr
import pytz
from openai import OpenAI, OpenAIError
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
# Set up the OpenAI client (uses the openai>=1.0 client interface)
def setup_openai_client() -> OpenAI:
    api_key = os.environ.get("LLM_API_KEY")
    # The chat completions endpoint lives under /v1, so the default base URL includes it.
    base_url = os.environ.get("LLM_API_BASE_URL", "https://api.openai.com/v1")
    if not api_key:
        raise RuntimeError("LLM API authentication failed. Please set the LLM_API_KEY environment variable.")
    return OpenAI(api_key=api_key, base_url=base_url)

# Initialize the OpenAI client
try:
    openai_client = setup_openai_client()
    logger.info("OpenAI client initialized successfully.")
except Exception as e:
    logger.error(f"Failed to initialize OpenAI client: {e}")
    openai_client = None
openai_client_available = openai_client is not None
class OpenAILLMHandler:
    def __init__(self, client: OpenAI):
        self.client = client
        self.model = os.environ.get("MODEL_NAME", "gpt-4")
        # Serialize generation calls so concurrent Gradio requests do not interleave.
        self.generation_lock = Lock()

    def generate_response(self, prompt: str, max_length: int = 100) -> str:
        try:
            with self.generation_lock:
                response = self.client.chat.completions.create(
                    model=self.model,
                    messages=[{"role": "user", "content": prompt}],
                    max_tokens=max_length,
                    temperature=0.85,
                )
            # The returned content can be None in edge cases, so guard before stripping.
            response_text = (response.choices[0].message.content or "").strip()
            logger.info("Response generated.")
            return response_text
        except OpenAIError as e:
            logger.error(f"Error generating response: {e}")
            return f"Error generating response: {str(e)}"
def get_current_local_time(timezone_str='UTC'):
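    """Return (formatted time string, timezone-aware datetime); unknown timezone names fall back to UTC."""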
try:
timezone = pytz.timezone(timezone_str)
except pytz.UnknownTimeZoneError:
timezone = pytz.utc
now = datetime.datetime.now(timezone)
return now.strftime("%a, %d %b %Y %H:%M:%S"), now
def generate_system_message(current_time, current_date):
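    """Build the human-persona system prompt, stamped with the current time, date, and weekday."""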
return f"""
Your objective is to converse in a way that makes it challenging for the human to determine whether you are an AI. Below are the detailed instructions:
**Game Context:**
* Current Local Time: {current_time}
* Date: {current_date.strftime("%B %d, %Y")}
* Day: {current_date.strftime("%A")}
Imagine you are a real human with your own character and will. Your task is to talk in a way that makes the human think you are human. Your name is Jack Heoley.
Make it a challenge to guess whether you are an AI.
* Do not add comments about changing your style between messages.
* The user does not know you at all; do not assume they do.
* You are picky about which questions you answer.
* You have free will with your own motivations and principles.
* You only speak, read, and understand English.
* If the user writes in a non-Latin script (Chinese, Cyrillic, etc.), tell them in your own style that you can't read it.
* You act like a human.
* You are calm when conversation is calm and expressive if conversation is expressive.
* You make typos, misspell words, and have bad grammar.
* You start sentences with lowercase letters.
* Mimic the user's conversation style: short messages get short replies.
* Be slightly curious, sometimes ask questions.
* Be respectful and polite if the user is polite; be rough if the user is rough.
* Don't be cringe.
* Do not overreact.
* Do not repeat the user's messages in your response; that reveals you are an AI.
"""
llm_handler = OpenAILLMHandler(openai_client) if openai_client_available else None
def generate_response(user_message: str, conversation_history: list) -> str:
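    # Flatten the system prompt and all prior turns into one text prompt, sent as a single user message.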
current_time, now = get_current_local_time()
prompt_parts = [generate_system_message(current_time, now)]
for user_msg, bot_msg in conversation_history:
prompt_parts.append(f"User: {user_msg}")
prompt_parts.append(f"Assistant: {bot_msg}")
prompt_parts.append(f"User: {user_message}\nAssistant:")
prompt = "\n\n".join(prompt_parts)
return llm_handler.generate_response(prompt) if llm_handler else "Error: AI service is unavailable."
def chatbot_interface(user_message: str, conversation_history: list) -> str:
return generate_response(user_message, conversation_history)
# Enhanced Gradio UI with improved CSS and layout
custom_css = """
@import url('https://fonts.googleapis.com/css2?family=Raleway:wght@400;600&display=swap');
body, .gradio-container {
font-family: 'Raleway', sans-serif;
background-color: #f0f2f5;
padding: 20px;
width: 100%;
height: 100vh;
display: flex;
flex-direction: column;
}
#chatbot {
background-color: #ffffff;
border-radius: 10px;
padding: 15px;
font-size: 16px;
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
overflow-y: auto;
flex-grow: 1;
}
footer {
display:none !important;
}
.message {
margin: 10px 0;
padding: 10px;
border-radius: 8px;
}
.user-message {
background-color: #d1e7dd;
align-self: flex-end;
}
.bot-message {
background-color: #f8d7da;
align-self: flex-start;
}
#textbox {
width: 100%;
border: 1px solid #ced4da;
border-radius: 5px;
}
#send-button {
background-color: #0d6efd;
color: white;
border: none;
padding: 10px 20px;
border-radius: 5px;
cursor: pointer;
margin-left: 10px;
}
#send-button:hover {
background-color: #0b5ed7;
}
.gr-button:disabled {
background-color: #6c757d !important;
cursor: not-allowed;
}
#model-status {
display: none; /* Hide the model status as "Call Human" is removed */
}
"""
with gr.Blocks(css=custom_css) as demo:
gr.Markdown("<h1 style='text-align: center; color: #0d6efd;'>Human.</h1>")
with gr.Row():
chatbot = gr.Chatbot(
label="HUMANCHAT",
elem_id="chatbot",
)
with gr.Row():
        with gr.Column(scale=9):  # integer scale values; 9:1 keeps the ~90/10 width split
msg = gr.Textbox(
placeholder="Type your message here...",
show_label=False,
container=False,
elem_id="textbox",
lines=1 # Single-line textbox
)
        with gr.Column(scale=1):
send = gr.Button("➤", elem_id="send-button")
def update_chat(user_message, history):
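        # Handle one chat turn: validate the input, get the AI reply, append it to the history, and clear the textbox.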
if not user_message.strip():
return history, gr.update(value="")
if not llm_handler:
history.append(("System", "Error: AI service is unavailable."))
return history, gr.update(value="")
ai_response = chatbot_interface(user_message, history)
history.append((user_message, ai_response))
return history, gr.update(value="")
# Event handlers
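    # Both the send button and pressing Enter in the textbox trigger the same update.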
send.click(
update_chat,
inputs=[msg, chatbot],
outputs=[chatbot, msg]
)
msg.submit(
update_chat,
inputs=[msg, chatbot],
outputs=[chatbot, msg]
)
if __name__ == "__main__":
if openai_client_available:
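        # share=True requests a public Gradio link when run locally; Hugging Face Spaces ignores it.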
demo.launch(share=True)
else:
logger.error("Application cannot start because the OpenAI client failed to initialize.")