# Xylaria 1.4 Senoa — Hugging Face Space chat application.
import json
import os

import gradio as gr
from huggingface_hub import InferenceClient

# Global flags for UI state (use with caution — shared across sessions;
# prefer per-session gr.State inside the Blocks context).
chat_visible_global = False
sidebar_collapsed_global = False
class XylariaChat:
    """Gradio chat application backed by a HuggingFace inference endpoint.

    Maintains a rolling conversation history plus a small key/value
    "persistent memory"; both are saved to a local JSON file between
    interactions so chats survive restarts.
    """

    # Single source of truth for the model id (used on init and reset).
    MODEL_NAME = "Qwen/Qwen-32B-Preview"

    def __init__(self):
        # Securely load the HuggingFace token from the environment.
        self.hf_token = os.getenv("HF_TOKEN")
        if not self.hf_token:
            raise ValueError("HuggingFace token not found in environment variables")
        # Inference client used for streaming chat completions.
        self.client = InferenceClient(
            model=self.MODEL_NAME,
            api_key=self.hf_token,
        )
        # Rolling conversation history and persistent key/value memory.
        self.conversation_history = []
        self.persistent_memory = {}
        self.chat_file_path = "chat_history.txt"  # File to save chats (JSON content)
        # System prompt prepended to every request.
        self.system_prompt = """You are a helpful and harmless AI assistant you are Xylaria 1.4 Senoa, Made by Sk Md Saad Amin you think step by step
"""

    def store_information(self, key, value):
        """Store important information in persistent memory."""
        self.persistent_memory[key] = value

    def retrieve_information(self, key):
        """Retrieve information from persistent memory (None if absent)."""
        return self.persistent_memory.get(key)

    def save_chat(self):
        """Save the current chat history and memory to a JSON file.

        Best-effort: failures are logged, never raised, so a disk error
        cannot break an in-flight chat.
        """
        try:
            with open(self.chat_file_path, "w", encoding="utf-8") as f:
                json.dump(
                    {
                        "conversation_history": self.conversation_history,
                        "persistent_memory": self.persistent_memory,
                    },
                    f,
                )
        except Exception as e:
            print(f"Error saving chat history: {e}")

    def load_chat(self):
        """Load chat history and memory from disk.

        Returns:
            Tuple of (conversation_history, persistent_memory).  On a
            missing or unreadable file, returns ([], {}) and leaves the
            in-memory state untouched.
        """
        try:
            with open(self.chat_file_path, "r", encoding="utf-8") as f:
                chat_data = json.load(f)
            self.conversation_history = chat_data.get("conversation_history", [])
            self.persistent_memory = chat_data.get("persistent_memory", {})
            return self.conversation_history, self.persistent_memory
        except FileNotFoundError:
            print("Chat history file not found.")
            return [], {}
        except Exception as e:
            print(f"Error loading chat history: {e}")
            return [], {}

    def reset_conversation(self):
        """Reset conversation history, persistent memory, and the client.

        Re-creating the client is a best-effort attempt to drop any
        server-side context; the local state is always cleared.
        """
        self.conversation_history = []
        self.persistent_memory.clear()
        try:
            self.client = InferenceClient(
                model=self.MODEL_NAME,
                api_key=self.hf_token,
            )
        except Exception as e:
            print(f"Error resetting API client: {e}")
        self.save_chat()  # Persist the now-empty state.

    def get_response(self, user_input):
        """Request a streaming completion for *user_input*.

        Returns:
            The streaming response iterator on success, or an error
            message string on failure (callers must check the type).
        """
        messages = [
            {"role": "system", "content": self.system_prompt},
            *self.conversation_history,
            {"role": "user", "content": user_input},
        ]
        # Inject remembered facts immediately after the system prompt.
        if self.persistent_memory:
            memory_context = "Remembered Information:\n" + "\n".join(
                f"{k}: {v}" for k, v in self.persistent_memory.items()
            )
            messages.insert(1, {"role": "system", "content": memory_context})
        try:
            return self.client.chat.completions.create(
                messages=messages,
                temperature=0.5,
                max_tokens=10240,
                top_p=0.7,
                stream=True,
            )
        except Exception as e:
            return f"Error generating response: {str(e)}"

    def create_interface(self):
        """Build and return the Gradio Blocks UI for the chat app."""

        def streaming_response(message, chat_history):
            # Generator handler: streams the assistant reply token by
            # token into the chat window, then persists the exchange.
            response_stream = self.get_response(message)
            if isinstance(response_stream, str):
                # Error path.  This function is a generator, so the error
                # must be *yielded*; a plain `return value` would raise
                # StopIteration and the message would never be shown.
                yield "", chat_history + [[message, response_stream]]
                return
            full_response = ""
            updated_history = chat_history + [[message, ""]]
            for chunk in response_stream:
                if chunk.choices[0].delta.content:
                    full_response += chunk.choices[0].delta.content
                    updated_history[-1][1] = full_response
                    yield "", updated_history
            self.conversation_history.append({"role": "user", "content": message})
            self.conversation_history.append({"role": "assistant", "content": full_response})
            # Bound the prompt size: keep only the last 10 messages.
            if len(self.conversation_history) > 10:
                self.conversation_history = self.conversation_history[-10:]
            self.save_chat()  # Save after each interaction.

        # Custom CSS for improved colors and styling.
        custom_css = """
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700&display=swap');
body, .gradio-container {
    font-family: 'Inter', sans-serif !important;
    background-color: #f8f8f8; /* Light background */
}
/* Chatbot styling */
.chatbot-container .message {
    font-family: 'Inter', sans-serif !important;
    padding: 10px 15px;
    border-radius: 10px;
    margin-bottom: 8px; /* Add margin between messages */
}
.chatbot-container .user {
    background-color: #e0f2f7; /* Light blue for user messages */
    border: 1px solid #a7d9ed; /* Light blue border */
}
.chatbot-container .assistant {
    background-color: #f0f0f0; /* Light gray for assistant messages */
    border: 1px solid #d3d3d3; /* Light gray border */
}
.chatbot-container .message-tools {
    margin-right: 10px; /* Add some space between text and buttons */
}
/* Sidebar styling */
#sidebar {
    background-color: #f2f2f2;
    border-right: 1px solid #ccc;
    padding: 10px;
    height: 100vh;
    overflow-y: auto;
    transition: width 0.3s ease; /* Smooth transition for collapse */
    width: 250px; /* Initial width */
}
#sidebar.collapsed {
    width: 50px; /* Collapsed width */
}
#sidebar.collapsed #sidebar-content {
    display: none; /* Hide content when collapsed */
}
#sidebar-content {
    display: block;
}
#collapse-button {
    width: 100%;
    margin-bottom: 10px;
    background-color: transparent;
    border: none;
    cursor: pointer;
    text-align: left;
    padding: 5px;
}
/* Main chat area */
#main-chat {
    padding: 20px;
}
/* Textbox and buttons */
.gradio-container input,
.gradio-container textarea,
.gradio-container button {
    font-family: 'Inter', sans-serif !important;
    border-radius: 5px; /* Rounded corners */
}
.gradio-container button {
    background-color: #4CAF50; /* Green button */
    color: white;
    transition: background-color 0.2s; /* Smooth transition for hover effect */
}
.gradio-container button:hover {
    background-color: #3e8e41; /* Darker green on hover */
}
"""

        # Example prompts shown on the start page.
        example_prompts = [
            "How do I get started with coding?",
            "Tell me a fun fact about science.",
            "What are some good books to read?",
        ]

        with gr.Blocks(css=custom_css) as demo:
            with gr.Row():
                # Sidebar for displaying saved chat history.
                with gr.Column(scale=1) as sidebar:
                    collapse_button = gr.Button("<<", elem_id="collapse-button")
                    # Sidebar content (nested column so it can be hidden).
                    with gr.Column(elem_id="sidebar-content") as sidebar_content:
                        gr.Markdown("### Chat History")
                        load_button = gr.Button("Load Chat History")
                        chat_list = gr.Markdown("No chat history found.")
                        load_button.click(
                            # Reuse the method instead of a duplicated closure.
                            fn=lambda: self.format_chat_history(),
                            inputs=None,
                            outputs=[chat_list],
                        )
                # Main chat interface.
                with gr.Column(scale=3) as main_chat:
                    # Input row (stays visible on both pages).
                    with gr.Row():
                        txt = gr.Textbox(
                            show_label=False,
                            placeholder="Type your message...",
                            container=False,
                            scale=4,
                        )
                        btn = gr.Button("Send", scale=1)
                    # Welcome page with example prompts (initially visible).
                    with gr.Column(visible=True) as start_page:
                        gr.Markdown("# Xylaria")
                        with gr.Row():
                            for prompt in example_prompts:
                                gr.Button(prompt).click(
                                    # Bind as a default arg to avoid the
                                    # late-binding closure pitfall.
                                    fn=lambda p=prompt: p,
                                    inputs=None,
                                    outputs=txt,
                                )
                    # Chat page (initially hidden).
                    with gr.Column(visible=False) as chat_page:
                        chatbot = gr.Chatbot(
                            label="Xylaria 1.4 Senoa",
                            height=500,
                            show_copy_button=True,
                            avatar_images=("user.png", "xylaria.png"),
                            bubble_full_width=False,
                        )
                        clear = gr.Button("Clear Conversation")
                        clear_memory = gr.Button("Clear Memory")

            # Per-session UI state (replaces fragile module-level globals).
            sidebar_visible = gr.State(True)

            def show_chat_page():
                # gr.update is required: Columns only change visibility via
                # update objects, not bare booleans.
                return gr.update(visible=False), gr.update(visible=True)

            def show_start_page():
                return gr.update(visible=True), gr.update(visible=False)

            def toggle_sidebar_content(visible_state):
                # Flip sidebar-content visibility; state carries the flag.
                new_state = not visible_state
                return new_state, gr.update(visible=new_state)

            # Collapse button toggles the sidebar content.
            collapse_button.click(
                fn=toggle_sidebar_content,
                inputs=[sidebar_visible],
                outputs=[sidebar_visible, sidebar_content],
            )
            # Submit prompt via button or Enter key.
            submit_event = btn.click(
                fn=streaming_response,
                inputs=[txt, chatbot],
                outputs=[txt, chatbot],
            )
            txt_submit_event = txt.submit(
                fn=streaming_response,
                inputs=[txt, chatbot],
                outputs=[txt, chatbot],
            )
            # Reveal the chat page once the first message is sent.
            submit_event.then(
                fn=show_chat_page, inputs=None, outputs=[start_page, chat_page]
            )
            txt_submit_event.then(
                fn=show_chat_page, inputs=None, outputs=[start_page, chat_page]
            )
            # Clear conversation, then return to the start page.
            clear.click(
                fn=lambda: [],
                inputs=None,
                outputs=[chatbot],
                queue=False,
            ).then(fn=show_start_page, inputs=None, outputs=[start_page, chat_page])
            # Clear persistent memory, then return to the start page.
            clear_memory.click(
                fn=self.reset_conversation,
                inputs=None,
                outputs=None,
                queue=False,
            ).then(fn=show_start_page, inputs=None, outputs=[start_page, chat_page])

            # NOTE(review): this *resets* (clears) state on startup rather
            # than loading the saved file — preserved from the original.
            demo.load(fn=self.reset_conversation, inputs=None, outputs=None)

        return demo

    def format_chat_history(self):
        """Format the saved chat history as Markdown for the sidebar."""
        self.load_chat()  # Refresh from disk first.
        if not self.conversation_history:
            return "No chat history found."
        formatted_history = ""
        for chat in self.conversation_history:
            if chat["role"] == "user":
                formatted_history += f"**You:** {chat['content']}\n\n"
            elif chat["role"] == "assistant":
                formatted_history += f"**Xylaria:** {chat['content']}\n\n"
        return formatted_history
def main():
    """Entry point: build the Xylaria chat UI and launch it locally."""
    interface = XylariaChat().create_interface()
    interface.launch(
        share=False,  # Set to True to create a public link
        debug=True    # Show detailed errors
    )


if __name__ == "__main__":
    main()