import streamlit as st
from langchain_community.llms import HuggingFaceHub # Use updated import
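# Note: HuggingFaceHub is deprecated in recent LangChain releases; HuggingFaceEndpoint
# from the langchain-huggingface package is the suggested replacement.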
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory
import os
from datetime import datetime, timedelta
from dotenv import load_dotenv
# Load environment variables - MUST be at the top
load_dotenv('.env') # Explicitly load from .env file
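# On Hugging Face Spaces, add HUGGINGFACEHUB_API_TOKEN as a Space secret rather than a
# .env file; secrets are exposed as environment variables, so os.environ.get() still works.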
# Debugging - check if token is available (remove in production)
token = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
if token:
    print("Token found! Length:", len(token))
else:
    print("WARNING: Token not found in environment variables!")
# Configure page
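# st.set_page_config must be the first Streamlit command executed in the script,
# which is why it runs before any other st.* call.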
st.set_page_config(
    page_title="Tourism Chatbot",
    page_icon="🌍",
    layout="wide"
)
# Title with better styling
st.markdown("""
<h1 style='text-align: center; color: #2E86C1;'>
Tourism Assistant - مساعد السياحة
</h1>
""", unsafe_allow_html=True)
# Initialize session states
if "memory" not in st.session_state:
st.session_state.memory = ConversationBufferMemory(
return_messages=True,
memory_key="chat_history"
)
if "messages" not in st.session_state:
st.session_state.messages = []
if "last_request" not in st.session_state:
st.session_state.last_request = datetime.now() - timedelta(seconds=10)
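    # Seeding last_request 10 seconds in the past ensures the very first message is not
    # blocked by the 3-second rate-limit check further down.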
# Language selector
language = st.selectbox(
    "Choose Language / اختر اللغة",
    ["English", "العربية"],
    key="lang_select"
)
# Get token - try both environment variable and direct input
hf_token = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
# Display token status
if not hf_token:
    st.error("""
API token not found in environment variables. Please do one of the following:
1. Create a .env file containing HUGGINGFACEHUB_API_TOKEN=your_token_here
2. Set the environment variable (e.g. with setx on Windows)
3. On Hugging Face Spaces, add it as a Space secret
""")
    # Allow token input directly in the app (for development only)
    hf_token = st.text_input("Enter your Hugging Face token:", type="password")
    if not hf_token:
        st.stop()
# Enhanced model configuration
model_config = {
    "English": {
        "repo_id": "google/flan-t5-base",  # Using a smaller model for testing
        "params": {
            "temperature": 0.7,
            "max_length": 512,
            "max_new_tokens": 300,
            "repetition_penalty": 1.2
        }
    },
    "العربية": {
        "repo_id": "aubmindlab/aragpt2-medium",  # Using a smaller model for testing
        "params": {
            "temperature": 0.6,
            "max_length": 1024,
            "max_new_tokens": 400,
            "repetition_penalty": 1.3
        }
    }
}
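# Both repo_ids are deliberately small models (see the "testing" comments above); larger
# instruction-tuned models generally answer better at the cost of more Inference API usage.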
# Initialize the language model with enhanced error handling
try:
    # HuggingFaceHub expects the token via the huggingfacehub_api_token parameter
    llm = HuggingFaceHub(
        repo_id=model_config[language]["repo_id"],
        huggingfacehub_api_token=hf_token,
        model_kwargs=model_config[language]["params"]
    )
    conversation = ConversationChain(
        llm=llm,
        memory=st.session_state.memory,
        verbose=False
    )
    # Show success message
    st.success("Successfully connected to Hugging Face model!")
except Exception as e:
    error_msg = str(e)
    st.error(f"Initialization error: {error_msg}")
    st.stop()
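# Note: creating the HuggingFaceHub wrapper does not fully validate the token or repo_id;
# some failures only surface on the first predict() call further down.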
# Display chat history with improved formatting
for message in st.session_state.messages:
    avatar = "🧑" if message["role"] == "user" else "🌍"
    with st.chat_message(message["role"], avatar=avatar):
        if language == "العربية":
            st.markdown(f"<div style='text-align: right;'>{message['content']}</div>", unsafe_allow_html=True)
        else:
            st.markdown(message["content"])
# Enhanced rate limiting
if (datetime.now() - st.session_state.last_request).seconds < 3:
    st.warning("Please wait a moment before sending another message." if language == "English"
               else "الرجاء الانتظار قليلاً قبل إرسال رسالة أخرى")
    st.stop()
# User input with better placeholder handling
prompt_placeholder = {
    "English": "Ask about destinations, culture, or safety tips...",
    "العربية": "اسأل عن الوجهات، الثقافة، أو نصائح السلامة..."
}
prompt = st.chat_input(prompt_placeholder[language])
if prompt:
    st.session_state.last_request = datetime.now()
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user", avatar="🧑"):
        st.markdown(prompt)
    with st.chat_message("assistant", avatar="🌍"):
        with st.spinner("Generating response..." if language == "English" else "جارٍ تحضير الرد..."):
            try:
                # Enhanced prompt engineering
                if language == "English":
                    full_prompt = """You are an expert tourism assistant specializing in:
- Detailed travel destination information
- Cultural norms and etiquette
- Safety recommendations
- Local transportation options
- Authentic dining experiences
Question: {prompt}
Answer in clear, detailed points:""".format(prompt=prompt)
                else:
                    full_prompt = """أنت مساعد سياحي خبير متخصص في:
- معلومات مفصلة عن الوجهات السياحية
- الأعراف الثقافية وآداب السلوك
- توصيات السلامة
- خيارات النقل المحلي
- تجارب تناول الطعام الأصيلة
السؤال: {prompt}
الجواب بنقاط واضحة ومفصلة:""".format(prompt=prompt)
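                # The Arabic prompt above mirrors the English one: destination details,
                # cultural etiquette, safety tips, local transport and dining, followed
                # by the question and a request for a clear, detailed answer.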
                response = conversation.predict(input=full_prompt)
                # Post-process response: store the plain text so the history loop above
                # can apply the right-to-left wrapper itself without nesting divs
                cleaned_response = response.strip()
                st.session_state.messages.append({"role": "assistant", "content": cleaned_response})
                if language == "العربية":
                    st.markdown(f"<div style='text-align: right;'>{cleaned_response}</div>", unsafe_allow_html=True)
                else:
                    st.markdown(cleaned_response)
            except Exception as e:
                error_response = {
                    "English": f"Sorry, I encountered an error: {str(e)}. Please try again later.",
                    "العربية": f"عذرًا، حدث خطأ: {str(e)}. يرجى المحاولة مرة أخرى لاحقًا."
                }
                st.error(error_response[language])
                st.session_state.messages.append({
                    "role": "assistant",
                    "content": error_response[language]
                })
# Sidebar with deployment-ready information
with st.sidebar:
    st.header("ℹ️ " + ("About" if language == "English" else "حول"))
    about_text = {
        "English": """
**Tourism Expert Chatbot**
• Provides detailed travel information
• Offers cultural insights
• Available in English/Arabic
• Remembers conversation history
""",
        "العربية": """
**مساعد سياحي خبير**
• يقدم معلومات سفر مفصلة
• يوفر رؤى ثقافية
• متاح باللغتين الإنجليزية والعربية
• يحفظ تاريخ المحادثة
"""
    }
    st.markdown(about_text[language])
    # Add reset button to clear conversation
    if st.button("Reset Conversation" if language == "English" else "إعادة ضبط المحادثة"):
        st.session_state.messages = []
        st.session_state.memory.clear()
        st.rerun()  # st.experimental_rerun() was deprecated in newer Streamlit releases