Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -2995,6 +2995,317 @@ async def handle_veterinary_product_followup(selection: str, from_number: str) -
|
|
2995 |
current_menu_options=list(MENU_CONFIG['main_menu']['option_descriptions'].values())
|
2996 |
)
|
2997 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2998 |
if __name__ == "__main__":
|
2999 |
# Launch FastAPI app
|
3000 |
import uvicorn
|
|
|
2995 |
current_menu_options=list(MENU_CONFIG['main_menu']['option_descriptions'].values())
|
2996 |
)
|
2997 |
|
2998 |
+
# Add or update the following functions in app.py:

# --- Restore handle_voice_message_complete ---
async def handle_voice_message_complete(from_number: str, msg: dict):
    """Complete voice message processing with OpenAI transcription - treats voice exactly like text.

    Pipeline: locate the media URL in the webhook payload, download the
    voice note, transcribe it with OpenAI, apply transcription-error
    corrections, detect the language (replies are restricted to English
    or Urdu), translate Urdu to English for intent processing, then route
    the text through the same state-based handlers as a typed message.

    Args:
        from_number: WhatsApp sender number, used for context lookup and replies.
        msg: Raw provider webhook payload; the audio URL may appear under
            'media.link', 'media.url', 'url', or 'audio.url' depending on
            the payload shape.
    """
    try:
        logger.info(f"[Voice] Processing voice message from {from_number}")
        logger.info(f"[Voice] Message structure: {msg}")

        # Transcription requires OpenAI; bail out early with user guidance.
        if not OPENAI_API_KEY:
            send_whatsjet_message(from_number,
                "🎤 Voice messages require OpenAI API. Please send a text message or type 'main' to see the menu.")
            return

        # Extract media URL from different possible payload locations.
        media_url = None
        logger.info(f"[Voice] Checking media URL locations...")

        if msg.get('media', {}).get('link'):
            media_url = msg.get('media', {}).get('link')
            logger.info(f"[Voice] Found media URL in media.link: {media_url}")
        elif msg.get('media', {}).get('url'):
            media_url = msg.get('media', {}).get('url')
            logger.info(f"[Voice] Found media URL in media.url: {media_url}")
        elif msg.get('url'):
            media_url = msg.get('url')
            logger.info(f"[Voice] Found media URL in url: {media_url}")
        elif msg.get('audio', {}).get('url'):
            media_url = msg.get('audio', {}).get('url')
            logger.info(f"[Voice] Found media URL in audio.url: {media_url}")
        else:
            logger.error(f"[Voice] No media URL found in message structure")
            logger.error(f"[Voice] Available fields: {list(msg.keys())}")
            if 'media' in msg:
                logger.error(f"[Voice] Media fields: {list(msg['media'].keys())}")

        logger.info(f"[Voice] Final extracted media URL: {media_url}")

        if not media_url:
            send_whatsjet_message(from_number, "❌ Could not process voice message. Please try again.")
            return

        # Unique filename per sender + timestamp so concurrent downloads never collide.
        filename = f"voice_{from_number}_{int(time.time())}.ogg"

        # Download voice file
        file_path = await download_voice_file(media_url, filename)
        if not file_path:
            send_whatsjet_message(from_number, "❌ Failed to download voice message. Please try again.")
            return

        # Transcribe with OpenAI
        transcribed_text = await transcribe_voice_with_openai(file_path)

        # Clean up voice file immediately. A failed delete is harmless, so
        # only OS-level errors are suppressed (fix: was a bare `except:`).
        try:
            os.remove(file_path)
        except OSError:
            pass

        # Handle empty or failed transcription with actionable tips.
        if not transcribed_text or transcribed_text.strip() == "":
            logger.warning(f"[Voice] Empty transcription for {from_number}")
            send_whatsjet_message(from_number,
                "🎤 *Voice Message Issue*\n\n"
                "I couldn't hear anything in your voice message. This can happen due to:\n"
                "• Very short voice note\n"
                "• Background noise\n"
                "• Microphone too far away\n"
                "• Audio quality issues\n\n"
                "💡 *Tips for better voice notes:*\n"
                "• Speak clearly and slowly\n"
                "• Keep phone close to mouth\n"
                "• Record in quiet environment\n"
                "• Make voice note at least 2-3 seconds\n\n"
                "💬 *You can also:*\n"
                "• Send a text message\n"
                "• Type 'main' to see menu options\n"
                "• Try voice note again")
            return

        # Process transcribed text with full intelligence
        logger.info(f"[Voice] Transcribed: {transcribed_text}")

        # Apply transcription error corrections (domain-specific mishears).
        corrected_text = process_voice_input(transcribed_text)
        if corrected_text != transcribed_text:
            logger.info(f"[Voice] Applied corrections: '{transcribed_text}' -> '{corrected_text}'")
            transcribed_text = corrected_text

        # Detect language of transcribed text
        detected_lang = 'en'  # Default to English
        try:
            detected_lang = detect(transcribed_text)
            logger.info(f"[Voice] Detected language: {detected_lang}")

            # Map language codes to supported languages. Several languages
            # are folded into Urdu because langdetect frequently misdetects
            # romanized Urdu/Arabic as them.
            lang_mapping = {
                'ur': 'ur',  # Urdu
                'ar': 'ur',  # Arabic (treat as Urdu for Islamic greetings)
                'en': 'en',  # English
                'hi': 'ur',  # Hindi (treat as Urdu)
                'bn': 'ur',  # Bengali (treat as Urdu)
                'pa': 'ur',  # Punjabi (treat as Urdu)
                'id': 'ur',  # Indonesian (often misdetected for Urdu/Arabic)
                'ms': 'ur',  # Malay (often misdetected for Urdu/Arabic)
                'tr': 'ur',  # Turkish (often misdetected for Urdu/Arabic)
            }

            # Check if text contains Urdu/Arabic characters or Islamic greetings
            urdu_arabic_pattern = re.compile(r'[\u0600-\u06FF\u0750-\u077F\u08A0-\u08FF\uFB50-\uFDFF\uFE70-\uFEFF]')
            islamic_greetings = ['assalamu', 'assalam', 'salam', 'salaam', 'adaab', 'namaste', 'khuda', 'allah']

            has_urdu_chars = bool(urdu_arabic_pattern.search(transcribed_text))
            has_islamic_greeting = any(greeting in transcribed_text.lower() for greeting in islamic_greetings)

            if has_urdu_chars or has_islamic_greeting:
                detected_lang = 'ur'
                logger.info(f"[Voice] Overriding language detection to Urdu due to Arabic/Urdu characters or Islamic greeting")

            # NOTE(review): this mapped value is recomputed from detected_lang
            # further below, so the mapping mainly influences the translation
            # gate — confirm before simplifying.
            reply_language = lang_mapping.get(detected_lang, 'en')
            logger.info(f"[Voice] Language '{detected_lang}' mapped to: {reply_language}")

        except Exception as e:
            logger.warning(f"[Voice] Language detection failed: {e}")
            reply_language = 'en'

        if reply_language not in ['en', 'ur']:
            logger.info(f"[Voice] Language '{reply_language}' not supported, defaulting to English")
            reply_language = 'en'

        # For Urdu voice notes, translate to English for processing
        processing_text = transcribed_text
        if reply_language == 'ur' and detected_lang == 'ur':
            try:
                logger.info(f"[Voice] Translating Urdu voice note to English for processing")
                translated_text = GoogleTranslator(source='ur', target='en').translate(transcribed_text)
                processing_text = translated_text
                logger.info(f"[Voice] Translated to English: {translated_text}")
            except Exception as e:
                logger.error(f"[Voice] Translation failed: {e}")
                # If translation fails, use original text
                processing_text = transcribed_text

        # Determine reply language - always respond in English or Urdu
        if detected_lang == 'ur':
            reply_language = 'ur'  # Urdu voice notes get Urdu replies
        else:
            reply_language = 'en'  # All other languages get English replies

        logger.info(f"[Voice] Processing text: {processing_text}")
        logger.info(f"[Voice] Reply language set to: {reply_language}")

        # Check if this is a greeting in voice note (check both original and translated)
        if is_greeting(transcribed_text) or is_greeting(processing_text):
            logger.info(f"[Voice] Greeting detected in voice note: {transcribed_text}")

            # Check if user is currently in AI chat mode - if so, don't trigger menu mode
            user_context = context_manager.get_context(from_number)
            current_state = user_context.get('current_state', 'main_menu')

            if current_state == 'ai_chat_mode':
                logger.info(f"[Voice] User is in AI chat mode, treating greeting as AI query instead of menu trigger")
                # Treat greeting as a general query in AI chat mode
                await handle_general_query_with_ai(from_number, processing_text, user_context, reply_language)
                return
            else:
                # Only trigger menu mode if not in AI chat mode
                welcome_msg = generate_veterinary_welcome_message(from_number, user_context)
                send_whatsjet_message(from_number, welcome_msg)
                context_manager.update_context(from_number, current_state='main_menu', current_menu='main_menu', current_menu_options=list(MENU_CONFIG['main_menu']['option_descriptions'].values()))
                return

        # Process the translated text using the same strict state-based logic as text messages.
        # This ensures voice messages follow the same menu and state rules as text messages.
        await process_incoming_message(from_number, {
            'body': processing_text,  # Use translated text for processing
            'type': 'text',
            'reply_language': reply_language,
            'original_transcription': transcribed_text  # Keep original for context
        })

    except Exception as e:
        logger.error(f"[Voice] Error processing voice message: {e}")
        logger.error(f"[Voice] Full error details: {str(e)}")
        import traceback
        logger.error(f"[Voice] Traceback: {traceback.format_exc()}")
        send_whatsjet_message(from_number,
            "❌ Error processing voice message. Please try a text message.")
3189 |
+
# --- Update send_product_image_with_caption to latest version ---
async def send_product_image_with_caption(from_number: str, product: Dict[str, Any], user_context: Dict[str, Any]):
    """
    Send product image (if available) with product details as caption in a single WhatsApp message.
    If image is not available, send only the product details as text.
    Now supports 'Images' column in CSV (Google Drive or direct links).

    Fallback order: CSV 'Images' URL (Google Drive links converted to
    direct-download form and probed with a HEAD request) → known public
    test image → local uploads directory exposed via a public URL →
    plain-text details.
    """
    ensure_images_dir()
    product_name = product.get('Product Name', 'Unknown Product')
    details = generate_veterinary_product_response(product, user_context)
    # Fix: a missing/NaN 'Images' cell may hold None (or a pandas NaN),
    # and this line runs before the try/except below — coerce to str so
    # .strip() cannot raise before any reply is sent.
    image_url = str(product.get('Images') or '').strip() if 'Images' in product else ''

    # Force image URL for Respira Aid Plus (use cPanel public URL)
    if product_name.lower().strip() == "respira aid plus":
        image_url = "https://amgocus.com/uploads/images/Respira%20Aid%20Plus.jpg"

    logger.info(f"[Product] Processing image for product: {product_name}")
    logger.info(f"[Product] Image URL from CSV: {image_url}")
    try:
        # First, check if we have an image URL from CSV
        if image_url:
            # Convert Google Drive link to direct download if needed
            if 'drive.google.com' in image_url:
                logger.info(f"[Product] Converting Google Drive link: {image_url}")
                if '/d/' in image_url:
                    file_id = image_url.split('/d/')[1].split('/')[0]
                elif 'id=' in image_url:
                    file_id = image_url.split('id=')[1].split('&')[0]
                else:
                    file_id = ''
                if file_id:
                    image_url = f"https://drive.google.com/uc?export=download&id={file_id}"
                    logger.info(f"[Product] Converted to direct download URL: {image_url}")
            # Use the public URL directly for WhatsApp API
            media_type = 'image/jpeg'
            filename = f"{product_name.replace(' ', '_')}.jpg"
            # Test the image URL first (browser-like headers avoid naive bot blocks)
            try:
                logger.info(f"[Product] Testing image URL accessibility: {image_url}")
                headers = {
                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
                    'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
                    'Accept-Language': 'en-US,en;q=0.9',
                    'Accept-Encoding': 'gzip, deflate, br',
                    'Connection': 'keep-alive',
                    'Upgrade-Insecure-Requests': '1'
                }
                test_response = requests.head(image_url, headers=headers, timeout=10, allow_redirects=True)
                if test_response.status_code != 200:
                    logger.warning(f"[Product] Image URL not accessible (status {test_response.status_code}): {image_url}")
                    raise Exception(f"Image URL not accessible: {test_response.status_code}")
                logger.info(f"[Product] Image URL is accessible")
            except Exception as e:
                logger.warning(f"[Product] Failed to test image URL {image_url}: {e}")
                image_url = None
            # Send using public URL (not local file)
            if image_url:
                logger.info(f"[Product] Attempting to send image from CSV URL for: {product_name}")
                success = send_whatsjet_message(
                    from_number,
                    details,
                    media_type=media_type,
                    media_path=image_url,  # Use public URL directly
                    filename=filename
                )
                if success:
                    logger.info(f"[Product] Successfully sent image from CSV link with caption for product: {product_name}")
                    return
                else:
                    logger.warning(f"[Product] Failed to send image from CSV link, trying fallback: {product_name}")
            # Fallback 1: Try with a known public test image
            logger.info(f"[Product] Trying public test image for: {product_name}")
            test_image_url = "https://www.w3schools.com/w3images/lights.jpg"
            media_type = 'image/jpeg'
            filename = f"{product_name.replace(' ', '_')}.jpg"
            success = send_whatsjet_message(
                from_number,
                details,
                media_type=media_type,
                media_path=test_image_url,
                filename=filename
            )
            if success:
                logger.info(f"[Product] Successfully sent test image with caption for product: {product_name}")
                return
        # Fallback 2: Try local uploads directory (public URL)
        logger.info(f"[Product] Trying local uploads directory for: {product_name}")
        image_path = get_product_image_path(product_name)
        if image_path and (image_path.startswith('http') or os.path.exists(image_path)):
            media_type = get_product_image_media_type(image_path)
            filename = f"{product_name.replace(' ', '_')}.jpg"
            # If it's already a public URL, use it directly
            if image_path.startswith('http'):
                media_path = image_path
                logger.info(f"[Product] Using existing public URL: {media_path}")
            else:
                # Convert local path to public URL
                media_path = f"https://dreamstream-1-chatbot.hf.space/uploads/{os.path.basename(image_path).replace(' ', '%20')}"
                logger.info(f"[Product] Converted local path to public URL: {media_path}")
            success = send_whatsjet_message(
                from_number,
                details,
                media_type=media_type,
                media_path=media_path,  # Use public URL
                filename=filename
            )
            if success:
                logger.info(f"[Product] Successfully sent image with caption for product: {product_name}")
            else:
                logger.warning(f"[Product] Failed to send image, sending text only: {product_name}")
                send_whatsjet_message(from_number, details)
        else:
            # No image available, send text only
            logger.info(f"[Product] No image available, sending text only for: {product_name}")
            send_whatsjet_message(from_number, details)
    except Exception as e:
        # Boundary catch: never let an image failure block the product reply.
        logger.error(f"[Product] Error sending product image with caption: {e}")
        logger.info(f"[Product] Falling back to text-only message for: {product_name}")
        send_whatsjet_message(from_number, details)
|
3308 |
+
|
3309 |
if __name__ == "__main__":
|
3310 |
# Launch FastAPI app
|
3311 |
import uvicorn
|