abdull4h committed on
Commit
6116543
·
verified ·
1 Parent(s): 39d753a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +118 -131
app.py CHANGED
@@ -1,6 +1,4 @@
1
- # Minimal version for Hugging Face Spaces
2
- # File: app.py
3
-
4
  import gradio as gr
5
  import time
6
  import logging
@@ -24,13 +22,11 @@ import spaces
24
  logging.basicConfig(
25
  level=logging.INFO,
26
  format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
27
- handlers=[
28
- logging.StreamHandler()
29
- ]
30
  )
31
  logger = logging.getLogger('vision2030_assistant')
32
 
33
- # Check for GPU availability (but don't rely on it)
34
  has_gpu = torch.cuda.is_available()
35
  logger.info(f"GPU available: {has_gpu}")
36
 
@@ -84,13 +80,12 @@ class Vision2030Assistant:
84
  """Create fallback embedding methods if model loading fails"""
85
  logger.warning("Using fallback embedding methods")
86
 
87
- # Simple fallback using character-level encoding (not a real embedding, just for demo)
88
  def simple_encode(text, dim=384):
89
  import hashlib
90
  # Create a hash of the text
91
  hash_object = hashlib.md5(text.encode())
92
  # Use the hash to seed a random number generator
93
- import numpy as np
94
  np.random.seed(int(hash_object.hexdigest(), 16) % 2**32)
95
  # Generate a random vector
96
  return np.random.randn(dim).astype(np.float32)
@@ -467,8 +462,11 @@ class Vision2030Assistant:
467
  try:
468
  logger.info(f"Processing uploaded file")
469
 
470
- # Use PyPDF2 to read the file content directly
471
- reader = PyPDF2.PdfReader(file)
 
 
 
472
 
473
  # Extract text from the PDF
474
  full_text = ""
@@ -516,129 +514,118 @@ class Vision2030Assistant:
516
 
517
  # Create the Gradio interface
518
  def create_interface():
519
- try:
520
- # Initialize the assistant
521
- assistant = Vision2030Assistant()
522
-
523
- def chat(message, history):
524
- if not message or message.strip() == "":
525
- return history, ""
526
-
527
- # Generate response
528
- reply = assistant.generate_response(message)
529
-
530
- # Update history
531
- history.append((message, reply))
532
-
533
  return history, ""
534
 
535
- def provide_feedback(history, rating, feedback_text):
536
- # Record feedback for the last conversation
537
- if history and len(history) > 0:
538
- last_interaction = history[-1]
539
- assistant.record_user_feedback(last_interaction[0], last_interaction[1], rating, feedback_text)
540
- return f"Thank you for your feedback! (Rating: {rating}/5)"
541
- return "No conversation found to rate."
542
-
543
- @spaces.GPU
544
- def run_evaluation():
545
- results = assistant.evaluate_on_test_set()
546
-
547
- # Create summary text
548
- summary = f"""
549
- Evaluation Results:
550
- ------------------
551
- Total questions evaluated: {len(results['detailed_results'])}
552
- Overall factual accuracy: {results['average_factual_accuracy']:.2f}
553
- Average response time: {results['average_response_time']:.4f} seconds
554
-
555
- Detailed Results:
556
- """
557
-
558
- for i, result in enumerate(results['detailed_results']):
559
- summary += f"\nQ{i+1}: {result['question']}\n"
560
- summary += f"Reference: {result['reference']}\n"
561
- summary += f"Response: {result['response']}\n"
562
- summary += f"Accuracy: {result['factual_accuracy']:.2f}\n"
563
- summary += "-" * 40 + "\n"
564
-
565
- # Return both the results summary and visualization
566
- fig = assistant.visualize_evaluation_results(results)
567
-
568
- return summary, fig
569
-
570
- # Create the Gradio interface
571
- with gr.Blocks() as demo:
572
- gr.Markdown("# Vision 2030 Virtual Assistant 🌟")
573
- gr.Markdown("Ask questions about Saudi Arabia's Vision 2030 in both Arabic and English")
574
-
575
- with gr.Tab("Chat"):
576
- chatbot = gr.Chatbot(height=400)
577
- msg = gr.Textbox(label="Your Question", placeholder="Ask about Vision 2030...")
578
- with gr.Row():
579
- submit_btn = gr.Button("Submit")
580
- clear_btn = gr.Button("Clear Chat")
581
-
582
- gr.Markdown("### Provide Feedback")
583
- with gr.Row():
584
- rating = gr.Slider(minimum=1, maximum=5, step=1, value=3, label="Rate the Response (1-5)")
585
- feedback_text = gr.Textbox(label="Additional Comments (Optional)")
586
- feedback_btn = gr.Button("Submit Feedback")
587
- feedback_result = gr.Textbox(label="Feedback Status")
588
-
589
- with gr.Tab("Evaluation"):
590
- evaluate_btn = gr.Button("Run Evaluation on Test Set")
591
- eval_output = gr.Textbox(label="Evaluation Results", lines=20)
592
- eval_chart = gr.Plot(label="Evaluation Metrics")
593
-
594
- with gr.Tab("Upload PDF"):
595
- gr.Markdown("""
596
- ### Upload a Vision 2030 PDF Document
597
- Upload a PDF document to enhance the assistant's knowledge base.
598
- """)
599
-
600
- with gr.Row():
601
- file_input = gr.File(
602
- label="Select PDF File",
603
- file_types=[".pdf"],
604
- type="binary" # Important: Use binary mode
605
- )
606
-
607
- with gr.Row():
608
- upload_btn = gr.Button("Process PDF", variant="primary")
609
-
610
- with gr.Row():
611
- upload_status = gr.Textbox(
612
- label="Upload Status",
613
- placeholder="Upload status will appear here...",
614
- interactive=False
615
- )
616
-
617
- gr.Markdown("""
618
- ### Notes:
619
- - The PDF should contain text that can be extracted (not scanned images)
620
- - After uploading, you can return to the Chat tab to ask questions about the uploaded content
621
- - If no PDF is uploaded, the assistant will use default Vision 2030 information
622
- """)
623
-
624
- # Set up event handlers
625
- msg.submit(chat, [msg, chatbot], [chatbot, msg])
626
- submit_btn.click(chat, [msg, chatbot], [chatbot, msg])
627
- clear_btn.click(lambda: [], None, chatbot)
628
- feedback_btn.click(provide_feedback, [chatbot, rating, feedback_text], feedback_result)
629
- evaluate_btn.click(run_evaluation, None, [eval_output, eval_chart])
630
- upload_btn.click(assistant.process_uploaded_pdf, [file_input], [upload_status])
631
-
632
- return demo
 
 
 
 
 
633
 
634
- except Exception as e:
635
- logger.error(f"Error creating Gradio interface: {str(e)}")
636
- # Create a simple fallback demo if there's an error
637
- with gr.Blocks() as demo:
638
- gr.Markdown("# Vision 2030 Virtual Assistant")
639
- gr.Markdown("There was an error initializing the assistant. Please check the logs.")
640
- gr.Markdown(f"Error: {str(e)}")
641
- return demo
642
 
643
  # Launch the app
644
  demo = create_interface()
 
1
+ # Minimal app.py for Hugging Face Spaces - Vision 2030 Virtual Assistant
 
 
2
  import gradio as gr
3
  import time
4
  import logging
 
22
  logging.basicConfig(
23
  level=logging.INFO,
24
  format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
25
+ handlers=[logging.StreamHandler()]
 
 
26
  )
27
  logger = logging.getLogger('vision2030_assistant')
28
 
29
+ # Check for GPU availability
30
  has_gpu = torch.cuda.is_available()
31
  logger.info(f"GPU available: {has_gpu}")
32
 
 
80
  """Create fallback embedding methods if model loading fails"""
81
  logger.warning("Using fallback embedding methods")
82
 
83
+ # Simple fallback using character-level encoding
84
  def simple_encode(text, dim=384):
85
  import hashlib
86
  # Create a hash of the text
87
  hash_object = hashlib.md5(text.encode())
88
  # Use the hash to seed a random number generator
 
89
  np.random.seed(int(hash_object.hexdigest(), 16) % 2**32)
90
  # Generate a random vector
91
  return np.random.randn(dim).astype(np.float32)
 
462
  try:
463
  logger.info(f"Processing uploaded file")
464
 
465
+ # Convert bytes to file-like object
466
+ file_stream = io.BytesIO(file)
467
+
468
+ # Use PyPDF2 to read the file content
469
+ reader = PyPDF2.PdfReader(file_stream)
470
 
471
  # Extract text from the PDF
472
  full_text = ""
 
514
 
515
  # Create the Gradio interface
516
def create_interface():
    """Build and return the Gradio Blocks UI for the Vision 2030 assistant.

    Creates a single Vision2030Assistant instance and wires it to three tabs:
    Chat (Q&A with user feedback), Evaluation (test-set metrics plus a chart),
    and Upload PDF (extend the knowledge base from a document).

    Returns:
        gr.Blocks: the assembled interface, ready to launch.
    """
    # Initialize the assistant once; every handler below closes over it.
    assistant = Vision2030Assistant()

    def chat(message, history):
        """Handle one chat turn: append (question, answer), clear the textbox."""
        # Gradio may pass None for an empty chatbot; normalize to a list so
        # the .append() below cannot raise AttributeError.
        history = history or []
        if not message or message.strip() == "":
            return history, ""

        # Generate response
        reply = assistant.generate_response(message)

        # Update history
        history.append((message, reply))

        return history, ""

    def provide_feedback(history, rating, feedback_text):
        """Record rating/comments against the most recent exchange."""
        # Guard against both an empty list and None (no conversation yet).
        if history:
            last_interaction = history[-1]
            assistant.record_user_feedback(last_interaction[0], last_interaction[1], rating, feedback_text)
            return f"Thank you for your feedback! (Rating: {rating}/5)"
        return "No conversation found to rate."

    @spaces.GPU  # request a GPU slice on Spaces for the evaluation run
    def run_evaluation():
        """Evaluate on the built-in test set; return (text summary, figure)."""
        results = assistant.evaluate_on_test_set()

        # Create summary text
        summary = f"""
        Evaluation Results:
        ------------------
        Total questions evaluated: {len(results['detailed_results'])}
        Overall factual accuracy: {results['average_factual_accuracy']:.2f}
        Average response time: {results['average_response_time']:.4f} seconds

        Detailed Results:
        """

        for i, result in enumerate(results['detailed_results']):
            summary += f"\nQ{i+1}: {result['question']}\n"
            summary += f"Reference: {result['reference']}\n"
            summary += f"Response: {result['response']}\n"
            summary += f"Accuracy: {result['factual_accuracy']:.2f}\n"
            summary += "-" * 40 + "\n"

        # Return both the results summary and visualization
        fig = assistant.visualize_evaluation_results(results)

        return summary, fig

    # Create the Gradio interface
    with gr.Blocks() as demo:
        gr.Markdown("# Vision 2030 Virtual Assistant 🌟")
        gr.Markdown("Ask questions about Saudi Arabia's Vision 2030 in both Arabic and English")

        with gr.Tab("Chat"):
            chatbot = gr.Chatbot(height=400)
            msg = gr.Textbox(label="Your Question", placeholder="Ask about Vision 2030...")
            with gr.Row():
                submit_btn = gr.Button("Submit")
                clear_btn = gr.Button("Clear Chat")

            gr.Markdown("### Provide Feedback")
            with gr.Row():
                rating = gr.Slider(minimum=1, maximum=5, step=1, value=3, label="Rate the Response (1-5)")
                feedback_text = gr.Textbox(label="Additional Comments (Optional)")
            feedback_btn = gr.Button("Submit Feedback")
            feedback_result = gr.Textbox(label="Feedback Status")

        with gr.Tab("Evaluation"):
            evaluate_btn = gr.Button("Run Evaluation on Test Set")
            eval_output = gr.Textbox(label="Evaluation Results", lines=20)
            eval_chart = gr.Plot(label="Evaluation Metrics")

        with gr.Tab("Upload PDF"):
            gr.Markdown("""
            ### Upload a Vision 2030 PDF Document
            Upload a PDF document to enhance the assistant's knowledge base.
            """)

            with gr.Row():
                file_input = gr.File(
                    label="Select PDF File",
                    file_types=[".pdf"],
                    type="binary"  # This is critical - use binary mode
                )

            with gr.Row():
                upload_btn = gr.Button("Process PDF", variant="primary")

            with gr.Row():
                upload_status = gr.Textbox(
                    label="Upload Status",
                    placeholder="Upload status will appear here...",
                    interactive=False
                )

            gr.Markdown("""
            ### Notes:
            - The PDF should contain text that can be extracted (not scanned images)
            - After uploading, return to the Chat tab to ask questions about the uploaded content
            """)

        # Set up event handlers
        msg.submit(chat, [msg, chatbot], [chatbot, msg])
        submit_btn.click(chat, [msg, chatbot], [chatbot, msg])
        clear_btn.click(lambda: [], None, chatbot)
        feedback_btn.click(provide_feedback, [chatbot, rating, feedback_text], feedback_result)
        evaluate_btn.click(run_evaluation, None, [eval_output, eval_chart])
        upload_btn.click(assistant.process_uploaded_pdf, [file_input], [upload_status])

    return demo
 
 
 
 
 
 
 
629
 
630
# Build the UI at import time; Hugging Face Spaces auto-launches the
# module-level `demo` object.
demo = create_interface()