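"""Vision 2030 Assistant: a bilingual (Arabic/English) retrieval-augmented
Gradio demo. A small built-in knowledge base (plus, optionally, an uploaded
PDF) is embedded with sentence-transformers and indexed with FAISS; answers
are generated by distilgpt2 grounded in the retrieved context. Written for
Hugging Face Spaces, but it degrades gracefully to CPU and to hash-based
fallback embeddings when models fail to load."""
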
import gradio as gr
import time
import logging
import io

import numpy as np
from sentence_transformers import SentenceTransformer, util
import faiss
import torch
import spaces
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import PyPDF2

# Configure logging for debugging and monitoring
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[logging.StreamHandler()]
)
logger = logging.getLogger('Vision2030Assistant')

# Check for GPU availability
has_gpu = torch.cuda.is_available()
logger.info(f"GPU available: {has_gpu}")

class Vision2030Assistant:
    def __init__(self):
        """Initialize the assistant with enhanced features"""
        logger.info("Initializing Vision 2030 Assistant...")
        
        # Load models with error handling
        self.load_embedding_models()
        self.load_language_model()
        
        # Initialize knowledge base and indices
        self._create_knowledge_base()
        self._create_indices()
        
        # Sample evaluation data
        self._create_sample_eval_data()
        
        # Metrics storage
        self.metrics = {"response_times": [], "user_ratings": [], "factual_accuracy": []}
        
        # Session management
        self.session_history = {}
        
        # PDF content flag
        self.has_pdf_content = False
        
        logger.info("Assistant initialized successfully")

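    # @spaces.GPU is the Hugging Face ZeroGPU decorator: on ZeroGPU Spaces it
    # allocates a GPU for the duration of the decorated call; on other
    # hardware it is effectively a no-op.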
    @spaces.GPU
    def load_embedding_models(self):
        """Load embedding models with fallback"""
        try:
            self.arabic_embedder = SentenceTransformer('CAMeL-Lab/bert-base-arabic-camelbert-ca')
            self.english_embedder = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
            if has_gpu:
                self.arabic_embedder = self.arabic_embedder.to('cuda')
                self.english_embedder = self.english_embedder.to('cuda')
            logger.info("Embedding models loaded successfully")
        except Exception as e:
            logger.error(f"Failed to load embedding models: {e}")
            self._fallback_embedding()

    def _fallback_embedding(self):
        """Fallback to simple embedding if model loading fails"""
        logger.warning("Using fallback embedding method")
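        # Stand-in embedder: hash the text with MD5, use the digest to seed
        # NumPy's RNG, and draw a fixed 384-dim vector (matching MiniLM's
        # output size). Identical texts get identical vectors, so exact
        # duplicates still match, but there is no semantic similarity, so
        # retrieval quality is near-random for everything else.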
        def simple_embed(text):
            import hashlib
            hash_obj = hashlib.md5(text.encode())
            np.random.seed(int(hash_obj.hexdigest(), 16) % 2**32)
            return np.random.randn(384).astype(np.float32)
        
        class SimpleEmbedder:
            def encode(self, text):
                # Mirror SentenceTransformer.encode: accept a single string or
                # a batch (list) of strings, since process_pdf passes batches
                if isinstance(text, list):
                    return np.stack([simple_embed(t) for t in text])
                return simple_embed(text)
        
        self.arabic_embedder = SimpleEmbedder()
        self.english_embedder = SimpleEmbedder()

    @spaces.GPU
    def load_language_model(self):
        """Load language model for advanced response generation"""
        try:
            self.tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
            self.model = AutoModelForCausalLM.from_pretrained("distilgpt2")
            if has_gpu:
                self.model = self.model.to('cuda')
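            # transformers pipeline device convention: 0 = first CUDA device, -1 = CPU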
            self.generator = pipeline('text-generation', model=self.model, tokenizer=self.tokenizer, device=0 if has_gpu else -1)
            logger.info("Language model loaded successfully")
        except Exception as e:
            logger.error(f"Failed to load language model: {e}")
            self.generator = None

    def _create_knowledge_base(self):
        """Create initial knowledge base"""
        self.english_texts = [
            "Vision 2030 is Saudi Arabia's strategic framework to reduce dependence on oil, diversify the economy, and develop public sectors.",
            "The key pillars of Vision 2030 are a vibrant society, a thriving economy, and an ambitious nation.",
            "NEOM is a planned smart city in Tabuk Province, a key Vision 2030 project."
        ]
        self.arabic_texts = [
            "رؤية 2030 هي إطار استراتيجي لتقليل الاعتماد على النفط وتنويع الاقتصاد.",
            "الركائز الرئيسية لرؤية 2030 هي مجتمع حيوي، واقتصاد مزدهر، ووطن طموح.",
            "نيوم مدينة ذكية مخططة في تبوك، مشروع رئيسي لرؤية 2030."
        ]
        self.pdf_english_texts = []
        self.pdf_arabic_texts = []

    @spaces.GPU
    def _create_indices(self):
        """Create scalable FAISS indices"""
        try:
            # English index with IVF for scalability
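            # IndexIVFFlat partitions vectors into nlist clusters through the
            # flat L2 quantizer and, at query time, searches only the nearest
            # clusters (index.nprobe, default 1). It must be train()ed on
            # representative vectors before add(). With this tiny seed corpus
            # nlist is 1, so it degenerates to a flat scan; the IVF structure
            # only pays off at larger corpus sizes.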
            english_vectors = [self.english_embedder.encode(text) for text in self.english_texts]
            dim = len(english_vectors[0])
            nlist = max(1, len(english_vectors) // 10)
            quantizer = faiss.IndexFlatL2(dim)
            self.english_index = faiss.IndexIVFFlat(quantizer, dim, nlist)
            self.english_index.train(np.array(english_vectors))
            self.english_index.add(np.array(english_vectors))
            
            # Arabic index. A separate quantizer is required: training an IVF
            # index also trains its quantizer, so sharing one with the English
            # index would corrupt that index's cluster assignments.
            arabic_vectors = [self.arabic_embedder.encode(text) for text in self.arabic_texts]
            arabic_dim = len(arabic_vectors[0])
            arabic_nlist = max(1, len(arabic_vectors) // 10)
            arabic_quantizer = faiss.IndexFlatL2(arabic_dim)
            self.arabic_index = faiss.IndexIVFFlat(arabic_quantizer, arabic_dim, arabic_nlist)
            self.arabic_index.train(np.array(arabic_vectors))
            self.arabic_index.add(np.array(arabic_vectors))
            
            logger.info("FAISS indices created successfully")
        except Exception as e:
            logger.error(f"Error creating indices: {e}")

    def _create_sample_eval_data(self):
        """Sample evaluation data"""
        self.eval_data = [
            {"question": "What are the key pillars of Vision 2030?", "lang": "en", "reference": "The key pillars of Vision 2030 are a vibrant society, a thriving economy, and an ambitious nation."},
            {"question": "ما هي الركائز الرئيسية لرؤية 2030؟", "lang": "ar", "reference": "الركائز الرئيسية لرؤية 2030 هي مجتمع حيوي، واقتصاد مزدهر، ووطن طموح."}
        ]
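        # These pairs are meant to feed evaluate_factual_accuracy() below,
        # which scores a generated response against its reference answer by
        # embedding cosine similarity.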

    @spaces.GPU
    def retrieve_context(self, query, lang, session_id):
        """Retrieve context with session history integration"""
        try:
            # Incorporate session history
            history = self.session_history.get(session_id, [])
            history_context = " ".join([f"Q: {q} A: {a}" for q, a in history[-2:]])  # Last 2 interactions
            
            # Embed query
            embedder = self.arabic_embedder if lang == "ar" else self.english_embedder
            query_vec = embedder.encode(query)
            
            # Prefer the PDF index when the uploaded PDF actually produced
            # chunks in the query's language. A PDF may yield chunks in only
            # one language, in which case the other language's PDF index was
            # never built; fall back to the built-in knowledge base then.
            if self.has_pdf_content and lang == "ar" and self.pdf_arabic_texts:
                index, texts = self.pdf_arabic_index, self.pdf_arabic_texts
            elif self.has_pdf_content and lang == "en" and self.pdf_english_texts:
                index, texts = self.pdf_english_index, self.pdf_english_texts
            elif lang == "ar":
                index, texts = self.arabic_index, self.arabic_texts
            else:
                index, texts = self.english_index, self.english_texts
            
            D, I = index.search(np.array([query_vec]), k=2)
            context = "\n".join([texts[i] for i in I[0] if i >= 0])
            if not context.strip():
                return "No relevant information found."
            # Only append history when there is some, keeping the prompt clean
            return context + (f"\nHistory: {history_context}" if history_context else "")
        except Exception as e:
            logger.error(f"Retrieval error: {e}")
            return "Error retrieving context."

    @spaces.GPU
    def generate_response(self, query, session_id):
        """Generate advanced responses with error handling"""
        if not query.strip():
            return "Please enter a valid question."
        
        start_time = time.time()
        try:
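            # Crude language detection: treat the query as Arabic if it contains
            # any character from the Arabic Unicode block (U+0600 to U+06FF)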
            lang = "ar" if any('\u0600' <= c <= '\u06FF' for c in query) else "en"
            context = self.retrieve_context(query, lang, session_id)
            
            if "Error" in context or "No relevant" in context:
                reply = context
            elif self.generator:
                prompt = f"Context: {context}\nQuestion: {query}\nAnswer:"
                # max_new_tokens bounds only the completion; max_length would
                # also count the (possibly long) retrieved context in the prompt
                response = self.generator(prompt, max_new_tokens=100, num_return_sequences=1,
                                          do_sample=True, temperature=0.7,
                                          pad_token_id=self.tokenizer.eos_token_id)
                reply = response[0]['generated_text'].split("Answer:")[-1].strip()
            else:
                reply = context  # Fallback
            
            # Update session history
            self.session_history.setdefault(session_id, []).append((query, reply))
            self.metrics["response_times"].append(time.time() - start_time)
            return reply
        except Exception as e:
            logger.error(f"Response generation error: {e}")
            return "Sorry, an error occurred. Please try again."

    def evaluate_factual_accuracy(self, response, reference):
        """Evaluate using semantic similarity"""
        try:
            # Match the embedder to the reference's script; the sample eval
            # data contains both English and Arabic references
            is_arabic = any('\u0600' <= c <= '\u06FF' for c in reference)
            embedder = self.arabic_embedder if is_arabic else self.english_embedder
            response_vec = embedder.encode(response)
            reference_vec = embedder.encode(reference)
            similarity = util.cos_sim(response_vec, reference_vec).item()
            return similarity
        except Exception as e:
            logger.error(f"Evaluation error: {e}")
            return 0.0

    @spaces.GPU
    def process_pdf(self, file):
        """Process PDF with scalability and error handling"""
        if not file:
            return "Please upload a PDF file."
        
        try:
            pdf_reader = PyPDF2.PdfReader(io.BytesIO(file))
            # Join pages with a newline so words at page boundaries don't merge
            text = "\n".join([page.extract_text() or "" for page in pdf_reader.pages])
            if not text.strip():
                return "No extractable text found in PDF."
            
            # Chunk text for scalability
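            # (fixed 300-character windows with no overlap; each chunk is then
            # routed to the Arabic or English store depending on whether it
            # contains any Arabic-block characters)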
            chunks = [text[i:i+300] for i in range(0, len(text), 300)]
            self.pdf_english_texts = [c for c in chunks if not any('\u0600' <= char <= '\u06FF' for char in c)]
            self.pdf_arabic_texts = [c for c in chunks if any('\u0600' <= char <= '\u06FF' for char in c)]
            
            # Batch process embeddings
            batch_size = 32
            for lang, texts, embedder in [("en", self.pdf_english_texts, self.english_embedder), 
                                         ("ar", self.pdf_arabic_texts, self.arabic_embedder)]:
                if texts:
                    vectors = []
                    for i in range(0, len(texts), batch_size):
                        batch = texts[i:i+batch_size]
                        vectors.extend(embedder.encode(batch))
                    dim = len(vectors[0])
                    nlist = max(1, len(vectors) // 10)
                    quantizer = faiss.IndexFlatL2(dim)
                    index = faiss.IndexIVFFlat(quantizer, dim, nlist)
                    index.train(np.array(vectors))
                    index.add(np.array(vectors))
                    setattr(self, f"pdf_{lang}_index", index)
            
            self.has_pdf_content = True
            return f"PDF processed: {len(self.pdf_english_texts)} English, {len(self.pdf_arabic_texts)} Arabic chunks."
        except Exception as e:
            logger.error(f"PDF processing error: {e}")
            return f"Error processing PDF: {e}"

# Gradio Interface
def create_interface():
    assistant = Vision2030Assistant()
    
    def chat(query, history, session_id):
        reply = assistant.generate_response(query, session_id)
        history.append((query, reply))
        return history, ""
    
    with gr.Blocks() as demo:
        gr.Markdown("# Vision 2030 Virtual Assistant")
        session_id = gr.State(value="user1")  # Simple session ID; enhance with authentication
        chatbot = gr.Chatbot()
        msg = gr.Textbox(label="Ask a question")
        submit = gr.Button("Submit")
        pdf_upload = gr.File(label="Upload PDF", type="binary")
        upload_status = gr.Textbox(label="Upload Status")
        
        submit.click(chat, [msg, chatbot, session_id], [chatbot, msg])
        pdf_upload.upload(assistant.process_pdf, pdf_upload, upload_status)
    
    return demo

demo = create_interface()
demo.launch()
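
# On Hugging Face Spaces the bare launch() above is all that is needed. For
# local testing, demo.launch(server_name="0.0.0.0", server_port=7860) exposes
# the app on the local network; both are standard Gradio launch parameters.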