Build error
Update app.py
app.py
CHANGED
@@ -1,23 +1,24 @@
-import dspy
+import dspy
+import gradio as gr
 import chromadb
 import fitz  # PyMuPDF
 from sentence_transformers import SentenceTransformer
 import json
-from dspy import Example, MIPROv2,
+from dspy import Example, MIPROv2, evaluate
 
-# Set up an open-source model
+# Set up an open-source model (Mistral)
 dspy.settings.configure(lm=dspy.LM("mistralai/Mistral-7B-Instruct-v0.2"))
 
-# Set up
+# Set up ChromaDB
 client = chromadb.PersistentClient(path="./chroma_db")
 col = client.get_or_create_collection(name="arabic_docs")
 
-# Model
+# LaBSE embedding model
 embedder = SentenceTransformer("sentence-transformers/LaBSE")
 
 # Split the PDF text into chunks
-def process_pdf(pdf_file):
-    doc = fitz.open(stream=
+def process_pdf(pdf_bytes):
+    doc = fitz.open(stream=pdf_bytes, filetype="pdf")
     texts = []
     for p in doc:
         text = p.get_text()
@@ -26,46 +27,55 @@ def process_pdf(pdf_file):
             texts.append(chunk.strip())
     return texts
 
-# Ingest
+# Ingest the data into Chroma
 def ingest(pdf_file):
     texts = process_pdf(pdf_file)
     embeddings = embedder.encode(texts, show_progress_bar=True)
     for i, (chunk, emb) in enumerate(zip(texts, embeddings)):
-        col.add(
+        col.add(
+            ids=[f"chunk_{i}"],
+            embeddings=[emb.tolist()],
+            metadatas=[{"text": chunk}]
+        )
     return f"✅ تمت إضافة {len(texts)} مقطعاً."
 
-#
+# Retrieve context from Chroma
+def retrieve_context(query):
+    query_emb = embedder.encode([query])[0]
+    results = col.query(query_embeddings=[query_emb.tolist()], n_results=1)
+    context_list = [m["text"] for m in results["metadatas"]]
+    return context_list[0] if context_list else ""
+
+# RAG signature
 class RagSig(dspy.Signature):
     question: str = dspy.InputField()
     context: str = dspy.InputField()
     answer: str = dspy.OutputField()
 
-# Module
+# RAG module
 class RagMod(dspy.Module):
     def __init__(self):
         super().__init__()
         self.predictor = dspy.Predict(RagSig)
 
     def forward(self, question):
-
-        results = col.query(query_embeddings=[query_embedding], n_results=1)
-        context_list = [m["text"] for m in results["metadatas"][0]]  # ✅ fixed here
-        context = context_list[0] if context_list else ""
+        context = retrieve_context(question)
         return self.predictor(question=question, context=context)
 
+# Instantiate the model
 model = RagMod()
 
-# Generate
+# Generate the answer
 def answer(question):
     out = model(question)
     return out.answer
 
 # Load the training data
 def load_dataset(path):
     with open(path, "r", encoding="utf-8") as f:
         return [Example(**json.loads(l)).with_inputs("question") for l in f]
 
 # Optimize the model
 def optimize(train_file, val_file):
     global model
     trainset = load_dataset(train_file.name)
@@ -82,8 +92,8 @@ with gr.Blocks() as demo:
     with gr.Tab("📥 تحميل وتخزين"):
         pdf_input = gr.File(label="ارفع ملف PDF", type="binary")
         ingest_btn = gr.Button("إضافة إلى قاعدة البيانات")
-
-        ingest_btn.click(ingest, inputs=pdf_input, outputs=
+        ingest_result = gr.Textbox(label="النتيجة")
+        ingest_btn.click(ingest, inputs=pdf_input, outputs=ingest_result)
 
     with gr.Tab("❓ سؤال"):
         q = gr.Textbox(label="اكتب سؤالك بالعربية")
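A note on the new retrieve_context helper: ChromaDB's Collection.query() returns one result list per query embedding, so results["metadatas"] is a nested list, which is why the pre-commit inline code indexed it with [0]. The sketch below is a minimal, hedged version of that retrieval step which keeps the per-query indexing; it reuses the col collection and LaBSE embedder exactly as set up in app.py, while the type hints, n_results parameter, and the join-over-hits behaviour are assumptions for illustration, not what the commit ships.

import chromadb
from sentence_transformers import SentenceTransformer

# Same setup as app.py (path, collection name, and model taken from the diff above).
client = chromadb.PersistentClient(path="./chroma_db")
col = client.get_or_create_collection(name="arabic_docs")
embedder = SentenceTransformer("sentence-transformers/LaBSE")

def retrieve_context(query: str, n_results: int = 1) -> str:
    # Embed the query with the same LaBSE model used at ingest time.
    query_emb = embedder.encode([query])[0]
    results = col.query(query_embeddings=[query_emb.tolist()], n_results=n_results)
    # query() nests results per query: results["metadatas"] -> [[hit, hit, ...]].
    hits = results["metadatas"][0]
    # Join the stored chunk texts; assumes each metadata dict carries the "text"
    # key written by ingest() in app.py.
    return " ".join(m["text"] for m in hits) if hits else ""

Called from RagMod.forward, this behaves like the inline retrieval it replaces while keeping the standalone helper introduced in this commit.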