afouda committed
Commit e61ab70 · verified · 1 Parent(s): 326b67d

Update app.py

Files changed (1)
  1. app.py +288 -283
app.py CHANGED
@@ -1,283 +1,288 @@
- import os
- import re
- import asyncio
- import gradio as gr
- import RAG_Domain_know_doc
- from web_search import search_autism
- from RAG import rag_autism
- from openai import OpenAI # Corrected import
- from dotenv import load_dotenv
- import Old_Document
- import User_Specific_Documents
- from prompt_template import (
-     Prompt_template_translation,
-     Prompt_template_LLM_Generation,
-     Prompt_template_Reranker,
-     Prompt_template_Wisal,
-     Prompt_template_Halluciations,
-     Prompt_template_paraphrasing,
-     Prompt_template_Translate_to_original,
-     Prompt_template_relevance,
-     Prompt_template_User_document_prompt
- )
-
- load_dotenv()
- DEEPINFRA_TOKEN = os.getenv("DEEPINFRA_API_KEY") or "285LUJulGIprqT6hcPhiXtcrphU04FG4"
- if not DEEPINFRA_TOKEN:
-     raise ValueError("DEEPINFRA_API_KEY is not set in .env file")
-
- # Initialize OpenAI client
- env = os.getenv("ENVIRONMENT", "production")
- openai = OpenAI(
-     api_key=DEEPINFRA_TOKEN,
-     base_url="https://api.deepinfra.com/v1/openai",
- )
-
-
-
- # Rest of your code remains unchanged
- # Helper to call chat completion synchronously
- def call_llm(model: str, messages: list[dict], temperature: float = 0.0, **kwargs) -> str:
-     resp = openai.chat.completions.create(
-         model=model,
-         messages=messages,
-         temperature=temperature,
-         **kwargs
-     )
-     return resp.choices[0].message.content.strip()
-
- # Basic greeting detection
- def is_greeting(text: str) -> bool:
-     return bool(re.search(r"\b(hi|hello|hey|good (morning|afternoon|evening))\b", text, re.I))
-
-
- def process_query(query: str, first_turn: bool = False):
-     intro = ""
-     process_log = []
-
-     if first_turn and (not query or query.strip() == ""):
-         intro = "Hello! I’m Wisal, an AI assistant developed by Compumacy AI, specializing in Autism Spectrum Disorders. How can I help you today?"
-         process_log.append(intro)
-         _save_process_log(process_log)
-         return intro
-
-     # Handle Yes/No replies
-     if query.strip().lower() == "no":
-         no_reply = (
-             "Hello, I’m Wisal, an AI assistant developed by Compumacy AI, "
-             "and a knowledgeable Autism specialist.\n"
-             "If you have any question related to autism, please submit a question specifically about autism."
-         )
-         process_log.append(f"User replied 'No'.\n{no_reply}")
-         _save_process_log(process_log)
-         return no_reply
-     elif query.strip().lower() == "yes":
-         process_log.append("User replied 'Yes'. Continuing system as normal.")
-
-
-     # 0: Handle simple greetings
-     if is_greeting(query):
-         greeting = intro + "Hello! I’m Wisal, your AI assistant developed by Compumacy AI. How can I help you today?"
-         process_log.append(f"Greeting detected.\n{greeting}")
-         _save_process_log(process_log)
-         return greeting
-
-     # 1: Translation & Rephrasing
-     corrected_query = call_llm(
-         model="Qwen/Qwen3-32B",
-         messages=[{"role": "user", "content": Prompt_template_translation.format(query=query)}],
-         reasoning_effort="none"
-     )
-     process_log.append(f"Corrected Query: {corrected_query}")
-
-     # 2: Relevance Check
-     relevance = call_llm(
-         model="Qwen/Qwen3-32B",
-         messages=[{"role": "user", "content": Prompt_template_relevance.format(corrected_query=corrected_query)}],
-         reasoning_effort="none"
-     )
-     process_log.append(f"Relevance: {relevance}")
-     if relevance != "RELATED":
-         process_log.append(f"Query not related. Returning: {relevance}")
-         _save_process_log(process_log)
-         return intro + relevance
-
-     # Step 3: Web Search
-     web_search_resp = asyncio.run(search_autism(corrected_query))
-     web_answer = web_search_resp.get("answer", "")
-     process_log.append(f"Web Search Answer: {web_answer}")
-
-     # Step 4: LLM Generation
-     gen_prompt = Prompt_template_LLM_Generation.format(new_query=corrected_query)
-     generated = call_llm(
-         model="Qwen/Qwen3-32B",
-         messages=[{"role": "user", "content": gen_prompt}],
-         reasoning_effort="none"
-     )
-     process_log.append(f"LLM Generated: {generated}")
-
-     # Step 5: RAG
-     rag_resp = asyncio.run(rag_autism(corrected_query, top_k=3))
-     rag_contexts = rag_resp.get("answer", [])
-     process_log.append(f"RAG Contexts: {rag_contexts}")
-
-     # 6) Reranking (now across 3 candidates)
-     rag_text = "\n".join(f"[{i+1}] {c}" for i, c in enumerate(rag_contexts))
-     answers_list = f"[1] {generated}\n[2] {web_answer}\n{rag_text}"
-     rerank_prompt = Prompt_template_Reranker.format(
-         new_query=corrected_query,
-         answers_list=answers_list
-     )
-     reranked = call_llm(
-         model="Qwen/Qwen3-32B",
-         messages=[{"role":"user","content":rerank_prompt}],
-         reasoning_effort="none"
-     )
-     process_log.append(f"Reranked: {reranked}")
-
-     # 7) Wisal final-answer generation
-     wisal_prompt = Prompt_template_Wisal.format(
-         new_query=corrected_query,
-         document=reranked # use reranked output here
-     )
-     wisal = call_llm(
-         model="Qwen/Qwen3-32B",
-         messages=[{"role":"user","content":wisal_prompt}],
-         reasoning_effort="none"
-     )
-     process_log.append(f"Wisal Final Answer: {wisal}")
-
-     # 8) Hallucination Check
-     halluc_prompt = Prompt_template_Halluciations.format(
-         new_query=corrected_query,
-         answer=wisal,
-         document=generated
-     )
-     halluc = call_llm(
-         model="Qwen/Qwen3-32B",
-         messages=[{"role": "user", "content": halluc_prompt}],
-         reasoning_effort="none"
-     )
-     process_log.append(f"Hallucination Check: {halluc}")
-     score = int(halluc.split("Score: ")[1]) if "Score: " in halluc else 3
-
-     # 9) Paraphrase if needed
-     if score in (2, 3):
-         paraphrase = call_llm(
-             model="Qwen/Qwen3-32B",
-             messages=[{"role": "user", "content": Prompt_template_paraphrasing.format(document=generated)}],
-             reasoning_effort="none"
-         )
-         process_log.append(f"Paraphrased: {paraphrase}")
-         context_prompt = Prompt_template_Wisal.format(new_query=corrected_query, document=paraphrase)
-         final_doc = call_llm(
-             model="Qwen/Qwen3-32B",
-             messages=[{"role": "user", "content": context_prompt}],
-             reasoning_effort="none"
-         )
-         process_log.append(f"Wisal with Paraphrase: {final_doc}")
-     else:
-         final_doc = wisal
-
-     # 10) Translate back if needed (improved: only if input is not English)
-     import langdetect
-     try:
-         detected_lang = langdetect.detect(query)
-     except Exception:
-         detected_lang = "en"
-     if detected_lang != "en":
-         result = call_llm(
-             model="Qwen/Qwen3-32B",
-             messages=[{"role": "user", "content": Prompt_template_Translate_to_original.format(query=query, document=final_doc)}],
-             reasoning_effort="none"
-         )
-         process_log.append(f"Translated Back: {result}")
-     else:
-         result = final_doc
-     process_log.append(f"Final Result: {result}")
-
-     _save_process_log(process_log)
-     return intro + result
- # Utility to save process log to a txt file
- def _save_process_log(log_lines, filename="process_output.txt"):
-     import datetime
-     import os
-     # Ensure logs directory exists
-     logs_dir = os.path.join(os.path.dirname(__file__), "logs")
-     os.makedirs(logs_dir, exist_ok=True)
-     # Unique filename per question (timestamped)
-     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")
-     log_filename = os.path.join(logs_dir, f"log_{timestamp}.txt")
-     try:
-         with open(log_filename, "w", encoding="utf-8") as f:
-             for line in log_lines:
-                 f.write(str(line) + "\n\n")
-     except Exception as e:
-         pass
-
-
- # Gradio UI for main pipeline, RAG_Domain_know_doc, and User_Specific_Documents , Old_Document
- def main_pipeline_interface(query):
-     return process_query(query, first_turn=True)
-
-
- def main_pipeline_with_doc(query, doc_file, doc_type):
-     # If no document, use main pipeline
-     if doc_file is None or doc_type == "None":
-         return process_query(query, first_turn=True)
-
-     safe_filename = os.path.basename(getattr(doc_file, 'name', str(doc_file)))
-     upload_dir = os.path.join(os.path.dirname(__file__), "uploaded_docs")
-     os.makedirs(upload_dir, exist_ok=True)
-
-     save_path = os.path.join(upload_dir, safe_filename)
-
-     # 💡 Check if doc_file is file-like (has `.read()`) or path-like (str or NamedString)
-     if hasattr(doc_file, 'read'):
-         # File-like object
-         file_bytes = doc_file.read()
-     else:
-         # It's a path (NamedString), read from file path
-         with open(str(doc_file), 'rb') as f:
-             file_bytes = f.read()
-
-     # Save the file content
-     with open(save_path, "wb") as f:
-         f.write(file_bytes)
-
-
-     # Route to correct document handler
-     if doc_type == "Knowledge Document":
-         status = RAG_Domain_know_doc.ingest_file(save_path)
-         answer = RAG_Domain_know_doc.answer_question(query)
-         return f"[Knowledge Document Uploaded]\n{status}\n\n{answer}"
-     elif doc_type == "User-Specific Document":
-         status = User_Specific_Documents.ingest_file(save_path)
-         answer = User_Specific_Documents.answer_question(query)
-         return f"[User-Specific Document Uploaded]\n{status}\n\n{answer}"
-     elif doc_type == "Old Document":
-         status = Old_Document.ingest_file(save_path)
-         answer = Old_Document.answer_question(query)
-         return f"[Old Document Uploaded]\n{status}\n\n{answer}"
-     else:
-         return "Invalid document type."
-
- with gr.Blocks(title="Wisal Main Pipeline & RAG") as demo:
-     gr.Markdown("## Wisal: Autism AI Assistant (Main Pipeline)")
-     with gr.Tab("Main Pipeline"):
-         q = gr.Textbox(placeholder="Your question...", lines=2, label="Ask Wisal")
-         doc_file = gr.File(label="Optional: Upload Document (PDF, DOCX, TXT)")
-         doc_type = gr.Radio(["None", "Knowledge Document", "User-Specific Document", "Old Document"], value="None", label="Document Type")
-         btn = gr.Button("Submit")
-         out = gr.Textbox(label="Wisal Answer", lines=8, interactive=False)
-         btn.click(fn=main_pipeline_with_doc, inputs=[q, doc_file, doc_type], outputs=out)
-     with gr.Tab("Domain Knowledge RAG"):
-         RAG_Domain_know_doc.demo.render()
-     with gr.Tab("User-Specific Documents"):
-         User_Specific_Documents.demo.render()
-     with gr.Tab("Old Documents"):
-         Old_Document.demo.render()
-
- if __name__ == "__main__":
-     demo.launch(debug=True)
-
+ import os
+ import re
+ import asyncio
+ import gradio as gr
+ import RAG_Domain_know_doc
+ from web_search import search_autism
+ from RAG import rag_autism
+ from openai import OpenAI # Corrected import
+ from dotenv import load_dotenv
+ import Old_Document
+ import User_Specific_Documents
+ from prompt_template import (
+     Prompt_template_translation,
+     Prompt_template_LLM_Generation,
+     Prompt_template_Reranker,
+     Prompt_template_Wisal,
+     Prompt_template_Halluciations,
+     Prompt_template_paraphrasing,
+     Prompt_template_Translate_to_original,
+     Prompt_template_relevance,
+     Prompt_template_User_document_prompt
+ )
+
+ GEMINI_API_KEY = "AIzaSyCUCivstFpC9pq_jMHMYdlPrmh9Bx97dFo"
+ TAVILY_API_KEY = "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"
+ OPENAI_API_KEY = "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"
+ QDRANT_API_KEY = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJhY2Nlc3MiOiJtIiwiZXhwIjoxNzUxMDUxNzg4fQ.I9J-K7OM0BtcNKgj2d4uVM8QYAHYfFCVAyP4rlZkK2E"
+ QDRANT_URL = "https://6a3aade6-e8ad-4a6c-a579-21f5af90b7e8.us-east4-0.gcp.cloud.qdrant.io"
+ WEAVIATE_URL = "https://xbvlj5rpqyiswspww0tthq.c0.us-west3.gcp.weaviate.cloud"
+ WEAVIATE_API_KEY = "RU9acU1CYnNRTjY1S1ZFc18zNS9tQktaWlcwTzFEUjlscEVCUGF4YU5xRWx2MDhmTUtIdUhnOWdOTGVZPV92MjAw"
+ DEEPINFRA_API_KEY = "285LUJulGIprqT6hcPhiXtcrphU04FG4"
+ DEEPINFRA_BASE_URL = "https://api.deepinfra.com/v1/openai"
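+ # NOTE: these credentials are committed in plain text; a minimal sketch of the
+ # .env-based loading this commit replaced (assuming a local .env file) would be:
+ #   load_dotenv()
+ #   DEEPINFRA_API_KEY = os.getenv("DEEPINFRA_API_KEY")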
+ # Initialize OpenAI client (DEEPINFRA_TOKEN no longer exists after this change,
+ # so the client must use the constants defined above)
+ env = os.getenv("ENVIRONMENT", "production")
+ openai = OpenAI(
+     api_key=DEEPINFRA_API_KEY,
+     base_url=DEEPINFRA_BASE_URL,
+ )
+
+
+ # Helper to call chat completion synchronously
+ def call_llm(model: str, messages: list[dict], temperature: float = 0.0, **kwargs) -> str:
+     resp = openai.chat.completions.create(
+         model=model,
+         messages=messages,
+         temperature=temperature,
+         **kwargs
+     )
+     return resp.choices[0].message.content.strip()
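+ # call_llm passes extra kwargs straight through to chat.completions.create, so the
+ # reasoning_effort="none" used below rides along unchanged; this assumes the installed
+ # openai client accepts that keyword and that DeepInfra's endpoint honors it.
+ # Hypothetical example: call_llm("Qwen/Qwen3-32B", [{"role": "user", "content": "ping"}])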
+
+ # Basic greeting detection
+ def is_greeting(text: str) -> bool:
+     return bool(re.search(r"\b(hi|hello|hey|good (morning|afternoon|evening))\b", text, re.I))
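+     # Matches standalone greetings such as "hi", "Hey there" or "Good Morning"
+     # (case-insensitive, word-bounded), so words like "this" or "highway" don't trigger it.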
+
+
+ def process_query(query: str, first_turn: bool = False):
+     intro = ""
+     process_log = []
+
+     if first_turn and (not query or query.strip() == ""):
+         intro = "Hello! I’m Wisal, an AI assistant developed by Compumacy AI, specializing in Autism Spectrum Disorders. How can I help you today?"
+         process_log.append(intro)
+         _save_process_log(process_log)
+         return intro
+
+     # Handle Yes/No replies
+     if query.strip().lower() == "no":
+         no_reply = (
+             "Hello, I’m Wisal, an AI assistant developed by Compumacy AI, "
+             "and a knowledgeable Autism specialist.\n"
+             "If you have any question related to autism, please submit a question specifically about autism."
+         )
+         process_log.append(f"User replied 'No'.\n{no_reply}")
+         _save_process_log(process_log)
+         return no_reply
+     elif query.strip().lower() == "yes":
+         process_log.append("User replied 'Yes'. Continuing system as normal.")
+
+     # 0: Handle simple greetings
+     if is_greeting(query):
+         greeting = intro + "Hello! I’m Wisal, your AI assistant developed by Compumacy AI. How can I help you today?"
+         process_log.append(f"Greeting detected.\n{greeting}")
+         _save_process_log(process_log)
+         return greeting
+
+     # 1: Translation & Rephrasing
+     corrected_query = call_llm(
+         model="Qwen/Qwen3-32B",
+         messages=[{"role": "user", "content": Prompt_template_translation.format(query=query)}],
+         reasoning_effort="none"
+     )
+     process_log.append(f"Corrected Query: {corrected_query}")
+
+     # 2: Relevance Check
+     relevance = call_llm(
+         model="Qwen/Qwen3-32B",
+         messages=[{"role": "user", "content": Prompt_template_relevance.format(corrected_query=corrected_query)}],
+         reasoning_effort="none"
+     )
+     process_log.append(f"Relevance: {relevance}")
+     if relevance != "RELATED":
+         process_log.append(f"Query not related. Returning: {relevance}")
+         _save_process_log(process_log)
+         return intro + relevance
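+     # The relevance prompt is presumably expected to reply with the literal token
+     # "RELATED" for on-topic queries; any other output is returned to the user as-is.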
+
+     # Step 3: Web Search
+     web_search_resp = asyncio.run(search_autism(corrected_query))
+     web_answer = web_search_resp.get("answer", "")
+     process_log.append(f"Web Search Answer: {web_answer}")
+
+     # Step 4: LLM Generation
+     gen_prompt = Prompt_template_LLM_Generation.format(new_query=corrected_query)
+     generated = call_llm(
+         model="Qwen/Qwen3-32B",
+         messages=[{"role": "user", "content": gen_prompt}],
+         reasoning_effort="none"
+     )
+     process_log.append(f"LLM Generated: {generated}")
+
+     # Step 5: RAG
+     rag_resp = asyncio.run(rag_autism(corrected_query, top_k=3))
+     rag_contexts = rag_resp.get("answer", [])
+     process_log.append(f"RAG Contexts: {rag_contexts}")
+
+     # 6) Reranking (across the generated, web, and RAG candidates)
+     rag_text = "\n".join(f"[{i+3}] {c}" for i, c in enumerate(rag_contexts))
+     answers_list = f"[1] {generated}\n[2] {web_answer}\n{rag_text}"
+     rerank_prompt = Prompt_template_Reranker.format(
+         new_query=corrected_query,
+         answers_list=answers_list
+     )
+     reranked = call_llm(
+         model="Qwen/Qwen3-32B",
+         messages=[{"role": "user", "content": rerank_prompt}],
+         reasoning_effort="none"
+     )
+     process_log.append(f"Reranked: {reranked}")
+
+     # 7) Wisal final-answer generation
+     wisal_prompt = Prompt_template_Wisal.format(
+         new_query=corrected_query,
+         document=reranked # use reranked output here
+     )
+     wisal = call_llm(
+         model="Qwen/Qwen3-32B",
+         messages=[{"role": "user", "content": wisal_prompt}],
+         reasoning_effort="none"
+     )
+     process_log.append(f"Wisal Final Answer: {wisal}")
+
+     # 8) Hallucination Check
+     halluc_prompt = Prompt_template_Halluciations.format(
+         new_query=corrected_query,
+         answer=wisal,
+         document=generated
+     )
+     halluc = call_llm(
+         model="Qwen/Qwen3-32B",
+         messages=[{"role": "user", "content": halluc_prompt}],
+         reasoning_effort="none"
+     )
+     process_log.append(f"Hallucination Check: {halluc}")
+     score_match = re.search(r"Score:\s*(\d+)", halluc)
+     score = int(score_match.group(1)) if score_match else 3
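+     # Assumes the hallucination prompt emits a line like "Score: 2"; judging by the
+     # branch below, 1 appears to mean grounded while 2-3 trigger a paraphrase pass.
+     # A missing or unparseable score falls back to 3 (the conservative path).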
+
+     # 9) Paraphrase if needed
+     if score in (2, 3):
+         paraphrase = call_llm(
+             model="Qwen/Qwen3-32B",
+             messages=[{"role": "user", "content": Prompt_template_paraphrasing.format(document=generated)}],
+             reasoning_effort="none"
+         )
+         process_log.append(f"Paraphrased: {paraphrase}")
+         context_prompt = Prompt_template_Wisal.format(new_query=corrected_query, document=paraphrase)
+         final_doc = call_llm(
+             model="Qwen/Qwen3-32B",
+             messages=[{"role": "user", "content": context_prompt}],
+             reasoning_effort="none"
+         )
+         process_log.append(f"Wisal with Paraphrase: {final_doc}")
+     else:
+         final_doc = wisal
+
+     # 10) Translate back if needed (improved: only if input is not English)
+     import langdetect
+     try:
+         detected_lang = langdetect.detect(query)
+     except Exception:
+         detected_lang = "en"
+     if detected_lang != "en":
+         result = call_llm(
+             model="Qwen/Qwen3-32B",
+             messages=[{"role": "user", "content": Prompt_template_Translate_to_original.format(query=query, document=final_doc)}],
+             reasoning_effort="none"
+         )
+         process_log.append(f"Translated Back: {result}")
+     else:
+         result = final_doc
+     process_log.append(f"Final Result: {result}")
+
+     _save_process_log(process_log)
+     return intro + result
+
+
+ # Utility to save a per-question process log to a txt file
+ def _save_process_log(log_lines, filename="process_output.txt"):
+     import datetime
+     import os
+     # Ensure logs directory exists
+     logs_dir = os.path.join(os.path.dirname(__file__), "logs")
+     os.makedirs(logs_dir, exist_ok=True)
+     # Unique filename per question (timestamped)
+     timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")
+     log_filename = os.path.join(logs_dir, f"log_{timestamp}.txt")
+     try:
+         with open(log_filename, "w", encoding="utf-8") as f:
+             for line in log_lines:
+                 f.write(str(line) + "\n\n")
+     except Exception:
+         pass
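+     # Failures are swallowed deliberately so logging can never break a request; note
+     # the filename parameter is currently unused, as each call writes its own
+     # timestamped file under logs/.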
+
+
+ # Gradio UI for the main pipeline, RAG_Domain_know_doc, User_Specific_Documents, and Old_Document
+ def main_pipeline_interface(query):
+     return process_query(query, first_turn=True)
+
+
+ def main_pipeline_with_doc(query, doc_file, doc_type):
+     # If no document, use main pipeline
+     if doc_file is None or doc_type == "None":
+         return process_query(query, first_turn=True)
+
+     safe_filename = os.path.basename(getattr(doc_file, 'name', str(doc_file)))
+     upload_dir = os.path.join(os.path.dirname(__file__), "uploaded_docs")
+     os.makedirs(upload_dir, exist_ok=True)
+
+     save_path = os.path.join(upload_dir, safe_filename)
+
+     # 💡 Check if doc_file is file-like (has `.read()`) or path-like (str or NamedString)
+     if hasattr(doc_file, 'read'):
+         # File-like object
+         file_bytes = doc_file.read()
+     else:
+         # It's a path (NamedString), read from file path
+         with open(str(doc_file), 'rb') as f:
+             file_bytes = f.read()
+
+     # Save the file content
+     with open(save_path, "wb") as f:
+         f.write(file_bytes)
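+     # The dual branch above exists because Gradio's file component has returned
+     # different types across versions (tempfile-like objects vs. plain path strings);
+     # recent releases typically hand back a filepath string.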
+
+     # Route to correct document handler
+     if doc_type == "Knowledge Document":
+         status = RAG_Domain_know_doc.ingest_file(save_path)
+         answer = RAG_Domain_know_doc.answer_question(query)
+         return f"[Knowledge Document Uploaded]\n{status}\n\n{answer}"
+     elif doc_type == "User-Specific Document":
+         status = User_Specific_Documents.ingest_file(save_path)
+         answer = User_Specific_Documents.answer_question(query)
+         return f"[User-Specific Document Uploaded]\n{status}\n\n{answer}"
+     elif doc_type == "Old Document":
+         status = Old_Document.ingest_file(save_path)
+         answer = Old_Document.answer_question(query)
+         return f"[Old Document Uploaded]\n{status}\n\n{answer}"
+     else:
+         return "Invalid document type."
+
+
+ with gr.Blocks(title="Wisal Main Pipeline & RAG") as demo:
+     gr.Markdown("## Wisal: Autism AI Assistant (Main Pipeline)")
+     with gr.Tab("Main Pipeline"):
+         q = gr.Textbox(placeholder="Your question...", lines=2, label="Ask Wisal")
+         doc_file = gr.File(label="Optional: Upload Document (PDF, DOCX, TXT)")
+         doc_type = gr.Radio(["None", "Knowledge Document", "User-Specific Document", "Old Document"], value="None", label="Document Type")
+         btn = gr.Button("Submit")
+         out = gr.Textbox(label="Wisal Answer", lines=8, interactive=False)
+         btn.click(fn=main_pipeline_with_doc, inputs=[q, doc_file, doc_type], outputs=out)
+     with gr.Tab("Domain Knowledge RAG"):
+         RAG_Domain_know_doc.demo.render()
+     with gr.Tab("User-Specific Documents"):
+         User_Specific_Documents.demo.render()
+     with gr.Tab("Old Documents"):
+         Old_Document.demo.render()
+
+
+ if __name__ == "__main__":
+     demo.launch(debug=True)
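+ # Runs locally by default; passing share=True to demo.launch is the standard Gradio
+ # option for a temporary public link (not used in this commit).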