awacke1 committed
Commit f872af7 · verified · 1 Parent(s): 2db9a0a

Update app.py

Files changed (1)
  1. app.py +9 -88
app.py CHANGED
@@ -210,62 +210,33 @@ def search_glossary(query):
     all_results = ""
     st.markdown(f"- {query}")
 
-
+    # 🔍 ArXiv RAG researcher expert ~-<>-~ Paper Summary & Ask LLM
     #database_choice Literal['Semantic Search', 'Arxiv Search - Latest - (EXPERIMENTAL)'] Default: "Semantic Search"
     #llm_model_picked Literal['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None'] Default: "mistralai/Mistral-7B-Instruct-v0.2"
-
-    # 🔍 Run 1 - ArXiv RAG researcher expert ~-<>-~ Paper Summary & Ask LLM
+
     client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
     response2 = client.predict(
         message=query, # str in 'parameter_13' Textbox component
-        llm_results_use=5,
+        llm_results_use=10,
         database_choice="Semantic Search",
         llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
         api_name="/update_with_rag_md"
     )
-    st.code(response2, language="python", line_numbers=True)
+    st.markdown(response2)
+    st.code(response2, language="python", line_numbers=True, wrap_lines=True)
 
-
     #llm_model_picked Literal['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None'] Default: "mistralai/Mistral-7B-Instruct-v0.2"
-
+
+    # 🔍 ArXiv RAG researcher expert ~-<>-~ Paper Summary & Ask LLM
     result = client.predict(
         prompt=query,
         llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
         stream_outputs=True,
         api_name="/ask_llm"
     )
+    st.markdown(result)
     st.code(result, language="python", line_numbers=True)
-
-    response1 = client.predict(
-        query,
-        10,
-        "Semantic Search - up to 10 Mar 2024", # Search Source Dropdown component
-        "mistralai/Mixtral-8x7B-Instruct-v0.1", # LLM Model Dropdown component
-        api_name="/update_with_rag_md"
-    )
-    st.code(response1[0], language="python", line_numbers=True, wrap_lines=False)
-
-
-    # ArXiv searcher - Paper Summary & Ask LLM
-    # client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
 
-    response1 = client.predict(
-        message=query,
-        llm_results_use=5,
-        database_choice=database_choice,
-        llm_model_picked=model_choice,
-        api_name="/update_with_rag_md"
-    )
-    st.code(response1, language="python", line_numbers=True, wrap_lines=False)
-
-    response2 = client.predict(
-        prompt=query,
-        llm_model_picked=model_choice,
-        stream_outputs=True,
-        api_name="/ask_llm"
-    )
-    st.code(response2, language="python", line_numbers=True, wrap_lines=False)
-
     # Aggregate hyperlinks and show with emojis
     hyperlinks = extract_hyperlinks([response1, response2])
     st.markdown("### 🔗 Aggregated Hyperlinks")
@@ -277,60 +248,10 @@ def search_glossary(query):
     st.code(f"Response 1: \n{format_with_line_numbers(response1)}\n\nResponse 2: \n{format_with_line_numbers(response2)}", language="json")
 
     # Save both responses to Cosmos DB
-    save_to_cosmos_db(query, response1, response2)
+    save_to_cosmos_db(query, response2, result)
 
 
 
-
-
-# 🔍 Search Glossary function
-def search_glossaryv1(query):
-    # 🕵️‍♂️ Searching the glossary for: query
-    all_results = ""
-    st.markdown(f"- {query}")
-
-    #database_choice Literal['Semantic Search', 'Arxiv Search - Latest - (EXPERIMENTAL)'] Default: "Semantic Search"
-    #llm_model_picked Literal['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None'] Default: "mistralai/Mistral-7B-Instruct-v0.2"
-
-    # 🔍 Run 1 - ArXiv RAG researcher expert ~-<>-~ Paper Summary & Ask LLM
-    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
-    response2 = client.predict(
-        message=query, # str in 'parameter_13' Textbox component
-        llm_results_use=5,
-        database_choice="Semantic Search",
-        llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
-        api_name="/update_with_rag_md"
-    )
-
-    #llm_model_picked Literal['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None'] Default: "mistralai/Mistral-7B-Instruct-v0.2"
-
-    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
-    result = client.predict(
-        prompt=query,
-        llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
-        stream_outputs=True,
-        api_name="/ask_llm"
-    )
-    st.write('🔍 Run of Multi-Agent System Paper Summary Spec is Complete')
-    st.markdown(response2)
-
-    # ArXiv searcher ~-<>-~ Paper References - Update with RAG
-    client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
-    response1 = client.predict(
-        query,
-        10,
-        "Semantic Search - up to 10 Mar 2024", # Search Source Dropdown component
-        "mistralai/Mixtral-8x7B-Instruct-v0.1", # LLM Model Dropdown component
-        api_name="/update_with_rag_md"
-    )
-
-
-
-    #st.write('🔍 Run of Multi-Agent System Paper References is Complete')
-    #responseall = response2 + response1[0] + response1[1]
-    #st.markdown(responseall)
-    return responseall
-
 # 📝 Function to process text input
 def process_text(text_input):
     if text_input:
 
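For reference, the updated flow reduces to two `gradio_client` calls against the same Space. The sketch below is a minimal standalone version of that flow, with the Streamlit rendering replaced by prints and a hypothetical example query; it assumes `gradio_client` is installed and the public Space is reachable.

from gradio_client import Client

query = "mixture of experts"  # hypothetical example query

client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")

# Run 1: RAG over ArXiv -> markdown paper summary with references
response2 = client.predict(
    message=query,
    llm_results_use=10,  # raised from 5 in this commit
    database_choice="Semantic Search",
    llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
    api_name="/update_with_rag_md"
)

# Run 2: plain LLM answer to the same query
result = client.predict(
    prompt=query,
    llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
    stream_outputs=True,
    api_name="/ask_llm"
)

print(response2)
print(result)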
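extract_hyperlinks is called on the two responses but its definition is not part of this diff. A plausible sketch, assuming the responses are markdown strings (or tuples of strings) and that links should be deduplicated while preserving order:

import re

def extract_hyperlinks(responses):
    # Hypothetical sketch of the extract_hyperlinks helper used above;
    # the real definition lives elsewhere in app.py.
    url_pattern = re.compile(r"https?://[^\s\)\]]+")
    links = []
    for response in responses:
        # /update_with_rag_md may return a tuple; flatten to text first.
        parts = response if isinstance(response, (tuple, list)) else [response]
        text = "\n".join(str(p) for p in parts)
        links.extend(url_pattern.findall(text))
    # Drop duplicates while keeping first-seen order.
    return list(dict.fromkeys(links))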
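save_to_cosmos_db is likewise defined elsewhere in app.py; this commit only changes its arguments to the two values that still exist after the dead code was removed. A hypothetical sketch of such a helper using the azure-cosmos SDK; the database and container names, environment variables, and record shape here are all assumptions:

import os
import uuid
from datetime import datetime, timezone

from azure.cosmos import CosmosClient

def save_to_cosmos_db(query, response1, response2):
    # Hypothetical reconstruction: connect with an endpoint/key pair
    # taken from the environment (names assumed, not from the diff).
    client = CosmosClient(os.environ["COSMOS_ENDPOINT"], credential=os.environ["COSMOS_KEY"])
    container = client.get_database_client("glossarydb").get_container_client("records")
    # One document per search: the query plus both model responses.
    container.create_item(body={
        "id": str(uuid.uuid4()),
        "query": query,
        "response1": response1,
        "response2": response2,
        "timestamp": datetime.now(timezone.utc).isoformat(),
    })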