awacke1 committed
Commit d6c12f7 · verified · 1 Parent(s): 5824938

Update app.py

Files changed (1)
  1. app.py +7 -10
app.py CHANGED
@@ -209,7 +209,8 @@ def search_glossary(query):
     # 🕵️‍♂️ Searching the glossary for: query
     all_results = ""
     st.markdown(f"- {query}")
-
+
+
     #database_choice Literal['Semantic Search', 'Arxiv Search - Latest - (EXPERIMENTAL)'] Default: "Semantic Search"
     #llm_model_picked Literal['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None'] Default: "mistralai/Mistral-7B-Instruct-v0.2"
 
@@ -222,23 +223,19 @@ def search_glossary(query):
         llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
         api_name="/update_with_rag_md"
     )
-    st.code(response2, language="python", *, line_numbers=True, wrap_lines=False)
+    st.code(response2, language="python", line_numbers=True)
 
+
     #llm_model_picked Literal['mistralai/Mixtral-8x7B-Instruct-v0.1', 'mistralai/Mistral-7B-Instruct-v0.2', 'google/gemma-7b-it', 'None'] Default: "mistralai/Mistral-7B-Instruct-v0.2"
-    # ArXiv searcher ~-<>-~ Paper References - Update with RAG
-    # client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
+
     result = client.predict(
         prompt=query,
         llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
         stream_outputs=True,
         api_name="/ask_llm"
     )
-    #st.write('🔍 Run of Multi-Agent System Paper Summary Spec is Complete')
-    st.code(result, language="python", *, line_numbers=True, wrap_lines=False)
+    st.code(response2, language="python", line_numbers=True)
 
-
-    # ArXiv searcher ~-<>-~ Paper References - Update with RAG
-    # client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")
     response1 = client.predict(
         query,
         10,
@@ -246,7 +243,7 @@ def search_glossary(query):
         "mistralai/Mixtral-8x7B-Instruct-v0.1", # LLM Model Dropdown component
         api_name="/update_with_rag_md"
     )
-    st.code(response1, language="python", *, line_numbers=True, wrap_lines=False)
+    st.code(result, language="python", *, line_numbers=True, wrap_lines=False)
 
 
     # ArXiv searcher - Paper Summary & Ask LLM
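
The `st.code` calls this commit touches had copied the bare `*` from Streamlit's documented signature, `st.code(body, language="python", *, line_numbers=False, wrap_lines=False)`, where it only marks the parameters after it as keyword-only; written at a call site it is a `SyntaxError`. The commit removes it from the first two calls, though the third (new line 246) still carries it, and the second new call prints `response2` where `result` was just computed. A minimal sketch of the fixed pattern, assuming a Streamlit release recent enough to support `line_numbers` and `wrap_lines`:

```python
import streamlit as st

# Documented signature, for reference:
#     st.code(body, language="python", *, line_numbers=False, wrap_lines=False)
# The bare "*" only separates keyword-only parameters in the signature;
# pasting it into a call raises a SyntaxError, which is what this commit fixes.

snippet = 'print("hello, world")'

# Invalid -- copies the "*" from the signature into the call:
#     st.code(snippet, language="python", *, line_numbers=True, wrap_lines=False)

# Valid -- pass the keyword-only options by name:
st.code(snippet, language="python", line_numbers=True)
```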
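
For context on the `client.predict` calls: the old code's commented-out line named the Space `awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern`, so the sketch below assumes that is where `client` points; the `/ask_llm` keyword arguments are taken from the diff, and the query string is illustrative:

```python
from gradio_client import Client

# Assumption: the Space from the old code's commented-out line is the one
# `client` is constructed against elsewhere in app.py.
client = Client("awacke1/Arxiv-Paper-Search-And-QA-RAG-Pattern")

# Call the Space's /ask_llm endpoint with the arguments shown in the diff.
result = client.predict(
    prompt="What is retrieval-augmented generation?",  # illustrative query
    llm_model_picked="mistralai/Mistral-7B-Instruct-v0.2",
    stream_outputs=True,
    api_name="/ask_llm",
)
print(result)
```

Even with `stream_outputs=True`, `predict` returns the final value; `gradio_client` also offers `client.submit(...)` when intermediate streamed results are wanted.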