farmax committed · Commit a62b746 · verified · 1 Parent(s): 155b4b5

Update app.py

Files changed (1)
  1. app.py +33 -2
app.py CHANGED
@@ -294,7 +294,39 @@ def demo():
        <br><b>Avviso:</b> Questo spazio utilizza l'hardware di base CPU gratuito da Hugging Face. Alcuni passaggi e modelli LLM usati qui sotto (endpoint di inferenza gratuiti) possono richiedere del tempo per generare una risposta.
        """)

-       # ... (resto del codice rimane invariato)
+       with gr.Tab("Step 1 - Carica PDFs"):
+           with gr.Row():
+               document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
+               # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
+
+       with gr.Tab("Step 2 - Processa i documenti"):
+           with gr.Row():
+               db_btn = gr.Radio(["ChromaDB"], label="Vector database type", value = "ChromaDB", type="index", info="Choose your vector database")
+           with gr.Accordion("Advanced options - Document text splitter", open=False):
+               with gr.Row():
+                   slider_chunk_size = gr.Slider(minimum = 100, maximum = 1000, value=600, step=20, label="Chunk size", info="Chunk size", interactive=True)
+               with gr.Row():
+                   slider_chunk_overlap = gr.Slider(minimum = 10, maximum = 200, value=40, step=10, label="Chunk overlap", info="Chunk overlap", interactive=True)
+           with gr.Row():
+               db_progress = gr.Textbox(label="Vector database initialization", value="None")
+           with gr.Row():
+               db_btn = gr.Button("Generate vector database")
+
+       with gr.Tab("Step 3 - Initializia QA chain"):
+           with gr.Row():
+               llm_btn = gr.Radio(list_llm_simple, \
+                   label="LLM models", value = list_llm_simple[0], type="index", info="Choose your LLM model")
+           with gr.Accordion("Advanced options - LLM model", open=False):
+               with gr.Row():
+                   slider_temperature = gr.Slider(minimum = 0.01, maximum = 1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
+               with gr.Row():
+                   slider_maxtokens = gr.Slider(minimum = 224, maximum = 4096, value=1024, step=32, label="Max Tokens", info="Model max tokens", interactive=True)
+               with gr.Row():
+                   slider_topk = gr.Slider(minimum = 1, maximum = 10, value=3, step=1, label="top-k samples", info="Model top-k samples", interactive=True)
+           with gr.Row():
+               llm_progress = gr.Textbox(value="None",label="QA chain initialization")
+           with gr.Row():
+               qachain_btn = gr.Button("Initialize Question Answering chain")

        with gr.Tab("Passo 4 - Chatbot"):
            chatbot = gr.Chatbot(height=300)
@@ -344,4 +376,3 @@ def demo():

 if __name__ == "__main__":
     demo()
-
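For orientation, below is a minimal, self-contained sketch of how components like the ones added in this commit are typically wired to an event handler in Gradio. It is an illustration under stated assumptions only: the function initialize_database, the variable demo_sketch, and the simplified component arguments are not taken from app.py, whose remaining wiring code is elided in the diff above.

# Illustrative sketch only (assumed names; not the code of commit a62b746).
import gradio as gr

def initialize_database(files, chunk_size, chunk_overlap):
    # Assumed placeholder: a real app would split the PDFs and build the vector index here.
    n = len(files or [])
    return f"Received {n} file(s) (chunk_size={chunk_size}, chunk_overlap={chunk_overlap})"

with gr.Blocks() as demo_sketch:
    with gr.Tab("Step 1 - Upload PDFs"):
        document = gr.Files(label="Upload your PDF documents", file_types=[".pdf"])
    with gr.Tab("Step 2 - Process documents"):
        slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Chunk size")
        slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Chunk overlap")
        db_progress = gr.Textbox(label="Vector database initialization", value="None")
        generate_btn = gr.Button("Generate vector database")
    # Clicking the button calls the handler with the listed inputs and writes its return value to db_progress.
    generate_btn.click(initialize_database,
                       inputs=[document, slider_chunk_size, slider_chunk_overlap],
                       outputs=[db_progress])

if __name__ == "__main__":
    demo_sketch.launch()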