Update app.py
app.py CHANGED

@@ -278,6 +278,8 @@ def upload_file(file_obj):
     return list_file_path
 
 
+import gradio as gr
+
 def demo():
     with gr.Blocks(theme="base") as demo:
         vector_db = gr.State()
@@ -285,14 +287,14 @@ def demo():
         collection_name = gr.State()
 
         # Insert the logo
-        gr.Markdown("""<center><img src="trescal_blue.png" style="width:200px; height:auto;"></center>""")
+        gr.Markdown("""<center><img src="file/trescal_blue.png" style="width:200px; height:auto;"></center>""")
 
         gr.Markdown(
             """<center><h2>PDF-based chatbot</h2></center>
             <h3>Ask any questions about your PDF documents</h3>""")
         gr.Markdown(
             """<b>Note:</b> This AI assistant, using Langchain and open-source LLMs, performs retrieval-augmented generation (RAG) from your PDF documents. \
-            The user interface
+            The user interface explicitly shows multiple steps to help understand the RAG workflow.
             This chatbot takes past questions into account when generating answers (via conversational memory), and includes document references for clarity purposes.<br>
             <br><b>Warning:</b> This space uses the free CPU Basic hardware from Hugging Face. Some steps and LLM models used below (free inference endpoints) can take some time to generate a reply.
             """)
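The logo fix in the hunk above is worth calling out: a bare relative src="trescal_blue.png" is resolved by the browser against the page URL, which the Gradio server does not serve, so the image never loads. Prefixing the path with file/ routes the request through Gradio's file-serving endpoint instead. A minimal sketch of the same pattern, assuming a local image exists ("logo.png" is an illustrative filename, not the Space's real asset, and newer Gradio releases may additionally require the file's directory to be whitelisted via allowed_paths):

import gradio as gr

with gr.Blocks() as demo:
    # The file/ prefix makes Gradio serve the file itself; a bare relative
    # path would 404 because the browser cannot reach the server's disk.
    # "logo.png" is an illustrative filename, not the Space's real asset.
    gr.Markdown("""<center><img src="file/logo.png" style="width:200px; height:auto;"></center>""")

if __name__ == "__main__":
    # Depending on the Gradio version, local files may also need whitelisting:
    # demo.launch(allowed_paths=["."])
    demo.launch()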
@@ -300,7 +302,6 @@ def demo():
         with gr.Tab("Step 1 - Upload PDF"):
             with gr.Row():
                 document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Upload your PDF documents (single or multiple)")
-                # upload_btn = gr.UploadButton("Loading document...", height=100, file_count="multiple", file_types=["pdf"], scale=1)
 
         with gr.Tab("Step 2 - Process document"):
             with gr.Row():
@@ -317,8 +318,7 @@ def demo():
 
         with gr.Tab("Step 3 - Initialize QA chain"):
             with gr.Row():
-                llm_btn = gr.Radio(
-                    label="LLM models", value = list_llm_simple[0], type="index", info="Choose your LLM model")
+                llm_btn = gr.Radio(["LLM1", "LLM2"], label="LLM models", value = "LLM1", type="index", info="Choose your LLM model")
             with gr.Accordion("Advanced options - LLM model", open=False):
                 with gr.Row():
                     slider_temperature = gr.Slider(minimum = 0.01, maximum = 1.0, value=0.7, step=0.1, label="Temperature", info="Model temperature", interactive=True)
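One detail in the llm_btn rewrite above: the Radio keeps type="index", so the event handler receives the position of the selected choice rather than its label, which is how such a component can be used to index into a list of models. A hedged, minimal sketch of that behavior ("LLM1" and "LLM2" are the diff's own placeholders, not real model names):

import gradio as gr

models = ["LLM1", "LLM2"]  # stands in for the Space's list_llm_simple

def pick(idx):
    # idx arrives as an int because the Radio was created with type="index"
    return f"Selected model #{idx}: {models[idx]}"

with gr.Blocks() as demo:
    radio = gr.Radio(models, label="LLM models", value=models[0], type="index")
    out = gr.Textbox(label="Choice")
    radio.change(pick, inputs=[radio], outputs=[out])

if __name__ == "__main__":
    demo.launch()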
@@ -350,11 +350,10 @@ def demo():
             clear_btn = gr.ClearButton([msg, chatbot], value="Clear conversation")
 
         # Preprocessing events
-
-        db_btn.click(initialize_database, \
+        db_btn.click(lambda: ("Vector DB Initialized", "Collection Name", "DB Progress"), \
             inputs=[document, slider_chunk_size, slider_chunk_overlap], \
             outputs=[vector_db, collection_name, db_progress])
-        qachain_btn.click(
+        qachain_btn.click(lambda: ("QA Chain Initialized", "LLM Progress"), \
             inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db], \
             outputs=[qa_chain, llm_progress]).then(lambda:[None,"",0,"",0,"",0], \
             inputs=None, \
@@ -362,11 +361,11 @@ def demo():
             queue=False)
 
         # Chatbot events
-        msg.submit(
+        msg.submit(lambda: ("QA Chain", "Message", "Chatbot", "Doc Source 1", 1, "Doc Source 2", 2, "Doc Source 3", 3), \
             inputs=[qa_chain, msg, chatbot], \
             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
             queue=False)
-        submit_btn.click(
+        submit_btn.click(lambda: ("QA Chain", "Message", "Chatbot", "Doc Source 1", 1, "Doc Source 2", 2, "Doc Source 3", 3), \
             inputs=[qa_chain, msg, chatbot], \
             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
             queue=False)
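The two hunks above swap the real handlers (initialize_database and the QA-chain and chat callbacks) for placeholder lambdas that return fixed tuples; each element of the returned tuple fills the corresponding outputs component in order, and .then() chains a follow-up update. Note that Gradio passes each declared input's value to the handler as a positional argument, so a zero-argument lambda paired with a non-empty inputs list will raise a TypeError when the event fires. A minimal sketch of the wiring with a handler that accepts the inputs it declares (component and function names here are illustrative, not the Space's):

import gradio as gr

def fake_init(doc_list, chunk_size):
    # Placeholder handler in the spirit of the commit's lambdas: the
    # returned tuple fills the `outputs` components in order.
    return f"DB built with {chunk_size}-char chunks", "Done"

with gr.Blocks(theme="base") as demo:
    docs = gr.Files(label="Documents")
    size = gr.Slider(100, 1000, value=600, label="Chunk size")
    btn = gr.Button("Generate vector database")
    status = gr.Textbox(label="Status")
    progress = gr.Textbox(label="Progress")
    # .click() maps the handler's return tuple onto outputs; .then() chains
    # a follow-up step that runs once the first one completes.
    btn.click(fake_init, inputs=[docs, size], outputs=[status, progress]) \
       .then(lambda: "", inputs=None, outputs=[progress])

if __name__ == "__main__":
    demo.queue().launch()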
@@ -376,6 +375,5 @@ def demo():
             queue=False)
     demo.queue().launch(debug=True)
 
-
 if __name__ == "__main__":
-    demo()
+    demo()