DHEIVER committed on
Commit 41abea9 · verified · 1 Parent(s): dd55635

Update app.py

Files changed (1)
  1. app.py +106 -75
app.py CHANGED
@@ -218,8 +218,18 @@ body {
 
 # Interface Gradio
 # Interface Gradio
+def toggle_cover():
+    cover_html = """
+    <div style="position: fixed; top: 0; left: 0; right: 0; height: 64px;
+                background: white; z-index: 9999; box-shadow: 0 1px 3px rgba(0,0,0,0.1);">
+    </div>
+    """
+    return gr.update(value=cover_html), gr.update(value="Mostrar Barra")
+
+def remove_cover():
+    return gr.update(value=""), gr.update(value="Ocultar Barra")
+
 def demo():
-    # CSS personalizado com a faixa de cobertura
     custom_css = """
     #banner {
         display: none !important;
@@ -227,75 +237,32 @@ def demo():
     footer {
         display: none !important;
     }
-    /* Estilo do botão de controle */
-    #toggle-bar-btn {
-        position: fixed;
-        top: 10px;
-        right: 10px;
-        z-index: 10000;
-        background: #2563eb;
-        color: white;
-        border: none;
-        padding: 8px 16px;
-        border-radius: 4px;
-        cursor: pointer;
-        font-size: 14px;
-        font-weight: 500;
-        transition: background-color 0.2s;
-    }
-    #toggle-bar-btn:hover {
-        background: #1d4ed8;
-    }
-    /* Estilo da faixa de cobertura */
-    #cover-bar {
-        display: none;
-        position: fixed;
-        top: 0;
-        left: 0;
-        right: 0;
-        height: 64px;
-        background: white;
-        z-index: 9999;
-        box-shadow: 0 1px 3px rgba(0,0,0,0.1);
-        transition: all 0.3s ease;
-    }
-    /* Classe para mostrar a faixa */
-    #cover-bar.visible {
-        display: block;
-    }
     /* Ajustes do container principal */
     .gradio-container {
         margin-top: 0 !important;
     }
-    """
-
-    # HTML para o botão e a faixa
-    cover_bar_html = """
-    <div id="cover-bar"></div>
-    <button id="toggle-bar-btn" onclick="toggleCoverBar()">Ocultar Barra</button>
-    <script>
-    function toggleCoverBar() {
-        const coverBar = document.getElementById('cover-bar');
-        const btn = document.getElementById('toggle-bar-btn');
-        if (coverBar.classList.contains('visible')) {
-            coverBar.classList.remove('visible');
-            btn.textContent = 'Ocultar Barra';
-        } else {
-            coverBar.classList.add('visible');
-            btn.textContent = 'Mostrar Barra';
-        }
-    }
-    </script>
+    /* Estilo do botão */
+    .cover-button {
+        position: fixed !important;
+        top: 10px !important;
+        right: 10px !important;
+        z-index: 10000 !important;
+    }
     """
 
     with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="gray"), css=custom_css) as demo:
-        # Adiciona o HTML do botão e faixa de cobertura
-        gr.HTML(cover_bar_html)
-
+        # Componentes para controle da barra
+        with gr.Row():
+            cover_html = gr.HTML("")
+            toggle_btn = gr.Button("Ocultar Barra", elem_classes="cover-button")
+        is_covered = gr.State(False)
+
+        # Estados do sistema
         vector_db = gr.State()
         qa_chain = gr.State()
         collection_name = gr.State()
 
+        # Cabeçalho
         gr.Markdown(
             """<center><h2>Chatbot baseado em PDF</center></h2>
             <h3>Faça perguntas sobre seus documentos PDF</h3>"""
@@ -308,38 +275,50 @@ def demo():
            """
         )
 
+        # Passo 1 - Upload do PDF
         with gr.Tab("Passo 1 - Carregar PDF"):
             with gr.Row():
-                document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Carregue seus documentos PDF (único ou múltiplos)")
+                document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True,
+                                    label="Carregue seus documentos PDF (único ou múltiplos)")
 
+        # Passo 2 - Processamento do documento
         with gr.Tab("Passo 2 - Processar documento"):
             with gr.Row():
-                db_btn = gr.Radio(["ChromaDB"], label="Tipo de banco de dados vetorial", value="ChromaDB", type="index", info="Escolha seu banco de dados vetorial")
+                db_btn = gr.Radio(["ChromaDB"], label="Tipo de banco de dados vetorial", value="ChromaDB",
+                                  type="index", info="Escolha seu banco de dados vetorial")
             with gr.Accordion("Opções avançadas - Divisor de texto do documento", open=False):
                 with gr.Row():
-                    slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Tamanho do chunk", info="Tamanho do chunk", interactive=True)
+                    slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20,
+                                                  label="Tamanho do chunk", info="Tamanho do chunk", interactive=True)
                 with gr.Row():
-                    slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Sobreposição do chunk", info="Sobreposição do chunk", interactive=True)
+                    slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10,
+                                                     label="Sobreposição do chunk", info="Sobreposição do chunk", interactive=True)
             with gr.Row():
                 db_progress = gr.Textbox(label="Inicialização do banco de dados vetorial", value="Nenhum")
             with gr.Row():
-                db_btn = gr.Button("Gerar banco de dados vetorial")
+                process_btn = gr.Button("Gerar banco de dados vetorial")
 
+        # Passo 3 - Configuração da cadeia QA
         with gr.Tab("Passo 3 - Inicializar cadeia de QA"):
             with gr.Row():
-                llm_btn = gr.Radio(list_llm_simple, label="Modelos LLM", value=list_llm_simple[0], type="index", info="Escolha seu modelo LLM")
+                llm_btn = gr.Radio(list_llm_simple, label="Modelos LLM", value=list_llm_simple[0],
+                                   type="index", info="Escolha seu modelo LLM")
             with gr.Accordion("Opções avançadas - Modelo LLM", open=False):
                 with gr.Row():
-                    slider_temperature = gr.Slider(minimum=0.01, maximum=1.0, value=0.7, step=0.1, label="Temperatura", info="Temperatura do modelo", interactive=True)
+                    slider_temperature = gr.Slider(minimum=0.01, maximum=1.0, value=0.7, step=0.1,
+                                                   label="Temperatura", info="Temperatura do modelo", interactive=True)
                 with gr.Row():
-                    slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Máximo de Tokens", info="Máximo de tokens do modelo", interactive=True)
+                    slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32,
+                                                 label="Máximo de Tokens", info="Máximo de tokens do modelo", interactive=True)
                 with gr.Row():
-                    slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="Amostras top-k", info="Amostras top-k do modelo", interactive=True)
+                    slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1,
+                                            label="Amostras top-k", info="Amostras top-k do modelo", interactive=True)
             with gr.Row():
                 llm_progress = gr.Textbox(value="Nenhum", label="Inicialização da cadeia de QA")
             with gr.Row():
-                qachain_btn = gr.Button("Inicializar cadeia de Perguntas e Respostas")
+                qa_init_btn = gr.Button("Inicializar cadeia de Perguntas e Respostas")
 
+        # Passo 4 - Interface do Chatbot
        with gr.Tab("Passo 4 - Chatbot"):
            chatbot = gr.Chatbot(height=300)
            with gr.Accordion("Avançado - Referências de documentos", open=False):
@@ -358,15 +337,67 @@ def demo():
             submit_btn = gr.Button("Enviar mensagem")
             clear_btn = gr.ClearButton([msg, chatbot], value="Limpar conversa")
 
+        # Eventos do sistema
+        toggle_btn.click(
+            fn=toggle_cover,
+            inputs=[],
+            outputs=[cover_html, toggle_btn],
+            show_progress=False
+        ).then(
+            fn=lambda: True,
+            outputs=[is_covered]
+        )
+
+        toggle_btn.click(
+            fn=remove_cover,
+            inputs=[],
+            outputs=[cover_html, toggle_btn],
+            show_progress=False,
+            trigger_mode=lambda s: s
+        ).then(
+            fn=lambda: False,
+            outputs=[is_covered]
+        )
+
         # Eventos de pré-processamento
-        db_btn.click(initialize_database, inputs=[document, slider_chunk_size, slider_chunk_overlap], outputs=[vector_db, collection_name, db_progress])
-        qachain_btn.click(initialize_LLM, inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db], outputs=[qa_chain, llm_progress]).then(
-            lambda: [None, "", 0, "", 0, "", 0], inputs=None, outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], queue=False)
+        process_btn.click(
+            initialize_database,
+            inputs=[document, slider_chunk_size, slider_chunk_overlap],
+            outputs=[vector_db, collection_name, db_progress]
+        )
+
+        qa_init_btn.click(
+            initialize_LLM,
+            inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db],
+            outputs=[qa_chain, llm_progress]
+        ).then(
+            lambda: [None, "", 0, "", 0, "", 0],
+            inputs=None,
+            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+            queue=False
+        )
 
         # Eventos do chatbot
-        msg.submit(conversation, inputs=[qa_chain, msg, chatbot], outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], queue=False)
-        submit_btn.click(conversation, inputs=[qa_chain, msg, chatbot], outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], queue=False)
-        clear_btn.click(lambda: [None, "", 0, "", 0, "", 0], inputs=None, outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], queue=False)
+        msg.submit(
+            conversation,
+            inputs=[qa_chain, msg, chatbot],
+            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+            queue=False
+        )
+
+        submit_btn.click(
+            conversation,
+            inputs=[qa_chain, msg, chatbot],
+            outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+            queue=False
+        )
+
+        clear_btn.click(
+            lambda: [None, "", 0, "", 0, "", 0],
+            inputs=None,
+            outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page],
+            queue=False
+        )
 
     demo.queue().launch(debug=True)
 
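
As committed, toggle_btn gets two unconditional .click() listeners (one calling toggle_cover, one calling remove_cover), and Gradio runs every listener registered on an event; trigger_mode, in the Gradio versions that support it, also expects one of the strings "once", "multiple" or "always_last" rather than a callable. The sketch below shows one way the same toggle could be driven by a single listener that branches on the is_covered state. It is a minimal illustration, not the committed code; toggle_cover_bar and COVER_DIV are hypothetical names, and only the component names and labels mirror app.py.

# Sketch only: single-listener variant of the cover-bar toggle.
# toggle_cover_bar and COVER_DIV are hypothetical; they are not part of app.py.
import gradio as gr

COVER_DIV = """
<div style="position: fixed; top: 0; left: 0; right: 0; height: 64px;
            background: white; z-index: 9999; box-shadow: 0 1px 3px rgba(0,0,0,0.1);">
</div>
"""

def toggle_cover_bar(is_covered):
    # Decide the cover HTML, the button label, and the next state in one place.
    if is_covered:
        return gr.update(value=""), gr.update(value="Ocultar Barra"), False
    return gr.update(value=COVER_DIV), gr.update(value="Mostrar Barra"), True

with gr.Blocks() as demo:
    cover_html = gr.HTML("")
    toggle_btn = gr.Button("Ocultar Barra", elem_classes="cover-button")
    is_covered = gr.State(False)

    # One listener reads and rewrites the state, so a second .click() registration is not needed.
    toggle_btn.click(
        fn=toggle_cover_bar,
        inputs=[is_covered],
        outputs=[cover_html, toggle_btn, is_covered],
    )

demo.launch()

The sketch reuses the commit's cover-button class and the "Ocultar Barra"/"Mostrar Barra" labels, so it would pick up the same custom_css styling.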