DHEIVER committed
Commit a0d7f95 · verified · 1 Parent(s): ca17588

Update app.py

Files changed (1): app.py (+88 -110)
app.py CHANGED
@@ -1,60 +1,53 @@
import gradio as gr
import os
-
from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain.chains import ConversationalRetrievalChain
- from langchain_community.embeddings import HuggingFaceEmbeddings
- from langchain_community.llms import HuggingFacePipeline
- from langchain.chains import ConversationChain
- from langchain.memory import ConversationBufferMemory
+ from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.llms import HuggingFaceEndpoint
-
+ from langchain.memory import ConversationBufferMemory
from pathlib import Path
import chromadb
from unidecode import unidecode
-
- from transformers import AutoTokenizer
- import transformers
- import torch
- import tqdm
- import accelerate
import re

- # default_persist_directory = './chroma_HF/'
+ # Lista de modelos LLM disponíveis
list_llm = [
-     "mistralai/Mistral-7B-Instruct-v0.2",
-     "mistralai/Mixtral-8x7B-Instruct-v0.1",
-     "mistralai/Mistral-7B-Instruct-v0.1",
-     "google/gemma-7b-it",
-     "google/gemma-2b-it",
-     "HuggingFaceH4/zephyr-7b-beta",
-     "HuggingFaceH4/zephyr-7b-gemma-v0.1",
-     "meta-llama/Llama-2-7b-chat-hf",
-     "microsoft/phi-2",
-     "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
-     "mosaicml/mpt-7b-instruct",
-     "tiiuae/falcon-7b-instruct",
-     "google/flan-t5-xxl"]
+     "mistralai/Mistral-7B-Instruct-v0.2",
+     "mistralai/Mixtral-8x7B-Instruct-v0.1",
+     "mistralai/Mistral-7B-Instruct-v0.1",
+     "google/gemma-7b-it",
+     "google/gemma-2b-it",
+     "HuggingFaceH4/zephyr-7b-beta",
+     "HuggingFaceH4/zephyr-7b-gemma-v0.1",
+     "meta-llama/Llama-2-7b-chat-hf",
+     "microsoft/phi-2",
+     "TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+     "mosaicml/mpt-7b-instruct",
+     "tiiuae/falcon-7b-instruct",
+     "google/flan-t5-xxl"
+ ]
list_llm_simple = [os.path.basename(llm) for llm in list_llm]

- # Load PDF document and create doc splits
+ # Função para carregar documentos PDF e dividir em chunks
def load_doc(list_file_path, chunk_size, chunk_overlap):
    loaders = [PyPDFLoader(x) for x in list_file_path]
    pages = []
    for loader in loaders:
        pages.extend(loader.load())
    text_splitter = RecursiveCharacterTextSplitter(
-         chunk_size = chunk_size,
-         chunk_overlap = chunk_overlap)
+         chunk_size=chunk_size,
+         chunk_overlap=chunk_overlap
+     )
    doc_splits = text_splitter.split_documents(pages)
    return doc_splits

- # Create vector database
+ # Função para criar o banco de dados vetorial
def create_db(splits, collection_name):
    embedding = HuggingFaceEmbeddings()
-     new_client = chromadb.EphemeralClient()
+     # Usando PersistentClient para persistir o banco de dados
+     new_client = chromadb.PersistentClient(path="./chroma_db")
    vectordb = Chroma.from_documents(
        documents=splits,
        embedding=embedding,
@@ -63,65 +56,58 @@ def create_db(splits, collection_name):
    )
    return vectordb

- # Load vector database
- def load_db():
-     embedding = HuggingFaceEmbeddings()
-     vectordb = Chroma(
-         embedding_function=embedding)
-     return vectordb
-
- # Initialize langchain LLM chain
+ # Função para inicializar a cadeia de QA com o modelo LLM
def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    progress(0.1, desc="Inicializando tokenizer da HF...")
    progress(0.5, desc="Inicializando Hub da HF...")
    if llm_model == "mistralai/Mixtral-8x7B-Instruct-v0.1":
        llm = HuggingFaceEndpoint(
-             repo_id=llm_model,
-             temperature = temperature,
-             max_new_tokens = max_tokens,
-             top_k = top_k,
-             load_in_8bit = True,
+             repo_id=llm_model,
+             temperature=temperature,
+             max_new_tokens=max_tokens,
+             top_k=top_k,
+             load_in_8bit=True,
        )
-     elif llm_model in ["HuggingFaceH4/zephyr-7b-gemma-v0.1","mosaicml/mpt-7b-instruct"]:
+     elif llm_model in ["HuggingFaceH4/zephyr-7b-gemma-v0.1", "mosaicml/mpt-7b-instruct"]:
        raise gr.Error("O modelo LLM é muito grande para ser carregado automaticamente no endpoint de inferência gratuito")
    elif llm_model == "microsoft/phi-2":
        llm = HuggingFaceEndpoint(
-             repo_id=llm_model,
-             temperature = temperature,
-             max_new_tokens = max_tokens,
-             top_k = top_k,
-             trust_remote_code = True,
-             torch_dtype = "auto",
+             repo_id=llm_model,
+             temperature=temperature,
+             max_new_tokens=max_tokens,
+             top_k=top_k,
+             trust_remote_code=True,
+             torch_dtype="auto",
        )
    elif llm_model == "TinyLlama/TinyLlama-1.1B-Chat-v1.0":
        llm = HuggingFaceEndpoint(
-             repo_id=llm_model,
-             temperature = temperature,
-             max_new_tokens = 250,
-             top_k = top_k,
+             repo_id=llm_model,
+             temperature=temperature,
+             max_new_tokens=250,
+             top_k=top_k,
        )
    elif llm_model == "meta-llama/Llama-2-7b-chat-hf":
        raise gr.Error("O modelo Llama-2-7b-chat-hf requer uma assinatura Pro...")
    else:
        llm = HuggingFaceEndpoint(
-             repo_id=llm_model,
-             temperature = temperature,
-             max_new_tokens = max_tokens,
-             top_k = top_k,
+             repo_id=llm_model,
+             temperature=temperature,
+             max_new_tokens=max_tokens,
+             top_k=top_k,
        )
-
+
    progress(0.75, desc="Definindo memória de buffer...")
    memory = ConversationBufferMemory(
        memory_key="chat_history",
        output_key='answer',
        return_messages=True
    )
-     retriever=vector_db.as_retriever()
+     retriever = vector_db.as_retriever()
    progress(0.8, desc="Definindo cadeia de recuperação...")
    qa_chain = ConversationalRetrievalChain.from_llm(
        llm,
        retriever=retriever,
-         chain_type="stuff",
+         chain_type="stuff",
        memory=memory,
        return_source_documents=True,
        verbose=False,
@@ -129,10 +115,10 @@ def initialize_llmchain(llm_model, temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    progress(0.9, desc="Concluído!")
    return qa_chain

- # Generate collection name for vector database
+ # Função para gerar um nome de coleção válido
def create_collection_name(filepath):
    collection_name = Path(filepath).stem
-     collection_name = collection_name.replace(" ","-")
+     collection_name = collection_name.replace(" ", "-")
    collection_name = unidecode(collection_name)
    collection_name = re.sub('[^A-Za-z0-9]+', '-', collection_name)
    collection_name = collection_name[:50]
@@ -146,7 +132,7 @@ def create_collection_name(filepath):
    print('Nome da coleção: ', collection_name)
    return collection_name

- # Initialize database
+ # Função para inicializar o banco de dados
def initialize_database(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
    list_file_path = [x.name for x in list_file_obj if x is not None]
    progress(0.1, desc="Criando nome da coleção...")
@@ -158,12 +144,14 @@ def initialize_database(list_file_obj, chunk_size, chunk_overlap, progress=gr.Progress()):
    progress(0.9, desc="Concluído!")
    return vector_db, collection_name, "Completo!"

+ # Função para inicializar o modelo LLM
def initialize_LLM(llm_option, llm_temperature, max_tokens, top_k, vector_db, progress=gr.Progress()):
    llm_name = list_llm[llm_option]
-     print("Nome do LLM: ",llm_name)
+     print("Nome do LLM: ", llm_name)
    qa_chain = initialize_llmchain(llm_name, llm_temperature, max_tokens, top_k, vector_db, progress)
    return qa_chain, "Completo!"

+ # Função para formatar o histórico de conversa
def format_chat_history(message, chat_history):
    formatted_chat_history = []
    for user_message, bot_message in chat_history:
@@ -171,6 +159,7 @@ def format_chat_history(message, chat_history):
        formatted_chat_history.append(f"Assistente: {bot_message}")
    return formatted_chat_history

+ # Função para realizar a conversa com o chatbot
def conversation(qa_chain, message, history):
    formatted_chat_history = format_chat_history(message, history)
    response = qa_chain({"question": message, "chat_history": formatted_chat_history})
@@ -187,6 +176,7 @@ def conversation(qa_chain, message, history):
    new_history = history + [(message, response_answer)]
    return qa_chain, gr.update(value=""), new_history, response_source1, response_source1_page, response_source2, response_source2_page, response_source3, response_source3_page

+ # Função para carregar arquivos
def upload_file(file_obj):
    list_file_path = []
    for idx, file in enumerate(file_obj):
@@ -194,52 +184,54 @@ def upload_file(file_obj):
        list_file_path.append(file_path)
    return list_file_path

+ # Interface Gradio
def demo():
    with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="gray")) as demo:
        vector_db = gr.State()
        qa_chain = gr.State()
        collection_name = gr.State()
-
+
        gr.Markdown(
-             """<center><h2>Chatbot baseado em PDF</center></h2>
-             <h3>Faça perguntas sobre seus documentos PDF</h3>""")
+             """<center><h2>Chatbot baseado em PDF</center></h2>
+             <h3>Faça perguntas sobre seus documentos PDF</h3>"""
+         )
        gr.Markdown(
-             """<b>Nota:</b> Este assistente AI, usando Langchain e LLMs de código aberto, realiza geração aumentada por recuperação (RAG) a partir de seus documentos PDF. \
-             A interface do usuário explicitamente mostra múltiplos passos para ajudar a entender o fluxo de trabalho do RAG.
-             Este chatbot leva em consideração perguntas anteriores ao gerar respostas (via memória conversacional) e inclui referências de documentos para maior clareza.<br>
-             <br><b>Aviso:</b> Este espaço usa o hardware básico gratuito da Hugging Face. Alguns passos e modelos LLM usados abaixo (endpoints de inferência gratuitos) podem levar algum tempo para gerar uma resposta.
-             """)
-
+             """<b>Nota:</b> Este assistente AI, usando Langchain e LLMs de código aberto, realiza geração aumentada por recuperação (RAG) a partir de seus documentos PDF. \
+             A interface do usuário explicitamente mostra múltiplos passos para ajudar a entender o fluxo de trabalho do RAG.
+             Este chatbot leva em consideração perguntas anteriores ao gerar respostas (via memória conversacional) e inclui referências de documentos para maior clareza.<br>
+             <br><b>Aviso:</b> Este espaço usa o hardware básico gratuito da Hugging Face. Alguns passos e modelos LLM usados abaixo (endpoints de inferência gratuitos) podem levar algum tempo para gerar uma resposta.
+             """
+         )
+
        with gr.Tab("Passo 1 - Carregar PDF"):
            with gr.Row():
                document = gr.Files(height=100, file_count="multiple", file_types=["pdf"], interactive=True, label="Carregue seus documentos PDF (único ou múltiplos)")
-
+
        with gr.Tab("Passo 2 - Processar documento"):
            with gr.Row():
-                 db_btn = gr.Radio(["ChromaDB"], label="Tipo de banco de dados vetorial", value = "ChromaDB", type="index", info="Escolha seu banco de dados vetorial")
+                 db_btn = gr.Radio(["ChromaDB"], label="Tipo de banco de dados vetorial", value="ChromaDB", type="index", info="Escolha seu banco de dados vetorial")
            with gr.Accordion("Opções avançadas - Divisor de texto do documento", open=False):
                with gr.Row():
-                     slider_chunk_size = gr.Slider(minimum = 100, maximum = 1000, value=600, step=20, label="Tamanho do chunk", info="Tamanho do chunk", interactive=True)
+                     slider_chunk_size = gr.Slider(minimum=100, maximum=1000, value=600, step=20, label="Tamanho do chunk", info="Tamanho do chunk", interactive=True)
                with gr.Row():
-                     slider_chunk_overlap = gr.Slider(minimum = 10, maximum = 200, value=40, step=10, label="Sobreposição do chunk", info="Sobreposição do chunk", interactive=True)
+                     slider_chunk_overlap = gr.Slider(minimum=10, maximum=200, value=40, step=10, label="Sobreposição do chunk", info="Sobreposição do chunk", interactive=True)
            with gr.Row():
                db_progress = gr.Textbox(label="Inicialização do banco de dados vetorial", value="Nenhum")
            with gr.Row():
                db_btn = gr.Button("Gerar banco de dados vetorial")
-
+
        with gr.Tab("Passo 3 - Inicializar cadeia de QA"):
            with gr.Row():
-                 llm_btn = gr.Radio(list_llm_simple, \
-                     label="Modelos LLM", value = list_llm_simple[0], type="index", info="Escolha seu modelo LLM")
+                 llm_btn = gr.Radio(list_llm_simple, label="Modelos LLM", value=list_llm_simple[0], type="index", info="Escolha seu modelo LLM")
            with gr.Accordion("Opções avançadas - Modelo LLM", open=False):
                with gr.Row():
-                     slider_temperature = gr.Slider(minimum = 0.01, maximum = 1.0, value=0.7, step=0.1, label="Temperatura", info="Temperatura do modelo", interactive=True)
+                     slider_temperature = gr.Slider(minimum=0.01, maximum=1.0, value=0.7, step=0.1, label="Temperatura", info="Temperatura do modelo", interactive=True)
                with gr.Row():
-                     slider_maxtokens = gr.Slider(minimum = 224, maximum = 4096, value=1024, step=32, label="Máximo de Tokens", info="Máximo de tokens do modelo", interactive=True)
+                     slider_maxtokens = gr.Slider(minimum=224, maximum=4096, value=1024, step=32, label="Máximo de Tokens", info="Máximo de tokens do modelo", interactive=True)
                with gr.Row():
-                     slider_topk = gr.Slider(minimum = 1, maximum = 10, value=3, step=1, label="Amostras top-k", info="Amostras top-k do modelo", interactive=True)
+                     slider_topk = gr.Slider(minimum=1, maximum=10, value=3, step=1, label="Amostras top-k", info="Amostras top-k do modelo", interactive=True)
            with gr.Row():
-                 llm_progress = gr.Textbox(value="Nenhum",label="Inicialização da cadeia de QA")
+                 llm_progress = gr.Textbox(value="Nenhum", label="Inicialização da cadeia de QA")
            with gr.Row():
                qachain_btn = gr.Button("Inicializar cadeia de Perguntas e Respostas")

@@ -260,31 +252,17 @@ def demo():
        with gr.Row():
            submit_btn = gr.Button("Enviar mensagem")
            clear_btn = gr.ClearButton([msg, chatbot], value="Limpar conversa")
-
-         # Preprocessing events
-         db_btn.click(initialize_database, \
-             inputs=[document, slider_chunk_size, slider_chunk_overlap], \
-             outputs=[vector_db, collection_name, db_progress])
-         qachain_btn.click(initialize_LLM, \
-             inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db], \
-             outputs=[qa_chain, llm_progress]).then(lambda:[None,"",0,"",0,"",0], \
-             inputs=None, \
-             outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
-             queue=False)

-         # Chatbot events
-         msg.submit(conversation, \
-             inputs=[qa_chain, msg, chatbot], \
-             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
-             queue=False)
-         submit_btn.click(conversation, \
-             inputs=[qa_chain, msg, chatbot], \
-             outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
-             queue=False)
-         clear_btn.click(lambda:[None,"",0,"",0,"",0], \
-             inputs=None, \
-             outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], \
-             queue=False)
+         # Eventos de pré-processamento
+         db_btn.click(initialize_database, inputs=[document, slider_chunk_size, slider_chunk_overlap], outputs=[vector_db, collection_name, db_progress])
+         qachain_btn.click(initialize_LLM, inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, vector_db], outputs=[qa_chain, llm_progress]).then(
+             lambda: [None, "", 0, "", 0, "", 0], inputs=None, outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], queue=False)
+
+         # Eventos do chatbot
+         msg.submit(conversation, inputs=[qa_chain, msg, chatbot], outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], queue=False)
+         submit_btn.click(conversation, inputs=[qa_chain, msg, chatbot], outputs=[qa_chain, msg, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], queue=False)
+         clear_btn.click(lambda: [None, "", 0, "", 0, "", 0], inputs=None, outputs=[chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page], queue=False)
+
    demo.queue().launch(debug=True)

if __name__ == "__main__":
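
For context, the visible functional change in create_db is the swap from chromadb.EphemeralClient() to chromadb.PersistentClient(path="./chroma_db"). The snippet below is a minimal sketch of what that swap means; it is not part of the commit and assumes chromadb >= 0.4 is installed, with the collection name chosen purely for illustration.

# Illustrative sketch only (not part of the commit), assuming chromadb >= 0.4.
import chromadb

# Before the commit: EphemeralClient keeps the index only in memory,
# so the vector store is lost whenever the process restarts.
ephemeral_client = chromadb.EphemeralClient()

# After the commit: PersistentClient writes the index to ./chroma_db on disk,
# so collections survive restarts.
persistent_client = chromadb.PersistentClient(path="./chroma_db")

# "pdf-chatbot-demo" is a hypothetical collection name for this sketch.
collection = persistent_client.get_or_create_collection("pdf-chatbot-demo")
print(collection.count())  # 0 on the first run; previously stored documents persist across runs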