Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -194,9 +194,37 @@ footer {
 }
 """
 
+import gradio as gr
+
+# Placeholder functions for the event handlers
+def initialize_database(document, chunk_size, chunk_overlap):
+    # Logic to initialize the vector database
+    vector_db = "Vector database initialized"
+    collection_name = "Collection 1"
+    db_progress = "Database initialized"
+    return vector_db, collection_name, db_progress
+
+def initialize_LLM(llm_model, temperature, max_tokens, top_k, vector_db):
+    # Logic to initialize the LLM chain
+    qa_chain = "QA chain initialized"
+    llm_progress = "LLM chain initialized"
+    return qa_chain, llm_progress
+
+def conversation(qa_chain, message, chatbot):
+    # Chatbot message-handling logic
+    response = f"Reply to: {message}"
+    doc_source1 = "Source 1"
+    source1_page = 1
+    doc_source2 = "Source 2"
+    source2_page = 2
+    doc_source3 = "Source 3"
+    source3_page = 3
+    chatbot.append((message, response))
+    return qa_chain, message, chatbot, doc_source1, source1_page, doc_source2, source2_page, doc_source3, source3_page
+
 # Gradio interface
 def demo():
-    with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="gray"), css=
+    with gr.Blocks(theme=gr.themes.Default(primary_hue="blue", secondary_hue="gray"), css=None) as demo:
         # Custom top bar
         with gr.Row(visible=True, elem_id="top_bar"):
             gr.Image(value="https://huggingface.co/front/assets/huggingface_logo-noborder.svg",
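The hunk above completes the previously truncated gr.Blocks( call as a proper "with gr.Blocks(...) as demo:" context manager and stubs the three event handlers with fixed strings, a common way to get a Space past a Runtime error before wiring in real logic. As a rough, minimal sketch of how such a stub plugs into a Blocks app (the component names document, the chunk sliders, and status are illustrative, not taken from the commit):

import gradio as gr

def initialize_database(document, chunk_size, chunk_overlap):
    # Placeholder: a real handler would build the vector store here
    return "Database initialized"

with gr.Blocks(theme=gr.themes.Default(primary_hue="blue")) as demo:
    document = gr.File(label="PDF document")  # illustrative components
    chunk_size = gr.Slider(100, 1000, value=500, label="Chunk size")
    chunk_overlap = gr.Slider(0, 200, value=50, label="Chunk overlap")
    status = gr.Textbox(label="Status")
    gr.Button("Process").click(
        initialize_database,
        inputs=[document, chunk_size, chunk_overlap],
        outputs=[status],  # one output component per returned value
    )

demo.launch()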
@@ -227,8 +255,6 @@ def demo():
         """
         )
 
-
-
         # Step 1 - PDF upload
         with gr.Tab("Step 1 - Upload PDF"):
             with gr.Row():
@@ -255,7 +281,7 @@ def demo():
         # Step 3 - QA chain configuration
         with gr.Tab("Step 3 - Initialize QA chain"):
             with gr.Row():
-                llm_btn = gr.Radio(
+                llm_btn = gr.Radio(["Model 1", "Model 2"], label="LLM models", value="Model 1",
                                    type="index", info="Choose your LLM model")
             with gr.Accordion("Advanced options - LLM model", open=False):
                 with gr.Row():
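The replacement gr.Radio now passes its choices and keeps type="index", which makes the selected value arrive in the handler as a 0-based position in the choices list rather than as the label string; that matches initialize_LLM treating llm_model as an index. A small self-contained sketch of that behavior (the pick handler and out textbox are illustrative):

import gradio as gr

def pick(i):
    # With type="index", i is 0 or 1, not "Model 1"/"Model 2"
    return f"Selected index: {i}"

with gr.Blocks() as app:
    llm_btn = gr.Radio(["Model 1", "Model 2"], value="Model 1",
                       type="index", info="Choose your LLM model")
    out = gr.Textbox(label="Result")
    llm_btn.change(pick, inputs=[llm_btn], outputs=[out])

app.launch()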
@@ -295,13 +321,13 @@ def demo():
         process_btn.click(
             initialize_database,
             inputs=[document, slider_chunk_size, slider_chunk_overlap],
-            outputs=[
+            outputs=[db_progress]
         )
 
         qa_init_btn.click(
             initialize_LLM,
-            inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk,
-            outputs=[
+            inputs=[llm_btn, slider_temperature, slider_maxtokens, slider_topk, db_progress],
+            outputs=[llm_progress]
         ).then(
             lambda: [None, "", 0, "", 0, "", 0],
             inputs=None,
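This hunk completes the event wiring: in Gradio, .click() returns an event that exposes .then(), which registers a follow-up callback that runs only after the first handler has finished. One caveat: Gradio expects a handler's return values to line up one-to-one with the components in outputs, so any mismatch between the stub handlers' return counts and these outputs lists is a plausible source of the Space's runtime errors. A minimal sketch of the chaining pattern (button and textbox names are illustrative):

import gradio as gr

def init_chain():
    # First step: pretend to build the QA chain
    return "QA chain initialized"

with gr.Blocks() as app:
    qa_init_btn = gr.Button("Initialize QA chain")
    llm_progress = gr.Textbox(label="Progress")
    note = gr.Textbox(label="Note")
    # .then() fires only once init_chain has returned,
    # so the follow-up reset happens after initialization
    qa_init_btn.click(init_chain, inputs=None, outputs=[llm_progress]).then(
        lambda: "fields reset", inputs=None, outputs=[note]
    )

app.queue().launch()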
@@ -334,4 +360,4 @@ def demo():
     demo.queue().launch(debug=True)
 
 if __name__ == "__main__":
-    demo()
+    demo()