# --- Hugging Face Space page header (commented out: not valid Python) ---
# portalprogramando / interface.py
# aldohenrique's picture
# Update interface.py
# ba345bc verified
# raw | history | blame | 9.05 kB
# interface.py
import gradio as gr
import uuid
from ai_logic import (
responder_como_aldo,
build_and_save_vector_store,
MODELS,
DEFAULT_MODEL,
inicializar_sistema # <--- Import the initialization function
)
# Custom CSS injected into the Gradio Blocks app: a full-viewport flex layout
# (fixed title bar, scrollable chat area, pinned input row), a purple gradient
# header, dark-themed textbox/chat bubbles, and a <=768px mobile breakpoint.
# NOTE(review): the `#component-6` / `#component-7` selectors and the
# `.svelte-gjtrl6` class target auto-generated Gradio internals — these are
# fragile across Gradio versions; re-verify after any Gradio upgrade.
css_customizado: str = """
.gradio-container {
max-width: 1400px !important;
margin: 0 auto;
width: 99%;
height: 100vh !important;
display: flex;
flex-direction: column;
overflow: hidden;
}
.main-content {
display: flex;
flex-direction: column;
height: 100vh;
overflow: hidden;
flex-shrink: 0;
}
.titulo-principal {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
color: white !important;
padding: 10px !important;
border-radius: 10px !important;
margin-bottom: 10px !important;
text-align: center !important;
flex-shrink: 0;
}
.chat-area {
flex: 1;
display: flex;
flex-direction: column;
overflow: hidden;
}
.chat-container {
flex: 1;
overflow-y: auto;
margin-bottom: 10px;
}
.input-container {
flex-shrink: 0;
padding: 10px 0;
display: flex;
flex-direction: column;
justify-content: center;
}
.additional-content {
overflow-y: auto;
padding-top: 20px;
}
.gr-textbox textarea {
font-size: 14px !important;
line-height: 1.5 !important;
}
.resposta-container {
background-color: #ffffff !important;
color: #1a1a1a !important;
border: 1px solid #e0e0e0 !important;
border-radius: 20px !important;
padding: 20px !important;
margin: 10px 0 !important;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05) !important;
}
.resposta-container pre code {
color: #1a1a1a !important;
background-color: #f8f9fa !important;
}
.pergunta-container {
background-color: #f0f8ff !important;
border-radius: 8px !important;
padding: 15px !important;
}
.modelo-dropdown {
margin-bottom: 15px !important;
}
#entrada_usuario textarea {
color: white !important;
font-size: large !important;
background-color: #1a1a1a !important;
min-height: 60px !important;
}
.message-content {
opacity: 1 !important;
font-size: larger;
color: white !important;
background-color: #1a1a1a !important;
}
/* Responsivo */
@media (max-width: 768px) {
.titulo-principal {
padding: 10px !important;
}
#entrada_usuario textarea {
min-height: 50px !important;
font-size: 16px !important;
}
}
#component-6{flex-grow: initial !important;}
#component-7{flex-grow: initial !important;}
.message-wrap.svelte-gjtrl6 .prose.chatbot.md {
opacity: 1 !important;
}
"""
def criar_interface():
    """Build and return the main Gradio Blocks chat interface.

    Layout: gradient title bar, scrollable chat area with model dropdown and
    input row, plus accordions for RAG retraining, example questions, API
    status, and general information. Wires the send button / Enter key to a
    streaming chat handler and the retrain button to the vector-store builder.

    Returns:
        gr.Blocks: the assembled (not yet launched) Gradio application.
    """
    with gr.Blocks(title="Dr. Aldo Henrique - API Externa", theme=gr.themes.Soft(), css=css_customizado) as interface:
        # One UUID per page load so ai_logic can keep per-session chat memory.
        session_id_state = gr.State(str(uuid.uuid4()))
        with gr.Column(elem_classes="main-content"):
            gr.HTML("""
            <div class="titulo-principal">
                <h4 style="margin: 0;">🤖 iAldo - Converse com o <a href="https://aldohenrique.com.br/" style="color: white; text-decoration: underline;">Prof. Dr. Aldo Henrique</a></h4>
            </div>
            """)
            with gr.Column(elem_classes="chat-area"):
                with gr.Column(elem_classes="chat-container"):
                    chatbot = gr.Chatbot(
                        label="💬 Área do chat",
                        elem_id="chat",
                        height="100%"
                    )
                with gr.Column(elem_classes="input-container"):
                    with gr.Row():
                        # Choices come from the MODELS dict populated by ai_logic.
                        modelo_select = gr.Dropdown(
                            choices=list(MODELS.keys()),
                            value=DEFAULT_MODEL,
                            label="🧠 Selecione o Modelo de Pensamento",
                            elem_classes="modelo-dropdown"
                        )
                    with gr.Row():
                        user_input = gr.Textbox(
                            show_label=False,
                            placeholder="Digite sua pergunta e pressione Enter ou clique em Enviar",
                            lines=2,
                            elem_id="entrada_usuario"
                        )
                        enviar_btn = gr.Button("Enviar", variant="primary")
            with gr.Column(elem_classes="additional-content"):
                with gr.Accordion("⚙️ Controle do Conhecimento (RAG)", open=False):
                    status_rag = gr.Textbox(label="Status do Retreino", interactive=False)
                    botao_retreinar = gr.Button("🔄 Atualizar Conhecimento do Blog", variant="stop")
                    download_faiss_file = gr.File(label="Download do Índice FAISS", interactive=False, file_count="single", file_types=[".pkl"])
                    download_urls_file = gr.File(label="Download das URLs Processadas", interactive=False, file_count="single", file_types=[".pkl"])
                with gr.Accordion("📚 Exemplos de Perguntas", open=False):
                    gr.Examples(
                        examples=[
                            ["Como implementar uma lista ligada em C com todas as operações básicas?", DEFAULT_MODEL],
                            ["Qual a sua opinião sobre o uso de ponteiros em C++ moderno, baseada no seu blog?", "Mistral 7B (Mais acertivo)"],  # dropdown label, not model name
                            ["Resuma o que você escreveu sobre machine learning no seu blog.", "Zephyr 7B (Meio Termo)"],  # dropdown label, not model name
                        ],
                        inputs=[user_input, modelo_select]
                    )
                with gr.Accordion("🔧 Status da API", open=False):
                    # Static text: re-running model health checks here would be
                    # expensive, so we assume success if the page rendered.
                    status_api = gr.Textbox(label="Status dos Modelos", interactive=False, lines=8,
                                            value="Modelos carregados com sucesso! Verifique o console para detalhes.")
                with gr.Accordion("ℹ️ Informações", open=False):
                    gr.Markdown("""
                    ### Sobre o Dr. Aldo Henrique:
                    - **Especialidade**: Linguagens C, Java, Desenvolvimento Web, Inteligência Artificial
                    - **Conhecimento Adicional**: Conteúdo do blog aldohenrique.com.br
                    ### Dicas para melhores respostas:
                    - Faça perguntas específicas sobre o conteúdo do blog para ver o RAG em ação!
                    - Peça resumos ou opiniões sobre temas que o professor aborda.
                    """)

        def responder(chat_history, user_msg, modelo, session_id):
            """Streaming chat handler: yields a typing placeholder, then the answer.

            Args:
                chat_history: current [[user, bot], ...] pairs from the Chatbot.
                user_msg: raw text from the input textbox.
                modelo: selected model label from the dropdown.
                session_id: per-page-load UUID for conversation memory.
            """
            if not user_msg.strip():
                # BUG FIX: this generator previously used a bare `return`,
                # ending the stream before any yield so Gradio received no
                # update for the event. Yield once so the outputs are emitted
                # (chat unchanged, textbox cleared) before finishing.
                yield chat_history, ""
                return
            chat_history = chat_history + [[user_msg, "Dr. Aldo Henrique está digitando..."]]
            yield chat_history, ""
            resposta_final = responder_como_aldo(session_id, user_msg, modelo)
            chat_history[-1][1] = resposta_final
            yield chat_history, ""

        # Button click and Enter key share the same handler and session state.
        enviar_btn.click(
            fn=responder,
            inputs=[chatbot, user_input, modelo_select, session_id_state],
            outputs=[chatbot, user_input],
            show_progress=True
        )
        user_input.submit(
            fn=responder,
            inputs=[chatbot, user_input, modelo_select, session_id_state],
            outputs=[chatbot, user_input],
            show_progress=True
        )
        botao_retreinar.click(
            fn=build_and_save_vector_store,
            outputs=[status_rag, download_faiss_file, download_urls_file],
            show_progress=True
        )
        # NOTE(review): Gradio sanitizes <script> tags out of HTML components in
        # recent versions, so this autofocus may never run — confirm, and
        # consider gr.Blocks(js=...) if the focus behavior is actually wanted.
        gr.HTML("""
        <script>
        window.addEventListener("load", function() {
            const textarea = document.querySelector("#entrada_usuario textarea");
            if (textarea) {
                setTimeout(() => textarea.focus(), 100);
            }
        });
        </script>
        """)
    return interface
def configurar_interface():
    """Initialize the backend and return a launchable Gradio app.

    Returns the full chat interface when ``inicializar_sistema()`` succeeds;
    otherwise returns a minimal error page.

    Returns:
        gr.Blocks: always an object with a ``.launch()`` method.

    BUG FIX: the failure path previously returned a bare ``gr.HTML``
    component, which has no ``.launch()`` — so ``app.launch()`` in the
    ``__main__`` guard crashed exactly when initialization failed. The error
    HTML is now wrapped in its own ``gr.Blocks``.
    """
    if inicializar_sistema():
        return criar_interface()  # Only build the full UI if models are available.
    with gr.Blocks(title="Erro") as pagina_erro:
        gr.HTML("<h1>Erro ao carregar a página: Não há modelos de IA suficientes disponíveis.</h1><p>Verifique o console para mais detalhes sobre os modelos e sua conexão com o Hugging Face.</p>")
    return pagina_erro
# Script entry point: build the app (full UI or error page) and serve it.
if __name__ == "__main__":
    aplicacao = configurar_interface()
    aplicacao.launch()