Update modules/semantic/semantic_live_interface.py
modules/semantic/semantic_live_interface.py
CHANGED
@@ -1,197 +1,236 @@
- (removed) Previous 197-line version of the file. Only its opening was captured in this view: the same header, imports, and display_semantic_live_interface() signature, with a session-state dict that stops at 'text_changed': False and has no 'pending_analysis' flag. The remainder of the removed version is not recoverable here.
+ New 236-line version of the file:

# modules/semantic/semantic_live_interface.py
import streamlit as st
from streamlit_float import *
from streamlit_antd_components import *
import pandas as pd
import logging

# Logger configuration
logger = logging.getLogger(__name__)

# Local imports
from .semantic_process import (
    process_semantic_input,
    format_semantic_results
)

from ..utils.widget_utils import generate_unique_key
from ..database.semantic_mongo_db import (
    store_student_semantic_result,
    store_student_semantic_live_result  # assumed to be defined in semantic_mongo_db; used below to save live analyses
)
from ..database.chat_mongo_db import store_chat_history, get_chat_history

def display_semantic_live_interface(lang_code, nlp_models, semantic_t):
    """
    Live semantic analysis interface with adjusted column proportions.
    """
    try:
        # 1. Initialize session state more robustly
        if 'semantic_live_state' not in st.session_state:
            st.session_state.semantic_live_state = {
                'analysis_count': 0,
                'current_text': '',
                'last_result': None,
                'text_changed': False,
                'pending_analysis': False  # New flag for a pending analysis
            }

        # 2. Callback to handle text changes
        def on_text_change():
            current_text = st.session_state.semantic_live_text
            st.session_state.semantic_live_state['current_text'] = current_text
            st.session_state.semantic_live_state['text_changed'] = True

        # 3. Create columns with the new proportion (1:3)
        input_col, result_col = st.columns([1, 3])

        # Left column: text input
        with input_col:
            st.subheader(semantic_t.get('enter_text', 'Ingrese su texto'))

            # Text area with change handling
            text_input = st.text_area(
                semantic_t.get('text_input_label', 'Escriba o pegue su texto aquí'),
                height=500,
                key="semantic_live_text",
                value=st.session_state.semantic_live_state.get('current_text', ''),
                on_change=on_text_change,
                label_visibility="collapsed"
            )

            # Analysis button
            analyze_button = st.button(
                semantic_t.get('analyze_button', 'Analizar'),
                key="semantic_live_analyze",
                type="primary",
                icon="🔍",
                disabled=not text_input,
                use_container_width=True
            )

            # 4. Mark the analysis as pending when the button is pressed
            if analyze_button and text_input:
                st.session_state.semantic_live_state['pending_analysis'] = True
                st.rerun()

        # 5. Handle a pending analysis
        if st.session_state.semantic_live_state.get('pending_analysis', False):
            try:
                with st.spinner(semantic_t.get('processing', 'Procesando...')):
                    analysis_result = process_semantic_input(
                        text_input,
                        lang_code,
                        nlp_models,
                        semantic_t
                    )

                    if analysis_result['success']:
                        st.session_state.semantic_live_state['last_result'] = analysis_result
                        st.session_state.semantic_live_state['analysis_count'] += 1
                        st.session_state.semantic_live_state['text_changed'] = False

                        # Save to the live collection
                        store_result = store_student_semantic_live_result(
                            st.session_state.username,
                            text_input,
                            analysis_result['analysis'],
                            lang_code
                        )

                        if not store_result:
                            st.error(semantic_t.get('error_saving', 'Error al guardar el análisis'))
                        else:
                            st.success(semantic_t.get('analysis_saved', 'Análisis guardado correctamente'))
                    else:
                        st.error(analysis_result.get('message', 'Error en el análisis'))

            except Exception as e:
                logger.error(f"Error en análisis: {str(e)}")
                st.error(semantic_t.get('error_processing', 'Error al procesar el texto'))
            finally:
                st.session_state.semantic_live_state['pending_analysis'] = False

        # Right column: results display
        with result_col:
            st.subheader(semantic_t.get('live_results', 'Resultados en vivo'))

            if 'last_result' in st.session_state.semantic_live_state and \
               st.session_state.semantic_live_state['last_result'] is not None:

                analysis = st.session_state.semantic_live_state['last_result']['analysis']

                if 'key_concepts' in analysis and analysis['key_concepts'] and \
                   'concept_graph' in analysis and analysis['concept_graph'] is not None:

                    st.markdown("""
                        <style>
                        .unified-container {
                            background-color: white;
                            border-radius: 10px;
                            overflow: hidden;
                            box-shadow: 0 2px 4px rgba(0,0,0,0.1);
                            width: 100%;
                            margin-bottom: 1rem;
                        }
                        .concept-table {
                            display: flex;
                            flex-wrap: nowrap;
                            gap: 6px;
                            padding: 10px;
                            background-color: #f8f9fa;
                            overflow-x: auto;
                            white-space: nowrap;
                        }
                        .concept-item {
                            background-color: white;
                            border-radius: 4px;
                            padding: 4px 8px;
                            display: inline-flex;
                            align-items: center;
                            gap: 4px;
                            box-shadow: 0 1px 2px rgba(0,0,0,0.1);
                            flex-shrink: 0;
                        }
                        .concept-name {
                            font-weight: 500;
                            color: #1f2937;
                            font-size: 0.8em;
                        }
                        .concept-freq {
                            color: #6b7280;
                            font-size: 0.75em;
                        }
                        .graph-section {
                            padding: 20px;
                            background-color: white;
                        }
                        </style>
                    """, unsafe_allow_html=True)

                    with st.container():
                        # Key concepts on a single line
                        concepts_html = """
                            <div class="unified-container">
                            <div class="concept-table">
                        """
                        concepts_html += ''.join(
                            f'<div class="concept-item"><span class="concept-name">{concept}</span>'
                            f'<span class="concept-freq">({freq:.2f})</span></div>'
                            for concept, freq in analysis['key_concepts']
                        )
                        concepts_html += "</div></div>"
                        st.markdown(concepts_html, unsafe_allow_html=True)

                        # Concept graph
                        if 'concept_graph' in analysis and analysis['concept_graph'] is not None:
                            st.image(
                                analysis['concept_graph'],
                                use_container_width=True
                            )

                            # Controls in two columns
                            col1, col2 = st.columns([1, 3])

                            with col1:
                                # Button to consult the assistant (NEW)
                                if st.button("💬 Consultar con Asistente",
                                             key="semantic_live_chat_button",
                                             use_container_width=True):
                                    if 'last_result' not in st.session_state.semantic_live_state:
                                        st.error("Primero complete el análisis semántico")
                                    else:
                                        st.session_state.semantic_agent_data = {
                                            'text': st.session_state.semantic_live_state['current_text'],
                                            'metrics': analysis,
                                            'graph_data': analysis.get('concept_graph')
                                        }
                                        st.session_state.semantic_agent_active = True
                                        st.rerun()

                                # Download button
                                st.download_button(
                                    label="📥 " + semantic_t.get('download_graph', "Descargar"),
                                    data=analysis['concept_graph'],
                                    file_name="semantic_live_graph.png",
                                    mime="image/png",
                                    use_container_width=True
                                )

                                # Notification when the agent is active
                                if st.session_state.get('semantic_agent_active', False):
                                    st.success(semantic_t.get('semantic_agent_ready_message',
                                        'El agente virtual está listo. Abre el chat en la barra lateral.'))

                            with st.expander("📊 " + semantic_t.get('graph_help', "Interpretación del gráfico")):
                                st.markdown("""
                                - 🔀 Las flechas indican la dirección de la relación entre conceptos
                                - 🎨 Los colores más intensos indican conceptos más centrales
                                - ⭕ El tamaño de los nodos representa la frecuencia del concepto
                                - ↔️ El grosor de las líneas indica la fuerza de la conexión
                                """)
                else:
                    st.info(semantic_t.get('no_graph', 'No hay datos para mostrar'))
            else:
                st.info(semantic_t.get('analysis_prompt', 'Realice un análisis para ver los resultados'))

    except Exception as e:
        logger.error(f"Error general en interfaz semántica en vivo: {str(e)}")
        st.error(semantic_t.get('general_error', "Se produjo un error. Por favor, intente de nuevo."))