|
|
|
|
|
|
|
import streamlit as st
|
|
import re
|
|
import io
|
|
from io import BytesIO
|
|
import pandas as pd
|
|
import numpy as np
|
|
import time
|
|
import matplotlib.pyplot as plt
|
|
from datetime import datetime, timedelta
|
|
from spacy import displacy
|
|
import random
|
|
import base64
|
|
import seaborn as sns
|
|
import logging
|
|
|
|
|
|
from ..database.morphosintax_mongo_db import get_student_morphosyntax_analysis
|
|
from ..database.semantic_mongo_db import get_student_semantic_analysis
|
|
from ..database.discourse_mongo_db import get_student_discourse_analysis
|
|
from ..database.chat_mongo_db import get_chat_history
|
|
from ..database.current_situation_mongo_db import get_current_situation_analysis
|
|
from ..database.claude_recommendations_mongo_db import get_claude_recommendations
|
|
|
|
|
|
from ..utils.widget_utils import generate_unique_key
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
def display_student_activities(username: str, lang_code: str, t: dict):
    """
    Render every activity record for a student, one Streamlit tab per feature.

    Args:
        username: Student identifier used to query the activity collections.
        lang_code: Language code (accepted for the caller's API; not used here).
        t: Translation dictionary; missing keys fall back to Spanish defaults.
    """
    try:
        # Tab labels and their renderers are kept in lockstep by position.
        tab_labels = [
            t.get('current_situation_activities', 'Registros de la función: Mi Situación Actual'),
            t.get('morpho_activities', 'Registros de mis análisis morfosintácticos'),
            t.get('semantic_activities', 'Registros de mis análisis semánticos'),
            t.get('discourse_activities', 'Registros de mis análisis comparado de textos'),
            t.get('chat_activities', 'Registros de mis conversaciones con el tutor virtual')
        ]
        renderers = (
            display_current_situation_activities,
            display_morphosyntax_activities,
            display_semantic_activities,
            display_discourse_activities,
            display_chat_activities,
        )

        for tab, render in zip(st.tabs(tab_labels), renderers):
            with tab:
                render(username, t)

    except Exception as e:
        logger.error(f"Error mostrando actividades: {str(e)}")
        st.error(t.get('error_loading_activities', 'Error al cargar las actividades'))
|
|
|
|
|
|
|
|
|
|
def display_current_situation_activities(username: str, t: dict):
    """
    Display "current situation" analyses together with Claude's recommendations.

    Records come from two separate collections. They are merged here by pairing
    each diagnosis with the closest-in-time recommendation (within a 30-minute
    window, greedy, newest diagnosis first), then rendered newest-first as
    Streamlit expanders with a Diagnosis tab and a Recommendations tab.

    Args:
        username: Student identifier used to query both collections.
        t: Translation dictionary; missing keys fall back to Spanish defaults.
    """
    try:
        # --- Fetch both collections -------------------------------------------------
        logger.info(f"Recuperando análisis de situación actual para {username}")
        situation_analyses = get_current_situation_analysis(username, limit=10)

        if situation_analyses:
            logger.info(f"Recuperados {len(situation_analyses)} análisis de situación")
            # Log the shape of each record to aid debugging of schema drift.
            for i, analysis in enumerate(situation_analyses):
                logger.info(f"Análisis #{i+1}: Claves disponibles: {list(analysis.keys())}")
                if 'metrics' in analysis:
                    logger.info(f"Métricas disponibles: {list(analysis['metrics'].keys())}")
        else:
            logger.warning("No se encontraron análisis de situación actual")

        logger.info(f"Recuperando recomendaciones de Claude para {username}")
        claude_recommendations = get_claude_recommendations(username)

        if claude_recommendations:
            logger.info(f"Recuperadas {len(claude_recommendations)} recomendaciones de Claude")
        else:
            logger.warning("No se encontraron recomendaciones de Claude")

        # Nothing at all to show: inform the user and bail out early.
        if not situation_analyses and not claude_recommendations:
            logger.info("No se encontraron análisis de situación actual ni recomendaciones")
            st.info(t.get('no_current_situation', 'No hay análisis de situación actual registrados'))
            return

        logger.info("Creando emparejamientos temporales de análisis")

        # --- Parse timestamps into (datetime, record) pairs -------------------------
        # Records without a parseable ISO-8601 timestamp are dropped (logged).
        situation_times = []
        for analysis in situation_analyses:
            if 'timestamp' in analysis:
                try:
                    timestamp_str = analysis['timestamp']
                    # 'Z' suffix is normalized to '+00:00' for fromisoformat.
                    dt = datetime.fromisoformat(timestamp_str.replace('Z', '+00:00'))
                    situation_times.append((dt, analysis))
                except Exception as e:
                    logger.error(f"Error parseando timestamp de situación: {str(e)}")

        recommendation_times = []
        for recommendation in claude_recommendations:
            if 'timestamp' in recommendation:
                try:
                    timestamp_str = recommendation['timestamp']
                    dt = datetime.fromisoformat(timestamp_str.replace('Z', '+00:00'))
                    recommendation_times.append((dt, recommendation))
                except Exception as e:
                    logger.error(f"Error parseando timestamp de recomendación: {str(e)}")

        # Newest first; pairing below is greedy in this order.
        situation_times.sort(key=lambda x: x[0], reverse=True)
        recommendation_times.sort(key=lambda x: x[0], reverse=True)

        # --- Greedy temporal pairing ------------------------------------------------
        # combined_items: list of (iso_timestamp_key, payload) where payload holds
        # 'situation' and/or 'recommendation' (plus 'time_diff' in seconds if paired).
        combined_items = []

        for sit_time, situation in situation_times:
            # Best candidate must be strictly closer than the 30-minute window.
            best_match = None
            min_diff = timedelta(minutes=30)
            best_rec_time = None

            for rec_time, recommendation in recommendation_times:
                time_diff = abs(sit_time - rec_time)
                if time_diff < min_diff:
                    min_diff = time_diff
                    best_match = recommendation
                    best_rec_time = rec_time

            if best_match:
                timestamp_key = sit_time.isoformat()
                combined_items.append((timestamp_key, {
                    'situation': situation,
                    'recommendation': best_match,
                    'time_diff': min_diff.total_seconds()
                }))
                # Remove the matched recommendation so it cannot pair twice.
                # NOTE(review): the comprehension variable `t` shadows the translation
                # dict, but only inside the comprehension's own scope, so the outer
                # `t` is unaffected — still worth renaming for readability.
                recommendation_times = [(t, r) for t, r in recommendation_times if t != best_rec_time]
                logger.info(f"Emparejado: Diagnóstico {sit_time} con Recomendación {best_rec_time} (diferencia: {min_diff})")
            else:
                # Diagnosis with no recommendation close enough in time.
                timestamp_key = sit_time.isoformat()
                combined_items.append((timestamp_key, {
                    'situation': situation
                }))
                logger.info(f"Sin emparejar: Diagnóstico {sit_time} sin recomendación cercana")

        # Leftover recommendations that never matched any diagnosis.
        for rec_time, recommendation in recommendation_times:
            timestamp_key = rec_time.isoformat()
            combined_items.append((timestamp_key, {
                'recommendation': recommendation
            }))
            logger.info(f"Sin emparejar: Recomendación {rec_time} sin diagnóstico cercano")

        # ISO-8601 strings sort chronologically (assuming consistent offsets), so a
        # plain string sort yields newest-first display order.
        combined_items.sort(key=lambda x: x[0], reverse=True)

        logger.info(f"Procesando {len(combined_items)} elementos combinados")

        # --- Render each (possibly partial) pair ------------------------------------
        for i, (timestamp_key, analysis_pair) in enumerate(combined_items):
            try:
                situation_data = analysis_pair.get('situation', {})
                recommendation_data = analysis_pair.get('recommendation', {})
                time_diff = analysis_pair.get('time_diff')

                # Defensive: skip empty payloads.
                if not situation_data and not recommendation_data:
                    continue

                # Prefer the diagnosis text/type; fall back to the recommendation's.
                text_to_show = situation_data.get('text', recommendation_data.get('text', ''))
                text_type = situation_data.get('text_type', recommendation_data.get('text_type', ''))

                # Human-readable date for the expander title; raw key on failure.
                try:
                    dt = datetime.fromisoformat(timestamp_key)
                    formatted_date = dt.strftime("%d/%m/%Y %H:%M:%S")
                except Exception as date_error:
                    logger.error(f"Error formateando fecha: {str(date_error)}")
                    formatted_date = timestamp_key

                # Build the expander title: date, optional text-type label,
                # optional pairing badge with the time delta in minutes.
                title = f"{t.get('analysis_date', 'Fecha')}: {formatted_date}"
                if text_type:
                    text_type_display = {
                        'academic_article': t.get('academic_article', 'Artículo académico'),
                        'student_essay': t.get('student_essay', 'Trabajo universitario'),
                        'general_communication': t.get('general_communication', 'Comunicación general')
                    }.get(text_type, text_type)
                    title += f" - {text_type_display}"

                if time_diff is not None:
                    if time_diff < 60:
                        title += f" 🔄 (emparejados)"
                    else:
                        title += f" 🔄 (emparejados, diferencia: {int(time_diff//60)} min)"

                # Unique suffix for Streamlit widget keys within this expander.
                expander_id = f"analysis_{i}_{timestamp_key.replace(':', '_')}"

                with st.expander(title, expanded=False):
                    # Analyzed text, read-only.
                    st.subheader(t.get('analyzed_text', 'Texto analizado'))
                    st.text_area(
                        "Text Content",
                        value=text_to_show,
                        height=100,
                        disabled=True,
                        label_visibility="collapsed",
                        key=f"text_area_{expander_id}"
                    )

                    diagnosis_tab, recommendations_tab = st.tabs([
                        t.get('diagnosis_tab', 'Diagnóstico'),
                        t.get('recommendations_tab', 'Recomendaciones')
                    ])

                    with diagnosis_tab:
                        if situation_data and 'metrics' in situation_data:
                            metrics = situation_data['metrics']

                            col1, col2 = st.columns(2)

                            with col1:
                                st.subheader(t.get('key_metrics', 'Métricas clave'))

                                for metric_name, metric_data in metrics.items():
                                    try:
                                        # Extract a numeric score under any of the
                                        # known key names; bare numbers also accepted.
                                        score = None
                                        if isinstance(metric_data, dict):
                                            if 'normalized_score' in metric_data:
                                                score = metric_data['normalized_score']
                                            elif 'score' in metric_data:
                                                score = metric_data['score']
                                            elif 'value' in metric_data:
                                                score = metric_data['value']
                                        elif isinstance(metric_data, (int, float)):
                                            score = metric_data

                                        if score is not None:
                                            if isinstance(score, (int, float)):
                                                # Traffic-light colouring by score band.
                                                if score < 0.5:
                                                    emoji = "🔴"
                                                    color = "#ffcccc"
                                                elif score < 0.75:
                                                    emoji = "🟡"
                                                    color = "#ffffcc"
                                                else:
                                                    emoji = "🟢"
                                                    color = "#ccffcc"

                                                st.markdown(f"""
                                                <div style="background-color:{color}; padding:10px; border-radius:5px; margin-bottom:10px;">
                                                    <b>{emoji} {metric_name.capitalize()}:</b> {score:.2f}
                                                </div>
                                                """, unsafe_allow_html=True)
                                            else:
                                                # Non-numeric score: neutral styling.
                                                st.markdown(f"""
                                                <div style="background-color:#f0f0f0; padding:10px; border-radius:5px; margin-bottom:10px;">
                                                    <b>ℹ️ {metric_name.capitalize()}:</b> {str(score)}
                                                </div>
                                                """, unsafe_allow_html=True)
                                    except Exception as e:
                                        logger.error(f"Error procesando métrica {metric_name}: {str(e)}")

                            with col2:
                                st.subheader(t.get('details', 'Detalles'))

                                for metric_name, metric_data in metrics.items():
                                    try:
                                        if isinstance(metric_data, dict):
                                            # Prefer an explicit 'details' payload;
                                            # otherwise show everything but the score keys.
                                            details = None
                                            if 'details' in metric_data and metric_data['details']:
                                                details = metric_data['details']
                                            else:
                                                details = {k: v for k, v in metric_data.items()
                                                           if k not in ['normalized_score', 'score', 'value']}

                                            if details:
                                                st.write(f"**{metric_name.capitalize()}**")
                                                st.json(details, expanded=False)
                                    except Exception as e:
                                        logger.error(f"Error mostrando detalles de {metric_name}: {str(e)}")
                        else:
                            st.info(t.get('no_diagnosis', 'No hay datos de diagnóstico disponibles'))

                    with recommendations_tab:
                        # Recommendations may live under 'recommendations' or 'feedback'.
                        if recommendation_data and 'recommendations' in recommendation_data:
                            st.markdown(f"""
                            <div style="padding: 20px; border-radius: 10px;
                                        background-color: #f8f9fa; margin-bottom: 20px;">
                                {recommendation_data['recommendations']}
                            </div>
                            """, unsafe_allow_html=True)
                        elif recommendation_data and 'feedback' in recommendation_data:
                            st.markdown(f"""
                            <div style="padding: 20px; border-radius: 10px;
                                        background-color: #f8f9fa; margin-bottom: 20px;">
                                {recommendation_data['feedback']}
                            </div>
                            """, unsafe_allow_html=True)
                        else:
                            st.info(t.get('no_recommendations', 'No hay recomendaciones disponibles'))

            except Exception as e:
                # One bad record must not break the rest of the listing.
                logger.error(f"Error procesando par de análisis: {str(e)}")
                continue

    except Exception as e:
        logger.error(f"Error mostrando actividades de situación actual: {str(e)}")
        st.error(t.get('error_current_situation', 'Error al mostrar análisis de situación actual'))
|
|
|
|
|
|
|
|
def display_morphosyntax_activities(username: str, t: dict):
    """
    Display morphosyntactic analyses, including the base analysis and its iterations,
    from the collections student_morphosyntax_analysis_base and
    student_morphosyntax_iterations.

    Args:
        username: Student identifier used to query the analyses.
        t: Translation dictionary; missing keys fall back to Spanish defaults.
    """
    try:
        # Imported lazily here, presumably to avoid a circular import — confirm.
        # NOTE(review): this deliberately shadows the module-level
        # get_student_morphosyntax_analysis imported from morphosintax_mongo_db,
        # switching this function to the newer iterative collections.
        from ..database.morphosyntax_iterative_mongo_db import get_student_morphosyntax_analysis

        logger.info(f"Recuperando análisis morfosintáctico para {username}")

        base_analyses = get_student_morphosyntax_analysis(username)

        if not base_analyses:
            logger.info("No se encontraron análisis morfosintácticos")
            st.info(t.get('no_morpho_analyses', 'No hay análisis morfosintácticos registrados'))
            return

        logger.info(f"Procesando {len(base_analyses)} análisis morfosintácticos base")

        for base_analysis in base_analyses:
            try:
                # 'Z' suffix normalized so datetime.fromisoformat accepts it.
                timestamp = datetime.fromisoformat(base_analysis['timestamp'].replace('Z', '+00:00'))
                formatted_date = timestamp.strftime("%d/%m/%Y %H:%M:%S")

                # Flag analyses that have improved versions attached.
                expander_title = f"{t.get('analysis_date', 'Fecha')}: {formatted_date}"
                if base_analysis.get('has_iterations', False):
                    expander_title += f" ({t.get('has_iterations', 'Con iteraciones')})"

                with st.expander(expander_title, expanded=False):
                    # Original text, read-only; Mongo _id keeps widget keys unique.
                    st.subheader(t.get('base_text', 'Texto original'))
                    st.text_area(
                        "Base Text Content",
                        value=base_analysis.get('text', ''),
                        height=100,
                        disabled=True,
                        label_visibility="collapsed",
                        key=f"base_text_{str(base_analysis['_id'])}"
                    )

                    # Arc diagrams are stored as pre-rendered HTML (displaCy-style).
                    if 'arc_diagrams' in base_analysis and base_analysis['arc_diagrams']:
                        st.subheader(t.get('syntactic_diagrams', 'Diagrama sintáctico (original)'))

                        for diagram in base_analysis['arc_diagrams']:
                            st.write(diagram, unsafe_allow_html=True)

                    # Improved versions, one tab per iteration.
                    if 'iterations' in base_analysis and base_analysis['iterations']:
                        st.markdown("---")
                        st.subheader(t.get('iterations', 'Versiones mejoradas'))

                        iteration_tabs = st.tabs([
                            f"{t.get('iteration', 'Versión')} {i+1}"
                            for i in range(len(base_analysis['iterations']))
                        ])

                        for i, (tab, iteration) in enumerate(zip(iteration_tabs, base_analysis['iterations'])):
                            with tab:
                                iter_timestamp = datetime.fromisoformat(
                                    iteration['timestamp'].replace('Z', '+00:00'))
                                iter_formatted_date = iter_timestamp.strftime("%d/%m/%Y %H:%M:%S")
                                st.caption(f"{t.get('iteration_date', 'Fecha de versión')}: {iter_formatted_date}")

                                st.text_area(
                                    f"Iteration Text {i+1}",
                                    value=iteration.get('iteration_text', ''),
                                    height=100,
                                    disabled=True,
                                    label_visibility="collapsed",
                                    key=f"iter_text_{str(iteration['_id'])}"
                                )

                                if 'arc_diagrams' in iteration and iteration['arc_diagrams']:
                                    st.subheader(t.get('iteration_diagram', 'Diagrama sintáctico (mejorado)'))
                                    for diagram in iteration['arc_diagrams']:
                                        st.write(diagram, unsafe_allow_html=True)

            except Exception as e:
                # One bad record must not break the rest of the listing.
                logger.error(f"Error procesando análisis morfosintáctico: {str(e)}")
                st.error(t.get('error_processing_analysis', 'Error procesando este análisis'))
                continue

    except Exception as e:
        logger.error(f"Error mostrando análisis morfosintáctico: {str(e)}")
        st.error(t.get('error_morpho', 'Error al mostrar análisis morfosintáctico'))
|
|
|
|
|
|
|
|
|
|
def display_semantic_activities(username: str, t: dict):
    """Render the student's stored semantic analyses as dated expanders.

    Each record is expected to carry a 'timestamp' and a 'concept_graph'
    (raw image bytes or a base64 string); incomplete records are skipped.
    """
    try:
        logger.info(f"Recuperando análisis semántico para {username}")
        records = get_student_semantic_analysis(username)

        if not records:
            logger.info("No se encontraron análisis semánticos")
            st.info(t.get('no_semantic_analyses', 'No hay análisis semánticos registrados'))
            return

        logger.info(f"Procesando {len(records)} análisis semánticos")

        for record in records:
            try:
                # Skip records missing either required field.
                if 'timestamp' not in record or 'concept_graph' not in record:
                    logger.warning(f"Análisis incompleto: {record.keys()}")
                    continue

                parsed_ts = datetime.fromisoformat(record['timestamp'].replace('Z', '+00:00'))
                date_label = parsed_ts.strftime("%d/%m/%Y %H:%M:%S")

                with st.expander(f"{t.get('analysis_date', 'Fecha')}: {date_label}", expanded=False):
                    graph_payload = record.get('concept_graph')
                    if not graph_payload:
                        st.info(t.get('no_graph', 'No hay visualización disponible'))
                        continue

                    try:
                        logger.debug("Decodificando gráfico de conceptos")
                        # Payload is either raw bytes or a base64-encoded string.
                        image_bytes = (graph_payload
                                       if isinstance(graph_payload, bytes)
                                       else base64.b64decode(graph_payload))
                        logger.debug(f"Longitud de bytes de imagen: {len(image_bytes)}")

                        st.image(
                            image_bytes,
                            caption=t.get('concept_network', 'Red de Conceptos'),
                            use_container_width=True
                        )
                        logger.debug("Gráfico mostrado exitosamente")
                    except Exception as img_error:
                        logger.error(f"Error procesando gráfico: {str(img_error)}")
                        st.error(t.get('error_loading_graph', 'Error al cargar el gráfico'))

            except Exception as e:
                # One bad record must not break the rest of the listing.
                logger.error(f"Error procesando análisis individual: {str(e)}")
                continue

    except Exception as e:
        logger.error(f"Error mostrando análisis semántico: {str(e)}")
        st.error(t.get('error_semantic', 'Error al mostrar análisis semántico'))
|
|
|
|
|
|
|
|
|
|
def _render_discourse_document(analysis: dict, suffix: str, title: str, t: dict):
    """Render one document column of a discourse analysis: title, key-concept
    chips, the concept graph image, and the graph-reading legend.

    Args:
        analysis: One discourse-analysis record.
        suffix: '1' or '2' — selects the 'key_concepts{suffix}' / 'graph{suffix}' fields.
        title: Localized column title (e.g. 'Documento 1').
        t: Translation dictionary; missing keys fall back to Spanish defaults.
    """
    concepts_key = f'key_concepts{suffix}'
    graph_key = f'graph{suffix}'

    st.subheader(title)
    st.markdown(t.get('key_concepts', 'Conceptos Clave'))

    if analysis.get(concepts_key):
        # Horizontal scrollable strip of (concept, frequency) chips.
        concepts_html = f"""
        <div style="display: flex; flex-wrap: nowrap; gap: 8px; padding: 12px;
                    background-color: #f8f9fa; border-radius: 8px; overflow-x: auto;
                    margin-bottom: 15px; white-space: nowrap;">
            {''.join([
                f'<div style="background-color: white; border-radius: 4px; padding: 6px 10px; display: inline-flex; align-items: center; gap: 4px; box-shadow: 0 1px 2px rgba(0,0,0,0.1); flex-shrink: 0;">'
                f'<span style="font-weight: 500; color: #1f2937; font-size: 0.85em;">{concept}</span>'
                f'<span style="color: #6b7280; font-size: 0.75em;">({freq:.2f})</span></div>'
                for concept, freq in analysis[concepts_key]
            ])}
        </div>
        """
        st.markdown(concepts_html, unsafe_allow_html=True)
    else:
        st.info(t.get('no_concepts', 'No hay conceptos disponibles'))

    if graph_key in analysis:
        try:
            # Graphs are stored as raw image bytes; anything else is logged and skipped.
            if isinstance(analysis[graph_key], bytes):
                st.image(
                    analysis[graph_key],
                    use_container_width=True
                )
            else:
                logger.warning(f"{graph_key} no es bytes: {type(analysis[graph_key])}")
                st.warning(t.get('graph_not_available', 'Gráfico no disponible'))
        except Exception as e:
            logger.error(f"Error mostrando {graph_key}: {str(e)}")
            st.error(t.get('error_loading_graph', 'Error al cargar el gráfico'))
    else:
        st.info(t.get('no_visualization', 'No hay visualización disponible'))

    st.markdown("**📊 Interpretación del grafo:**")
    st.markdown("""
    - 🔀 Las flechas indican la dirección de la relación entre conceptos
    - 🎨 Los colores más intensos indican conceptos más centrales en el texto
    - ⭕ El tamaño de los nodos representa la frecuencia del concepto
    - ↔️ El grosor de las líneas indica la fuerza de la conexión
    """)


def display_discourse_activities(username: str, t: dict):
    """Display discourse analyses (shown as 'Análisis comparado de textos' in the UI).

    Each record is rendered as a dated expander with two side-by-side columns,
    one per compared document. The per-document rendering (concept chips, graph,
    legend) is shared via _render_discourse_document to avoid the previous
    copy-pasted duplication.

    Args:
        username: Student identifier used to query the analyses.
        t: Translation dictionary; missing keys fall back to Spanish defaults.
    """
    try:
        logger.info(f"Recuperando análisis del discurso para {username}")
        analyses = get_student_discourse_analysis(username)

        if not analyses:
            logger.info("No se encontraron análisis del discurso")
            st.info(t.get('no_discourse_analyses', 'No hay análisis comparados de textos registrados'))
            return

        logger.info(f"Procesando {len(analyses)} análisis del discurso")
        for analysis in analyses:
            try:
                # A timestamp is mandatory; skip malformed records.
                if 'timestamp' not in analysis:
                    logger.warning(f"Análisis incompleto: {analysis.keys()}")
                    continue

                # 'Z' suffix normalized so datetime.fromisoformat accepts it.
                timestamp = datetime.fromisoformat(analysis['timestamp'].replace('Z', '+00:00'))
                formatted_date = timestamp.strftime("%d/%m/%Y %H:%M:%S")

                with st.expander(f"{t.get('analysis_date', 'Fecha')}: {formatted_date}", expanded=False):
                    col1, col2 = st.columns(2)

                    with col1:
                        _render_discourse_document(
                            analysis, '1', t.get('doc1_title', 'Documento 1'), t)

                    with col2:
                        _render_discourse_document(
                            analysis, '2', t.get('doc2_title', 'Documento 2'), t)

            except Exception as e:
                # One bad record must not break the rest of the listing.
                logger.error(f"Error procesando análisis individual: {str(e)}")
                continue

    except Exception as e:
        logger.error(f"Error mostrando análisis del discurso: {str(e)}")
        st.error(t.get('error_discourse', 'Error al mostrar análisis comparado de textos'))
|
|
|
|
|
|
|
|
|
|
|
|
def _render_concept_summary(concepts, field_name: str, t: dict, error_key: str, error_default: str):
    """Render a short inline summary (first 10 items) of one key-concepts payload.

    Pairs may arrive as JSON lists or in-memory tuples — both are accepted
    (the previous isinstance(..., list) check silently missed tuple pairs).

    Args:
        concepts: The key-concepts payload (list of pairs, list, or other).
        field_name: Record field name, used only in the error log message.
        t: Translation dictionary.
        error_key: Translation key for the user-facing error message.
        error_default: Spanish fallback for the error message.
    """
    try:
        if isinstance(concepts, list) and len(concepts) > 0:
            if isinstance(concepts[0], (list, tuple)) and len(concepts[0]) == 2:
                # (concept, weight) pairs → italicized "concept (weight)" list.
                concepts_text = ", ".join([f"{c[0]} ({c[1]})" for c in concepts[:10]])
                st.markdown(f"*{concepts_text}*")
            else:
                # Plain list of concepts.
                st.markdown(", ".join(str(c) for c in concepts[:10]))
        else:
            # Unexpected shape: show the raw representation.
            st.write(str(concepts))
    except Exception as e:
        logger.error(f"Error mostrando {field_name}: {str(e)}")
        st.error(t.get(error_key, error_default))


def display_discourse_comparison(analysis: dict, t: dict):
    """
    Display the key-concept comparison of a discourse analysis.
    Simplified horizontal format: one summary line per text.

    Args:
        analysis: Discourse-analysis record with 'key_concepts1'/'key_concepts2'.
        t: Translation dictionary; missing keys fall back to Spanish defaults.
    """
    st.subheader(t.get('comparison_results', 'Resultados de la comparación'))

    # Without concepts for text 1 there is nothing to compare.
    if not analysis.get('key_concepts1'):
        st.info(t.get('no_concepts', 'No hay conceptos disponibles para comparar'))
        return

    st.markdown(f"**{t.get('concepts_text_1', 'Conceptos Texto 1')}:**")
    _render_concept_summary(
        analysis['key_concepts1'], 'key_concepts1', t,
        'error_concepts1', 'Error mostrando conceptos del Texto 1')

    st.markdown(f"**{t.get('concepts_text_2', 'Conceptos Texto 2')}:**")
    if analysis.get('key_concepts2'):
        _render_concept_summary(
            analysis['key_concepts2'], 'key_concepts2', t,
            'error_concepts2', 'Error mostrando conceptos del Texto 2')
    else:
        st.info(t.get('no_concepts2', 'No hay conceptos disponibles para el Texto 2'))
|
|
|
|
|
|
|
|
def display_chat_activities(username: str, t: dict):
    """
    Render the student's saved tutor-chat conversations, one expander each.
    """
    try:
        conversations = get_chat_history(
            username=username,
            analysis_type='sidebar',
            limit=50
        )

        if not conversations:
            st.info(t.get('no_chat_history', 'No hay conversaciones registradas'))
            return

        # Stored oldest-first; reverse so the newest conversation appears on top.
        for conversation in reversed(conversations):
            try:
                when = datetime.fromisoformat(conversation['timestamp'].replace('Z', '+00:00'))
                date_label = when.strftime("%d/%m/%Y %H:%M:%S")

                with st.expander(
                    f"{t.get('chat_date', 'Fecha de conversación')}: {date_label}",
                    expanded=False
                ):
                    messages = conversation.get('messages')
                    if not messages:
                        st.warning(t.get('invalid_chat_format', 'Formato de chat no válido'))
                        continue

                    for msg in messages:
                        # Each message renders in a role-styled chat bubble.
                        with st.chat_message(msg.get('role', 'unknown')):
                            st.markdown(msg.get('content', ''))

                        st.divider()

            except Exception as e:
                # One broken conversation must not hide the others.
                logger.error(f"Error mostrando conversación: {str(e)}")
                continue

    except Exception as e:
        logger.error(f"Error mostrando historial del chat: {str(e)}")
        st.error(t.get('error_chat', 'Error al mostrar historial del chat'))
|
|
|
|
|
|
|