de-Rodrigo committed on
Commit 640dcd1 · 1 Parent(s): aaf8876

Multiple Principal Components Combinations

Files changed (1)
  1. app.py +181 -151
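The commit raises N_COMPONENTS from 2 to 3 and adds one scatter plot per pair of principal components. A minimal sketch of the pairing logic the new code relies on (itertools.combinations, as imported in app.py):

    import itertools

    N_COMPONENTS = 3  # raised from 2 in this commit

    # Every unordered pair of principal-component indices gets its own scatter plot.
    pairs = list(itertools.combinations(range(N_COMPONENTS), 2))
    print(pairs)  # [(0, 1), (0, 2), (1, 2)] -> PC1 vs PC2, PC1 vs PC3, PC2 vs PC3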
app.py CHANGED
@@ -13,9 +13,10 @@ import ot
13
  from sklearn.linear_model import LinearRegression
14
  from scipy.stats import binned_statistic_2d
15
  import json
 
16
 
17
 
18
- N_COMPONENTS = 2
19
  TSNE_NEIGHBOURS = 150
20
  # WEIGHT_FACTOR = 0.05
21
 
@@ -508,6 +509,12 @@ def compute_global_regression(df_combined, embedding_cols, tsne_params, df_f1, r
508
 
509
  return results
510
 
 
 
 
 
 
 
511
  def optimize_tsne_params(df_combined, embedding_cols, df_f1, distance_metric):
512
  perplexity_range = np.linspace(30, 50, 10)
513
  learning_rate_range = np.linspace(200, 1000, 20)
@@ -730,21 +737,22 @@ def run_model(model_name):
730
  key=f"download_button_excel_{model_name}"
731
  )
732
 
733
- # Nuevo bloque: PCA solo para df_real
734
  if reduction_method == "PCA":
735
  st.markdown("## PCA - Solo Muestras Reales")
736
- # Extraemos únicamente las muestras reales
 
737
  df_real_only = embeddings["real"].copy()
738
  pca_real = PCA(n_components=N_COMPONENTS)
739
  reduced_real = pca_real.fit_transform(df_real_only[embedding_cols].values)
740
- df_real_only['embedding'] = list(reduced_real)
741
- if reduced_real.shape[1] == 2:
742
- df_real_only['x'] = reduced_real[:, 0]
743
- df_real_only['y'] = reduced_real[:, 1]
 
744
  explained_variance_real = pca_real.explained_variance_ratio_
745
  unique_labels_real = sorted(df_real_only['label'].unique().tolist())
746
 
747
- # Definir mapeo de colores usando la paleta Reds9
748
  num_labels = len(unique_labels_real)
749
  if num_labels <= 9:
750
  red_palette = Reds9[:num_labels]
@@ -752,6 +760,7 @@ def run_model(model_name):
752
  red_palette = (Reds9 * ((num_labels // 9) + 1))[:num_labels]
753
  real_color_mapping = {label: red_palette[i] for i, label in enumerate(unique_labels_real)}
754
 
 
755
  st.subheader("PCA - Real: Explained Variance Ratio")
756
  component_names_real = [f"PC{i+1}" for i in range(len(explained_variance_real))]
757
  variance_df_real = pd.DataFrame({
@@ -759,8 +768,8 @@ def run_model(model_name):
759
  "Explained Variance": explained_variance_real
760
  })
761
  st.table(variance_df_real)
762
-
763
- # Mostrar los plots de loadings (Component Loadings)
764
  st.subheader("PCA - Real: Component Loadings")
765
  st.markdown("### Pesos de las Componentes Principales (Loadings) - Conjunto Combinado")
766
  for i, comp in enumerate(pca_real.components_):
@@ -777,62 +786,52 @@ def run_model(model_name):
777
  tools="pan,wheel_zoom,reset,save,hover",
778
  active_scroll="wheel_zoom"
779
  )
780
- # Fondo blanco y solo grid horizontal
781
  p.background_fill_color = "white"
782
  p.xgrid.grid_line_color = None
783
  p.ygrid.grid_line_color = "gray"
784
  p.vbar(x='dimensions', top='weight', width=0.8, source=source,
785
  fill_color="#2b83ba", line_color="#2b83ba")
786
- # No se muestran etiquetas en el eje horizontal
787
  p.xaxis.axis_label = "Dimensiones Originales"
788
  p.xaxis.major_label_text_font_size = '0pt'
789
- # Configurar el HoverTool
790
  hover = p.select_one(HoverTool)
791
  hover.tooltips = [("Dimensión", "@dimensions"), ("Peso", "@weight")]
792
  st.bokeh_chart(p)
793
 
794
- # Segundo PCA: Proyección de todos los subconjuntos usando los loadings calculados con df_real_only
795
- st.subheader("PCA - Todos los subconjuntos proyectados (usando loadings de df_real)")
796
-
797
- # Crear un diccionario para almacenar las proyecciones usando el PCA calculado con las muestras reales (pca_real)
798
  df_all = {}
799
-
800
- # Proyectar las muestras reales
801
  df_real_proj = embeddings["real"].copy()
802
  proj_real = pca_real.transform(df_real_proj[embedding_cols].values)
803
- df_real_proj['pc1'] = proj_real[:, 0]
804
- df_real_proj['pc2'] = proj_real[:, 1]
805
  df_all["real"] = df_real_proj
806
 
807
- # Proyectar el subconjunto synthetic, si existe
808
  if "synthetic" in embeddings:
809
  df_synth_proj = embeddings["synthetic"].copy()
810
  proj_synth = pca_real.transform(df_synth_proj[embedding_cols].values)
811
- df_synth_proj['pc1'] = proj_synth[:, 0]
812
- df_synth_proj['pc2'] = proj_synth[:, 1]
813
  df_all["synthetic"] = df_synth_proj
814
 
815
- # Proyectar el subconjunto pretrained, si existe
816
  if "pretrained" in embeddings:
817
  df_pretr_proj = embeddings["pretrained"].copy()
818
  proj_pretr = pca_real.transform(df_pretr_proj[embedding_cols].values)
819
- df_pretr_proj['pc1'] = proj_pretr[:, 0]
820
- df_pretr_proj['pc2'] = proj_pretr[:, 1]
821
  df_all["pretrained"] = df_pretr_proj
822
 
823
- # Para utilizar las mismas funciones de plot (create_figure, add_dataset_to_fig, add_synthetic_dataset_to_fig),
824
- # renombramos las columnas 'pc1' y 'pc2' a 'x' y 'y' en cada dataframe
825
  for key in df_all:
826
- df_all[key]["x"] = df_all[key]["pc1"]
827
- df_all[key]["y"] = df_all[key]["pc2"]
828
 
829
- # Construir los subconjuntos únicos con la granularidad deseada:
830
- # - Para "real" y "pretrained": agrupamos por label.
831
- # - Para "synthetic": agrupamos por la columna "source" (cada source tendrá sus labels).
832
  unique_subsets = {}
833
- # Real:
834
  unique_subsets["real"] = sorted(df_all["real"]['label'].unique().tolist())
835
- # Synthetic:
836
  if "synthetic" in df_all:
837
  unique_synth = {}
838
  for source in df_all["synthetic"]["source"].unique():
@@ -840,16 +839,15 @@ def run_model(model_name):
840
  unique_subsets["synthetic"] = unique_synth
841
  else:
842
  unique_subsets["synthetic"] = {}
843
- # Pretrained:
844
  if "pretrained" in df_all:
845
  unique_subsets["pretrained"] = sorted(df_all["pretrained"]['label'].unique().tolist())
846
  else:
847
  unique_subsets["pretrained"] = []
848
-
849
- # Obtener los mapeos de colores utilizando la función ya definida
850
  color_maps = get_color_maps(unique_subsets)
851
-
852
- # Definir un mapeo de marcadores para los subconjuntos synthetic (granularidad por source)
853
  marker_mapping = {
854
  "es-digital-paragraph-degradation-seq": "x",
855
  "es-digital-line-degradation-seq": "cross",
@@ -858,9 +856,9 @@ def run_model(model_name):
858
  "es-digital-zoom-degradation-seq": "asterisk",
859
  "es-render-seq": "inverted_triangle"
860
  }
861
-
862
- # Ahora, crear la figura utilizando las funciones existentes para mantener la granularidad:
863
- # Se plotean las muestras reales, synthetic (por source) y pretrained con sus respectivos marcadores y colores.
864
  fig_all = figure(
865
  title="PCA - Todos los subconjuntos proyectados",
866
  plot_width=600,
@@ -870,11 +868,10 @@ def run_model(model_name):
870
  background_fill_color="white",
871
  tooltips=TOOLTIPS
872
  )
873
- # Solo grid horizontal
874
  fig_all.xgrid.grid_line_color = None
875
  fig_all.ygrid.grid_line_color = "gray"
876
-
877
- # Ploteamos los puntos de las muestras reales (agrupados por label)
878
  for label in unique_subsets["real"]:
879
  subset = df_all["real"][df_all["real"]['label'] == label]
880
  source = ColumnDataSource(data={
@@ -883,28 +880,28 @@ def run_model(model_name):
883
  'label': subset['label'],
884
  'img': subset['img']
885
  })
886
- # Usamos 'circle' para las reales
887
  fig_all.circle('x', 'y', size=10,
888
  fill_color=color_maps["real"][label],
889
  line_color=color_maps["real"][label],
890
  legend_label=f"Real: {label}",
891
  source=source)
892
-
893
  show_real_only = st.checkbox("Show only real samples", value=True, key=f"show_real_only_{model_name}")
894
-
895
  if not show_real_only:
896
-
897
- # Ploteamos los puntos de synthetic, diferenciando cada source con su marcador
898
  if unique_subsets["synthetic"]:
899
  for source_name, labels in unique_subsets["synthetic"].items():
900
  df_source = df_all["synthetic"][df_all["synthetic"]["source"] == source_name]
901
  marker = marker_mapping.get(source_name, "square")
902
- # Para cada label en ese source, usamos la función auxiliar
903
- renderers = add_synthetic_dataset_to_fig(fig_all, df_source, labels,
904
- marker=marker,
905
- color_mapping=color_maps["synthetic"][source_name],
906
- group_label=source_name)
907
- # Ploteamos los puntos de pretrained (agrupados por label)
 
 
 
908
  if unique_subsets["pretrained"]:
909
  for label in unique_subsets["pretrained"]:
910
  subset = df_all["pretrained"][df_all["pretrained"]['label'] == label]
@@ -914,76 +911,151 @@ def run_model(model_name):
914
  'label': subset['label'],
915
  'img': subset['img']
916
  })
917
- # Usamos 'triangle' para pretrained (por ejemplo)
918
  fig_all.triangle('x', 'y', size=10,
919
  fill_color=color_maps["pretrained"][label],
920
  line_color=color_maps["pretrained"][label],
921
  legend_label=f"Pretrained: {label}",
922
  source=source)
923
-
924
- # Calcular el centroide y el radio (usando solo las muestras reales)
 
 
 
 
 
 
925
  center_x = df_all["real"]['x'].mean()
926
  center_y = df_all["real"]['y'].mean()
927
  distances = np.sqrt((df_all["real"]['x'] - center_x)**2 + (df_all["real"]['y'] - center_y)**2)
928
  radius = distances.max()
929
-
930
- # Dibujar el centroide y la circunferencia en el plot
931
  centroid_glyph = fig_all.circle(
932
  x=center_x, y=center_y, size=15,
933
  fill_color="white", line_color="black",
934
  legend_label="Centroide",
935
- name="centroid" # Asigna un nombre único
936
  )
937
-
938
  circumference_glyph = fig_all.circle(
939
  x=center_x, y=center_y, radius=radius,
940
  fill_color=None, line_color="black",
941
  line_dash="dashed",
942
  legend_label="Circunferencia",
943
- name="circumference" # Asigna un nombre único
944
  )
945
-
 
946
  fig_all.xaxis.axis_label = "PC1"
947
  fig_all.yaxis.axis_label = "PC2"
948
  hover_all = fig_all.select_one(HoverTool)
949
  hover_all.renderers = [r for r in fig_all.renderers if r.name not in ["centroid", "circumference"]]
950
-
951
- # hover_all.tooltips = [("Label", "@label"), ("PC1", "@x"), ("PC2", "@y")]
952
-
953
- # Agregar checkbox para mostrar u ocultar la leyenda, igual que en el primer PCA
954
- show_legend_second = st.checkbox("Show Legend", value=False, key=f"legend_second_{model_name}")
955
- fig_all.legend.visible = show_legend_second
956
- fig_all.legend.location = "top_right"
957
- fig_all.match_aspect = True
958
-
959
- st.bokeh_chart(fig_all)
960
-
961
- # Mostrar el valor del radio debajo del gráfico
962
  st.write(f"El radio de la circunferencia (calculado a partir de las muestras reales) es: {radius:.4f}")
963
 
964
-
965
- # --- Cálculo de distancias y scatter plot de Distance vs F1 para el nuevo PCA ---
966
-
967
- # Se calcula la distancia de cada subset synthetic a cada subset real usando los datos proyectados (df_all)
968
- # Se utiliza la función compute_cluster_distances_synthetic_individual ya definida
969
  real_labels_new = sorted(df_all["real"]['label'].unique().tolist())
970
  df_distances_new = compute_cluster_distances_synthetic_individual(
971
  df_all["synthetic"],
972
  df_all["real"],
973
  real_labels_new,
974
- metric="wasserstein", # Puedes cambiar la métrica según lo requieras
975
  bins=20
976
  )
977
-
978
- # Extraer las distancias globales (por cada source) del dataframe obtenido,
979
- # buscando filas cuyo índice comience con "Global" (formato "Global (source)")
980
  global_distances_new = {}
981
  for idx in df_distances_new.index:
982
  if idx.startswith("Global"):
983
  source_name = idx.split("(")[1].rstrip(")")
984
  global_distances_new[source_name] = df_distances_new.loc[idx].values
985
-
986
- # Ahora, relacionar estas distancias con los valores de F1 (ya cargados en df_f1)
987
  all_x_new = []
988
  all_y_new = []
989
  for source in df_f1.columns:
@@ -994,14 +1066,12 @@ def run_model(model_name):
994
  all_y_new.extend(y_vals)
995
  all_x_arr_new = np.array(all_x_new).reshape(-1, 1)
996
  all_y_arr_new = np.array(all_y_new)
997
-
998
- # Realizar la regresión lineal global sobre estos datos
999
  model_global_new = LinearRegression().fit(all_x_arr_new, all_y_arr_new)
1000
  r2_new = model_global_new.score(all_x_arr_new, all_y_arr_new)
1001
  slope_new = model_global_new.coef_[0]
1002
  intercept_new = model_global_new.intercept_
1003
-
1004
- # Crear el scatter plot
1005
  scatter_fig_new = figure(
1006
  width=600,
1007
  height=600,
@@ -1011,12 +1081,10 @@ def run_model(model_name):
1011
  background_fill_color="white",
1012
  y_range=(0, 1)
1013
  )
1014
- # Configurar únicamente grid horizontal
1015
  scatter_fig_new.xgrid.grid_line_color = None
1016
  scatter_fig_new.ygrid.grid_line_color = "gray"
1017
  scatter_fig_new.match_aspect = True
1018
-
1019
- # Mantenemos el mismo código de colores que en el otro scatter plot
1020
  source_colors = {
1021
  "es-digital-paragraph-degradation-seq": "blue",
1022
  "es-digital-line-degradation-seq": "green",
@@ -1026,8 +1094,7 @@ def run_model(model_name):
1026
  "es-digital-rotation-zoom-degradation-seq": "brown",
1027
  "es-render-seq": "cyan"
1028
  }
1029
-
1030
- # Dibujar cada conjunto: para cada source (por ejemplo, es-render-seq, etc.)
1031
  for source in df_f1.columns:
1032
  if source in global_distances_new:
1033
  x_vals = global_distances_new[source]
@@ -1040,81 +1107,59 @@ def run_model(model_name):
1040
  line_color=source_colors.get(source, "gray"),
1041
  legend_label=source
1042
  )
1043
-
1044
  scatter_fig_new.xaxis.axis_label = "Distance (Global, por Colegio) - Nueva PCA"
1045
  scatter_fig_new.yaxis.axis_label = "F1 Score"
1046
  scatter_fig_new.legend.location = "top_right"
1047
-
1048
  hover_tool_new = scatter_fig_new.select_one(HoverTool)
1049
  hover_tool_new.tooltips = [("Distance", "@x"), ("F1", "@y"), ("Subset", "@Fuente")]
1050
-
1051
- # Dibujar la línea de regresión global
1052
  x_line_new = np.linspace(all_x_arr_new.min(), all_x_arr_new.max(), 100)
1053
  y_line_new = model_global_new.predict(x_line_new.reshape(-1,1))
1054
  scatter_fig_new.line(x_line_new, y_line_new, line_width=2, line_color="black", legend_label="Global Regression")
1055
-
1056
  st.bokeh_chart(scatter_fig_new)
1057
-
1058
  st.write(f"Regresión global (Nueva PCA): R² = {r2_new:.4f}, Slope = {slope_new:.4f}, Intercept = {intercept_new:.4f}")
1059
-
1060
-
1061
-
1062
-
1063
-
1064
-
1065
-
1066
-
1067
-
1068
- # --- INICIO DEL BLOQUE: Heatmap de características ---
1069
  st.markdown("## Heatmap de Características")
1070
-
1071
  try:
1072
  df_heat = pd.read_csv("data/heatmaps.csv")
1073
- # Si fuera necesario, se pueden limpiar los nombres de las columnas:
1074
- # df_heat.columns = [col.strip("'\"") for col in df_heat.columns]
1075
  except Exception as e:
1076
  st.error(f"Error al cargar heatmaps.csv: {e}")
1077
  df_heat = None
1078
-
1079
  if df_heat is not None:
1080
- # Verificamos que la columna 'img' esté presente en df_all["real"]
1081
  if 'img' not in df_all["real"].columns:
1082
  st.error("La columna 'img' no se encuentra en las muestras reales para hacer el merge con heatmaps.csv.")
1083
  else:
1084
- # Crear la columna 'name' extrayendo el nombre final de la URL y removiendo ".png"
1085
  df_all["real"]["name"] = df_all["real"]["img"].apply(
1086
  lambda x: x.split("/")[-1].replace(".png", "") if isinstance(x, str) else x
1087
  )
1088
-
1089
- # Hacemos merge de las posiciones reales con el CSV de heatmaps usando la columna 'name'
1090
  df_heatmap = pd.merge(df_all["real"], df_heat, on="name", how="inner")
1091
 
1092
- # Extraemos las características disponibles (excluyendo 'name')
1093
  feature_options = [col for col in df_heat.columns if col != "name"]
1094
  selected_feature = st.selectbox("Select heatmap feature:",
1095
  options=feature_options, key=f"heatmap_{model_name}")
1096
-
1097
  select_extra_dataset_hm = st.selectbox("Select a dataset:",
1098
  options=["-", "es-digital-line-degradation-seq", "es-digital-seq", "es-digital-rotation-degradation-seq", "es-digital-zoom-degradation-seq", "es-render-seq"], key=f"heatmap_extra_dataset_{model_name}")
1099
 
1100
- # Determinar el rango de las posiciones (x, y) de las muestras reales
1101
  x_min, x_max = df_heatmap['x'].min(), df_heatmap['x'].max()
1102
  y_min, y_max = df_heatmap['y'].min(), df_heatmap['y'].max()
1103
 
1104
- # Definir resolución de la rejilla (por ejemplo, 50x50)
1105
  grid_size = 50
1106
  x_bins = np.linspace(x_min, x_max, grid_size + 1)
1107
  y_bins = np.linspace(y_min, y_max, grid_size + 1)
1108
 
1109
- # Si la variable seleccionada no es numérica, la convertimos a códigos numéricos
1110
- # y guardamos la correspondencia para la leyenda.
1111
  cat_mapping = None
1112
  if df_heatmap[selected_feature].dtype == bool or not pd.api.types.is_numeric_dtype(df_heatmap[selected_feature]):
1113
  cat = df_heatmap[selected_feature].astype('category')
1114
  cat_mapping = list(cat.cat.categories)
1115
  df_heatmap[selected_feature] = cat.cat.codes
1116
 
1117
- # Intentamos calcular el heatmap; si falla, aplicamos la conversión a categoría
1118
  try:
1119
  heat_stat, x_edges, y_edges, binnumber = binned_statistic_2d(
1120
  df_heatmap['x'], df_heatmap['y'], df_heatmap[selected_feature],
@@ -1129,32 +1174,23 @@ def run_model(model_name):
1129
  statistic='mean', bins=[x_bins, y_bins]
1130
  )
1131
 
1132
- # La función image de Bokeh espera una lista de arrays; se transpone para alinear los ejes.
1133
  heatmap_data = heat_stat.T
1134
 
1135
- # Crear el mapa de color
1136
- color_mapper = LinearColorMapper(palette="Viridis256", low=np.nanmin(heatmap_data), high=np.nanmax(heatmap_data), nan_color = 'rgba(0, 0, 0, 0)')
1137
 
1138
- # Crear la figura para el heatmap con fondo blanco
1139
  heatmap_fig = figure(title=f"Heatmap de '{selected_feature}'",
1140
  x_range=(x_min, x_max), y_range=(y_min, y_max),
1141
  width=600, height=600,
1142
  tools="pan,wheel_zoom,reset,save", active_scroll="wheel_zoom", tooltips=TOOLTIPS)
1143
-
1144
- # Dibujar el heatmap usando la imagen
1145
  heatmap_fig.image(image=[heatmap_data], x=x_min, y=y_min,
1146
  dw=x_max - x_min, dh=y_max - y_min,
1147
  color_mapper=color_mapper)
1148
 
1149
- # Crear la barra de colores
1150
  color_bar = ColorBar(color_mapper=color_mapper, location=(0, 0))
1151
- # Si se usó conversión a categoría, formateamos la barra para mostrar las etiquetas originales
1152
  if cat_mapping is not None:
1153
-
1154
- # Creamos ticks fijos solo para cada categoría
1155
  ticks = list(range(len(cat_mapping)))
1156
  color_bar.ticker = FixedTicker(ticks=ticks)
1157
-
1158
  categories_json = json.dumps(cat_mapping)
1159
  color_bar.formatter = FuncTickFormatter(code=f"""
1160
  var categories = {categories_json};
@@ -1166,22 +1202,17 @@ def run_model(model_name):
1166
  }}
1167
  """)
1168
  heatmap_fig.add_layout(color_bar, 'right')
1169
-
1170
-
1171
-
1172
- # Agregar renderer de puntos invisibles para tooltips
1173
  source_points = ColumnDataSource(data={
1174
  'x': df_heatmap['x'],
1175
  'y': df_heatmap['y'],
1176
  'img': df_heatmap['img'],
1177
- 'label': df_heatmap['name'] # Asegúrate de que esta columna exista; si no, usa otra
1178
  })
1179
- # Dibujar círculos con transparencia total (no se verán)
1180
  invisible_renderer = heatmap_fig.circle('x', 'y', size=10, source=source_points, fill_alpha=0, line_alpha=0.5)
1181
-
1182
  if select_extra_dataset_hm != "-":
1183
  df_extra = df_all["synthetic"][df_all["synthetic"]["source"] == select_extra_dataset_hm]
1184
- # Asegurarse de que exista la columna 'name'
1185
  if 'name' not in df_extra.columns:
1186
  df_extra["name"] = df_extra["img"].apply(
1187
  lambda x: x.split("/")[-1].replace(".png", "") if isinstance(x, str) else x
@@ -1192,9 +1223,8 @@ def run_model(model_name):
1192
  'img': df_extra['img'],
1193
  'label': df_extra['name']
1194
  })
1195
- # Agregar renderer para el dataset extra
1196
  extra_renderer = heatmap_fig.circle('x', 'y', size=10, source=source_extra_points, fill_alpha=0, line_alpha=0.5, color="red")
1197
-
1198
  hover_tool_points = HoverTool(renderers=[invisible_renderer], tooltips=TOOLTIPS)
1199
  heatmap_fig.add_tools(hover_tool_points)
1200
 
 
13
  from sklearn.linear_model import LinearRegression
14
  from scipy.stats import binned_statistic_2d
15
  import json
16
+ import itertools
17
 
18
 
19
+ N_COMPONENTS = 3
20
  TSNE_NEIGHBOURS = 150
21
  # WEIGHT_FACTOR = 0.05
22
 
 
509
 
510
  return results
511
 
512
+ # def get_color(color_entry):
513
+ # if isinstance(color_entry, dict):
514
+ # # Extrae el primer valor (o ajusta según convenga)
515
+ # return list(color_entry.values())[0]
516
+ # return color_entry
517
+
518
  def optimize_tsne_params(df_combined, embedding_cols, df_f1, distance_metric):
519
  perplexity_range = np.linspace(30, 50, 10)
520
  learning_rate_range = np.linspace(200, 1000, 20)
 
737
  key=f"download_button_excel_{model_name}"
738
  )
739
 
 
740
  if reduction_method == "PCA":
741
  st.markdown("## PCA - Solo Muestras Reales")
742
+ # -------------------------------------------------------------------------
743
+ # 1. PCA sobre las muestras reales
744
  df_real_only = embeddings["real"].copy()
745
  pca_real = PCA(n_components=N_COMPONENTS)
746
  reduced_real = pca_real.fit_transform(df_real_only[embedding_cols].values)
747
+
748
+ # Agregar columnas PC1, PC2, … a df_real_only
749
+ for i in range(reduced_real.shape[1]):
750
+ df_real_only[f'PC{i+1}'] = reduced_real[:, i]
751
+
752
  explained_variance_real = pca_real.explained_variance_ratio_
753
  unique_labels_real = sorted(df_real_only['label'].unique().tolist())
754
 
755
+ # Mapeo de colores para las muestras reales usando la paleta Reds9
756
  num_labels = len(unique_labels_real)
757
  if num_labels <= 9:
758
  red_palette = Reds9[:num_labels]
 
760
  red_palette = (Reds9 * ((num_labels // 9) + 1))[:num_labels]
761
  real_color_mapping = {label: red_palette[i] for i, label in enumerate(unique_labels_real)}
762
 
763
+ # Mostrar tabla de Explained Variance Ratio
764
  st.subheader("PCA - Real: Explained Variance Ratio")
765
  component_names_real = [f"PC{i+1}" for i in range(len(explained_variance_real))]
766
  variance_df_real = pd.DataFrame({
 
768
  "Explained Variance": explained_variance_real
769
  })
770
  st.table(variance_df_real)
771
+
772
+ # Mostrar los plots de loadings para cada componente
773
  st.subheader("PCA - Real: Component Loadings")
774
  st.markdown("### Pesos de las Componentes Principales (Loadings) - Conjunto Combinado")
775
  for i, comp in enumerate(pca_real.components_):
 
786
  tools="pan,wheel_zoom,reset,save,hover",
787
  active_scroll="wheel_zoom"
788
  )
 
789
  p.background_fill_color = "white"
790
  p.xgrid.grid_line_color = None
791
  p.ygrid.grid_line_color = "gray"
792
  p.vbar(x='dimensions', top='weight', width=0.8, source=source,
793
  fill_color="#2b83ba", line_color="#2b83ba")
 
794
  p.xaxis.axis_label = "Dimensiones Originales"
795
  p.xaxis.major_label_text_font_size = '0pt'
 
796
  hover = p.select_one(HoverTool)
797
  hover.tooltips = [("Dimensión", "@dimensions"), ("Peso", "@weight")]
798
  st.bokeh_chart(p)
799
 
800
+ # -------------------------------------------------------------------------
801
+ # 2. Proyección de todos los subconjuntos usando los loadings de df_real (para PC completos)
802
+ # Se proyectan real, synthetic y pretrained (si existen) y se agregan todas las PC's.
 
803
  df_all = {}
804
+ # Real
 
805
  df_real_proj = embeddings["real"].copy()
806
  proj_real = pca_real.transform(df_real_proj[embedding_cols].values)
807
+ for i in range(proj_real.shape[1]):
808
+ df_real_proj[f'PC{i+1}'] = proj_real[:, i]
809
  df_all["real"] = df_real_proj
810
 
811
+ # Synthetic
812
  if "synthetic" in embeddings:
813
  df_synth_proj = embeddings["synthetic"].copy()
814
  proj_synth = pca_real.transform(df_synth_proj[embedding_cols].values)
815
+ for i in range(proj_synth.shape[1]):
816
+ df_synth_proj[f'PC{i+1}'] = proj_synth[:, i]
817
  df_all["synthetic"] = df_synth_proj
818
 
819
+ # Pretrained
820
  if "pretrained" in embeddings:
821
  df_pretr_proj = embeddings["pretrained"].copy()
822
  proj_pretr = pca_real.transform(df_pretr_proj[embedding_cols].values)
823
+ for i in range(proj_pretr.shape[1]):
824
+ df_pretr_proj[f'PC{i+1}'] = proj_pretr[:, i]
825
  df_all["pretrained"] = df_pretr_proj
826
 
827
+ # Para el plot global usaremos PC1 y PC2 (se asignan a 'x' y 'y')
 
828
  for key in df_all:
829
+ df_all[key]["x"] = df_all[key]["PC1"]
830
+ df_all[key]["y"] = df_all[key]["PC2"]
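For reference, a minimal sketch of the projection pattern used here: PCA is fitted on the real subset only, and the same loadings are then applied to every other subset before PC1/PC2 are reused as plot coordinates. The DataFrame names and data below are illustrative, not taken from app.py:

    import numpy as np
    import pandas as pd
    from sklearn.decomposition import PCA

    rng = np.random.default_rng(0)
    embedding_cols = [f"dim_{i}" for i in range(16)]  # hypothetical embedding columns
    df_real = pd.DataFrame(rng.normal(size=(100, 16)), columns=embedding_cols)
    df_synth = pd.DataFrame(rng.normal(size=(80, 16)), columns=embedding_cols)

    pca_real = PCA(n_components=3).fit(df_real[embedding_cols].values)

    for name, df in {"real": df_real, "synthetic": df_synth}.items():
        proj = pca_real.transform(df[embedding_cols].values)  # same loadings for every subset
        for i in range(proj.shape[1]):
            df[f"PC{i+1}"] = proj[:, i]

    # PC1/PC2 double as plot coordinates, as in the global plot above
    df_real["x"], df_real["y"] = df_real["PC1"], df_real["PC2"]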
831
 
832
+ # Construir los subconjuntos únicos para agrupar:
 
 
833
  unique_subsets = {}
 
834
  unique_subsets["real"] = sorted(df_all["real"]['label'].unique().tolist())
 
835
  if "synthetic" in df_all:
836
  unique_synth = {}
837
  for source in df_all["synthetic"]["source"].unique():
 
839
  unique_subsets["synthetic"] = unique_synth
840
  else:
841
  unique_subsets["synthetic"] = {}
 
842
  if "pretrained" in df_all:
843
  unique_subsets["pretrained"] = sorted(df_all["pretrained"]['label'].unique().tolist())
844
  else:
845
  unique_subsets["pretrained"] = []
846
+
847
+ # Obtener mapeo de colores para cada subconjunto (función definida externamente)
848
  color_maps = get_color_maps(unique_subsets)
849
+
850
+ # Mapeo de marcadores para synthetic (por source)
851
  marker_mapping = {
852
  "es-digital-paragraph-degradation-seq": "x",
853
  "es-digital-line-degradation-seq": "cross",
 
856
  "es-digital-zoom-degradation-seq": "asterisk",
857
  "es-render-seq": "inverted_triangle"
858
  }
859
+
860
+ # Plot global: se muestran real, synthetic y pretrained (según checkbox)
861
+ st.subheader("PCA - Todos los subconjuntos proyectados (PC1 vs PC2)")
862
  fig_all = figure(
863
  title="PCA - Todos los subconjuntos proyectados",
864
  plot_width=600,
 
868
  background_fill_color="white",
869
  tooltips=TOOLTIPS
870
  )
 
871
  fig_all.xgrid.grid_line_color = None
872
  fig_all.ygrid.grid_line_color = "gray"
873
+
874
+ # Plotear las muestras reales, agrupadas por label
875
  for label in unique_subsets["real"]:
876
  subset = df_all["real"][df_all["real"]['label'] == label]
877
  source = ColumnDataSource(data={
 
880
  'label': subset['label'],
881
  'img': subset['img']
882
  })
 
883
  fig_all.circle('x', 'y', size=10,
884
  fill_color=color_maps["real"][label],
885
  line_color=color_maps["real"][label],
886
  legend_label=f"Real: {label}",
887
  source=source)
888
+
889
  show_real_only = st.checkbox("Show only real samples", value=True, key=f"show_real_only_{model_name}")
 
890
  if not show_real_only:
891
+ # Agregar synthetic
 
892
  if unique_subsets["synthetic"]:
893
  for source_name, labels in unique_subsets["synthetic"].items():
894
  df_source = df_all["synthetic"][df_all["synthetic"]["source"] == source_name]
895
  marker = marker_mapping.get(source_name, "square")
896
+ # Se usa el mapeo de colores para synthetic
897
+ color_val = color_maps["synthetic"][source_name]
898
+ renderers = add_synthetic_dataset_to_fig(
899
+ fig_all, df_source, labels,
900
+ marker=marker,
901
+ color_mapping=color_val,
902
+ group_label=source_name
903
+ )
904
+ # Agregar pretrained
905
  if unique_subsets["pretrained"]:
906
  for label in unique_subsets["pretrained"]:
907
  subset = df_all["pretrained"][df_all["pretrained"]['label'] == label]
 
911
  'label': subset['label'],
912
  'img': subset['img']
913
  })
 
914
  fig_all.triangle('x', 'y', size=10,
915
  fill_color=color_maps["pretrained"][label],
916
  line_color=color_maps["pretrained"][label],
917
  legend_label=f"Pretrained: {label}",
918
  source=source)
919
+
920
+ show_legend_global = st.checkbox("Show Legend", value=False, key=f"legend_global_{model_name}")
921
+ fig_all.legend.visible = show_legend_global
922
+ fig_all.legend.location = "top_right"
923
+ fig_all.match_aspect = True
924
+ st.bokeh_chart(fig_all)
925
+
926
+ # Calcular centroide y radio (usando solo las muestras reales)
927
  center_x = df_all["real"]['x'].mean()
928
  center_y = df_all["real"]['y'].mean()
929
  distances = np.sqrt((df_all["real"]['x'] - center_x)**2 + (df_all["real"]['y'] - center_y)**2)
930
  radius = distances.max()
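The radius reported further down is simply the largest Euclidean distance from a real sample to the centroid of the real cloud. A self-contained sketch with made-up points:

    import numpy as np

    xy = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 0.5]])  # hypothetical real-sample projections
    center = xy.mean(axis=0)
    radius = np.sqrt(((xy - center) ** 2).sum(axis=1)).max()
    print(center, radius)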
931
+
932
+ # Dibujar el centroide y la circunferencia
933
  centroid_glyph = fig_all.circle(
934
  x=center_x, y=center_y, size=15,
935
  fill_color="white", line_color="black",
936
  legend_label="Centroide",
937
+ name="centroid"
938
  )
 
939
  circumference_glyph = fig_all.circle(
940
  x=center_x, y=center_y, radius=radius,
941
  fill_color=None, line_color="black",
942
  line_dash="dashed",
943
  legend_label="Circunferencia",
944
+ name="circumference"
945
  )
946
+
947
+ # Ajustar ejes y tooltips
948
  fig_all.xaxis.axis_label = "PC1"
949
  fig_all.yaxis.axis_label = "PC2"
950
  hover_all = fig_all.select_one(HoverTool)
951
  hover_all.renderers = [r for r in fig_all.renderers if r.name not in ["centroid", "circumference"]]
952
+
 
 
 
 
 
 
 
 
 
 
 
953
  st.write(f"El radio de la circunferencia (calculado a partir de las muestras reales) es: {radius:.4f}")
954
+
955
+ # -------------------------------------------------------------------------
956
+ # Calcular el rango global: recorrer todas las proyecciones de todos los subconjuntos
957
+ all_vals = []
958
+ for key in df_all:
959
+ for comp in [f'PC{i+1}' for i in range(N_COMPONENTS)]:
960
+ all_vals.append(df_all[key][comp])
961
+ all_vals = pd.concat(all_vals)
962
+ # Tomar el máximo valor absoluto de todas las proyecciones
963
+ max_val = all_vals.abs().max()
964
+ global_range = (-max_val, max_val)
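A compact sketch of the shared, symmetric axis range computed here, so that every PC-pair plot uses the same scale (the data below is illustrative):

    import numpy as np
    import pandas as pd

    df_all = {
        "real": pd.DataFrame({"PC1": [0.2, -1.5], "PC2": [0.4, 0.9], "PC3": [-2.0, 0.1]}),
        "synthetic": pd.DataFrame({"PC1": [3.0, -0.2], "PC2": [0.0, 1.1], "PC3": [0.5, -0.4]}),
    }
    N_COMPONENTS = 3

    all_vals = pd.concat(
        [df_all[key][f"PC{i+1}"] for key in df_all for i in range(N_COMPONENTS)]
    )
    max_val = all_vals.abs().max()
    global_range = (-max_val, max_val)  # here (-3.0, 3.0)
    print(global_range)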
965
+
966
+ # 3. Scatter plots para cada combinación (vistas planta, alzado y perfil)
967
+ st.subheader("Scatter Plots: Vistas de Componentes (Combinaciones)")
968
+ pairs = list(itertools.combinations(range(N_COMPONENTS), 2))
969
+ for (i, j) in pairs:
970
+ x_comp = f'PC{i+1}'
971
+ y_comp = f'PC{j+1}'
972
+
973
+ st.markdown(f"### Scatter Plot: {x_comp} vs {y_comp}")
974
+ # Usar el rango global para ambos ejes
975
+ p = figure(
976
+ title=f"{x_comp} vs {y_comp}",
977
+ plot_width=700,
978
+ plot_height=700,
979
+ x_range=global_range,
980
+ y_range=global_range,
981
+ tools="pan,wheel_zoom,reset,save,hover",
982
+ active_scroll="wheel_zoom",
983
+ background_fill_color="white",
984
+ tooltips=TOOLTIPS
985
+ )
986
+ # Etiquetas de ejes
987
+ p.xaxis.axis_label = x_comp
988
+ p.yaxis.axis_label = y_comp
989
+
990
+ # Muestras reales: se usan directamente los valores de PC{i+1} y PC{j+1}
991
+ for label in unique_subsets["real"]:
992
+ subset = df_all["real"][df_all["real"]['label'] == label]
993
+ source = ColumnDataSource(data={
994
+ 'x': subset[x_comp],
995
+ 'y': subset[y_comp],
996
+ 'label': subset['label'],
997
+ 'img': subset['img']
998
+ })
999
+ p.circle('x', 'y', size=10,
1000
+ fill_color=color_maps["real"][label],
1001
+ line_color=color_maps["real"][label],
1002
+ legend_label=f"Real: {label}",
1003
+ source=source)
1004
+
1005
+ # Selector para incluir o no synthetic y pretrained en este gráfico
1006
+ show_pair_only_real = st.checkbox("Show only real samples", value=True, key=f"pair_show_real_{i}_{j}_{model_name}")
1007
+ if not show_pair_only_real:
1008
+ # Synthetic
1009
+ if "synthetic" in df_all:
1010
+ for source_name, labels in unique_subsets["synthetic"].items():
1011
+ # Obtener las filas de synthetic para ese source y asignar el rango adecuado
1012
+ df_source = df_all["synthetic"][df_all["synthetic"]["source"] == source_name].copy()
1013
+ df_source["x"] = df_source[x_comp]
1014
+ df_source["y"] = df_source[y_comp]
1015
+ marker = marker_mapping.get(source_name, "square")
1016
+ renderers = add_synthetic_dataset_to_fig(
1017
+ p, df_source, labels,
1018
+ marker=marker,
1019
+ color_mapping=color_maps["synthetic"][source_name],
1020
+ group_label=source_name
1021
+ )
1022
+ # Pretrained
1023
+ if "pretrained" in df_all:
1024
+ for label in unique_subsets["pretrained"]:
1025
+ subset = df_all["pretrained"][df_all["pretrained"]['label'] == label]
1026
+ source = ColumnDataSource(data={
1027
+ 'x': subset[x_comp],
1028
+ 'y': subset[y_comp],
1029
+ 'label': subset['label'],
1030
+ 'img': subset['img']
1031
+ })
1032
+ p.triangle('x', 'y', size=10,
1033
+ fill_color=color_maps["pretrained"][label],
1034
+ line_color=color_maps["pretrained"][label],
1035
+ legend_label=f"Pretrained: {label}",
1036
+ source=source)
1037
+ show_legend_pair = st.checkbox("Show Legend", value=False, key=f"legend_pair_{i}_{j}_{model_name}")
1038
+ p.legend.visible = show_legend_pair
1039
+ st.bokeh_chart(p)
1040
 
1041
+
1042
+ # -------------------------------------------------------------------------
1043
+ # 4. Cálculo de distancias y scatter plot: Distance vs F1 (usando PC1 y PC2 globales)
 
 
1044
  real_labels_new = sorted(df_all["real"]['label'].unique().tolist())
1045
  df_distances_new = compute_cluster_distances_synthetic_individual(
1046
  df_all["synthetic"],
1047
  df_all["real"],
1048
  real_labels_new,
1049
+ metric="wasserstein", # O la métrica que prefieras
1050
  bins=20
1051
  )
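compute_cluster_distances_synthetic_individual is defined earlier in app.py and is not part of this diff. Purely as an illustration of the metric name passed here, scipy provides a one-dimensional Wasserstein distance (app.py also imports ot, the POT package, for optimal-transport distances in higher dimensions):

    import numpy as np
    from scipy.stats import wasserstein_distance

    rng = np.random.default_rng(1)
    real_pc1 = rng.normal(loc=0.0, scale=1.0, size=200)   # hypothetical PC1 values, real subset
    synth_pc1 = rng.normal(loc=0.5, scale=1.2, size=200)  # hypothetical PC1 values, one synthetic source
    print(wasserstein_distance(real_pc1, synth_pc1))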
1052
+
 
 
1053
  global_distances_new = {}
1054
  for idx in df_distances_new.index:
1055
  if idx.startswith("Global"):
1056
  source_name = idx.split("(")[1].rstrip(")")
1057
  global_distances_new[source_name] = df_distances_new.loc[idx].values
1058
+
 
1059
  all_x_new = []
1060
  all_y_new = []
1061
  for source in df_f1.columns:
 
1066
  all_y_new.extend(y_vals)
1067
  all_x_arr_new = np.array(all_x_new).reshape(-1, 1)
1068
  all_y_arr_new = np.array(all_y_new)
1069
+
 
1070
  model_global_new = LinearRegression().fit(all_x_arr_new, all_y_arr_new)
1071
  r2_new = model_global_new.score(all_x_arr_new, all_y_arr_new)
1072
  slope_new = model_global_new.coef_[0]
1073
  intercept_new = model_global_new.intercept_
1074
+
 
1075
  scatter_fig_new = figure(
1076
  width=600,
1077
  height=600,
 
1081
  background_fill_color="white",
1082
  y_range=(0, 1)
1083
  )
 
1084
  scatter_fig_new.xgrid.grid_line_color = None
1085
  scatter_fig_new.ygrid.grid_line_color = "gray"
1086
  scatter_fig_new.match_aspect = True
1087
+
 
1088
  source_colors = {
1089
  "es-digital-paragraph-degradation-seq": "blue",
1090
  "es-digital-line-degradation-seq": "green",
 
1094
  "es-digital-rotation-zoom-degradation-seq": "brown",
1095
  "es-render-seq": "cyan"
1096
  }
1097
+
 
1098
  for source in df_f1.columns:
1099
  if source in global_distances_new:
1100
  x_vals = global_distances_new[source]
 
1107
  line_color=source_colors.get(source, "gray"),
1108
  legend_label=source
1109
  )
1110
+
1111
  scatter_fig_new.xaxis.axis_label = "Distance (Global, por Colegio) - Nueva PCA"
1112
  scatter_fig_new.yaxis.axis_label = "F1 Score"
1113
  scatter_fig_new.legend.location = "top_right"
 
1114
  hover_tool_new = scatter_fig_new.select_one(HoverTool)
1115
  hover_tool_new.tooltips = [("Distance", "@x"), ("F1", "@y"), ("Subset", "@Fuente")]
 
 
1116
  x_line_new = np.linspace(all_x_arr_new.min(), all_x_arr_new.max(), 100)
1117
  y_line_new = model_global_new.predict(x_line_new.reshape(-1,1))
1118
  scatter_fig_new.line(x_line_new, y_line_new, line_width=2, line_color="black", legend_label="Global Regression")
 
1119
  st.bokeh_chart(scatter_fig_new)
 
1120
  st.write(f"Regresión global (Nueva PCA): R² = {r2_new:.4f}, Slope = {slope_new:.4f}, Intercept = {intercept_new:.4f}")
1121
+
1122
+ # -------------------------------------------------------------------------
1123
+ # 5. BLOQUE: Heatmap de Características
 
 
 
 
 
 
 
1124
  st.markdown("## Heatmap de Características")
 
1125
  try:
1126
  df_heat = pd.read_csv("data/heatmaps.csv")
 
 
1127
  except Exception as e:
1128
  st.error(f"Error al cargar heatmaps.csv: {e}")
1129
  df_heat = None
1130
+
1131
  if df_heat is not None:
 
1132
  if 'img' not in df_all["real"].columns:
1133
  st.error("La columna 'img' no se encuentra en las muestras reales para hacer el merge con heatmaps.csv.")
1134
  else:
1135
+ # Crear columna 'name' a partir del nombre final de la URL de la imagen
1136
  df_all["real"]["name"] = df_all["real"]["img"].apply(
1137
  lambda x: x.split("/")[-1].replace(".png", "") if isinstance(x, str) else x
1138
  )
1139
+ # Realizar merge de las posiciones reales con el CSV de heatmaps
 
1140
  df_heatmap = pd.merge(df_all["real"], df_heat, on="name", how="inner")
1141
 
1142
+ # Extraer las características disponibles (excluyendo 'name')
1143
  feature_options = [col for col in df_heat.columns if col != "name"]
1144
  selected_feature = st.selectbox("Select heatmap feature:",
1145
  options=feature_options, key=f"heatmap_{model_name}")
 
1146
  select_extra_dataset_hm = st.selectbox("Select a dataset:",
1147
  options=["-", "es-digital-line-degradation-seq", "es-digital-seq", "es-digital-rotation-degradation-seq", "es-digital-zoom-degradation-seq", "es-render-seq"], key=f"heatmap_extra_dataset_{model_name}")
1148
 
1149
+ # Definir el rango de posiciones (x, y)
1150
  x_min, x_max = df_heatmap['x'].min(), df_heatmap['x'].max()
1151
  y_min, y_max = df_heatmap['y'].min(), df_heatmap['y'].max()
1152
 
 
1153
  grid_size = 50
1154
  x_bins = np.linspace(x_min, x_max, grid_size + 1)
1155
  y_bins = np.linspace(y_min, y_max, grid_size + 1)
1156
 
 
 
1157
  cat_mapping = None
1158
  if df_heatmap[selected_feature].dtype == bool or not pd.api.types.is_numeric_dtype(df_heatmap[selected_feature]):
1159
  cat = df_heatmap[selected_feature].astype('category')
1160
  cat_mapping = list(cat.cat.categories)
1161
  df_heatmap[selected_feature] = cat.cat.codes
1162
 
 
1163
  try:
1164
  heat_stat, x_edges, y_edges, binnumber = binned_statistic_2d(
1165
  df_heatmap['x'], df_heatmap['y'], df_heatmap[selected_feature],
 
1174
  statistic='mean', bins=[x_bins, y_bins]
1175
  )
1176
 
1177
+ # Transponer la matriz para alinear correctamente los ejes
1178
  heatmap_data = heat_stat.T
1179
 
1180
+ color_mapper = LinearColorMapper(palette="Viridis256", low=np.nanmin(heatmap_data), high=np.nanmax(heatmap_data), nan_color='rgba(0, 0, 0, 0)')
 
1181
 
 
1182
  heatmap_fig = figure(title=f"Heatmap de '{selected_feature}'",
1183
  x_range=(x_min, x_max), y_range=(y_min, y_max),
1184
  width=600, height=600,
1185
  tools="pan,wheel_zoom,reset,save", active_scroll="wheel_zoom", tooltips=TOOLTIPS)
 
 
1186
  heatmap_fig.image(image=[heatmap_data], x=x_min, y=y_min,
1187
  dw=x_max - x_min, dh=y_max - y_min,
1188
  color_mapper=color_mapper)
1189
 
 
1190
  color_bar = ColorBar(color_mapper=color_mapper, location=(0, 0))
 
1191
  if cat_mapping is not None:
 
 
1192
  ticks = list(range(len(cat_mapping)))
1193
  color_bar.ticker = FixedTicker(ticks=ticks)
 
1194
  categories_json = json.dumps(cat_mapping)
1195
  color_bar.formatter = FuncTickFormatter(code=f"""
1196
  var categories = {categories_json};
 
1202
  }}
1203
  """)
1204
  heatmap_fig.add_layout(color_bar, 'right')
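For non-numeric features the code maps categories to integer codes and then relabels the color-bar ticks. A standalone sketch of that Bokeh pattern (FuncTickFormatter is the pre-Bokeh-3 name, matching the imports app.py already uses; the category labels are hypothetical):

    import json
    from bokeh.models import ColorBar, FixedTicker, FuncTickFormatter, LinearColorMapper

    categories = ["handwritten", "printed", "mixed"]  # hypothetical category labels
    mapper = LinearColorMapper(palette="Viridis256", low=0, high=len(categories) - 1)
    color_bar = ColorBar(color_mapper=mapper, location=(0, 0))
    color_bar.ticker = FixedTicker(ticks=list(range(len(categories))))
    color_bar.formatter = FuncTickFormatter(code=f"""
        var categories = {json.dumps(categories)};
        var idx = Math.round(tick);
        return (idx >= 0 && idx < categories.length) ? categories[idx] : "";
    """)
    # The bar would then be attached with fig.add_layout(color_bar, 'right'), as above.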
1205
+
 
 
 
1206
  source_points = ColumnDataSource(data={
1207
  'x': df_heatmap['x'],
1208
  'y': df_heatmap['y'],
1209
  'img': df_heatmap['img'],
1210
+ 'label': df_heatmap['name']
1211
  })
 
1212
  invisible_renderer = heatmap_fig.circle('x', 'y', size=10, source=source_points, fill_alpha=0, line_alpha=0.5)
1213
+
1214
  if select_extra_dataset_hm != "-":
1215
  df_extra = df_all["synthetic"][df_all["synthetic"]["source"] == select_extra_dataset_hm]
 
1216
  if 'name' not in df_extra.columns:
1217
  df_extra["name"] = df_extra["img"].apply(
1218
  lambda x: x.split("/")[-1].replace(".png", "") if isinstance(x, str) else x
 
1223
  'img': df_extra['img'],
1224
  'label': df_extra['name']
1225
  })
 
1226
  extra_renderer = heatmap_fig.circle('x', 'y', size=10, source=source_extra_points, fill_alpha=0, line_alpha=0.5, color="red")
1227
+
1228
  hover_tool_points = HoverTool(renderers=[invisible_renderer], tooltips=TOOLTIPS)
1229
  heatmap_fig.add_tools(hover_tool_points)
1230
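Finally, the heatmap itself is a 2-D binned mean of the selected feature over the PCA coordinates, transposed because Bokeh's image glyph expects rows along y. A minimal sketch of that computation with random data and hypothetical names:

    import numpy as np
    from scipy.stats import binned_statistic_2d

    rng = np.random.default_rng(2)
    x, y = rng.uniform(-3, 3, 500), rng.uniform(-3, 3, 500)  # hypothetical PCA coordinates
    feature = rng.normal(size=500)                           # hypothetical feature values

    grid_size = 50
    x_bins = np.linspace(x.min(), x.max(), grid_size + 1)
    y_bins = np.linspace(y.min(), y.max(), grid_size + 1)

    stat, x_edges, y_edges, _ = binned_statistic_2d(
        x, y, feature, statistic="mean", bins=[x_bins, y_bins]
    )
    heatmap_data = stat.T  # rows follow y, columns follow x, as image() expects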