# ---------------------------------------------------------------------------------
# Main application: loads the model, generates prompts and explains the data
# ---------------------------------------------------------------------------------
import streamlit as st  # type: ignore
import os
import re
import pandas as pd  # type: ignore
from dotenv import load_dotenv  # type: ignore  # For local development
from supabase import create_client, Client  # type: ignore
from pandasai import Agent
# from pandasai import SmartDataframe  # type: ignore
from pandasai.llm.local_llm import LocalLLM
import matplotlib.pyplot as plt
# ---------------------------------------------------------------------------------
# Helper functions
# ---------------------------------------------------------------------------------
# Example of a generated prompt:
# generate_graph_prompt("Plot the fertility rate of Germany and France from 2020 to 2030")
def generate_graph_prompt(user_query):
    prompt = f"""
    You are a highly skilled data scientist working with European demographic data.
    Given the user's request: "{user_query}"
    1. Plot the relevant data according to the user's request.
    2. After generating the plot, write a clear, human-readable explanation of the plot (no code).
    3. Save the explanation in a variable called "explanation".
    VERY IMPORTANT:
    - Declare a result variable as a dictionary that includes:
        - type = "plot"
        - value = the path to the saved plot
        - explanation = the explanation text you wrote
    Example of expected result dictionary:
    result = {{
        "type": "plot",
        "value": "temp_chart.png",
        "explanation": explanation
    }}
    Only respond with valid Python code.
    IMPORTANT: Stick strictly to using the data available in the database.
    """
    return prompt
# TODO: Improve the prompt
# ---------------------------------------------------------------------------------
# Supabase connection configuration
# ---------------------------------------------------------------------------------
# Load environment variables from the .env file
load_dotenv()
# Supabase credentials (stored in "Secrets" on Streamlit)
SUPABASE_URL = os.getenv("SUPABASE_URL")
SUPABASE_KEY = os.getenv("SUPABASE_KEY")
# Create the Supabase client
supabase: Client = create_client(SUPABASE_URL, SUPABASE_KEY)
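# Note (assumption): create_client raises if the URL or key is missing, so the
# `if supabase:` guard in load_data below only helps if the client is created
# conditionally, for example:
# supabase: Client = (
#     create_client(SUPABASE_URL, SUPABASE_KEY)
#     if SUPABASE_URL and SUPABASE_KEY
#     else None
# )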
# Function to load data from a Supabase table
# Possible tables: fertility, geo data, labor, population, predictions
def load_data(table):
    try:
        if supabase:
            response = supabase.from_(table).select("*").execute()
            print(f"Response object: {response}")  # Inspect the full object
            print(f"Response type: {type(response)}")  # Check the object type
            # Access the data or error-related attributes
            if hasattr(response, 'data'):
                print(f"Response data: {response.data}")
                return pd.DataFrame(response.data)
            elif hasattr(response, 'status_code'):
                print(f"Response status code: {response.status_code}")
                return pd.DataFrame()
            elif hasattr(response, '_error'):  # Older client versions
                print(f"Older error attribute: {response._error}")
                st.error(f"Error fetching data: {response._error}")
                return pd.DataFrame()
            else:
                st.info("Response object does not have 'data' or known error attributes. Check the logs.")
                return pd.DataFrame()
        else:
            st.error("Supabase client not initialized. Check environment variables.")
            return pd.DataFrame()
    except Exception as e:
        st.error(f"An error occurred during data loading: {e}")
        return pd.DataFrame()
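# Usage sketch (hypothetical, using a table name from the list above): load a table
# and warn if it came back empty.
# population_data = load_data("population")
# if population_data.empty:
#     st.warning("No rows returned from the 'population' table.")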
# ---------------------------------------------------------------------------------
# Load initial data
# ---------------------------------------------------------------------------------
# Load data from the "labor" table
labor_data = load_data("labor")
# TODO: The idea is to use all the tables later, once this works. That is possible
# if the model handles the graphs well; otherwise it needs improving, because those
# would be more complex queries.
# fertility_data = load_data("fertility")
# population_data = load_data("population")
# predictions_data = load_data("predictions")
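# Sketch for the TODO above (assuming all four tables load correctly): PandasAI's
# Agent accepts a list of DataFrames, so every table could be passed at once, once
# the LLM below is configured, e.g.:
# agent = Agent(
#     [labor_data, fertility_data, population_data, predictions_data],
#     config={"llm": lm_studio_llm},
# )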
# ---------------------------------------------------------------------------------
# Initialize the model
# ---------------------------------------------------------------------------------
# ollama_llm = LocalLLM(api_base="http://localhost:11434/v1",
#                       model="gemma3:12b",
#                       temperature=0.1,
#                       max_tokens=8000)
lm_studio_llm = LocalLLM(api_base="http://localhost:1234/v1")  # the model is gemma-3-12b-it-qat
agent = Agent([labor_data], config={"llm": lm_studio_llm})  # Initialize the agent
# ---------------------------------------------------------------------------------
# Streamlit app configuration
# ---------------------------------------------------------------------------------
# App title
st.title("_Europe GraphGen_ :blue[Graph generator] :flag-eu:")
# TODO: Show the user instructions on how to write a good prompt (no jargon, aimed at the end user)
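# One option for the TODO above (hypothetical wording): a short hint next to the input.
# st.caption("Tip: mention the countries, the indicator and the years you want, "
#            "e.g. 'Compare the labor force of Germany and France from 2015 to 2024'.")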
# User input describing the desired graph
user_input = st.text_input("What graph do you have in mind?")
generate_button = st.button("Generate")
# Process the user input with PandasAI
if generate_button and user_input:
    with st.spinner('Generating answer...'):
        try:
            prompt = generate_graph_prompt(user_input)
            answer = agent.chat(prompt)
            explanation = agent.explain()
            print(f"\nAnswer type: {type(answer)}\n")  # Check the object type
            print(f"\nAnswer content: {answer}\n")  # Inspect the answer content
            print(f"\nExplanation type: {type(explanation)}\n")  # Check the object type
            print(f"\nExplanation content: {explanation}\n")
            if isinstance(answer, str) and os.path.isfile(answer):
                # The output is a valid path to an image
                im = plt.imread(answer)
                st.image(im)
                os.remove(answer)  # Clean up the temporary file
                st.markdown(str(explanation))
            else:
                # Not a valid path: show the answer as text
                st.markdown(str(answer))
        except Exception as e:
            st.error(f"Error generating answer: {e}")
# TODO: Structured output if we find it necessary.
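# A possible follow-up for the TODO above: the prompt already asks the model for a
# {"type", "value", "explanation"} dictionary, so a helper like the (hypothetical)
# one below could render that dictionary directly instead of relying on
# agent.explain(). This is only a sketch and is not wired into the flow above.
def render_structured_answer(result):
    """Render a result dict of the form requested in generate_graph_prompt."""
    if isinstance(result, dict) and result.get("type") == "plot":
        path = str(result.get("value", ""))
        if os.path.isfile(path):
            st.image(plt.imread(path))
            os.remove(path)  # clean up the temporary chart file
        st.markdown(str(result.get("explanation", "")))
    else:
        # Fall back to plain text for anything that is not the expected dict
        st.markdown(str(result))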