# Mariamm1 / app.py
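"""Streamlit app that sends a question to a Gemini "thinking" model and streams back
the model's intermediate reasoning and its final answer as two separate blocks."""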
import streamlit as st
from google import genai
import logging
import sys
from pathlib import Path
from typing import Generator
# Logging configuration
logging.basicConfig(
    level=logging.DEBUG,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(sys.stdout),
        logging.FileHandler(Path('app.log'))
    ]
)
logger = logging.getLogger(__name__)

class GeminiClient:
    """Handles interactions with the Gemini API."""

    def __init__(self, api_key: str):
        self.client = None
        self.init_client(api_key)

    def init_client(self, api_key: str) -> None:
        """Initialize the Gemini client."""
        try:
            self.client = genai.Client(
                api_key=api_key,
                http_options={'api_version': 'v1alpha'}
            )
        except Exception as e:
            logger.error(f"Erreur d'initialisation du client Gemini: {e}")
            raise RuntimeError(f"Impossible d'initialiser le client Gemini: {e}")
    def get_response(self, question: str, model_name: str) -> Generator:
        """Request a streamed response from Gemini."""
        if not self.client:
            raise RuntimeError("Client Gemini non initialisé")
        try:
            response = self.client.models.generate_content_stream(
                model=model_name,
                config={'thinking_config': {'include_thoughts': True}},
                contents=[question]
            )
            return response
        except Exception as e:
            logger.error(f"Erreur lors de la génération de la réponse: {e}")
            raise

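# With include_thoughts enabled, the stream interleaves "thought" parts (part.thought is
# truthy) with regular answer text; stream_response renders the former inside a collapsible
# expander and the latter as the visible answer.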
def stream_response(container, response: Generator) -> None:
    """Handle streaming of the response, separating thoughts from the answer."""
    thinking_placeholder = None
    answer_placeholder = None
    thinking_text = ""
    answer_text = ""
    mode = 'starting'

    try:
        for chunk in response:
            if hasattr(chunk, 'candidates') and chunk.candidates:
                content = chunk.candidates[0].content
                if hasattr(content, 'parts'):
                    for part in content.parts:  # iterate over content.parts, not a bare `parts`
                        has_thought = hasattr(part, 'thought') and part.thought
                        text = getattr(part, 'text', '')
                        if not text:
                            continue

                        if has_thought:
                            if mode != "thinking":
                                if thinking_placeholder is None:
                                    with container.expander("Voir le raisonnement", expanded=False):
                                        thinking_placeholder = st.empty()
                                mode = "thinking"
                            thinking_text += text
                            thinking_placeholder.markdown(thinking_text)
                        else:
                            if mode != "answering":
                                if answer_placeholder is None:
                                    answer_placeholder = container.empty()
                                    container.subheader("~•")
                                mode = "answering"
                            answer_text += text
                            answer_placeholder.markdown(answer_text)
    except Exception as e:
        logger.error(f"Erreur dans le streaming de la réponse: {e}")
        if not answer_text and not thinking_text:
            container.error("Une erreur est survenue lors de l'analyse. Veuillez réessayer.")
        raise
    finally:
        if not answer_text and not thinking_text:
            container.warning("Aucune réponse n'a pu être générée. Veuillez réessayer.")

def main():
    st.set_page_config(
        page_title="Mariam M1",
        page_icon="💭",
        layout="wide",
        initial_sidebar_state="collapsed"
    )
    st.title("Mariam M-0")

    # Retrieve the API key
    try:
        api_key = st.secrets["GEMINI_API_KEY"]
    except Exception as e:
        logger.error(f"Erreur dans la récupération des secrets: {e}")
        st.error("Erreur: Impossible d'accéder aux secrets de l'application.")
        return

    # Initialize the client
    try:
        gemini_client = GeminiClient(api_key)
    except Exception as e:
        st.error(f"Erreur lors de l'initialisation du client Gemini: {e}")
        return

    # User interface
    question = st.text_area(
        "Posez votre question",
        height=100,
        help="Entrez votre question ici"
    )

    if question:
        model_name = "gemini-2.0-flash-thinking-exp-01-21"
        if st.button("Obtenir une réponse", type="primary"):
            response_container = st.container()
            with st.spinner("Génération de la réponse en cours..."):
                try:
                    logger.debug("User question: %s", question)
                    response = gemini_client.get_response(question, model_name)
                    stream_response(response_container, response)
                except Exception as e:
                    logger.error(f"Erreur lors de la génération: {e}", exc_info=True)
                    st.error("Une erreur est survenue. Veuillez réessayer.")


if __name__ == "__main__":
    main()
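# Deployment note (assumption about the hosting setup): the app expects a Streamlit secret
# named GEMINI_API_KEY, typically supplied via .streamlit/secrets.toml or the Space's
# secrets settings, e.g.:
#
#   GEMINI_API_KEY = "your-api-key"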