# Scraped page header (Hugging Face Spaces status: "Sleeping") — not part of
# the Python source; commented out so the file parses.
from flask import Flask, render_template, request, Response | |
from google import genai | |
from google.genai import types | |
import os | |
import logging | |
import json | |
def load_prompt():
    """Return the system instruction used for the generation endpoints."""
    prompt = " fais une dissertation "
    return prompt
# Logging configuration: INFO level with timestamped messages.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)

app = Flask(__name__)

# Gemini client configuration — the API key is read from the TOKEN env var.
# NOTE(review): if TOKEN is unset, `token` is None and the client is still
# constructed; calls will fail later — confirm the deployment sets TOKEN.
token = os.environ.get("TOKEN")
client = genai.Client(api_key=token)

# Default generation parameters. NOTE(review): the views below build their
# own GenerateContentConfig and never reference this object — presumably
# kept as a reference; confirm before removing.
default_generation_config = types.GenerateContentConfig(
    temperature=1,
    max_output_tokens=8192
)

# Model selection: fast model by default, pro model for "deepthink" mode.
STANDARD_MODEL_NAME = "gemini-2.5-flash"
DEEPTHINK_MODEL_NAME = "gemini-2.5-pro"
@app.route('/')
def index():
    """Serve the application's main page.

    FIX: the ``@app.route`` decorator was missing, so Flask never
    registered this view and '/' returned 404. Restored.
    """
    logging.info("Page index demandée.")
    return render_template('index.html')
@app.route('/api/francais', methods=['GET'])
def gpt_francais():
    """Stream a French-essay generation as Server-Sent Events (SSE).

    FIX: the ``@app.route`` decorator was missing, so Flask never registered
    this view (the log messages name the ``/api/francais`` endpoint).
    Inputs are read from the query string (``request.args``) because the
    frontend uses EventSource, which only issues GET requests.

    Query parameters:
        sujet: the essay subject (required, non-empty).
        choix: the exercise type.
        style: the writing style.
        use_deepthink: 'true' to use the pro model with visible reasoning.

    Returns:
        A streaming ``text/event-stream`` Response, or (Response, 400) when
        the subject is empty.
    """
    logging.info(f"Requête {request.method} reçue sur /api/francais")
    french_prompt = request.args.get('sujet', '').strip()
    choix = request.args.get('choix', '').strip()
    style = request.args.get('style', '').strip()
    use_deepthink = request.args.get('use_deepthink', 'false').lower() == 'true'
    logging.info(f"Données reçues : sujet='{french_prompt[:50]}', choix='{choix}', style='{style}', deepthink={use_deepthink}")

    # Reject empty subjects with an SSE-formatted error so the client can
    # surface it through the same event channel it already listens on.
    if not french_prompt:
        logging.warning("Sujet vide, retour erreur.")
        return Response(f"data: {json.dumps({'type': 'error', 'content': 'Erreur: Le sujet ne peut pas être vide.'})}\n\n",
                        mimetype='text/event-stream'), 400

    model_to_use = DEEPTHINK_MODEL_NAME if use_deepthink else STANDARD_MODEL_NAME
    logging.info(f"Modèle utilisé : {model_to_use}")

    # Fall back to a generic system instruction if the prompt cannot load.
    try:
        system_instruction = load_prompt()
    except Exception:
        logging.exception("Erreur lors du chargement du prompt système.")
        system_instruction = "Tu es un assistant spécialisé en français."

    user_prompt = f"Sujet: {french_prompt}\nType: {choix}\nStyle: {style}"

    config = types.GenerateContentConfig(
        system_instruction=system_instruction,
        temperature=1,
        max_output_tokens=8192
    )
    # "Deepthink" mode asks the model to include its reasoning in the stream.
    if use_deepthink:
        config.thinking_config = types.ThinkingConfig(include_thoughts=True)

    def generate_stream():
        """Yield SSE messages: thought/answer chunks, then a 'done' marker."""
        try:
            logging.info("Démarrage du streaming de génération...")
            thoughts = ""
            answer = ""
            for chunk in client.models.generate_content_stream(
                model=model_to_use,
                contents=[user_prompt],
                config=config
            ):
                for part in chunk.candidates[0].content.parts:
                    if not part.text:
                        continue
                    elif hasattr(part, 'thought') and part.thought:
                        # First thought chunk opens the "thoughts" section
                        # on the client side.
                        if not thoughts:
                            logging.info("Premiers éléments de réflexion envoyés.")
                            yield f"data: {json.dumps({'type': 'thoughts_start'})}\n\n"
                        thoughts += part.text
                        yield f"data: {json.dumps({'type': 'thought', 'content': part.text})}\n\n"
                    else:
                        # First answer chunk opens the "answer" section.
                        if not answer:
                            logging.info("Premiers éléments de réponse envoyés.")
                            yield f"data: {json.dumps({'type': 'answer_start'})}\n\n"
                        answer += part.text
                        yield f"data: {json.dumps({'type': 'answer', 'content': part.text})}\n\n"
            logging.info("Fin du streaming de génération.")
            yield f"data: {json.dumps({'type': 'done'})}\n\n"
        except Exception:
            logging.exception("Erreur pendant la génération de contenu.")
            yield f"data: {json.dumps({'type': 'error', 'content': 'Erreur serveur pendant la génération.'})}\n\n"

    return Response(generate_stream(), mimetype='text/event-stream')
@app.route('/api/etude-texte', methods=['POST'])
def gpt_francais_cc():
    """Stream an analysis of uploaded document images as SSE.

    FIX: the ``@app.route`` decorator was missing, so Flask never registered
    this view (the log message names the ``/api/etude-texte`` endpoint; file
    uploads require POST). Also, the final error message was built with
    backslash escapes inside an f-string expression, a SyntaxError before
    Python 3.12 — the JSON payload is now built outside the f-string.

    Expects:
        A multipart/form-data POST with one or more files under 'images'.

    Returns:
        A streaming ``text/event-stream`` Response, or (Response, 400) when
        no image was sent.
    """
    logging.info("Requête POST reçue sur /api/etude-texte")

    # Validate the upload before committing to a streaming response.
    if 'images' not in request.files:
        logging.warning("Aucun fichier image reçu.")
        return Response(f"data: {json.dumps({'type': 'error', 'content': 'Aucun fichier image envoyé.'})}\n\n",
                        mimetype='text/event-stream'), 400
    images = request.files.getlist('images')
    if not images or not images[0].filename:
        logging.warning("Liste d'images vide.")
        return Response(f"data: {json.dumps({'type': 'error', 'content': 'Aucune image sélectionnée.'})}\n\n",
                        mimetype='text/event-stream'), 400

    def generate_image_analysis():
        """Yield SSE 'content' chunks of the analysis, then a 'done' marker."""
        try:
            logging.info(f"Nombre d'images reçues : {len(images)}")
            # Fall back to a generic instruction if the prompt cannot load.
            try:
                system_instruction = load_prompt()
            except Exception:
                logging.exception("Erreur lors du chargement du prompt système pour analyse texte.")
                system_instruction = "Tu es un assistant spécialisé dans l'analyse de textes et de documents."

            # Build the multimodal request: instruction text + image parts.
            content = ["Réponds aux questions présentes dans les images."]
            for img in images:
                if img.filename:
                    logging.info(f"Traitement image : {img.filename}")
                    img_data = img.read()
                    img_part = types.Part.from_bytes(
                        data=img_data,
                        mime_type=img.content_type or 'image/jpeg'
                    )
                    content.append(img_part)

            config = types.GenerateContentConfig(
                system_instruction=system_instruction,
                temperature=0.7,
                max_output_tokens=4096
            )

            logging.info("Démarrage du streaming d'analyse d'image...")
            for chunk in client.models.generate_content_stream(
                model=STANDARD_MODEL_NAME,
                contents=content,
                config=config
            ):
                for part in chunk.candidates[0].content.parts:
                    if part.text:
                        yield f"data: {json.dumps({'type': 'content', 'content': part.text})}\n\n"
            logging.info("Fin du streaming d'analyse d'image.")
            # Final "done" signal so the client can close the EventSource.
            yield f"data: {json.dumps({'type': 'done'})}\n\n"
        except Exception:
            logging.exception("Erreur pendant l'analyse d'image.")
            # Built outside the f-string: backslash escapes are not allowed
            # inside f-string expressions before Python 3.12.
            error_payload = json.dumps({
                'type': 'error',
                'content': "Erreur serveur pendant l'analyse de l'image."
            })
            yield f"data: {error_payload}\n\n"

    return Response(generate_image_analysis(), mimetype='text/event-stream')
# Script entry point: start the Flask development server.
if __name__ == '__main__':
    logging.info("Démarrage du serveur Flask avec Gemini SDK...")
    # NOTE(review): debug=True enables the Werkzeug debugger and reloader —
    # unsafe in production; confirm the deployment runs behind a WSGI server.
    app.run(debug=True)