# NOTE: lines above this file's imports originally contained a pasted
# Hugging Face Spaces status banner ("Spaces / Sleeping") — not Python code.
import io
import json
import logging
import os

from PIL import Image
from flask import Flask, render_template, request, jsonify, Response
from google import genai
from google.genai import types
def load_prompt():
    """Return the system instruction used to prime the model.

    Currently a hard-coded placeholder string; callers fall back to a
    generic instruction if this call raises.
    """
    prompt_text = " fais une dissertation "
    return prompt_text
# Logging configuration: timestamped, level-tagged messages at INFO and above.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

app = Flask(__name__)

# Gemini client configuration.
# NOTE(review): if the TOKEN env var is unset, api_key is None — confirm the
# variable is provided in the deployment environment.
token = os.environ.get("TOKEN")
client = genai.Client(api_key=token)

# Default generation configuration.
# NOTE(review): not referenced by the handlers in this chunk, which build
# their own per-request configs — possibly used elsewhere or dead.
default_generation_config = types.GenerateContentConfig(
    temperature=1,
    max_output_tokens=8192
)

# Model names: standard fast model vs. the "DeepThink" reasoning model.
STANDARD_MODEL_NAME = "gemini-2.5-flash"
DEEPTHINK_MODEL_NAME = "gemini-2.5-pro"
def index():
    """Serve the application's landing page (index.html).

    NOTE(review): no @app.route decorator is visible on this handler —
    confirm the route is registered elsewhere (e.g. app.add_url_rule).
    """
    logging.info("Rendering index.html")
    return render_template('index.html')
def gpt_francais():
    """Handle a French-composition request, streaming the answer as SSE.

    Form fields read:
      - sujet: the subject/prompt (required; empty -> 400 error event)
      - choix: exercise type
      - style: writing style
      - use_deepthink: 'true' selects the reasoning model and streams its
        "thoughts" before the answer.

    Returns a text/event-stream Response whose 'data:' lines carry JSON
    events of type thoughts_start / thought / answer_start / answer / done,
    or error on failure.

    NOTE(review): no @app.route decorator is visible on this handler —
    the log message suggests it serves /api/francais; confirm registration.
    """
    logging.info("Received request at /api/francais")
    french_prompt = request.form.get('sujet', '').strip()
    choix = request.form.get('choix', '').strip()
    style = request.form.get('style', '').strip()
    use_deepthink_str = request.form.get('use_deepthink', 'false')
    use_deepthink = use_deepthink_str.lower() == 'true'

    logging.info(f"Received data: french_prompt='{french_prompt}', choix='{choix}', style='{style}', use_deepthink='{use_deepthink}'")

    # Guard clause: reject empty subjects with an SSE-formatted error + 400.
    if not french_prompt:
        logging.warning("French prompt is empty.")
        return Response(f"data: {json.dumps({'type': 'error', 'content': 'Erreur'})}\n\n",
                        mimetype='text/event-stream'), 400

    # Select the model: DeepThink uses the slower reasoning model.
    model_to_use = DEEPTHINK_MODEL_NAME if use_deepthink else STANDARD_MODEL_NAME
    logging.info(f"Using model: {model_to_use}")

    # Load the system instruction, falling back to a generic one on failure.
    try:
        system_instruction = load_prompt()
    except Exception:
        # FIX: logging.exception keeps the traceback; logging.error("Erreur")
        # discarded all diagnostic information.
        logging.exception("Erreur")
        system_instruction = "Tu es un assistant spécialisé en français."

    # Build the user prompt from the form fields.
    user_prompt = f"Sujet: {french_prompt}\nType: {choix}\nStyle: {style}"

    # Per-request generation configuration.
    config = types.GenerateContentConfig(
        system_instruction=system_instruction,
        temperature=1,
        max_output_tokens=8192
    )

    # DeepThink mode: ask the model to include its reasoning in the stream.
    if use_deepthink:
        config.thinking_config = types.ThinkingConfig(
            include_thoughts=True
        )

    def generate_stream():
        """Yield SSE 'data:' lines as chunks arrive from the model."""
        try:
            thoughts = ""
            answer = ""
            for chunk in client.models.generate_content_stream(
                model=model_to_use,
                contents=[user_prompt],
                config=config
            ):
                for part in chunk.candidates[0].content.parts:
                    if not part.text:
                        continue
                    elif hasattr(part, 'thought') and part.thought:
                        # First thought chunk: announce the thoughts section.
                        if not thoughts:
                            yield f"data: {json.dumps({'type': 'thoughts_start'})}\n\n"
                        thoughts += part.text
                        yield f"data: {json.dumps({'type': 'thought', 'content': part.text})}\n\n"
                    else:
                        # First answer chunk: announce the answer section.
                        if not answer:
                            yield f"data: {json.dumps({'type': 'answer_start'})}\n\n"
                        answer += part.text
                        yield f"data: {json.dumps({'type': 'answer', 'content': part.text})}\n\n"
            yield f"data: {json.dumps({'type': 'done'})}\n\n"
        except Exception:
            # FIX: record the traceback instead of a bare "Erreur" line.
            logging.exception("Erreur")
            yield f"data: {json.dumps({'type': 'error', 'content': 'Erreur'})}\n\n"

    return Response(generate_stream(), mimetype='text/event-stream')
def gpt_francais_cc():
    """Analyse uploaded images and stream the model's answer as SSE.

    Expects multipart form files under 'images'; missing or empty uploads
    yield an SSE-formatted error event with a 400 status. The response is a
    text/event-stream of JSON events of type content / done, or error.

    NOTE(review): no @app.route decorator is visible on this handler —
    confirm the route is registered elsewhere.
    """
    # Guard clauses: require at least one uploaded file under 'images'.
    if 'images' not in request.files:
        return Response(f"data: {json.dumps({'type': 'error', 'content': 'Erreur'})}\n\n",
                        mimetype='text/event-stream'), 400

    images = request.files.getlist('images')
    if not images:
        return Response(f"data: {json.dumps({'type': 'error', 'content': 'Erreur'})}\n\n",
                        mimetype='text/event-stream'), 400

    def generate_image_analysis():
        """Yield SSE 'data:' lines with the model's streamed analysis."""
        try:
            # Load the system instruction, falling back on failure.
            try:
                system_instruction = load_prompt()
            except Exception:
                # FIX: logging.exception keeps the traceback; logging.error
                # ("Erreur") discarded all diagnostic information.
                logging.exception("Erreur")
                system_instruction = "Tu es un assistant spécialisé dans l'analyse de textes et de documents."

            # Build the request content: an instruction plus one Part per file.
            content = ["Réponds aux questions présentes dans les images."]
            for image in images:
                if image.filename:
                    img_data = image.read()
                    img_part = types.Part.from_bytes(
                        data=img_data,
                        # Fall back to JPEG when the browser omits the MIME type.
                        mime_type=image.content_type or 'image/jpeg'
                    )
                    content.append(img_part)

            config = types.GenerateContentConfig(
                system_instruction=system_instruction,
                temperature=0.7,
                max_output_tokens=4096
            )

            for chunk in client.models.generate_content_stream(
                model=STANDARD_MODEL_NAME,
                contents=content,
                config=config
            ):
                for part in chunk.candidates[0].content.parts:
                    if part.text:
                        yield f"data: {json.dumps({'type': 'content', 'content': part.text})}\n\n"

            yield f"data: {json.dumps({'type': 'done'})}\n\n"
        except Exception:
            # FIX: record the traceback instead of a bare "Erreur" line.
            logging.exception("Erreur")
            yield f"data: {json.dumps({'type': 'error', 'content': 'Erreur'})}\n\n"

    return Response(generate_image_analysis(), mimetype='text/event-stream')
# Script entry point: run the Flask development server.
# NOTE(review): debug=True must not be used in production — confirm the
# deployment uses a real WSGI server instead.
if __name__ == '__main__':
    logging.info("Starting the Flask app with new Gemini SDK...")
    app.run(debug=True)