File size: 6,166 Bytes
39019f1
 
 
7755c1f
 
7f4ad42
affe748
39019f1
 
 
 
affe748
 
 
7755c1f
 
 
39019f1
7755c1f
39019f1
7755c1f
39019f1
 
 
 
 
7755c1f
d783c87
39019f1
 
7755c1f
 
 
affe748
7755c1f
 
 
 
39019f1
affe748
7755c1f
 
 
d783c87
 
7755c1f
affe748
 
7755c1f
affe748
ed3c8c4
 
9e487ad
39019f1
 
 
7755c1f
39019f1
7755c1f
ed3c8c4
 
 
39019f1
 
ed3c8c4
39019f1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ed3c8c4
39019f1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ed3c8c4
39019f1
ed3c8c4
 
 
39019f1
ed3c8c4
b1fdf62
7755c1f
 
 
39019f1
0b9d6a6
ed3c8c4
 
7755c1f
0b9d6a6
91bbba2
ed3c8c4
 
39019f1
 
 
 
 
ed3c8c4
 
 
39019f1
 
 
ed3c8c4
39019f1
 
 
 
 
 
 
 
ed3c8c4
39019f1
 
 
 
 
ed3c8c4
39019f1
 
 
 
 
 
 
 
 
ed3c8c4
39019f1
ed3c8c4
 
 
39019f1
ed3c8c4
b1fdf62
7755c1f
0b9d6a6
39019f1
affe748
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
from flask import Flask, render_template, request, jsonify, Response
from google import genai
from google.genai import types
import os
from PIL import Image
import io
import logging
import json

def load_prompt():
    """Return the system-instruction text used for essay generation."""
    prompt_text = " fais une dissertation "
    return prompt_text

# Logging configuration: timestamped INFO-level messages for the whole app.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

app = Flask(__name__)

# Gemini client configuration.
# NOTE(review): if the TOKEN env var is unset, `token` is None and the client
# is still constructed — it will only fail on the first API call; confirm
# whether a hard fail at startup is preferred.
token = os.environ.get("TOKEN")
client = genai.Client(api_key=token)

# Default generation configuration.
# NOTE(review): not referenced by the routes visible in this file (they each
# build their own GenerateContentConfig) — possibly dead; verify before removal.
default_generation_config = types.GenerateContentConfig(
    temperature=1,
    max_output_tokens=8192
)

# Model names: fast standard model vs. the "DeepThink" pro model.
STANDARD_MODEL_NAME = "gemini-2.5-flash"
DEEPTHINK_MODEL_NAME = "gemini-2.5-pro"

@app.route('/')
def index():
    """Serve the application's main page."""
    logging.info("Rendering index.html")
    return render_template('index.html')

@app.route('/api/francais', methods=['POST'])
def gpt_francais():
    """Handle a French essay request, streaming model output as SSE.

    Form fields:
        sujet: the essay subject (required; empty -> 400 with an error event).
        choix: the type of exercise requested.
        style: the writing style to use.
        use_deepthink: 'true' to use the pro model with thinking enabled.

    Returns:
        A text/event-stream Response whose events are JSON payloads carrying a
        'type' key: 'thoughts_start'/'thought' (DeepThink reasoning),
        'answer_start'/'answer' (answer text), 'done', or 'error'.
    """
    logging.info("Received request at /api/francais")
    french_prompt = request.form.get('sujet', '').strip()
    choix = request.form.get('choix', '').strip()
    style = request.form.get('style', '').strip()
    use_deepthink_str = request.form.get('use_deepthink', 'false')
    use_deepthink = use_deepthink_str.lower() == 'true'

    logging.info(f"Received data: french_prompt='{french_prompt}', choix='{choix}', style='{style}', use_deepthink='{use_deepthink}'")

    def sse(payload):
        # Serialize one server-sent event in the "data: <json>\n\n" wire format.
        return f"data: {json.dumps(payload)}\n\n"

    if not french_prompt:
        logging.warning("French prompt is empty.")
        return Response(sse({'type': 'error', 'content': 'Erreur'}),
                        mimetype='text/event-stream'), 400

    # Use the heavier "pro" model only when DeepThink was requested.
    model_to_use = DEEPTHINK_MODEL_NAME if use_deepthink else STANDARD_MODEL_NAME
    logging.info(f"Using model: {model_to_use}")

    # Load the system instruction, falling back to a generic one on failure.
    try:
        system_instruction = load_prompt()
    except Exception:
        # logging.exception keeps the traceback instead of a bare "Erreur".
        logging.exception("Failed to load system prompt; using fallback instruction.")
        system_instruction = "Tu es un assistant spécialisé en français."

    # Build the user prompt from the submitted form fields.
    user_prompt = f"Sujet: {french_prompt}\nType: {choix}\nStyle: {style}"

    # Per-request generation configuration.
    config = types.GenerateContentConfig(
        system_instruction=system_instruction,
        temperature=1,
        max_output_tokens=8192
    )

    # DeepThink additionally streams the model's reasoning ("thoughts").
    if use_deepthink:
        config.thinking_config = types.ThinkingConfig(
            include_thoughts=True
        )

    def generate_stream():
        try:
            thoughts = ""
            answer = ""

            for chunk in client.models.generate_content_stream(
                model=model_to_use,
                contents=[user_prompt],
                config=config
            ):
                # Defensive: a chunk may arrive with no candidates, no
                # content, or no parts — skip instead of raising mid-stream.
                if not chunk.candidates or not chunk.candidates[0].content:
                    continue
                for part in chunk.candidates[0].content.parts or []:
                    if not part.text:
                        continue
                    if getattr(part, 'thought', False):
                        # First thought: tell the client to open the
                        # "thoughts" panel before streaming its content.
                        if not thoughts:
                            yield sse({'type': 'thoughts_start'})
                        thoughts += part.text
                        yield sse({'type': 'thought', 'content': part.text})
                    else:
                        if not answer:
                            yield sse({'type': 'answer_start'})
                        answer += part.text
                        yield sse({'type': 'answer', 'content': part.text})

            yield sse({'type': 'done'})

        except Exception:
            # Keep the traceback in the server log; the client only gets a
            # generic error event.
            logging.exception("Streaming generation failed at /api/francais.")
            yield sse({'type': 'error', 'content': 'Erreur'})

    return Response(generate_stream(), mimetype='text/event-stream')


@app.route('/api/etude-texte', methods=['POST'])
def gpt_francais_cc():
    """Analyse uploaded document images, streaming the answer as SSE.

    Expects a multipart form with one or more files under 'images'
    (missing or empty -> 400 with an error event).

    Returns:
        A text/event-stream Response whose events are JSON payloads carrying a
        'type' key: 'content' (answer text), 'done', or 'error'.
    """
    def sse(payload):
        # Serialize one server-sent event in the "data: <json>\n\n" wire format.
        return f"data: {json.dumps(payload)}\n\n"

    if 'images' not in request.files:
        return Response(sse({'type': 'error', 'content': 'Erreur'}),
                        mimetype='text/event-stream'), 400

    images = request.files.getlist('images')
    if not images:
        return Response(sse({'type': 'error', 'content': 'Erreur'}),
                        mimetype='text/event-stream'), 400

    def generate_image_analysis():
        try:
            # Load the system instruction, falling back on failure.
            try:
                system_instruction = load_prompt()
            except Exception:
                # logging.exception keeps the traceback instead of a bare "Erreur".
                logging.exception("Failed to load system prompt; using fallback instruction.")
                system_instruction = "Tu es un assistant spécialisé dans l'analyse de textes et de documents."

            content = ["Réponds aux questions présentes dans les images."]

            # Forward each uploaded image as raw bytes; default to JPEG when
            # the browser did not supply a content type.
            for image in images:
                if image.filename:
                    img_part = types.Part.from_bytes(
                        data=image.read(),
                        mime_type=image.content_type or 'image/jpeg'
                    )
                    content.append(img_part)

            config = types.GenerateContentConfig(
                system_instruction=system_instruction,
                temperature=0.7,
                max_output_tokens=4096
            )

            for chunk in client.models.generate_content_stream(
                model=STANDARD_MODEL_NAME,
                contents=content,
                config=config
            ):
                # Defensive: a chunk may arrive with no candidates, no
                # content, or no parts — skip instead of raising mid-stream.
                if not chunk.candidates or not chunk.candidates[0].content:
                    continue
                for part in chunk.candidates[0].content.parts or []:
                    if part.text:
                        yield sse({'type': 'content', 'content': part.text})

            yield sse({'type': 'done'})

        except Exception:
            # Keep the traceback in the server log; the client only gets a
            # generic error event.
            logging.exception("Streaming generation failed at /api/etude-texte.")
            yield sse({'type': 'error', 'content': 'Erreur'})

    return Response(generate_image_analysis(), mimetype='text/event-stream')


if __name__ == '__main__':
    # Development entry point only: debug=True enables the reloader and the
    # interactive debugger and must not be used in production.
    logging.info("Starting the Flask app with new Gemini SDK...")
    app.run(debug=True)