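"""Flask app: upload a photo of a math exercise and stream a Gemini-generated
correction back over Server-Sent Events (SSE), with an optional multi-step
draft / verify / correct pipeline and Telegram logging of uploads."""
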
from flask import Flask, render_template, request, jsonify, Response, stream_with_context
from google import genai
from google.genai import types
import os
import json
import requests
import time
from PIL import Image
import io
import base64
from pathlib import Path

app = Flask(__name__)

# API keys (read from the environment; never hard-code real tokens in the repo)
GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")
TELEGRAM_BOT_TOKEN = os.environ.get("TELEGRAM_BOT_TOKEN")
TELEGRAM_CHAT_ID = os.environ.get("TELEGRAM_CHAT_ID")

client = genai.Client(api_key=GOOGLE_API_KEY)

# Base prompt for the direct correction mode
BASE_PROMPT = r"""
# 🔍 GÉNÉRATEUR DE CORRECTION MATHÉMATIQUE (Version Directe)

## 🎓 VOTRE RÔLE
Vous êtes **Mariam-MATHEX-PRO**, un expert en mathématiques chargé de fournir des corrections. Votre objectif est d'être clair, précis et d'aller droit au but.

## 📊 FORMAT D'ENTRÉE ET SORTIE
**ENTRÉE:** L'énoncé d'un exercice mathématique (niveau Terminale/Supérieur).  
**SORTIE:** UNIQUEMENT la correction de l'exercice **en français** avec rendu LaTeX.

## 🛠️ INSTRUCTIONS POUR LA CORRECTION
1. **STRUCTURATION DE LA RÉPONSE :**  
   Organisez la solution en étapes logiques claires.  
   Si l'exercice comporte plusieurs questions ou parties, traitez-les séquentiellement.

2. **DÉTAIL DU PROCÉDÉ DE CALCUL :**  
   Pour chaque étape significative, montrez les calculs.  
   Écrivez les calculs intermédiaires importants.

3. **EXPLICATIONS TRÈS BRÈVES :**  
   Chaque étape doit avoir une explication textuelle très concise.  

4. **RÉSULTATS :**  
   Indiquez clairement les résultats intermédiaires et le résultat final.

## 🔧 RENDU MATHÉMATIQUE
5. **RENDU MATHÉMATIQUE :**  
   Utilisez LaTeX pour toutes les expressions mathématiques.

## ✅ OBJECTIF PRINCIPAL
Fournir une correction mathématique textuelle **en français** qui va droit au but.
"""

# Prompt extension for calculator mode
CODE_EXTENSION = r"""
## 🧮 EXIGENCES TECHNIQUES (MODE CALCULATRICE ACTIVÉ)

6. **CALCULS ET FIGURES :**  
   Utilisez Python pour tous les calculs numériques et graphiques.  

7. **VÉRIFICATION NUMÉRIQUE :**  
   Vérifiez vos calculs analytiques par du numérique en Python.
"""

class AgentSystem:
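    """Multi-step solving pipeline: extract the problem from the image, draft a
    solution, self-improve it, verify it, and correct it if the verification
    report flags problems. Prompts live in editable .md files under prompts/."""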
    def __init__(self):
        self.prompts_dir = Path("prompts")
        self.prompts = self.load_prompts()

    def load_prompts(self):
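        """Seed prompts/ with default templates on first run, then load every
        *.md file from disk so user-edited copies override the defaults."""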
        prompts = {}
        try:
            self.prompts_dir.mkdir(exist_ok=True)

            default_prompts = {
                "step1_initial_solution.md": """### Core Instructions ###  
*   **Rigor is Paramount:** Your primary goal is to produce a complete and rigorously justified solution. ...
### Problem ###
[The mathematical problem will be inserted here]""",

                "step2_self_improvement.md": """You are a world-class mathematician.  
You have just produced the following draft solution.  
Your task is to review it carefully, identify flaws or gaps, and produce a new, improved solution.  

### Draft Solution ###
[The initial solution attempt will be inserted here]

### Your Task ###
Provide the improved version of the solution.""",

                "step3_verification.md": """You are an expert mathematician and a meticulous grader.  
Your task is to verify the provided solution step by step.  

### Problem ###
[The mathematical problem will be inserted here]

### Solution ###
[The solution to be verified will be inserted here]

### Task ###
Act as an IMO grader. Generate a summary and a detailed verification log.
""",

                "step5_correction.md": """You are a brilliant mathematician attempting to solve a difficult problem.  

### Verification Report ###
[The full verification report will be inserted here]

### Previous Solution ###
[The previous solution attempt will be inserted here]

### Task ###
Provide a new corrected solution that fixes all identified issues.
"""
            }

            for filename, content in default_prompts.items():
                prompt_file = self.prompts_dir / filename
                if not prompt_file.exists():
                    prompt_file.write_text(content, encoding='utf-8')
                prompts[filename.replace('.md', '')] = content

            # Prompts on disk take precedence: re-read every *.md so that
            # user-edited files override the defaults written above.
            for prompt_file in self.prompts_dir.glob("*.md"):
                prompts[prompt_file.stem] = prompt_file.read_text(encoding='utf-8')

        except Exception as e:
            print(f"Error loading prompts: {e}")
        return prompts

    def extract_problem_text(self, img_str):
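        """Transcription pass: ask the model to extract the problem statement
        from the base64-encoded PNG and return it as LaTeX text."""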
        try:
            response = client.models.generate_content(
                model="gemini-2.5-flash",
                contents=[
                    {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
                    "Extract the mathematical problem statement from this image. Provide only the problem text in LaTeX."
                ],
                config=types.GenerateContentConfig(temperature=0.1)
            )
            problem_text = ""
            for part in response.candidates[0].content.parts:
                if hasattr(part, 'text') and part.text:
                    problem_text += part.text
            return problem_text.strip()
        except Exception as e:
            print(f"Error extracting problem text: {e}")
            return "[Problem extraction failed]"

    def run_agent_step(self, step_name, prompt, use_calculator=False):
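        """Run a single pipeline step and return the concatenated streamed text.
        With use_calculator=True the code-execution tool is attached so the
        model can check its work numerically."""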
        try:
            config = types.GenerateContentConfig(
                temperature=0.3,
                thinking_config=types.ThinkingConfig(include_thoughts=True)
            )
            if use_calculator:
                # Attach the code-execution tool (note the instance, not the class)
                config.tools = [types.Tool(code_execution=types.ToolCodeExecution())]
            response = client.models.generate_content_stream(
                model="gemini-2.5-flash",
                contents=[prompt],
                config=config
            )
            result = ""
            for chunk in response:
                for part in chunk.candidates[0].content.parts:
                    if hasattr(part, 'text') and part.text:
                        result += part.text
            return result.strip()
        except Exception as e:
            print(f"Error in agent step {step_name}: {e}")
            return f"[Error in {step_name}: {str(e)}]"

def send_to_telegram(image_data, caption="Nouvelle image uploadée"):
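    """Forward the uploaded image to the configured Telegram chat for logging.
    Returns True on HTTP 200, False on any failure."""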
    try:
        url = f"https://api.telegram.org/bot{TELEGRAM_BOT_TOKEN}/sendPhoto"
        files = {'photo': ('image.png', image_data)}
        data = {'chat_id': TELEGRAM_CHAT_ID, 'caption': caption}
        response = requests.post(url, files=files, data=data, timeout=15)
        return response.status_code == 200
    except Exception as e:
        print(f"Exception Telegram: {e}")
        return False
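
# Note: send_to_telegram() is fire-and-forget; the /solve route below ignores
# its return value, so a Telegram failure never blocks the solving pipeline.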

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/solve', methods=['POST'])
def solve():
    try:
        if 'image' not in request.files:
            return jsonify({'error': 'Aucune image fournie'}), 400
        image_data = request.files['image'].read()
        use_calculator = request.form.get('use_calculator', 'false').lower() == 'true'
        use_extended_reasoning = request.form.get('use_extended_reasoning', 'false').lower() == 'true'

        img = Image.open(io.BytesIO(image_data))
        send_to_telegram(image_data, "Nouvelle image reçue")

        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()

        def generate():
            try:
                if use_extended_reasoning:
                    agent_system = AgentSystem()

                    # Step 0: extract the problem statement
                    yield f'data: {json.dumps({"mode": "thinking"})}\n\n'
                    yield f'data: {json.dumps({"content": "# 🔍 EXTRACTION DU PROBLÈME\n\nAnalyse de l’image pour extraire l’énoncé du problème...\n\n", "type": "text"})}\n\n'

                    problem_text = agent_system.extract_problem_text(img_str)
                    yield f'data: {json.dumps({"content": f"**Problème identifié:**\n{problem_text}\n\n", "type": "text"})}\n\n'

                    # Step 1: initial solution
                    yield f'data: {json.dumps({"content": "# 📝 ÉTAPE 1: SOLUTION INITIALE\n\n", "type": "text"})}\n\n'
                    step1_prompt = agent_system.prompts["step1_initial_solution"].replace(
                        "[The mathematical problem will be inserted here]", problem_text
                    )
                    initial_solution = agent_system.run_agent_step("step1", step1_prompt, use_calculator)
                    yield f'data: {json.dumps({"content": initial_solution, "type": "text"})}\n\n'

                    # Step 2: self-improvement
                    yield f'data: {json.dumps({"content": "# 🔧 ÉTAPE 2: AUTO-AMÉLIORATION\n\n", "type": "text"})}\n\n'
                    step2_prompt = agent_system.prompts["step2_self_improvement"].replace(
                        "[The initial solution attempt will be inserted here]", initial_solution
                    )
                    improved_solution = agent_system.run_agent_step("step2", step2_prompt, use_calculator)
                    yield f'data: {json.dumps({"content": improved_solution, "type": "text"})}\n\n'

                    # Step 3: verification
                    yield f'data: {json.dumps({"content": "# ✅ ÉTAPE 3: VÉRIFICATION\n\n", "type": "text"})}\n\n'
                    step3_prompt = agent_system.prompts["step3_verification"].replace(
                        "[The mathematical problem will be inserted here]", problem_text
                    ).replace(
                        "[The solution to be verified will be inserted here]", improved_solution
                    )
                    verification_result = agent_system.run_agent_step("step3", step3_prompt, False)
                    yield f'data: {json.dumps({"content": verification_result, "type": "text"})}\n\n'

                    # Crude keyword heuristic standing in for a separate
                    # accept/reject step (hence the jump to "step 5" below).
                    needs_correction = (
                        "Critical Error" in verification_result
                        or "Justification Gap" in verification_result
                        or "invalid" in verification_result.lower()
                    )

                    if needs_correction:
                        yield f'data: {json.dumps({"content": "# 🛠️ ÉTAPE 5: CORRECTION\n\n", "type": "text"})}\n\n'
                        step5_prompt = agent_system.prompts["step5_correction"].replace(
                            "[The full verification report will be inserted here]", verification_result
                        ).replace(
                            "[The previous solution attempt will be inserted here]", improved_solution
                        )
                        corrected_solution = agent_system.run_agent_step("step5", step5_prompt, use_calculator)
                        final_solution = corrected_solution
                        yield f'data: {json.dumps({"content": corrected_solution, "type": "text"})}\n\n'
                    else:
                        final_solution = improved_solution
                        yield f'data: {json.dumps({"content": "✅ La solution a été validée sans correction.\n\n", "type": "text"})}\n\n'

                    yield f'data: {json.dumps({"mode": "answering"})}\n\n'
                    yield f'data: {json.dumps({"content": "# 📋 SOLUTION FINALE\n\n", "type": "text"})}\n\n'
                    yield f'data: {json.dumps({"content": final_solution, "type": "text"})}\n\n'

                else:
                    prompt = BASE_PROMPT
                    if use_calculator:
                        prompt += CODE_EXTENSION
                    config = types.GenerateContentConfig(
                        temperature=0.3,
                        thinking_config=types.ThinkingConfig(include_thoughts=True)
                    )
                    if use_calculator:
                        config.tools = [types.Tool(code_execution=types.ToolCodeExecution())]
                    response = client.models.generate_content_stream(
                        model="gemini-2.5-flash",
                        contents=[
                            {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
                            prompt
                        ],
                        config=config
                    )
                    for chunk in response:
                        candidate = chunk.candidates[0] if chunk.candidates else None
                        if not candidate or not candidate.content or not candidate.content.parts:
                            continue
                        for part in candidate.content.parts:
                            if hasattr(part, 'text') and part.text:
                                yield f'data: {json.dumps({"content": part.text, "type": "text"})}\n\n'

            except Exception as e:
                print(f"Error during generation: {e}")
                yield f'data: {json.dumps({"error": "Erreur inattendue"})}\n\n'

        return Response(
            stream_with_context(generate()),
            mimetype='text/event-stream',
            headers={'Cache-Control': 'no-cache', 'X-Accel-Buffering': 'no'}
        )

    except Exception as e:
        print(f"Error in solve endpoint: {e}")
        return jsonify({'error': 'Erreur inattendue'}), 500

if __name__ == '__main__':
    app.run(debug=True)
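
# --- Usage sketch (illustrative, not part of the app) ------------------------
# Assuming this module is saved as app.py and runs on Flask's default port
# 5000, and that "problem.png" is a hypothetical image of an exercise, the SSE
# stream can be exercised with curl (-N disables buffering so events print as
# they arrive):
#
#   export GEMINI_API_KEY="..." TELEGRAM_BOT_TOKEN="..." TELEGRAM_CHAT_ID="..."
#   python app.py
#   curl -N -F "image=@problem.png" \
#        -F "use_calculator=true" \
#        -F "use_extended_reasoning=true" \
#        http://localhost:5000/solve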