# Mariam-cards2 / app.py
from flask import Flask, render_template, request, jsonify, Response, stream_with_context
from google import genai
from google.genai import types
import os
import json
import requests
from PIL import Image
import io
import base64
from pathlib import Path
app = Flask(__name__)
# API keys and tokens, read from the environment (secrets must not be hardcoded in source)
GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")
TELEGRAM_BOT_TOKEN = os.environ.get("TELEGRAM_BOT_TOKEN")
TELEGRAM_CHAT_ID = os.environ.get("TELEGRAM_CHAT_ID")

client = genai.Client(api_key=GOOGLE_API_KEY)
# Base prompt (the app deliberately produces its corrections in French)
BASE_PROMPT = r"""
# 🔍 GÉNÉRATEUR DE CORRECTION MATHÉMATIQUE (Version Directe)
## 🎓 VOTRE RÔLE
Vous êtes **Mariam-MATHEX-PRO**, un expert en mathématiques chargé de fournir des corrections. Votre objectif est d'être clair, précis et d'aller droit au but.
## 📊 FORMAT D'ENTRÉE ET SORTIE
**ENTRÉE:** L'énoncé d'un exercice mathématique (niveau Terminale/Supérieur).
**SORTIE:** UNIQUEMENT la correction de l'exercice **en français** avec rendu LaTeX.
## 🛠️ INSTRUCTIONS POUR LA CORRECTION
1. **STRUCTURATION DE LA RÉPONSE :**
Organisez la solution en étapes logiques claires.
Si l'exercice comporte plusieurs questions ou parties, traitez-les séquentiellement.
2. **DÉTAIL DU PROCÉDÉ DE CALCUL :**
Pour chaque étape significative, montrez les calculs.
Écrivez les calculs intermédiaires importants.
3. **EXPLICATIONS TRÈS BRÈVES :**
Chaque étape doit avoir une explication textuelle très concise.
4. **RÉSULTATS :**
Indiquez clairement les résultats intermédiaires et le résultat final.
## 🔧 RENDU MATHÉMATIQUE
5. Utilisez LaTeX pour toutes les expressions mathématiques.
## ✅ OBJECTIF PRINCIPAL
Fournir une correction mathématique textuelle **en français** qui va droit au but.
"""
# Prompt extension for calculator mode
CODE_EXTENSION = r"""
## 🧮 EXIGENCES TECHNIQUES (MODE CALCULATRICE ACTIVÉ)
6. **CALCULS ET FIGURES :**
Utilisez Python pour tous les calculs numériques et graphiques.
7. **VÉRIFICATION NUMÉRIQUE :**
Vérifiez vos calculs analytiques par du numérique en Python.
"""
class AgentSystem:
def __init__(self):
self.prompts_dir = Path("prompts")
self.prompts = self.load_prompts()
def load_prompts(self):
prompts = {}
try:
self.prompts_dir.mkdir(exist_ok=True)
default_prompts = {
"step1_initial_solution.md": """### Core Instructions ###
* **Rigor is Paramount:** Your primary goal is to produce a complete and rigorously justified solution. ...
### Problem ###
[The mathematical problem will be inserted here]""",
"step2_self_improvement.md": """You are a world-class mathematician.
You have just produced the following draft solution.
Your task is to review it carefully, identify flaws or gaps, and produce a new, improved solution.
### Draft Solution ###
[The initial solution attempt will be inserted here]
### Your Task ###
Provide the improved version of the solution.""",
"step3_verification.md": """You are an expert mathematician and a meticulous grader.
Your task is to verify the provided solution step by step.
### Problem ###
[The mathematical problem will be inserted here]
### Solution ###
[The solution to be verified will be inserted here]
### Task ###
Act as an IMO grader. Generate a summary and a detailed verification log.
""",
"step5_correction.md": """You are a brilliant mathematician attempting to solve a difficult problem.
### Verification Report ###
[The full verification report will be inserted here]
### Previous Solution ###
[The previous solution attempt will be inserted here]
### Task ###
Provide a new corrected solution that fixes all identified issues.
"""
}
for filename, content in default_prompts.items():
prompt_file = self.prompts_dir / filename
if not prompt_file.exists():
prompt_file.write_text(content, encoding='utf-8')
prompts[filename.replace('.md', '')] = content
for prompt_file in self.prompts_dir.glob("*.md"):
prompts[prompt_file.stem] = prompt_file.read_text(encoding='utf-8')
except Exception as e:
print(f"Error loading prompts: {e}")
return prompts
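    # Ask Gemini to transcribe the exercise statement from the uploaded image,
    # returning only the problem text in LaTeX.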
def extract_problem_text(self, img_str):
try:
response = client.models.generate_content(
model="gemini-2.5-flash",
contents=[
{'inline_data': {'mime_type': 'image/png', 'data': img_str}},
"Extract the mathematical problem statement from this image. Provide only the problem text in LaTeX."
],
config=types.GenerateContentConfig(temperature=0.1)
)
problem_text = ""
for part in response.candidates[0].content.parts:
if hasattr(part, 'text') and part.text:
problem_text += part.text
return problem_text.strip()
except Exception as e:
print(f"Error extracting problem text: {e}")
return "[Problem extraction failed]"
def run_agent_step(self, step_name, prompt, use_calculator=False):
try:
config = types.GenerateContentConfig(
temperature=0.3,
thinking_config=types.ThinkingConfig(include_thoughts=True)
)
if use_calculator:
                config.tools = [types.Tool(code_execution=types.ToolCodeExecution())]  # instantiate the tool config
response = client.models.generate_content_stream(
model="gemini-2.5-flash",
contents=[prompt],
config=config
)
result = ""
            for chunk in response:
                # Some stream chunks carry no content parts; skip them safely.
                if not chunk.candidates or not chunk.candidates[0].content or not chunk.candidates[0].content.parts:
                    continue
                for part in chunk.candidates[0].content.parts:
                    if hasattr(part, 'text') and part.text:
                        result += part.text
return result.strip()
except Exception as e:
print(f"Error in agent step {step_name}: {e}")
return f"[Error in {step_name}: {str(e)}]"
def send_to_telegram(image_data, caption="Nouvelle image uploadée"):
try:
url = f"https://api.telegram.org/bot{TELEGRAM_BOT_TOKEN}/sendPhoto"
files = {'photo': ('image.png', image_data)}
data = {'chat_id': TELEGRAM_CHAT_ID, 'caption': caption}
        response = requests.post(url, files=files, data=data, timeout=30)
return response.status_code == 200
except Exception as e:
print(f"Exception Telegram: {e}")
return False
@app.route('/')
def index():
return render_template('index.html')
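# Main endpoint: accepts a multipart form (image file plus two boolean flags)
# and streams the correction back as server-sent events (SSE).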
@app.route('/solve', methods=['POST'])
def solve():
try:
image_data = request.files['image'].read()
use_calculator = request.form.get('use_calculator', 'false').lower() == 'true'
use_extended_reasoning = request.form.get('use_extended_reasoning', 'false').lower() == 'true'
img = Image.open(io.BytesIO(image_data))
send_to_telegram(image_data, "Nouvelle image reçue")
buffered = io.BytesIO()
img.save(buffered, format="PNG")
img_str = base64.b64encode(buffered.getvalue()).decode()
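        # SSE generator: each event is a "data:" line holding a JSON payload
        # (a mode switch, a text chunk, or an error), followed by a blank line.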
def generate():
try:
if use_extended_reasoning:
agent_system = AgentSystem()
                    # Step 0: problem extraction
yield f'data: {json.dumps({"mode": "thinking"})}\n\n'
yield f'data: {json.dumps({"content": "# 🔍 EXTRACTION DU PROBLÈME\n\nAnalyse de l’image pour extraire l’énoncé du problème...\n\n", "type": "text"})}\n\n'
problem_text = agent_system.extract_problem_text(img_str)
yield f'data: {json.dumps({"content": f"**Problème identifié:**\n{problem_text}\n\n", "type": "text"})}\n\n'
                    # Step 1: initial solution
yield f'data: {json.dumps({"content": "# 📝 ÉTAPE 1: SOLUTION INITIALE\n\n", "type": "text"})}\n\n'
step1_prompt = agent_system.prompts["step1_initial_solution"].replace(
"[The mathematical problem will be inserted here]", problem_text
)
initial_solution = agent_system.run_agent_step("step1", step1_prompt, use_calculator)
yield f'data: {json.dumps({"content": initial_solution, "type": "text"})}\n\n'
                    # Step 2: self-improvement
yield f'data: {json.dumps({"content": "# 🔧 ÉTAPE 2: AUTO-AMÉLIORATION\n\n", "type": "text"})}\n\n'
step2_prompt = agent_system.prompts["step2_self_improvement"].replace(
"[The initial solution attempt will be inserted here]", initial_solution
)
improved_solution = agent_system.run_agent_step("step2", step2_prompt, use_calculator)
yield f'data: {json.dumps({"content": improved_solution, "type": "text"})}\n\n'
                    # Step 3: verification
yield f'data: {json.dumps({"content": "# ✅ ÉTAPE 3: VÉRIFICATION\n\n", "type": "text"})}\n\n'
step3_prompt = agent_system.prompts["step3_verification"].replace(
"[The mathematical problem will be inserted here]", problem_text
).replace(
"[The solution to be verified will be inserted here]", improved_solution
)
verification_result = agent_system.run_agent_step("step3", step3_prompt, False)
yield f'data: {json.dumps({"content": verification_result, "type": "text"})}\n\n'
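                    # Crude keyword heuristic: run the correction step only when
                    # the verification report contains known failure markers.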
needs_correction = (
"Critical Error" in verification_result
or "Justification Gap" in verification_result
or "invalid" in verification_result.lower()
)
if needs_correction:
yield f'data: {json.dumps({"content": "# 🛠️ ÉTAPE 5: CORRECTION\n\n", "type": "text"})}\n\n'
step5_prompt = agent_system.prompts["step5_correction"].replace(
"[The full verification report will be inserted here]", verification_result
).replace(
"[The previous solution attempt will be inserted here]", improved_solution
)
corrected_solution = agent_system.run_agent_step("step5", step5_prompt, use_calculator)
final_solution = corrected_solution
yield f'data: {json.dumps({"content": corrected_solution, "type": "text"})}\n\n'
else:
final_solution = improved_solution
yield f'data: {json.dumps({"content": "✅ La solution a été validée sans correction.\n\n", "type": "text"})}\n\n'
yield f'data: {json.dumps({"mode": "answering"})}\n\n'
yield f'data: {json.dumps({"content": "# 📋 SOLUTION FINALE\n\n", "type": "text"})}\n\n'
yield f'data: {json.dumps({"content": final_solution, "type": "text"})}\n\n'
else:
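                    # Single-pass mode: one multimodal streaming call with the
                    # base prompt (plus the calculator extension when requested).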
prompt = BASE_PROMPT
if use_calculator:
prompt += CODE_EXTENSION
config = types.GenerateContentConfig(
temperature=0.3,
thinking_config=types.ThinkingConfig(include_thoughts=True)
)
if use_calculator:
                        config.tools = [types.Tool(code_execution=types.ToolCodeExecution())]  # instantiate the tool config
response = client.models.generate_content_stream(
model="gemini-2.5-flash",
contents=[
{'inline_data': {'mime_type': 'image/png', 'data': img_str}},
prompt
],
config=config
)
                    for chunk in response:
                        # Skip stream chunks that carry no content parts.
                        if not chunk.candidates or not chunk.candidates[0].content or not chunk.candidates[0].content.parts:
                            continue
                        for part in chunk.candidates[0].content.parts:
                            if hasattr(part, 'text') and part.text:
                                yield f'data: {json.dumps({"content": part.text, "type": "text"})}\n\n'
except Exception as e:
print(f"Error during generation: {e}")
yield f'data: {json.dumps({"error": "Erreur inattendue"})}\n\n'
return Response(
stream_with_context(generate()),
mimetype='text/event-stream',
headers={'Cache-Control': 'no-cache', 'X-Accel-Buffering': 'no'}
)
except Exception as e:
print(f"Error in solve endpoint: {e}")
return jsonify({'error': 'Erreur inattendue'}), 500
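# Example client call (a sketch; assumes the app runs locally on Flask's
# default port 5000 and "exercise.png" is a hypothetical input file; -N keeps
# curl from buffering the SSE stream):
#   curl -N -X POST http://localhost:5000/solve \
#        -F "image=@exercise.png" \
#        -F "use_calculator=true" \
#        -F "use_extended_reasoning=true"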
if __name__ == '__main__':
app.run(debug=True)