from flask import Flask, render_template, request, jsonify, Response, stream_with_context
from google import genai
from google.genai import types
import os
from PIL import Image
import io
import base64
import json
import re

app = Flask(__name__)

GOOGLE_API_KEY = os.environ.get("GEMINI_API_KEY")

client = genai.Client(
    api_key=GOOGLE_API_KEY,
)

@app.route('/')  # route path assumed; the decorator was lost when the file was extracted
def index():
    return render_template('index.html')


@app.route('/maj')  # route path assumed from the template name
def indexx():
    return render_template('maj.html')


def process_markdown_and_code(text):
    """Process text to identify and format code and markdown."""
    # Converts the text to formatted HTML.
    # This function could be extended to use a markdown library.
    return text
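
# A minimal sketch (an assumption, not part of the original app) of how
# process_markdown_and_code could delegate to the third-party `markdown` package, as the
# comment above suggests. It uses a separate, hypothetical name so the original
# pass-through behaviour is left unchanged.
def render_markdown_sketch(text):
    try:
        import markdown  # third-party dependency; assumed to be installed
    except ImportError:
        return text  # fall back to the original pass-through behaviour
    return markdown.markdown(text, extensions=['fenced_code', 'tables'])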

def format_code_execution_result(response_parts):
    """Format code-execution results for HTML display."""
    result = []

    for part in response_parts:
        # Text handling (equivalent to display(Markdown(part.text)))
        if hasattr(part, 'text') and part.text is not None:
            result.append({
                'type': 'markdown',
                'content': part.text
            })

        # Executable-code handling
        if hasattr(part, 'executable_code') and part.executable_code is not None:
            result.append({
                'type': 'code',
                'content': part.executable_code.code
            })

        # Execution-result handling
        if hasattr(part, 'code_execution_result') and part.code_execution_result is not None:
            result.append({
                'type': 'execution_result',
                'content': part.code_execution_result.output
            })

        # Image handling (equivalent to display(Image(data=part.inline_data.data)))
        if hasattr(part, 'inline_data') and part.inline_data is not None:
            # Base64-encode the image for HTML display
            img_data = base64.b64encode(part.inline_data.data).decode('utf-8')
            result.append({
                'type': 'image',
                'content': img_data,
                'format': 'png'  # assumed to be PNG by default
            })

    return result
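
# Illustrative, non-streaming usage of format_code_execution_result (an assumption; the
# routes below use the streaming API instead). The returned list is JSON-serialisable, so
# a route could hand it straight to jsonify(), e.g.:
#
#     response = client.models.generate_content(
#         model="gemini-2.5-flash-preview-04-17",
#         contents=["Trace y = x**2 avec Python"],
#         config=types.GenerateContentConfig(
#             tools=[types.Tool(code_execution=types.ToolCodeExecution())]
#         ),
#     )
#     parts = response.candidates[0].content.parts
#     return jsonify(format_code_execution_result(parts))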

@app.route('/solve', methods=['POST'])  # route path assumed; the decorator was lost in extraction
def solve():
    try:
        image_data = request.files['image'].read()
        img = Image.open(io.BytesIO(image_data))

        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()

        def generate():
            mode = 'starting'
            try:
                response = client.models.generate_content_stream(
                    model="gemini-2.5-pro-exp-03-25",
                    contents=[
                        {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
                        """Résous ça en français with rendering latex"""
                    ],
                    config=types.GenerateContentConfig(
                        thinking_config=types.ThinkingConfig(
                            thinking_budget=8000
                        ),
                        # Enable the code-execution tool (an instance, not the class)
                        tools=[types.Tool(
                            code_execution=types.ToolCodeExecution()
                        )]
                    )
                )

                for chunk in response:
                    for part in chunk.candidates[0].content.parts:
                        if hasattr(part, 'thought') and part.thought:
                            if mode != "thinking":
                                yield f'data: {json.dumps({"mode": "thinking"})}\n\n'
                                mode = "thinking"
                        else:
                            if mode != "answering":
                                yield f'data: {json.dumps({"mode": "answering"})}\n\n'
                                mode = "answering"

                        # Handle the different content types
                        if hasattr(part, 'text') and part.text is not None:
                            yield f'data: {json.dumps({"content": part.text, "type": "text"})}\n\n'

                        if hasattr(part, 'executable_code') and part.executable_code is not None:
                            yield f'data: {json.dumps({"content": part.executable_code.code, "type": "code"})}\n\n'

                        if hasattr(part, 'code_execution_result') and part.code_execution_result is not None:
                            yield f'data: {json.dumps({"content": part.code_execution_result.output, "type": "result"})}\n\n'

                        if hasattr(part, 'inline_data') and part.inline_data is not None:
                            img_data = base64.b64encode(part.inline_data.data).decode('utf-8')
                            yield f'data: {json.dumps({"content": img_data, "type": "image"})}\n\n'

            except Exception as e:
                print(f"Error during generation: {e}")
                yield f'data: {json.dumps({"error": str(e)})}\n\n'

        return Response(
            stream_with_context(generate()),
            mimetype='text/event-stream',
            headers={
                'Cache-Control': 'no-cache',
                'X-Accel-Buffering': 'no'
            }
        )

    except Exception as e:
        return jsonify({'error': str(e)}), 500
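
# Sketch of a client consuming the event stream above (assumptions: the app runs locally
# on port 5000 and the route really is the assumed /solve path). Each SSE line carries a
# JSON payload holding either a "mode" switch ("thinking"/"answering") or a
# "content"/"type" pair ("text", "code", "result" or "image").
#
#     import json
#     import requests
#
#     with requests.post("http://localhost:5000/solve",
#                        files={"image": open("exercise.png", "rb")},
#                        stream=True) as resp:
#         for line in resp.iter_lines():
#             if line.startswith(b"data: "):
#                 event = json.loads(line[len(b"data: "):])
#                 print(event.get("mode"), event.get("type"), str(event.get("content"))[:80])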

@app.route('/solved', methods=['POST'])  # route path assumed; the decorator was lost in extraction
def solved():
    try:
        image_data = request.files['image'].read()
        img = Image.open(io.BytesIO(image_data))

        buffered = io.BytesIO()
        img.save(buffered, format="PNG")
        img_str = base64.b64encode(buffered.getvalue()).decode()

        def generate():
            mode = 'starting'
            try:
                response = client.models.generate_content_stream(
                    model="gemini-2.5-flash-preview-04-17",
                    contents=[
                        {'inline_data': {'mime_type': 'image/png', 'data': img_str}},
                        """Résous ça en français with rendering latex. utilise python pour les calculs et figures."""
                    ],
                    config=types.GenerateContentConfig(
                        # Enable the code-execution tool (an instance, not the class)
                        tools=[types.Tool(
                            code_execution=types.ToolCodeExecution()
                        )]
                    )
                )

                for chunk in response:
                    for part in chunk.candidates[0].content.parts:
                        if hasattr(part, 'thought') and part.thought:
                            if mode != "thinking":
                                yield f'data: {json.dumps({"mode": "thinking"})}\n\n'
                                mode = "thinking"
                        else:
                            if mode != "answering":
                                yield f'data: {json.dumps({"mode": "answering"})}\n\n'
                                mode = "answering"

                        # Handle the different content types
                        if hasattr(part, 'text') and part.text is not None:
                            yield f'data: {json.dumps({"content": part.text, "type": "text"})}\n\n'

                        if hasattr(part, 'executable_code') and part.executable_code is not None:
                            yield f'data: {json.dumps({"content": part.executable_code.code, "type": "code"})}\n\n'

                        if hasattr(part, 'code_execution_result') and part.code_execution_result is not None:
                            yield f'data: {json.dumps({"content": part.code_execution_result.output, "type": "result"})}\n\n'

                        if hasattr(part, 'inline_data') and part.inline_data is not None:
                            img_data = base64.b64encode(part.inline_data.data).decode('utf-8')
                            yield f'data: {json.dumps({"content": img_data, "type": "image"})}\n\n'

            except Exception as e:
                print(f"Error during generation: {e}")
                yield f'data: {json.dumps({"error": str(e)})}\n\n'

        return Response(
            stream_with_context(generate()),
            mimetype='text/event-stream',
            headers={
                'Cache-Control': 'no-cache',
                'X-Accel-Buffering': 'no'
            }
        )

    except Exception as e:
        return jsonify({'error': str(e)}), 500

if __name__ == '__main__':
    app.run(debug=True)
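
# Example local run (an assumption: the file is saved as app.py). The Gemini client above
# reads the GEMINI_API_KEY environment variable, so it must be set before starting the app:
#
#     GEMINI_API_KEY="..." python app.py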