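"""Flask app for a Hugging Face Space.

Accepts an uploaded image of an exercise, forwards it to Gemini
(gemini-exp-1206) together with a French prompt, and streams the model's
LaTeX answer back to the browser over Server-Sent Events (SSE).
"""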
from flask import Flask, render_template, request, jsonify, Response
import google.generativeai as genai
import os
from PIL import Image
import tempfile
import io
import uuid
import time
app = Flask(__name__)
# Gemini configuration: the API key is read from the TOKEN environment variable
token = os.environ.get("TOKEN")
genai.configure(api_key=token)
# Disable all of Gemini's built-in content filters for this app
safety_settings = [
{"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
{"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
{"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
]
# Prompt sent with every image (in French): "solve this exercise. Answer by
# detailing your calculation steps as much as possible. Reply in LaTeX only."
mm = """ resous cet exercice. tu répondras en détaillant au maximum ton procédé de calcul. réponse attendue uniquement en Latex"""
model = genai.GenerativeModel(
model_name="gemini-exp-1206",
safety_settings=safety_settings
)
# In-memory store of in-progress generation tasks, keyed by request id
pending_responses = {}
@app.route('/')
def home():
    return render_template('index.html')
@app.route('/generate', methods=['POST'])
def generate():
    if 'image' not in request.files:
        return jsonify({'error': 'No image uploaded'}), 400

    image_file = request.files['image']
    request_id = str(uuid.uuid4())  # unique id the client uses to read the stream

    # Save the upload to a temporary file
    with tempfile.NamedTemporaryFile(delete=False, suffix='.png') as temp_file:
        image_file.save(temp_file.name)

    try:
        with Image.open(temp_file.name) as image:
            # Re-encode the image as PNG bytes for the Gemini API
            img_byte_arr = io.BytesIO()
            image.save(img_byte_arr, format='PNG')
            img_byte_arr = img_byte_arr.getvalue()

        # Register the streaming generation task; iter() guarantees that
        # next() works on the response object in the /stream handler below
        pending_responses[request_id] = {
            'status': 'processing',
            'response': iter(model.generate_content(
                [mm, {"mime_type": "image/png", "data": img_byte_arr}],
                stream=True,
            )),
        }
        return jsonify({'request_id': request_id})
    except Exception as e:
        return jsonify({'error': str(e)}), 500
    finally:
        # Clean up the temporary file
        os.unlink(temp_file.name)
@app.route('/stream/<request_id>')
def stream(request_id):
    def generate_stream():
        while request_id in pending_responses and pending_responses[request_id]['status'] == 'processing':
            try:
                chunk = next(pending_responses[request_id]['response'])
                # SSE frames are newline-delimited, so each line of the chunk
                # needs its own "data: " prefix
                for line in chunk.text.splitlines():
                    yield f"data: {line}\n"
                yield "\n"
            except StopIteration:
                pending_responses[request_id]['status'] = 'completed'
            except Exception as e:
                yield f"data: Error: {str(e)}\n\n"
                pending_responses[request_id]['status'] = 'error'
            time.sleep(0.1)  # brief pause before pulling the next chunk
        # Generation finished (or failed): drop the entry from the store
        if request_id in pending_responses:
            del pending_responses[request_id]
    return Response(generate_stream(), mimetype='text/event-stream')
if __name__ == '__main__':
    app.run(debug=True)
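
# A minimal client sketch (not part of the app) showing the two-step flow:
# POST the image to /generate, then read the SSE stream for that request id.
# The local URL and the file name exercise.png are illustrative assumptions.
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:5000/generate",
#       files={"image": open("exercise.png", "rb")},
#   )
#   request_id = resp.json()["request_id"]
#
#   with requests.get(f"http://localhost:5000/stream/{request_id}", stream=True) as r:
#       for line in r.iter_lines(decode_unicode=True):
#           if line and line.startswith("data: "):
#               print(line[len("data: "):])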