kuro223 commited on
Commit
a4a9f03
·
1 Parent(s): fc37930
Files changed (4) hide show
  1. __pycache__/app.cpython-311.pyc +0 -0
  2. app.py +107 -99
  3. opencode.json +22 -0
  4. templates/index.html +92 -45
__pycache__/app.cpython-311.pyc ADDED
Binary file (29.1 kB). View file
 
app.py CHANGED
@@ -80,6 +80,8 @@ Deux axes de lecture distincts.
80
  Chaque axe subdivisé en deux sous-axes.
81
  Chaque sous-axe analysé via trois outils d'analyse spécifiques, nommés individuellement avec leur terme technique reconnu et officiel.
82
  Résultats présentés en tableau(x) ("Outils d'analyse", "Repérage / Citation", "Interprétation"), en respectant la règle un seul outil par entrée dans la première colonne.
 
 
83
  """
84
 
85
  prompt_redaction = """
@@ -147,17 +149,18 @@ MODEL_ID_DEEPTHINK = "gemini-2.5-flash" # Advanced model for DeepThink
147
  # --- End Model IDs ---
148
 
149
  # --- Stream Generation Functions ---
150
- def generate_table_stream(image, consignes="", model_id=MODEL_ID_STANDARD):
151
- """Génère le tableau d'analyse à partir de l'image en intégrant éventuellement des consignes"""
152
  prompt = prompt_tableau
153
  if consignes:
154
  prompt += "\n\nConsignes supplémentaires de l'utilisateur :\n" + consignes # Make consignes clearer in prompt
155
 
156
- logging.info(f"Generating table using model: {model_id}")
157
  try:
 
158
  response_stream = genai_client.models.generate_content_stream(
159
- model=model_id, # Use the passed model_id
160
- contents=[prompt, image],
161
  config=generate_config
162
  )
163
 
@@ -212,11 +215,15 @@ def index():
212
 
213
  @app.route('/analyze', methods=['POST'])
214
  def analyze():
215
- if 'image' not in request.files:
216
- logging.warning("Analyze request received without image file.")
217
- return jsonify({'error': 'No image uploaded'}), 400
 
 
 
 
 
218
 
219
- image_file = request.files['image']
220
  consignes = request.form.get("consignes", "")
221
  # Check for the DeepThink flag
222
  use_deepthink = request.form.get('use_deepthink', 'false').lower() == 'true'
@@ -229,110 +236,111 @@ def analyze():
229
  model_id = MODEL_ID_STANDARD
230
  logging.info("Standard analysis requested, using standard model.")
231
 
232
- # Use a temporary file to handle the image upload
233
- temp_file = None # Initialize to None
 
234
  try:
235
- # Create a temporary file with a specific suffix if needed, or let NamedTemporaryFile handle it
236
- # Using 'delete=False' requires manual cleanup
237
- with tempfile.NamedTemporaryFile(delete=False, suffix=".jpg") as temp_file:
 
238
  image_file.save(temp_file.name)
239
- # Ensure image is valid before proceeding
 
 
240
  try:
241
  image = Image.open(temp_file.name)
242
- image.verify() # Verify image header
243
- # Re-open after verify
244
- image = Image.open(temp_file.name)
245
  except (IOError, SyntaxError) as e:
246
  logging.error(f"Invalid image file uploaded: {e}")
247
  return jsonify({'error': f'Invalid or corrupted image file: {e}'}), 400
248
 
249
- # Now 'image' holds the PIL Image object
250
- # We need to pass the image object to the stream function
251
- # Note: google.generativeai often works directly with PIL Image objects
252
-
253
- @stream_with_context
254
- def generate():
255
- temp_file_path = temp_file.name # Store path for finally block
256
- full_tableau_content = "" # Accumulate full table content for dissertation
257
- try:
258
- logging.info("Starting table generation stream...")
259
- # Phase 1: Génération du tableau, passing the selected model_id
260
- for chunk_json in generate_table_stream(image, consignes, model_id):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
261
  try:
262
  chunk_data = json.loads(chunk_json)
263
  if chunk_data.get("type") == "error":
264
- logging.error(f"Error received from table stream: {chunk_data.get('error')}")
265
- yield chunk_json # Forward the error to the client
266
- return # Stop generation if table fails
267
- elif chunk_data.get("type") == "tableau":
268
- full_tableau_content += chunk_data.get("chunk", "")
269
- yield chunk_json # Stream chunk to client
270
  except json.JSONDecodeError:
271
- logging.error(f"Received invalid JSON from table stream: {chunk_json}")
272
- # Decide how to handle: yield error, ignore, etc.
273
- yield json.dumps({"type": "error", "error": "Invalid data received during table generation."}) + "\n"
274
- return # Stop if data is corrupt
275
-
276
- logging.info("Table generation stream finished.")
277
- logging.info("Starting dissertation generation stream...")
278
-
279
- # Phase 2: Génération de la dissertation basée sur le tableau COMPLET, passing the selected model_id
280
- if full_tableau_content: # Only generate if table content exists
281
- for chunk_json in generate_dissertation_stream(full_tableau_content, model_id):
282
- try:
283
- chunk_data = json.loads(chunk_json)
284
- if chunk_data.get("type") == "error":
285
- logging.error(f"Error received from dissertation stream: {chunk_data.get('error')}")
286
- yield chunk_json # Forward the error
287
- # Decide if you want to return here or let it finish
288
- elif chunk_data.get("type") == "dissertation":
289
- yield chunk_json # Stream chunk to client
290
- except json.JSONDecodeError:
291
- logging.error(f"Received invalid JSON from dissertation stream: {chunk_json}")
292
- yield json.dumps({"type": "error", "error": "Invalid data received during dissertation generation."}) + "\n"
293
- # Potentially return here too
294
- else:
295
- logging.warning("Tableau content is empty, skipping dissertation generation.")
296
- yield json.dumps({"type": "error", "error": "Tableau d'analyse non généré, impossible de créer la dissertation."}) + "\n"
297
-
298
- logging.info("Dissertation generation stream finished.")
299
-
300
- except Exception as e:
301
- logging.error(f"Error during streaming generation: {e}", exc_info=True)
302
- # Yield a JSON error message for the client
303
- yield json.dumps({"type": "error", "error": f"Une erreur interne est survenue: {e}"}) + "\n"
304
- finally:
305
- # Nettoyer le fichier temporaire in the finally block of generate
306
- # Ensure the image object is closed if necessary (PIL handles this reasonably well)
307
- # image.close() # Usually not needed with 'with open' or tempfile context
308
- pass # temp_file is closed by with statement, path needed for unlink
309
-
310
- # Return the streaming response
311
- # Make sure temp_file path is accessible for cleanup *after* streaming finishes
312
- response = Response(generate(), content_type='text/event-stream')
313
-
314
- # Use response.call_on_close for reliable cleanup after stream finishes/closes
315
- if temp_file and os.path.exists(temp_file.name):
316
- cleanup_path = temp_file.name
317
- response.call_on_close(lambda: os.unlink(cleanup_path) if os.path.exists(cleanup_path) else None)
318
- logging.info(f"Scheduled cleanup for temp file: {cleanup_path}")
319
-
320
-
321
- return response
322
 
323
  except Exception as e:
324
- # Catch errors during file handling or initial PIL processing
325
- logging.error(f"Error processing upload before streaming: {e}", exc_info=True)
326
- # Ensure cleanup if temp_file was created before the error
327
- if temp_file and os.path.exists(temp_file.name):
328
- try:
329
- os.unlink(temp_file.name)
330
- logging.info(f"Cleaned up temp file due to pre-stream error: {temp_file.name}")
331
- except OSError as unlink_error:
332
- logging.error(f"Error unlinking temp file during error handling: {unlink_error}")
333
- return jsonify({'error': f'Error processing file: {e}'}), 500
334
- # Note: The 'finally' block for the 'with tempfile' is implicitly handled
335
- # but explicit cleanup using response.call_on_close is better for streaming
336
 
337
 
338
  # --- End Flask Routes ---
 
80
  Chaque axe subdivisé en deux sous-axes.
81
  Chaque sous-axe analysé via trois outils d'analyse spécifiques, nommés individuellement avec leur terme technique reconnu et officiel.
82
  Résultats présentés en tableau(x) ("Outils d'analyse", "Repérage / Citation", "Interprétation"), en respectant la règle un seul outil par entrée dans la première colonne.
83
+
84
+ Note : Si plusieurs images sont fournies, analysez le texte de manière cohérente à travers toutes les images, en considérant l'ensemble comme un texte continu ou relié.
85
  """
86
 
87
  prompt_redaction = """
 
149
  # --- End Model IDs ---
150
 
151
  # --- Stream Generation Functions ---
152
+ def generate_table_stream(uploaded_files, consignes="", model_id=MODEL_ID_STANDARD):
153
+ """Génère le tableau d'analyse à partir des images uploadées en intégrant éventuellement des consignes"""
154
  prompt = prompt_tableau
155
  if consignes:
156
  prompt += "\n\nConsignes supplémentaires de l'utilisateur :\n" + consignes # Make consignes clearer in prompt
157
 
158
+ logging.info(f"Generating table using model: {model_id} with {len(uploaded_files)} images")
159
  try:
160
+ contents = [prompt] + uploaded_files
161
  response_stream = genai_client.models.generate_content_stream(
162
+ model=model_id,
163
+ contents=contents,
164
  config=generate_config
165
  )
166
 
 
215
 
216
  @app.route('/analyze', methods=['POST'])
217
  def analyze():
218
+ if 'images' not in request.files:
219
+ logging.warning("Analyze request received without image files.")
220
+ return jsonify({'error': 'No images uploaded'}), 400
221
+
222
+ image_files = request.files.getlist('images')
223
+ if not image_files or all(f.filename == '' for f in image_files):
224
+ logging.warning("No valid image files provided.")
225
+ return jsonify({'error': 'No valid images uploaded'}), 400
226
 
 
227
  consignes = request.form.get("consignes", "")
228
  # Check for the DeepThink flag
229
  use_deepthink = request.form.get('use_deepthink', 'false').lower() == 'true'
 
236
  model_id = MODEL_ID_STANDARD
237
  logging.info("Standard analysis requested, using standard model.")
238
 
239
+ # Handle multiple image uploads using File API
240
+ temp_files = []
241
+ uploaded_files = []
242
  try:
243
+ for image_file in image_files:
244
+ # Create temporary file for each image
245
+ temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=".jpg")
246
+ temp_files.append(temp_file)
247
  image_file.save(temp_file.name)
248
+ temp_file.close() # Close file handle so it can be read by PIL and uploaded
249
+
250
+ # Verify image is valid
251
  try:
252
  image = Image.open(temp_file.name)
253
+ image.verify()
254
+ image.close()
 
255
  except (IOError, SyntaxError) as e:
256
  logging.error(f"Invalid image file uploaded: {e}")
257
  return jsonify({'error': f'Invalid or corrupted image file: {e}'}), 400
258
 
259
+ # Upload to Gemini File API
260
+ try:
261
+ uploaded_file = genai_client.files.upload(file=temp_file.name)
262
+ uploaded_files.append(uploaded_file)
263
+ logging.info(f"Uploaded image to Gemini File API: {uploaded_file.name}")
264
+ except Exception as e:
265
+ logging.error(f"Error uploading to Gemini File API: {e}")
266
+ return jsonify({'error': f'Error uploading image to Gemini: {e}'}), 500
267
+
268
+ @stream_with_context
269
+ def generate():
270
+ full_tableau_content = ""
271
+ try:
272
+ logging.info("Starting table generation stream...")
273
+ # Phase 1: Génération du tableau, passing the selected model_id and uploaded files
274
+ for chunk_json in generate_table_stream(uploaded_files, consignes, model_id):
275
+ try:
276
+ chunk_data = json.loads(chunk_json)
277
+ if chunk_data.get("type") == "error":
278
+ logging.error(f"Error received from table stream: {chunk_data.get('error')}")
279
+ yield chunk_json
280
+ return
281
+ elif chunk_data.get("type") == "tableau":
282
+ full_tableau_content += chunk_data.get("chunk", "")
283
+ yield chunk_json
284
+ except json.JSONDecodeError:
285
+ logging.error(f"Received invalid JSON from table stream: {chunk_json}")
286
+ yield json.dumps({"type": "error", "error": "Invalid data received during table generation."}) + "\n"
287
+ return
288
+
289
+ logging.info("Table generation stream finished.")
290
+ logging.info("Starting dissertation generation stream...")
291
+
292
+ # Phase 2: Génération de la dissertation basée sur le tableau COMPLET
293
+ if full_tableau_content:
294
+ for chunk_json in generate_dissertation_stream(full_tableau_content, model_id):
295
  try:
296
  chunk_data = json.loads(chunk_json)
297
  if chunk_data.get("type") == "error":
298
+ logging.error(f"Error received from dissertation stream: {chunk_data.get('error')}")
299
+ yield chunk_json
300
+ elif chunk_data.get("type") == "dissertation":
301
+ yield chunk_json
 
 
302
  except json.JSONDecodeError:
303
+ logging.error(f"Received invalid JSON from dissertation stream: {chunk_json}")
304
+ yield json.dumps({"type": "error", "error": "Invalid data received during dissertation generation."}) + "\n"
305
+ else:
306
+ logging.warning("Tableau content is empty, skipping dissertation generation.")
307
+ yield json.dumps({"type": "error", "error": "Tableau d'analyse non généré, impossible de créer la dissertation."}) + "\n"
308
+
309
+ logging.info("Dissertation generation stream finished.")
310
+
311
+ except Exception as e:
312
+ logging.error(f"Error during streaming generation: {e}", exc_info=True)
313
+ yield json.dumps({"type": "error", "error": f"Une erreur interne est survenue: {e}"}) + "\n"
314
+ finally:
315
+ # Cleanup uploaded files from Gemini (optional, they expire automatically)
316
+ pass
317
+
318
+ response = Response(generate(), content_type='text/event-stream')
319
+
320
+ # Cleanup temp files after streaming
321
+ def cleanup_temp_files():
322
+ for temp_file in temp_files:
323
+ try:
324
+ if os.path.exists(temp_file.name):
325
+ os.unlink(temp_file.name)
326
+ logging.info(f"Cleaned up temp file: {temp_file.name}")
327
+ except OSError as e:
328
+ logging.error(f"Error cleaning up temp file {temp_file.name}: {e}")
329
+
330
+ response.call_on_close(cleanup_temp_files)
331
+
332
+ return response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
333
 
334
  except Exception as e:
335
+ logging.error(f"Error processing uploads before streaming: {e}", exc_info=True)
336
+ # Cleanup any temp files created
337
+ for temp_file in temp_files:
338
+ try:
339
+ if os.path.exists(temp_file.name):
340
+ os.unlink(temp_file.name)
341
+ except OSError:
342
+ pass
343
+ return jsonify({'error': f'Error processing files: {e}'}), 500
 
 
 
344
 
345
 
346
  # --- End Flask Routes ---
opencode.json ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "$schema": "https://opencode.ai/config.json",
3
+ "mcp": {
4
+ "context7": {
5
+ "type": "remote",
6
+ "url": "https://mcp.context7.com/mcp",
7
+ "headers": {
8
+ "CONTEXT7_API_KEY": "<REDACTED — do not commit API keys; rotate this key and load it from an environment variable instead>"
9
+ },
10
+ "enabled": true
11
+ },
12
+ "sequential-thinking": {
13
+ "type": "local",
14
+ "command": [
15
+ "npx",
16
+ "-y",
17
+ "@modelcontextprotocol/server-sequential-thinking"
18
+ ],
19
+ "enabled": true
20
+ }
21
+ }
22
+ }
templates/index.html CHANGED
@@ -134,12 +134,24 @@
134
  overflow: hidden;
135
  position: relative;
136
  }
 
 
 
 
 
 
 
 
 
 
137
  .preview-image {
138
  max-width: 100%;
139
  max-height: 15rem; /* Limite hauteur aperçu */
140
  height: auto;
141
  margin: 0 auto;
142
  display: block;
 
 
143
  }
144
  .btn {
145
  display: block;
@@ -351,13 +363,13 @@
351
  <main>
352
  <div class="card">
353
  <form id="uploadForm">
354
- <div class="input-group">
355
- <label for="imageInput" class="label">Votre document</label>
356
  <div id="uploadArea" class="upload-area">
357
  <svg class="upload-icon" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M7 16a4 4 0 01-.88-7.903A5 5 0 1115.9 6L16 6a5 5 0 011 9.9M15 13l-3-3m0 0l-3 3m3-3v12" /></svg>
358
  <p>Touchez pour télécharger</p>
359
- <p class="upload-instructions">PNG, JPG jusqu'à 10MB</p>
360
- <input id="imageInput" type="file" class="file-input" accept="image/*">
361
  </div>
362
  </div>
363
  <div class="input-group">
@@ -365,8 +377,8 @@
365
  <input id="consignesInput" type="text" class="input" placeholder="Entrez vos consignes ici">
366
  </div>
367
  <div id="previewContainer" class="preview-container" style="display: none;">
368
- <p class="preview-title">Aperçu du document</p>
369
- <div class="preview-image-container"><img id="previewImage" class="preview-image" src="#" alt="Aperçu"></div>
370
  </div>
371
  <div class="button-container">
372
  <button type="submit" class="btn btn-primary">Lancer l'analyse standard</button>
@@ -496,22 +508,51 @@
496
  });
497
 
498
 
499
- imageInput.addEventListener('change', function() { const file = this.files[0]; if (file) { const reader = new FileReader(); reader.onload = function(e) { previewImage.src = e.target.result; previewContainer.style.display = 'block'; previewContainer.classList.add('slide-up'); uploadArea.classList.add('active'); }; reader.readAsDataURL(file); } else { previewContainer.style.display = 'none'; uploadArea.classList.remove('active'); } updateButtonStates(); });
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
500
  dissertationTab.addEventListener('click', () => switchTab('dissertation'));
501
  tableauTab.addEventListener('click', () => switchTab('tableau'));
502
  function switchTab(tabName) { const isDissertation = tabName === 'dissertation'; dissertationTab.classList.toggle('active', isDissertation); tableauTab.classList.toggle('active', !isDissertation); dissertationContentWrapper.style.display = isDissertation ? 'block' : 'none'; tableauContentWrapper.style.display = !isDissertation ? 'block' : 'none'; }
503
 
504
- // --- Button State Management ---
505
- function updateButtonStates() {
506
- const hasImage = imageInput.files.length > 0;
507
- submitBtn.disabled = !hasImage;
508
 
509
- const lastUsedDate = localStorage.getItem(DEEPTHINK_STORAGE_KEY);
510
- const todayDate = new Date().toDateString();
511
- const limitReached = lastUsedDate === todayDate;
512
- deepThinkBtn.disabled = !hasImage || limitReached;
513
- deepThinkBtn.textContent = limitReached ? "DeepThink (Utilisé)" : "DeepThink (1/jour)";
514
- }
515
 
516
  function recordDeepThinkUsage() {
517
  const todayDate = new Date().toDateString();
@@ -611,36 +652,42 @@
611
  .catch(error => { console.error('Erreur Fetch:', error); loadingIndicator.style.display = 'none'; alert("Une erreur de connexion s'est produite. Veuillez vérifier votre connexion internet et réessayer."); downloadPdfBtn.disabled = true; streamComplete = true; });
612
  }
613
 
614
- // --- Form Submission Handlers ---
615
- uploadForm.addEventListener('submit', function(e) {
616
- e.preventDefault(); // Prevent default form submission
617
-
618
- const formData = new FormData();
619
- formData.append('image', imageInput.files[0]);
620
- formData.append('consignes', consignesInput.value);
621
- // NO use_deepthink flag here for standard analysis
622
-
623
- startAnalysis(formData); // Call the reusable analysis function
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
624
  });
625
 
626
- deepThinkBtn.addEventListener('click', function() {
627
- // Since disabled when no image or limit reached, but double check limit
628
- const lastUsedDate = localStorage.getItem(DEEPTHINK_STORAGE_KEY);
629
- const todayDate = new Date().toDateString();
630
- if(lastUsedDate === todayDate) {
631
- alert("DeepThink a déjà été utilisé aujourd'hui.");
632
- updateButtonStates(); // Update button state if needed
633
- return;
634
- }
635
-
636
- const formData = new FormData();
637
- formData.append('image', imageInput.files[0]);
638
- formData.append('consignes', consignesInput.value);
639
- formData.append('use_deepthink', 'true'); // ADD THE FLAG FOR DEEPTHINK
640
-
641
- startAnalysis(formData); // Call the reusable analysis function
642
- });
643
-
644
 
645
  // --- PDF Download Logic (Unchanged logic, should still work) ---
646
  downloadPdfBtn.addEventListener('click', () => {
 
134
  overflow: hidden;
135
  position: relative;
136
  }
137
+ .preview-images-container {
138
+ display: flex;
139
+ flex-wrap: wrap;
140
+ gap: 1rem;
141
+ justify-content: center;
142
+ }
143
+ .preview-image-container {
144
+ flex: 0 0 auto;
145
+ max-width: 200px;
146
+ }
147
  .preview-image {
148
  max-width: 100%;
149
  max-height: 15rem; /* Limite hauteur aperçu */
150
  height: auto;
151
  margin: 0 auto;
152
  display: block;
153
+ border-radius: var(--radius-md);
154
+ box-shadow: var(--shadow-sm);
155
  }
156
  .btn {
157
  display: block;
 
363
  <main>
364
  <div class="card">
365
  <form id="uploadForm">
366
+ <div class="input-group">
367
+ <label for="imageInput" class="label">Vos documents</label>
368
  <div id="uploadArea" class="upload-area">
369
  <svg class="upload-icon" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor"><path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M7 16a4 4 0 01-.88-7.903A5 5 0 1115.9 6L16 6a5 5 0 011 9.9M15 13l-3-3m0 0l-3 3m3-3v12" /></svg>
370
  <p>Touchez pour télécharger</p>
371
+ <p class="upload-instructions">PNG, JPG jusqu'à 10MB chacun (plusieurs fichiers possibles)</p>
372
+ <input id="imageInput" type="file" class="file-input" accept="image/*" multiple>
373
  </div>
374
  </div>
375
  <div class="input-group">
 
377
  <input id="consignesInput" type="text" class="input" placeholder="Entrez vos consignes ici">
378
  </div>
379
  <div id="previewContainer" class="preview-container" style="display: none;">
380
+ <p class="preview-title">Aperçu des documents</p>
381
+ <div id="previewImagesContainer" class="preview-images-container"></div>
382
  </div>
383
  <div class="button-container">
384
  <button type="submit" class="btn btn-primary">Lancer l'analyse standard</button>
 
508
  });
509
 
510
 
511
+ imageInput.addEventListener('change', function() {
512
+ const files = this.files;
513
+ const previewImagesContainer = document.getElementById('previewImagesContainer');
514
+ previewImagesContainer.innerHTML = ''; // Clear previous previews
515
+
516
+ if (files.length > 0) {
517
+ for (let i = 0; i < files.length; i++) {
518
+ const file = files[i];
519
+ const reader = new FileReader();
520
+ reader.onload = function(e) {
521
+ const container = document.createElement('div');
522
+ container.className = 'preview-image-container';
523
+ const img = document.createElement('img');
524
+ img.className = 'preview-image';
525
+ img.src = e.target.result;
526
+ img.alt = `Aperçu ${i + 1}`;
527
+ container.appendChild(img);
528
+ previewImagesContainer.appendChild(container);
529
+ };
530
+ reader.readAsDataURL(file);
531
+ }
532
+ previewContainer.style.display = 'block';
533
+ previewContainer.classList.add('slide-up');
534
+ uploadArea.classList.add('active');
535
+ } else {
536
+ previewContainer.style.display = 'none';
537
+ uploadArea.classList.remove('active');
538
+ }
539
+ updateButtonStates();
540
+ });
541
  dissertationTab.addEventListener('click', () => switchTab('dissertation'));
542
  tableauTab.addEventListener('click', () => switchTab('tableau'));
543
  function switchTab(tabName) { const isDissertation = tabName === 'dissertation'; dissertationTab.classList.toggle('active', isDissertation); tableauTab.classList.toggle('active', !isDissertation); dissertationContentWrapper.style.display = isDissertation ? 'block' : 'none'; tableauContentWrapper.style.display = !isDissertation ? 'block' : 'none'; }
544
 
545
+ // --- Button State Management ---
546
+ function updateButtonStates() {
547
+ const hasImages = imageInput.files.length > 0;
548
+ submitBtn.disabled = !hasImages;
549
 
550
+ const lastUsedDate = localStorage.getItem(DEEPTHINK_STORAGE_KEY);
551
+ const todayDate = new Date().toDateString();
552
+ const limitReached = lastUsedDate === todayDate;
553
+ deepThinkBtn.disabled = !hasImages || limitReached;
554
+ deepThinkBtn.textContent = limitReached ? "DeepThink (Utilisé)" : "DeepThink (1/jour)";
555
+ }
556
 
557
  function recordDeepThinkUsage() {
558
  const todayDate = new Date().toDateString();
 
652
  .catch(error => { console.error('Erreur Fetch:', error); loadingIndicator.style.display = 'none'; alert("Une erreur de connexion s'est produite. Veuillez vérifier votre connexion internet et réessayer."); downloadPdfBtn.disabled = true; streamComplete = true; });
653
  }
654
 
655
+ // --- Form Submission Handlers ---
656
+ uploadForm.addEventListener('submit', function(e) {
657
+ e.preventDefault(); // Prevent default form submission
658
+
659
+ const formData = new FormData();
660
+ const files = imageInput.files;
661
+ for (let i = 0; i < files.length; i++) {
662
+ formData.append('images', files[i]);
663
+ }
664
+ formData.append('consignes', consignesInput.value);
665
+ // NO use_deepthink flag here for standard analysis
666
+
667
+ startAnalysis(formData); // Call the reusable analysis function
668
+ });
669
+
670
+ deepThinkBtn.addEventListener('click', function() {
671
+ // Since disabled when no image or limit reached, but double check limit
672
+ const lastUsedDate = localStorage.getItem(DEEPTHINK_STORAGE_KEY);
673
+ const todayDate = new Date().toDateString();
674
+ if(lastUsedDate === todayDate) {
675
+ alert("DeepThink a déjà été utilisé aujourd'hui.");
676
+ updateButtonStates(); // Update button state if needed
677
+ return;
678
+ }
679
+
680
+ const formData = new FormData();
681
+ const files = imageInput.files;
682
+ for (let i = 0; i < files.length; i++) {
683
+ formData.append('images', files[i]);
684
+ }
685
+ formData.append('consignes', consignesInput.value);
686
+ formData.append('use_deepthink', 'true'); // ADD THE FLAG FOR DEEPTHINK
687
+
688
+ startAnalysis(formData); // Call the reusable analysis function
689
  });
690
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
691
 
692
  // --- PDF Download Logic (Unchanged logic, should still work) ---
693
  downloadPdfBtn.addEventListener('click', () => {