Update app.py
app.py (CHANGED)
@@ -1,13 +1,9 @@
 import streamlit as st
 from google import genai
-from google.genai import types
-from PIL import Image
-import json
 import logging
-import re
-from typing import Optional, Generator, Any, Dict
 import sys
 from pathlib import Path
+from typing import Generator
 
 # Logging configuration
 logging.basicConfig(
@@ -20,64 +16,6 @@ logging.basicConfig(
 )
 logger = logging.getLogger(__name__)
 
-class LatexFormatter:
-    """Improved class for LaTeX formatting"""
-
-    @staticmethod
-    def cleanup_latex_fractions(text: str) -> str:
-        """Cleans up and correctly formats LaTeX fractions"""
-        # Improve fraction rendering
-        text = re.sub(r'\\frac\{([^}]*)\}\{([^}]*)\}', r'$$\\frac{\1}{\2}$$', text)
-        # Handle left/right delimiters
-        text = re.sub(r'\\left\(', r'\\left(', text)
-        text = re.sub(r'\\right\)', r'\\right)', text)
-        return text
-
-    @staticmethod
-    def format_inline_math(text: str) -> str:
-        """Formats inline mathematical expressions"""
-        # Improve spacing around inline expressions
-        text = re.sub(r'(?<!\\)\$([^$]+)(?<!\\)\$', r' $\1$ ', text)
-        # Handle the special case of $$ already used inline
-        text = re.sub(r'(?<!\\)\$\$([^$]+)(?<!\\)\$\$', r' $\1$ ', text)
-        return text
-
-    @staticmethod
-    def format_display_math(text: str) -> str:
-        """Formats display-mode mathematical expressions"""
-        # Add line breaks and spacing
-        text = re.sub(r'\$\$(.*?)\$\$', r'\n\n$$\1$$\n\n', text, flags=re.DOTALL)
-        return text
-
-    @staticmethod
-    def format_special_environments(text: str) -> str:
-        """Formats special math environments"""
-        # Handle align, gather, etc. environments
-        envs = ['align', 'gather', 'equation', 'array', 'matrix']
-        for env in envs:
-            pattern = f'\\\\begin{{{env}}}(.*?)\\\\end{{{env}}}'
-            text = re.sub(pattern, f'\n\n$$\\\\begin{{{env}}}\\1\\\\end{{{env}}}$$\n\n',
-                          text, flags=re.DOTALL)
-        return text
-
-    @staticmethod
-    def enhance_latex_display(text: str) -> str:
-        """Globally improves LaTeX rendering"""
-        # Preprocessing
-        text = text.replace('\\[', '$$').replace('\\]', '$$')
-        text = text.replace('\\(', '$').replace('\\)', '$')
-
-        # Apply the individual improvements
-        text = LatexFormatter.cleanup_latex_fractions(text)
-        text = LatexFormatter.format_special_environments(text)
-        text = LatexFormatter.format_inline_math(text)
-        text = LatexFormatter.format_display_math(text)
-
-        # Ensure proper spacing around equations
-        text = re.sub(r'(\n\s*\$\$[^$]+\$\$)', r'\n\n\1\n\n', text)
-
-        return text
-
 class GeminiClient:
     """Class for managing interactions with the Gemini API"""
     def __init__(self, api_key: str):
@@ -95,8 +33,8 @@ class GeminiClient:
             logger.error(f"Erreur d'initialisation du client Gemini: {e}")
             raise RuntimeError(f"Impossible d'initialiser le client Gemini: {e}")
 
-    def
-        """
+    def get_response(self, question: str, model_name: str) -> Generator:
+        """Gets a response from Gemini"""
        if not self.client:
            raise RuntimeError("Client Gemini non initialisé")
 
@@ -104,164 +42,53 @@ class GeminiClient:
             response = self.client.models.generate_content_stream(
                 model=model_name,
                 config={'thinking_config': {'include_thoughts': True}},
-                contents=[
-                    image,
-                    prompt + " Utilise la notation LaTeX appropriée avec $...$ pour les expressions en ligne et $$...$$ pour les équations importantes. Pour les fractions, utilise \\frac{num}{den}."
-                ]
+                contents=[question]
             )
             return response
         except Exception as e:
-            logger.error(f"Erreur lors de
+            logger.error(f"Erreur lors de la génération de la réponse: {e}")
             raise
 
-
-def setup_latex_display():
-    """Configures LaTeX rendering in Streamlit"""
-    st.markdown("""
-    <script>
-    window.MathJax = {
-      tex: {
-        inlineMath: [['$', '$'], ['\\\\(', '\\\\)']],
-        displayMath: [['$$', '$$'], ['\\\\[', '\\\\]']],
-        processEscapes: true,
-        macros: {
-          R: "{\\\\mathbb{R}}",
-          N: "{\\\\mathbb{N}}",
-          Z: "{\\\\mathbb{Z}}",
-          vecv: ["\\\\begin{pmatrix}#1\\\\\\\\#2\\\\\\\\#3\\\\end{pmatrix}", 3]
-        }
-      }
-    };
-    </script>
-    <style>
-    /* Base LaTeX styles */
-    .katex {
-        font-size: 1.2em !important;
-        padding: 0.2em 0;
-    }
-    .katex-display {
-        margin: 1.5em 0 !important;
-        overflow: auto hidden;
-        background: rgba(248, 249, 250, 0.05);
-        padding: 0.5em;
-        border-radius: 4px;
-    }
-    /* Improved fractions */
-    .katex .frac-line {
-        border-bottom-width: 0.08em;
-    }
-    .katex .mfrac .frac-line {
-        margin: 0.1em 0;
-    }
-    /* Matrix styling */
-    .katex .mord.matrix {
-        margin: 0.2em 0;
-    }
-    /* Improved subscripts and superscripts */
-    .katex .msupsub {
-        font-size: 0.9em;
-    }
-    /* Better symbol legibility */
-    .katex .mathdefault {
-        color: inherit;
-    }
-    /* Mobile adaptation */
-    @media (max-width: 768px) {
-        .katex {
-            font-size: 1.1em !important;
-        }
-        .katex-display {
-            padding: 0.3em;
-            margin: 1em 0 !important;
-            font-size: 0.9em !important;
-        }
-    }
-    /* Response container */
-    .response-container {
-        margin: 1em 0;
-        padding: 1em;
-        border-radius: 8px;
-        background: rgba(255, 255, 255, 0.05);
-    }
-    /* Highlighting for important equations */
-    .important-equation {
-        border-left: 3px solid #4CAF50;
-        padding-left: 1em;
-    }
-    </style>
-    """, unsafe_allow_html=True)
-
 def stream_response(container, response: Generator) -> None:
-    """Handles streaming of the response
-    mode = 'starting'
+    """Handles streaming of the response"""
     thinking_placeholder = None
     answer_placeholder = None
     thinking_text = ""
     answer_text = ""
-
-    setup_latex_display()
+    mode = 'starting'
 
     try:
         for chunk in response:
-
-
-            if not isinstance(chunk, (dict, types.GenerateContentResponse)):
-                logger.warning(f"Format de chunk invalide reçu: {type(chunk)}")
-                continue
+            if hasattr(chunk, 'candidates') and chunk.candidates:
+                content = chunk.candidates[0].content
 
-
-
-            if not candidates or not len(candidates):
-                logger.warning("Pas de candidats dans le chunk")
-                continue
-
-            content = getattr(candidates[0], 'content', None)
-            if not content:
-                logger.warning("Pas de contenu dans le premier candidat")
-                continue
-
-            parts = getattr(content, 'parts', [])
-            for part in parts:
-                has_thought = False
-                try:
+            if hasattr(content, 'parts'):
+                for part in content.parts:
                     has_thought = hasattr(part, 'thought') and part.thought
-
-                        logger.warning(f"Erreur lors de la vérification de thought: {e}")
-
-                text = getattr(part, 'text', '')
-                if not text:
-                    continue
-
-                # LaTeX formatting of the text
-                formatted_text = LatexFormatter.enhance_latex_display(text)
-
-                if has_thought:
-                    if mode != "thinking":
-                        if thinking_placeholder is None:
-                            with container.expander("Voir le raisonnement", expanded=False):
-                                thinking_placeholder = st.empty()
-                        mode = "thinking"
-                    thinking_text += formatted_text
-                    thinking_placeholder.markdown(thinking_text)
-                else:
-                    if mode != "answering":
-                        if answer_placeholder is None:
-                            answer_placeholder = container.empty()
-                            container.subheader("Réponse")
-                        mode = "answering"
-                    answer_text += formatted_text
-                    answer_placeholder.markdown(answer_text)
+                    text = getattr(part, 'text', '')
+
+                    if not text:
+                        continue
+
+                    if has_thought:
+                        if mode != "thinking":
+                            if thinking_placeholder is None:
+                                with container.expander("Voir le raisonnement", expanded=False):
+                                    thinking_placeholder = st.empty()
+                            mode = "thinking"
+                        thinking_text += text
+                        thinking_placeholder.markdown(thinking_text)
+                    else:
+                        if mode != "answering":
+                            if answer_placeholder is None:
+                                answer_placeholder = container.empty()
+                                container.subheader("Réponse")
+                            mode = "answering"
+                        answer_text += text
+                        answer_placeholder.markdown(answer_text)
+
 
-
-
-
-
-
-
-
     except Exception as e:
-        logger.error(f"Erreur
+        logger.error(f"Erreur dans le streaming de la réponse: {e}")
         if not answer_text and not thinking_text:
             container.error("Une erreur est survenue lors de l'analyse. Veuillez réessayer.")
         raise
@@ -269,26 +96,15 @@ def stream_response(container, response: Generator) -> None:
     if not answer_text and not thinking_text:
         container.warning("Aucune réponse n'a pu être générée. Veuillez réessayer.")
 
-def validate_image(uploaded_file) -> Optional[Image.Image]:
-    """Validates and opens an uploaded image"""
-    try:
-        image = Image.open(uploaded_file)
-        return image
-    except Exception as e:
-        logger.error(f"Erreur lors de l'ouverture de l'image: {e}")
-        st.error("L'image n'a pas pu être ouverte. Veuillez vérifier le format.")
-        return None
-
 def main():
     st.set_page_config(
-        page_title="
-        page_icon="
+        page_title="Assistant Gemini",
+        page_icon="💭",
         layout="wide",
         initial_sidebar_state="collapsed"
     )
 
-    st.title("
-    setup_latex_display()
+    st.title("Assistant Gemini")
 
     # Retrieve the API key
     try:
@@ -306,31 +122,25 @@ def main():
         return
 
     # User interface
-
-        "
-
-        help="
+    question = st.text_area(
+        "Posez votre question",
+        height=100,
+        help="Entrez votre question ici"
     )
 
-    if
-
-
-
-
-        model_name = "gemini-2.0-flash-thinking-exp-01-21"
-        prompt = "Résous cet exercice mathématique. La réponse doit être bien présentée et espacée pour faciliter la lecture. Réponds en français et utilise la notation LaTeX pour toutes les expressions mathématiques."
-
+    if question:
+        model_name = "gemini-2.0-flash-thinking-exp-01-21"
+
+        if st.button("Obtenir une réponse", type="primary"):
+            response_container = st.container()
 
-
-
-
-
-
-
-
-            except Exception as e:
-                logger.error(f"Erreur lors de l'analyse: {e}", exc_info=True)
-                st.error("Une erreur est survenue lors de l'analyse. Veuillez réessayer.")
+            with st.spinner("Génération de la réponse en cours..."):
+                try:
+                    response = gemini_client.get_response(question, model_name)
+                    stream_response(response_container, response)
+                except Exception as e:
+                    logger.error(f"Erreur lors de la génération: {e}", exc_info=True)
+                    st.error("Une erreur est survenue. Veuillez réessayer.")
 
 if __name__ == "__main__":
     main()
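
For reference, the streaming pattern this change relies on (generate_content_stream with include_thoughts enabled, then separating thought parts from answer parts) can be exercised outside Streamlit. The sketch below mirrors the SDK calls shown in the diff; the ask helper, the GEMINI_API_KEY environment variable, and the sample question are illustrative assumptions, not part of the commit.

import os

from google import genai


def ask(question: str, model_name: str = "gemini-2.0-flash-thinking-exp-01-21") -> str:
    """Streams a response and separates 'thought' parts from the final answer.

    Minimal sketch; assumes the google-genai SDK and the experimental model
    name used in the diff. GEMINI_API_KEY is an illustrative variable name.
    """
    client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])
    thinking, answer = [], []
    stream = client.models.generate_content_stream(
        model=model_name,
        config={'thinking_config': {'include_thoughts': True}},
        contents=[question],
    )
    for chunk in stream:
        # Each streamed chunk may carry zero or more content parts.
        if not getattr(chunk, 'candidates', None):
            continue
        content = chunk.candidates[0].content
        for part in getattr(content, 'parts', None) or []:
            text = getattr(part, 'text', '')
            if not text:
                continue
            # Parts flagged as 'thought' are the model's reasoning trace.
            (thinking if getattr(part, 'thought', False) else answer).append(text)
    return "".join(answer)


if __name__ == "__main__":
    print(ask("What is 12 * 8?"))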