|
import streamlit as st |
|
from google import genai |
|
from google.genai import types |
|
from PIL import Image |
|
import io |
|
import base64 |
|
import time |
|
import json |
|
|
|
|
|
# NOTE(review): hard-coded API key is for demo only — move to st.secrets or an
# environment variable before deploying.
GOOGLE_API_KEY = "YOUR_API_KEY"

# The `google-genai` SDK (imported as `from google import genai`) is built
# around a Client object. The previous `genai.configure(...)` /
# `genai.GenerativeModel(...)` calls belong to the legacy
# `google-generativeai` package and do not exist in this SDK.
client = genai.Client(api_key=GOOGLE_API_KEY)
|
|
|
|
|
def solve_math_problem(image_data):
    """Stream the model's solution to the math problem shown in an image.

    Parameters
    ----------
    image_data : bytes
        Raw bytes of the uploaded image (JPEG or PNG).

    Yields
    ------
    str
        Successive text fragments of the model's streamed response.
    """
    # Re-encode as PNG so a single, known MIME type is sent regardless of
    # the format the user uploaded.
    img = Image.open(io.BytesIO(image_data))
    buffered = io.BytesIO()
    img.save(buffered, format="PNG")

    # The google-genai SDK accepts raw bytes via types.Part; the previous
    # base64-string dict form was the legacy REST payload shape and is not
    # accepted by this SDK.
    contents = [
        types.Part.from_bytes(data=buffered.getvalue(), mime_type="image/png"),
        "Résous ce problème?",
    ]

    # Ask the thinking model to surface its reasoning alongside the answer.
    config = types.GenerateContentConfig(
        thinking_config=types.ThinkingConfig(include_thoughts=True)
    )

    # Streaming in the new SDK goes through client.models.generate_content_stream;
    # there is no `stream=True` flag / `generation_config=` kwarg here.
    response_stream = client.models.generate_content_stream(
        model="gemini-2.0-flash-thinking-exp-01-21",
        contents=contents,
        config=config,
    )

    # Forward every non-empty text fragment as it arrives.
    for chunk in response_stream:
        if not chunk.candidates:
            continue
        for part in chunk.candidates[0].content.parts:
            if part.text:
                yield part.text
|
|
|
|
|
# Page chrome: tab title, favicon emoji, and a centered single-column layout.
st.set_page_config(page_title="Mariam M-0", page_icon="🧮", layout="centered")

# App header shown above the uploader.
st.title("Mariam M-0")

st.subheader("Solution Mathématique Intelligente")

# Accepts a single image of a math problem; `uploaded_file` is None until
# the user drops a file.
uploaded_file = st.file_uploader("Déposez votre image ici", type=["jpg", "jpeg", "png"])
|
|
|
if uploaded_file is not None:
    image_data = uploaded_file.getvalue()
    # `use_column_width` is deprecated (and removed in recent Streamlit);
    # `use_container_width` is the supported replacement.
    st.image(image_data, caption="Image téléchargée.", use_container_width=True)

    if st.button("Résoudre le problème"):
        with st.spinner("Analyse en cours..."):
            # Two placeholders so reasoning and answer update independently
            # while the response streams in.
            thoughts_container = st.empty()
            answer_container = st.empty()
            full_thoughts = ""
            full_answer = ""

            for response_text in solve_math_problem(image_data):
                try:
                    response_json = json.loads(response_text)
                except json.JSONDecodeError:
                    # The model normally streams plain text, not JSON.
                    # Previously this text was silently dropped (only printed
                    # to the server console), leaving the user with no output;
                    # render it as answer content instead.
                    full_answer += response_text
                    answer_container.markdown(f"**Solution:**\n\n{full_answer}")
                    continue

                # Structured chunks: route reasoning and answer to their
                # respective placeholders.
                if 'thoughts' in response_json:
                    full_thoughts += response_json['thoughts'] + " \n"
                    thoughts_container.markdown(f"**Processus de Réflexion:**\n\n{full_thoughts}")
                elif 'answer' in response_json:
                    full_answer += response_json['answer'] + " \n"
                    answer_container.markdown(f"**Solution:**\n\n{full_answer}")

        st.success("Problème résolu!")