Update app.py
app.py CHANGED
@@ -2,8 +2,6 @@ import streamlit as st
 import google.generativeai as genai
 import os
 from dotenv import load_dotenv
-from mimetypes import guess_type as guessmime
-from io import BytesIO
 
 load_dotenv()
 # Configure the API key
@@ -16,36 +14,15 @@ safety_settings = [
     {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
 ]
 
-genai.
-
-
-system_instruction="Tu es un assistant intelligent. ton but est d'assister au mieux que tu peux. tu as été créé par Aenir et tu t'appelles Mariam")
-
-
-
-# Function to get response from the model
-# Gemini uses 'model' for assistant; Streamlit uses 'assistant'
+model = genai.GenerativeModel('gemini-1.5-flash',
+                              safety_settings=safety_settings,
+                              system_instruction="Tu es un assistant intelligent. ton but est d'assister au mieux que tu peux. tu as été créé par Aenir et tu t'appelles Mariam")
 
 def role_to_streamlit(role):
     if role == "model":
         return "assistant"
     else:
         return role
-
-def create_media_part(data, mimetype,filename=None):
-    """Creates a media part for the GenerativeModel.
-
-    Args:
-        data: The image data as bytes.
-        mimetype: The mimetype of the image.
-        filename: optional filename for the image part
-
-    Returns:
-        A Part object representing the image.
-    """
-    # The API expects raw bytes so avoid having it base64 encode the input
-    blob = genai.types.Blob(mimetype, genai.types.bytes_to_data(data, mime_type=mimetype))
-    return genai.types.Part(filename=filename, inline_data=blob)
 
 # Add a Gemini Chat history object to Streamlit session state
 if "chat" not in st.session_state:
@@ -54,41 +31,54 @@ if "chat" not in st.session_state:
 # Display Form Title
 st.title("Mariam AI!")
 
-#
-uploaded_file = st.file_uploader("
+# File upload section
+uploaded_file = st.file_uploader("Télécharger un fichier (image/document)", type=['jpg', 'jpeg', 'png', 'pdf', 'txt'])
 
 # Display chat messages from history above current input box
 for message in st.session_state.chat.history:
     with st.chat_message(role_to_streamlit(message.role)):
         st.markdown(message.parts[0].text)
 
+# Function to handle file upload with Gemini
+def process_uploaded_file(file):
+    if file is not None:
+        # Save the uploaded file temporarily
+        with open(os.path.join("temp", file.name), "wb") as f:
+            f.write(file.getbuffer())
+
+        # Upload the file to Gemini
+        try:
+            gemini_file = genai.upload_file(os.path.join("temp", file.name))
+            return gemini_file
+        except Exception as e:
+            st.error(f"Erreur lors du téléchargement du fichier : {e}")
+            return None
+
 # Accept user's next message, add to context, resubmit context to Gemini
 if prompt := st.chat_input("Hey?"):
+    # Process any uploaded file
+    uploaded_gemini_file = None
+    if uploaded_file:
+        uploaded_gemini_file = process_uploaded_file(uploaded_file)
+
    # Display user's last message
     st.chat_message("user").markdown(prompt)
 
-
-
-if
-
-
-
-
-st.
+    # Send user entry to Gemini with optional file
+    try:
+        if uploaded_gemini_file:
+            # If a file is uploaded, include it in the context
+            response = st.session_state.chat.send_message([uploaded_gemini_file, "\n\n", prompt])
+        else:
+            # Normal text-only conversation
+            response = st.session_state.chat.send_message(prompt)
+
+        # Display last response
+        with st.chat_message("assistant"):
+            st.markdown(response.text)
 
-
-
-        media_part = create_media_part(file_bytes, mime_type,filename=uploaded_file.name)
-
-        # Combine prompt and uploaded file for Gemini
-        parts = [media_part, "\n\n", prompt]
-        response = st.session_state.chat.send_message(parts)
-
-    else:
-
-        # Send user entry to Gemini and read the response
-        response = st.session_state.chat.send_message(prompt)
+    except Exception as e:
+        st.error(f"Erreur lors de l'envoi du message : {e}")
 
-
-
-    st.markdown(response.text)
+# Create temp directory if it doesn't exist
+os.makedirs("temp", exist_ok=True)
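The hunks above show "# Configure the API key" and `if "chat" not in st.session_state:` only as context lines, so the key setup and the chat-session creation themselves are not visible in this diff. Below is a rough sketch of how those collapsed parts typically look with google-generativeai; the GOOGLE_API_KEY variable name, the abbreviated model call, and the empty starting history are assumptions, not lines taken from app.py.

# Hypothetical sketch of the sections collapsed in the diff above -- not the
# actual lines from app.py.
import os
import streamlit as st
import google.generativeai as genai
from dotenv import load_dotenv

load_dotenv()
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))  # key name is an assumption

# Abbreviated model setup; the full call (safety_settings, system_instruction)
# is shown in the diff above.
model = genai.GenerativeModel('gemini-1.5-flash')

# Keep one ChatSession in session state so its history survives Streamlit reruns.
if "chat" not in st.session_state:
    st.session_state.chat = model.start_chat(history=[])

Storing the ChatSession in st.session_state is what lets the loop over st.session_state.chat.history redraw the whole conversation on each rerun, with role_to_streamlit mapping Gemini's "model" role onto Streamlit's "assistant" display role.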