Update app.py

app.py CHANGED
@@ -2,18 +2,12 @@ import streamlit as st
 import google.generativeai as genai
 import os
 from dotenv import load_dotenv
-from PIL import Image
-import tempfile
-import time
-import ssl
 
-# Charger les variables d'environnement
 load_dotenv()
-
-# Configurer la clé API
+# Configure the API key
 genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
 
-
+
 safety_settings = [
     {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
     {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
@@ -21,192 +15,45 @@ safety_settings = [
     {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
 ]
 
-def role_to_streamlit(role):
-    return "assistant" if role == "model" else role
 
-
-max_retries = 3
-retry_delay = 2
+genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
 
-
-
-if not os.path.exists(file_path):
-    raise FileNotFoundError(f"Le fichier {file_path} n'existe pas")
+model = genai.GenerativeModel('gemini-1.5-flash',safety_settings=safety_settings,
+                              system_instruction="Tu es un assistant intelligent. ton but est d'assister au mieux que tu peux. tu as été créé par Aenir et tu t'appelles Mariam")
 
-file_size = os.path.getsize(file_path)
-if file_size == 0:
-    raise ValueError(f"Le fichier {file_path} est vide")
 
-uploaded_file = genai.upload_file(path=file_path)
 
-
-
+# Function to get response from the model
+# Gemini uses 'model' for assistant; Streamlit uses 'assistant'
 
-while uploaded_file.state.name == "PROCESSING":
-    if time.time() - start_time > timeout:
-        raise TimeoutError("Timeout pendant le traitement du fichier")
-    time.sleep(10)
-    uploaded_file = genai.get_file(uploaded_file.name)
 
-
-
+def role_to_streamlit(role):
+    if role == "model":
+        return "assistant"
+    else:
+        return role
 
-return uploaded_file
 
-
-
-
-else:
-    raise
+# Add a Gemini Chat history object to Streamlit session state
+if "chat" not in st.session_state:
+    st.session_state.chat = model.start_chat(history=[])
 
-
-
-return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
+# Display Form Title
+st.title("Mariam AI!")
 
-#
-
-
-
+# Display chat messages from history above current input box
+for message in st.session_state.chat.history:
+    with st.chat_message(role_to_streamlit(message.role)):
+        st.markdown(message.parts[0].text)
 
-#
-st.
-
+# Accept user's next message, add to context, resubmit context to Gemini
+if prompt := st.chat_input("Hey?"):
+    # Display user's last message
+    st.chat_message("user").markdown(prompt)
 
-#
-
-st.session_state.chat = model.start_chat(history=[])
+    # Send user entry to Gemini and read the response
+    response = st.session_state.chat.send_message(prompt)
 
-#
-st.markdown("""
-<style>
-/* Conteneur principal pour fixer les éléments en haut et en bas */
-.main-container {
-    display: flex;
-    flex-direction: column;
-    height: 95vh; /* Ajustez selon vos besoins */
-}
-
-/* Conteneur pour l'historique du chat */
-.chat-history {
-    flex-grow: 1;
-    overflow-y: auto;
-    display: flex;
-    flex-direction: column-reverse;
-}
-
-/* Conteneur pour la zone de saisie et l'upload */
-.input-area {
-    display: flex;
-    align-items: center;
-    gap: 10px;
-    margin-top: 10px; /* Espace entre l'historique et la zone de saisie */
-}
-
-/* Style pour la zone de saisie */
-.chat-input {
-    flex-grow: 1; /* Permet à la zone de saisie de prendre l'espace disponible */
-}
-
-/* Style pour l'icône */
-.upload-icon {
-    font-size: 1.5em;
-}
-</style>
-""", unsafe_allow_html=True)
-
-# Conteneur principal
-main_container = st.container()
-
-with main_container:
-    # Conteneur pour l'historique du chat (maintenant inversé avec column-reverse)
-    chat_history_container = st.container()
-    with chat_history_container:
-        # Afficher l'historique des messages
-        for message in st.session_state.chat.history:
-            with st.chat_message(role_to_streamlit(message.role)):
-                st.markdown(message.parts[0].text)
-                if len(message.parts) > 1:
-                    for part in message.parts[1:]:
-                        if hasattr(part, 'image'):
-                            st.image(part.image)
-    # Conteneur pour la zone de saisie et l'upload (en bas)
-    input_area = st.container()
-    with input_area:
-        # Colonnes pour l'icône et l'uploader
-        ucol1, ucol2 = st.columns([1, 5])
-
-        with ucol1:
-            # Icône d'upload de fichiers
-            st.markdown("<span class='upload-icon'>📁</span>", unsafe_allow_html=True)
-
-        with ucol2:
-            # Upload de fichiers (tous types)
-            uploaded_file = st.file_uploader("", type=["txt", "mp4", "mp3", "pdf", "png", "jpg", "jpeg", "gif"],
-                                             accept_multiple_files=True, key="files",
-                                             label_visibility="collapsed")
-
-        # Zone de saisie (placée après l'uploader pour qu'elle soit en bas)
-        prompt = st.chat_input("Que puis-je faire pour vous ?", key="chat_input")
-
-# Appliquer les styles aux éléments (simplifié)
-st.markdown(f"""
-<style>
-div[data-testid='stChatInput'] {{
-    flex-grow: 1;
-}}
-div[data-testid='stFileUploader'] {{
-    display: inline-flex;
-    padding-left: 0;
-    padding-right: 0;
-    padding-bottom: 0;
-    padding-top: 0;
-    margin-left: 0;
-    margin-right: 0;
-}}
-div[data-testid='stFileUploader'] > div:nth-child(2) {{
-    display: none;
-}}
-</style>
-""", unsafe_allow_html=True)
-
-if prompt:
-    content = [prompt]
-    temp_files = []
-
-    try:
-        # Traitement des fichiers uploadés
-        if uploaded_file:
-            for file in uploaded_file:
-                if allowed_file(file.name):
-                    file_extension = os.path.splitext(file.name)[1].lower()
-                    if file_extension in ['.jpg', '.jpeg', '.png', '.gif']:
-                        # Si c'est une image, la traiter comme une image
-                        image = Image.open(file)
-                        content.append(image)
-                        st.chat_message("user").image(image)
-                    else:
-                        # Sinon, la traiter comme un fichier
-                        with tempfile.NamedTemporaryFile(delete=False, suffix=file_extension) as temp_file:
-                            temp_file.write(file.getvalue())
-                            temp_files.append(temp_file.name)
-                        uploaded_file = upload_and_process_file(temp_file.name)
-                        content.append(uploaded_file)
-
-        # Afficher le message utilisateur
-        st.chat_message("user").markdown(prompt)
-
-        # Envoyer le message et afficher la réponse
-        response = st.session_state.chat.send_message(content)
-        with st.chat_message("assistant"):
-            st.markdown(response.text)
-
-    except Exception as e:
-        st.error(f"Une erreur est survenue : {str(e)}")
-
-    finally:
-        # Nettoyage des fichiers temporaires
-        for temp_file in temp_files:
-            try:
-                os.unlink(temp_file)
-            except Exception as e:
-                print(f"Erreur lors de la suppression du fichier temporaire {temp_file}: {e}")
+    # Display last
+    with st.chat_message("assistant"):
+        st.markdown(response.text)
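
The removed upload helper survives in the diff above only as fragments: its def line, try block, and start_time assignment were lost. Below is a minimal sketch of the pattern those fragments trace, assuming the name upload_and_process_file from its call site and treating timeout, max_retries, and retry_delay as parameters suggested by the stray constants; this is a reconstruction, not the author's exact code.

import os
import time

import google.generativeai as genai


def upload_and_process_file(file_path, timeout=300, max_retries=3, retry_delay=2):
    """Upload a file to the Gemini File API and wait for processing to finish.

    Reconstructed sketch; the original signature was not recoverable.
    """
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"Le fichier {file_path} n'existe pas")

    file_size = os.path.getsize(file_path)
    if file_size == 0:
        raise ValueError(f"Le fichier {file_path} est vide")

    for attempt in range(max_retries):
        try:
            uploaded_file = genai.upload_file(path=file_path)

            # The File API processes uploads asynchronously; poll until the
            # file leaves the PROCESSING state or the timeout elapses.
            start_time = time.time()
            while uploaded_file.state.name == "PROCESSING":
                if time.time() - start_time > timeout:
                    raise TimeoutError("Timeout pendant le traitement du fichier")
                time.sleep(10)
                uploaded_file = genai.get_file(uploaded_file.name)

            return uploaded_file
        except Exception:
            if attempt < max_retries - 1:
                time.sleep(retry_delay)  # assumed use of the stray retry constant
            else:
                raise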
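The removed handler also gates uploads through allowed_file(...), whose return expression survives but whose ALLOWED_EXTENSIONS definition does not. A sketch, assuming the whitelist simply mirrors the type= list passed to st.file_uploader:

# Assumption: ALLOWED_EXTENSIONS mirrors the uploader's type= list; its real
# definition is not visible in the diff.
ALLOWED_EXTENSIONS = {"txt", "mp4", "mp3", "pdf", "png", "jpg", "jpeg", "gif"}


def allowed_file(filename):
    # Accept names that have an extension and whose lowercased extension is
    # in the whitelist (this return line is verbatim from the removed code).
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS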
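For non-image uploads, the removed flow wrote each file to a named temporary copy before handing the path to the upload helper, then unlinked the copies in a finally block. A condensed, self-contained sketch of that pattern (the suffix and payload here are placeholders):

import os
import tempfile

temp_files = []
try:
    # delete=False keeps the file on disk after the handle closes, so its path
    # can be re-opened by genai.upload_file; cleanup then has to be manual.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file:
        temp_file.write(b"placeholder payload")
        temp_files.append(temp_file.name)
    # The removed code called upload_and_process_file(temp_file.name) here.
finally:
    for path in temp_files:
        try:
            os.unlink(path)
        except OSError as e:
            print(f"Could not remove temporary file {path}: {e}")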
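One design choice carries over unchanged: the chat session is created behind an if "chat" not in st.session_state: guard. Streamlit re-executes the whole script on every interaction, so an unguarded model.start_chat(history=[]) would discard the conversation on each rerun; caching the session in st.session_state is what lets the history loop in the new code replay earlier turns.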