import streamlit as st
import google.generativeai as genai
import os
from dotenv import load_dotenv
from PIL import Image
import tempfile
import time
import ssl
# Load environment variables
load_dotenv()
# Configure the API key
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
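# The key is read from the GOOGLE_API_KEY environment variable, typically via a
# .env file next to this script, e.g. (placeholder value):
#   GOOGLE_API_KEY=your-api-key-here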
# Safety settings
safety_settings = [
{"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
{"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
{"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
{"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
]
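# Map Gemini's "model" role to Streamlit's "assistant" role so chat history
# renders with the right avatar; other roles are passed through unchanged.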
def role_to_streamlit(role):
return "assistant" if role == "model" else role
def upload_and_process_file(file_path):
max_retries = 3
retry_delay = 2
for attempt in range(max_retries):
try:
if not os.path.exists(file_path):
raise FileNotFoundError(f"Le fichier {file_path} n'existe pas")
file_size = os.path.getsize(file_path)
if file_size == 0:
raise ValueError(f"Le fichier {file_path} est vide")
uploaded_file = genai.upload_file(path=file_path)
timeout = 300
start_time = time.time()
while uploaded_file.state.name == "PROCESSING":
if time.time() - start_time > timeout:
raise TimeoutError("Timeout pendant le traitement du fichier")
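                # Poll every 10 seconds until processing completes or fails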
time.sleep(10)
uploaded_file = genai.get_file(uploaded_file.name)
if uploaded_file.state.name == "FAILED":
raise ValueError(f"Échec du traitement: {uploaded_file.state.name}")
return uploaded_file
except Exception as e:
if attempt < max_retries - 1:
time.sleep(retry_delay * (attempt + 1))
else:
raise
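# Check the file extension against the whitelist accepted by the uploader below.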
def allowed_file(filename):
ALLOWED_EXTENSIONS = {'txt','mp4','mp3','pdf', 'png', 'jpg', 'jpeg', 'gif'}
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
# Initialize the model
model = genai.GenerativeModel('gemini-1.5-flash',
safety_settings=safety_settings,
system_instruction="Tu es un assistant intelligent. ton but est d'assister au mieux que tu peux. tu as été créé par Aenir et tu t'appelles Mariam")
# Streamlit page configuration
st.set_page_config(page_title="Mariam - Assistant IA", page_icon="🤖")
st.title("Mariam AI - Chat Intelligent")
# Custom CSS
st.markdown("""
<style>
.stFileUploader {
margin-bottom: 10px;
}
.upload-container {
display: flex;
align-items: center;
gap: 10px;
}
.fixed-input {
position: fixed;
bottom: 0;
left: 0;
right: 0;
z-index: 1000;
background-color: white;
padding: 10px;
box-shadow: 0 -2px 5px rgba(0,0,0,0.1);
}
.main-content {
        margin-bottom: 80px; /* Leave space for the fixed input bar */
}
</style>
""", unsafe_allow_html=True)
# Initialize the chat history
if "chat" not in st.session_state:
st.session_state.chat = model.start_chat(history=[])
# Main container with bottom margin
main_container = st.container()
main_container.markdown('<div class="main-content">', unsafe_allow_html=True)
# Upload area at the top
upload_container = st.container()
with upload_container:
uploaded_files = st.file_uploader("📁",
type=["txt","mp4","mp3","pdf", "jpg", "jpeg", "png", "gif"],
accept_multiple_files=True)
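# Files selected here are only uploaded and attached once a prompt is submitted below.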
# Display the message history
for message in st.session_state.chat.history:
with main_container.chat_message(role_to_streamlit(message.role)):
st.markdown(message.parts[0].text)
if len(message.parts) > 1:
for part in message.parts[1:]:
if hasattr(part, 'image'):
st.image(part.image)
# Close the margin div
main_container.markdown('</div>', unsafe_allow_html=True)
# Input pinned to the bottom
st.markdown('<div class="fixed-input">', unsafe_allow_html=True)
prompt = st.chat_input("Que puis-je faire pour vous ?")
st.markdown('</div>', unsafe_allow_html=True)
if prompt:
content = [prompt]
temp_files = []
try:
        # Process attached files
if uploaded_files:
for file in uploaded_files:
if allowed_file(file.name):
                    # If it's an image
if file.type.startswith('image/'):
image = Image.open(file)
content.append(image)
st.chat_message("user").image(image)
else:
                        # For other file types
with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(file.name)[1]) as temp_file:
temp_file.write(file.getvalue())
temp_files.append(temp_file.name)
uploaded_file = upload_and_process_file(temp_file.name)
content.append(uploaded_file)
        # Display the user message
st.chat_message("user").markdown(prompt)
        # Send the message and display the response
response = st.session_state.chat.send_message(content)
with st.chat_message("assistant"):
st.markdown(response.text)
except Exception as e:
st.error(f"Une erreur est survenue : {str(e)}")
finally:
        # Clean up temporary files
for temp_file in temp_files:
try:
os.unlink(temp_file)
except Exception as e:
print(f"Erreur lors de la suppression du fichier temporaire {temp_file}: {e}") |