import streamlit as st
import google.generativeai as genai
import os
from dotenv import load_dotenv
import http.client
import json
import asyncio
from typing import AsyncGenerator

load_dotenv()

# Configure the API key
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

safety_settings = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
]

model = genai.GenerativeModel(
    'gemini-2.0-flash-exp',
    tools='code_execution',
    safety_settings=safety_settings,
    system_instruction="Tu es un assistant intelligent. ton but est d'assister au mieux que tu peux. tu as été créé par Aenir et tu t'appelles Mariam"
)


def perform_web_search(query):
    """Query the Serper API and return the raw JSON response, or None on error."""
    conn = http.client.HTTPSConnection("google.serper.dev")
    payload = json.dumps({"q": query})
    headers = {
        'X-API-KEY': '9b90a274d9e704ff5b21c0367f9ae1161779b573',
        'Content-Type': 'application/json'
    }
    try:
        conn.request("POST", "/search", payload, headers)
        res = conn.getresponse()
        data = json.loads(res.read().decode("utf-8"))
        return data
    except Exception as e:
        st.error(f"Erreur lors de la recherche web : {e}")
        return None
    finally:
        conn.close()


def format_search_results(data):
    """Format Serper results (knowledge graph, organic hits, related questions) as Markdown."""
    if not data:
        return "Aucun résultat trouvé"

    result = ""

    if 'knowledgeGraph' in data:
        kg = data['knowledgeGraph']
        result += f"### {kg.get('title', '')}\n"
        result += f"*{kg.get('type', '')}*\n\n"
        result += f"{kg.get('description', '')}\n\n"

    if 'organic' in data:
        result += "### Résultats principaux:\n"
        for item in data['organic'][:3]:
            result += f"- **{item['title']}**\n"
            result += f"  {item['snippet']}\n"
            result += f"  [Lien]({item['link']})\n\n"

    if 'peopleAlsoAsk' in data:
        result += "### Questions fréquentes:\n"
        for item in data['peopleAlsoAsk'][:2]:
            result += f"- **{item['question']}**\n"
            result += f"  {item['snippet']}\n\n"

    return result


def role_to_streamlit(role):
    """Map Gemini's 'model' role onto Streamlit's 'assistant' role."""
    if role == "model":
        return "assistant"
    return role


# Create the temp directory before any file upload can write into it
os.makedirs("temp", exist_ok=True)

# Initialize session state
if "chat" not in st.session_state:
    st.session_state.chat = model.start_chat(history=[])
if "web_search" not in st.session_state:
    st.session_state.web_search = False

# Display form title
st.title("Mariam AI!")

# Settings section
with st.sidebar:
    st.title("Paramètres")
    st.session_state.web_search = st.toggle("Activer la recherche web", value=st.session_state.web_search)

# File upload section
uploaded_file = st.file_uploader(
    "Télécharger un fichier (image/document)",
    type=['jpg', 'mp4', 'mp3', 'jpeg', 'png', 'pdf', 'txt']
)

# Display chat messages
for message in st.session_state.chat.history:
    with st.chat_message(role_to_streamlit(message.role)):
        st.markdown(message.parts[0].text)


def process_uploaded_file(file):
    """Save the uploaded file to the temp directory, then push it to the Gemini File API."""
    if file is not None:
        file_path = os.path.join("temp", file.name)
        with open(file_path, "wb") as f:
            f.write(file.getbuffer())
        try:
            gemini_file = genai.upload_file(file_path)
            return gemini_file
        except Exception as e:
            st.error(f"Erreur lors du téléchargement du fichier : {e}")
    return None


async def stream_response(prompt: str, uploaded_gemini_file=None) -> AsyncGenerator[str, None]:
    """Send the prompt (and optional file) to the chat session and yield text chunks as they arrive."""
    try:
        if uploaded_gemini_file:
            response = await st.session_state.chat.send_message_async(
                [uploaded_gemini_file, "\n\n", prompt], stream=True
            )
        else:
            response = await st.session_state.chat.send_message_async(prompt, stream=True)
        async for chunk in response:
            if chunk.text:
                yield chunk.text
    except Exception as e:
        st.error(f"Erreur lors du streaming : {e}")
        yield "Désolé, une erreur s'est produite lors de la génération de la réponse."
{e}") yield "Désolé, une erreur s'est produite lors de la génération de la réponse." # Chat input and processing if prompt := st.chat_input("Hey?"): uploaded_gemini_file = None if uploaded_file: uploaded_gemini_file = process_uploaded_file(uploaded_file) # Display user message st.chat_message("user").markdown(prompt) try: # Perform web search if enabled web_results = None if st.session_state.web_search: with st.spinner("Recherche web en cours..."): web_results = perform_web_search(prompt) if web_results: formatted_results = format_search_results(web_results) prompt = f"""Question: {prompt}\n\nRésultats de recherche web:\n{formatted_results}\n\nPourrais-tu analyser ces informations et me donner une réponse complète?""" # Create a placeholder for the streaming response with st.chat_message("assistant"): response_placeholder = st.empty() full_response = "" # Stream the response for response_chunk in asyncio.run(stream_response(prompt, uploaded_gemini_file)): full_response += response_chunk response_placeholder.markdown(full_response + "▌") # Update the placeholder with the complete response response_placeholder.markdown(full_response) except Exception as e: st.error(f"Erreur lors de l'envoi du message : {e}") # Create temp directory os.makedirs("temp", exist_ok=True)