File size: 5,830 Bytes
d5a6a33
 
 
 
ae34e36
 
392d67d
 
d5a6a33
 
9b159d4
1db4ff7
188c1ca
2ca54a6
331c814
 
 
 
 
 
 
392d67d
 
ae34e36
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
392d67d
ae34e36
 
 
 
 
 
392d67d
ae34e36
 
 
 
b16530f
1db4ff7
 
 
 
 
ee96e70
392d67d
1db4ff7
 
ae34e36
 
436aefa
1db4ff7
 
436aefa
ae34e36
 
 
 
 
586d97d
ae34e36
5ae354b
ae34e36
1db4ff7
 
 
d5a6a33
586d97d
 
 
 
 
 
 
 
 
 
 
392d67d
 
 
 
 
 
 
 
 
 
 
 
 
 
ae34e36
1db4ff7
586d97d
 
 
 
ae34e36
1db4ff7
392d67d
586d97d
ae34e36
 
 
 
 
 
 
 
 
392d67d
586d97d
392d67d
 
 
 
 
 
 
 
 
 
b16530f
586d97d
 
b16530f
ae34e36
586d97d
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
import streamlit as st
import google.generativeai as genai
import os
from dotenv import load_dotenv
import http.client
import json
import asyncio
from typing import AsyncGenerator

# Load GOOGLE_API_KEY (and any other secrets) from a local .env file.
load_dotenv()

# Configure the API key
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

# Disable every Gemini content-safety filter category for this app.
safety_settings = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
]

# Shared model instance: code-execution tool enabled, persona ("Mariam",
# created by "Aenir") injected via the French system instruction.
model = genai.GenerativeModel('gemini-2.0-flash-exp', 
                            tools='code_execution',
                            safety_settings=safety_settings,
                            system_instruction="Tu es un assistant intelligent. ton but est d'assister au mieux que tu peux. tu as été créé par Aenir et tu t'appelles Mariam")

def perform_web_search(query):
    """Query the Serper.dev Google Search API and return the parsed response.

    Args:
        query: Search string sent as the "q" field of the POST payload.

    Returns:
        The decoded JSON response as a dict, or None when the request or
        decoding fails (the error is surfaced to the user via st.error).
    """
    conn = http.client.HTTPSConnection("google.serper.dev")
    payload = json.dumps({"q": query})
    headers = {
        # SECURITY: a secret was hard-coded here. Prefer the SERPER_API_KEY
        # environment variable; the literal is kept only as a backward-
        # compatible fallback and should be rotated and removed.
        'X-API-KEY': os.getenv("SERPER_API_KEY", '9b90a274d9e704ff5b21c0367f9ae1161779b573'),
        'Content-Type': 'application/json'
    }
    try:
        conn.request("POST", "/search", payload, headers)
        res = conn.getresponse()
        data = json.loads(res.read().decode("utf-8"))
        return data
    except Exception as e:
        st.error(f"Erreur lors de la recherche web : {e}")
        return None
    finally:
        # Always release the connection, whether the request succeeded or not.
        conn.close()

def format_search_results(data):
    """Render a Serper.dev search response as a Markdown summary.

    Builds up to three sections: the knowledge-graph card, the top three
    organic results, and the first two "people also ask" entries.

    Args:
        data: Parsed Serper JSON response (dict), or None/empty on failure.

    Returns:
        A Markdown string, or the French "no results" message when *data*
        is falsy.
    """
    if not data:
        return "Aucun résultat trouvé"
    
    result = ""
    
    # Knowledge-graph card (title / type / description), when present.
    if 'knowledgeGraph' in data:
        kg = data['knowledgeGraph']
        result += f"### {kg.get('title', '')}\n"
        result += f"*{kg.get('type', '')}*\n\n"
        result += f"{kg.get('description', '')}\n\n"
    
    # Top three organic hits. Use .get() throughout: Serper entries can omit
    # 'snippet' (and occasionally other fields), which previously raised
    # KeyError and aborted the whole render.
    if 'organic' in data:
        result += "### Résultats principaux:\n"
        for item in data['organic'][:3]:
            result += f"- **{item.get('title', '')}**\n"
            result += f"  {item.get('snippet', '')}\n"
            result += f"  [Lien]({item.get('link', '')})\n\n"
    
    # First two related questions; 'snippet' is frequently absent here.
    if 'peopleAlsoAsk' in data:
        result += "### Questions fréquentes:\n"
        for item in data['peopleAlsoAsk'][:2]:
            result += f"- **{item.get('question', '')}**\n"
            result += f"  {item.get('snippet', '')}\n\n"
    
    return result

def role_to_streamlit(role):
    """Map a Gemini chat role onto the name Streamlit's chat UI expects.

    Gemini labels assistant turns "model"; Streamlit calls them "assistant".
    Every other role (e.g. "user") passes through unchanged.
    """
    return "assistant" if role == "model" else role

# Initialize session state.
# Streamlit reruns this script on every interaction; session_state keeps the
# Gemini chat object and the web-search toggle alive across reruns.
if "chat" not in st.session_state:
    st.session_state.chat = model.start_chat(history=[])
if "web_search" not in st.session_state:
    st.session_state.web_search = False

# Display Form Title
st.title("Mariam AI!")

# Settings section: sidebar toggle controlling whether prompts are augmented
# with Serper web-search results.
with st.sidebar:
    st.title("Paramètres")
    st.session_state.web_search = st.toggle("Activer la recherche web", value=st.session_state.web_search)

# File upload section (media and documents to attach to the next prompt).
uploaded_file = st.file_uploader("Télécharger un fichier (image/document)", type=['jpg', 'mp4', 'mp3', 'jpeg', 'png', 'pdf', 'txt'])

# Display chat messages: replay the full Gemini history so the conversation
# stays visible after each rerun. Only parts[0] of each turn is rendered.
for message in st.session_state.chat.history:
    with st.chat_message(role_to_streamlit(message.role)):
        st.markdown(message.parts[0].text)

def process_uploaded_file(file):
    """Persist a Streamlit upload to ./temp and register it with the Gemini API.

    Args:
        file: A Streamlit UploadedFile, or None when nothing was uploaded.

    Returns:
        The handle returned by genai.upload_file on success, or None when no
        file was given or the upload failed (error shown via st.error).
    """
    if file is None:
        return None
    # Create the scratch directory here: the os.makedirs call at the bottom of
    # the script runs *after* this point on the script's first pass, so relying
    # on it would raise FileNotFoundError on the very first upload.
    os.makedirs("temp", exist_ok=True)
    local_path = os.path.join("temp", file.name)
    with open(local_path, "wb") as f:
        f.write(file.getbuffer())
    try:
        gemini_file = genai.upload_file(local_path)
        return gemini_file
    except Exception as e:
        st.error(f"Erreur lors du téléchargement du fichier : {e}")
        return None

async def stream_response(prompt: str, uploaded_gemini_file=None) -> AsyncGenerator[str, None]:
    """Yield the assistant's reply text chunk by chunk.

    Sends *prompt* — optionally preceded by an uploaded Gemini file handle —
    to the session's chat with stream=True and yields each chunk's non-empty
    text. On any exception the error is shown via st.error and a single
    French apology string is yielded instead of re-raising.

    Args:
        prompt: The user prompt (possibly augmented with web-search results).
        uploaded_gemini_file: Optional handle from genai.upload_file to
            attach ahead of the prompt.

    Yields:
        str: Successive text fragments of the model response.
    """
    try:
        if uploaded_gemini_file:
            response = await st.session_state.chat.send_message_async([uploaded_gemini_file, "\n\n", prompt], stream=True)
        else:
            response = await st.session_state.chat.send_message_async(prompt, stream=True)
        
        async for chunk in response:
            if chunk.text:
                yield chunk.text
    except Exception as e:
        st.error(f"Erreur lors du streaming : {e}")
        yield "Désolé, une erreur s'est produite lors de la génération de la réponse."

# Chat input and processing: runs once per rerun when the user submits a prompt.
if prompt := st.chat_input("Hey?"):
    uploaded_gemini_file = None
    if uploaded_file:
        uploaded_gemini_file = process_uploaded_file(uploaded_file)
    
    # Display user message
    st.chat_message("user").markdown(prompt)
    
    try:
        # Perform web search if enabled and fold the results into the prompt.
        web_results = None
        if st.session_state.web_search:
            with st.spinner("Recherche web en cours..."):
                web_results = perform_web_search(prompt)
                if web_results:
                    formatted_results = format_search_results(web_results)
                    prompt = f"""Question: {prompt}\n\nRésultats de recherche web:\n{formatted_results}\n\nPourrais-tu analyser ces informations et me donner une réponse complète?"""
        
        # Create a placeholder for the streaming response
        with st.chat_message("assistant"):
            response_placeholder = st.empty()
            
            async def _drain_stream() -> str:
                # BUG FIX: asyncio.run() requires a coroutine and raises
                # ValueError when handed an async generator, so the previous
                # `for chunk in asyncio.run(stream_response(...))` could never
                # work. Drain the generator inside a coroutine instead,
                # updating the placeholder (with a cursor glyph) per chunk.
                text = ""
                async for chunk in stream_response(prompt, uploaded_gemini_file):
                    text += chunk
                    response_placeholder.markdown(text + "▌")
                return text
            
            full_response = asyncio.run(_drain_stream())
            
            # Update the placeholder with the complete response
            response_placeholder.markdown(full_response)
    
    except Exception as e:
        st.error(f"Erreur lors de l'envoi du message : {e}")

# Create temp directory used for staging uploads.
# NOTE(review): this executes at the *end* of each script pass, so on the very
# first run it has not happened yet when an upload is processed above — confirm
# the upload path creates the directory itself (or move this to the top).
os.makedirs("temp", exist_ok=True)