File size: 3,496 Bytes
d5a6a33
 
 
 
b16530f
 
d5a6a33
 
1db4ff7
188c1ca
2ca54a6
331c814
 
 
 
 
 
 
b16530f
 
 
 
 
 
ee96e70
1db4ff7
 
b16530f
1db4ff7
 
 
 
 
b16530f
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ee96e70
1db4ff7
 
 
436aefa
1db4ff7
 
436aefa
5ae354b
 
 
1db4ff7
 
 
 
d5a6a33
1db4ff7
 
 
 
2ca54a6
b16530f
 
 
 
 
 
 
 
 
 
 
 
929689d
b16530f
 
 
 
 
 
57456fd
b16530f
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
import streamlit as st
import google.generativeai as genai
import os
from dotenv import load_dotenv
from mimetypes import guess_type as guessmime
from io import BytesIO

load_dotenv()
# Configure the Gemini SDK with the API key from the environment (.env or shell).
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

# Disable every built-in content filter for this assistant.
safety_settings = [
    {"category": "HARM_CATEGORY_HARASSMENT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_HATE_SPEECH", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_SEXUALLY_EXPLICIT", "threshold": "BLOCK_NONE"},
    {"category": "HARM_CATEGORY_DANGEROUS_CONTENT", "threshold": "BLOCK_NONE"},
]

# NOTE(review): a second, redundant genai.configure() call was removed here —
# the SDK only needs to be configured once.
model = genai.GenerativeModel(
    'gemini-1.5-flash',
    safety_settings=safety_settings,
    system_instruction="Tu es un assistant intelligent. ton but est d'assister au mieux que tu peux. tu as été créé par Aenir et tu t'appelles Mariam",
)



# Function to get response from the model
# Gemini uses 'model' for assistant; Streamlit uses 'assistant'

def role_to_streamlit(role):
    """Map a Gemini chat role onto the label Streamlit's chat UI expects.

    Gemini names the assistant side of the conversation "model", while
    Streamlit's chat widgets use "assistant". Any other role (e.g. "user")
    is passed through unchanged.
    """
    return "assistant" if role == "model" else role
    
def create_media_part(data, mimetype, filename=None):
    """Build an inline-media part accepted by ``GenerativeModel.send_message``.

    The google-generativeai SDK accepts a plain mapping of the form
    ``{"mime_type": ..., "data": ...}`` for inline media and performs any
    base64 encoding internally, so raw bytes are passed straight through.
    (The previous implementation called ``genai.types.Blob``/``bytes_to_data``
    with signatures that do not exist in the public SDK.)

    Args:
        data: Raw media bytes (image, audio, or video).
        mimetype: MIME type of the media, e.g. ``"image/png"``.
        filename: Kept for backward compatibility with existing callers;
            the inline-data form has no filename field, so it is ignored.

    Returns:
        dict: An inline-data media part understood by the SDK.
    """
    return {"mime_type": mimetype, "data": data}

# Keep a single Gemini chat session alive across Streamlit reruns; only
# create it on the very first run.
if "chat" not in st.session_state:
    st.session_state["chat"] = model.start_chat(history=[])

# Page heading.
st.title("Mariam AI!")

# The uploader sits outside the chat-input branch so the widget (and its
# selected file) persists between reruns.
uploaded_file = st.file_uploader(
    "Choose a file",
    type=["png", "jpg", "jpeg", "mp3", "wav", "mp4", "avi"],
)

# Replay the stored conversation above the input box. A history entry may
# contain non-text parts (this app sends inline media parts itself), and
# those have no usable ``.text`` — the old ``message.parts[0].text`` access
# crashed on them. Render only the text-bearing parts of each message.
for message in st.session_state.chat.history:
    with st.chat_message(role_to_streamlit(message.role)):
        text = "".join(
            part.text for part in message.parts if getattr(part, "text", None)
        )
        if text:
            st.markdown(text)

# Accept the user's next message, attach any uploaded media, send the turn
# to Gemini, and render the reply.
if prompt := st.chat_input("Hey?"):
    # Echo the user's message immediately.
    st.chat_message("user").markdown(prompt)

    if uploaded_file is not None:
        # Preview the attachment with the widget matching its media class.
        if uploaded_file.type.startswith('image'):
            st.image(uploaded_file, caption="Uploaded Image.", use_column_width=True)
        elif uploaded_file.type.startswith('audio'):
            st.audio(uploaded_file, format=uploaded_file.type)
        elif uploaded_file.type.startswith('video'):
            st.video(uploaded_file, format=uploaded_file.type)

        file_bytes = uploaded_file.getvalue()
        # guess_type returns (None, ...) for unknown extensions, which would
        # produce an invalid media part — fall back to the MIME type
        # Streamlit detected at upload time.
        mime_type = guessmime(uploaded_file.name)[0] or uploaded_file.type
        media_part = create_media_part(file_bytes, mime_type, filename=uploaded_file.name)

        # Send the media part together with the text prompt as one turn.
        parts = [media_part, "\n\n", prompt]
        response = st.session_state.chat.send_message(parts)

    else:

        # Text-only turn: send the prompt as-is.
        response = st.session_state.chat.send_message(prompt)

    # Render the model's reply.
    with st.chat_message("assistant"):
        st.markdown(response.text)