File size: 2,244 Bytes
dd6e891
a1445fb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
dd6e891
a1445fb
 
 
 
 
 
 
 
dd6e891
a1445fb
 
dd6e891
a1445fb
 
 
 
 
dd6e891
a1445fb
 
dd6e891
a1445fb
 
dd6e891
a1445fb
 
 
 
 
 
dd6e891
a1445fb
 
dd6e891
a1445fb
 
 
dd6e891
a1445fb
 
 
 
dd6e891
a1445fb
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
import os
import streamlit as st
import google.generativeai as genai
from PIL import Image

# Set up the Streamlit app.
st.set_page_config(page_title="Multimodal Chatbot with Gemini Flash", layout="wide")
st.title("Multimodal Chatbot with Gemini Flash ⚡️")
st.caption("Chat with Google's Gemini Flash model using image and text input to get lightning fast results. 🌟")

# SECURITY FIX: never hard-code an API key in source (the original committed a
# literal Google key here — that key must be revoked). Read it from the
# environment, with a password-style sidebar field as a fallback for local use.
api_key = os.getenv("GOOGLE_API_KEY", "")
if not api_key:
    api_key = st.sidebar.text_input("Google AI API key", type="password")

if api_key:
    # Configure the Gemini SDK only once a key is actually available
    # (the original configured unconditionally, making the guard dead code).
    genai.configure(api_key=api_key)
    model = genai.GenerativeModel(model_name="gemini-1.5-flash-latest")

    # Initialize the chat history (survives Streamlit reruns).
    if "messages" not in st.session_state:
        st.session_state.messages = []

    # Sidebar for image upload.
    with st.sidebar:
        st.title("Chat with Images")
        uploaded_file = st.file_uploader("Upload an image...", type=["jpg", "jpeg", "png"])

    # Bind `image` explicitly so the later reference is always defined.
    image = None
    if uploaded_file:
        image = Image.open(uploaded_file)
        # `use_column_width` is deprecated; `use_container_width` is the
        # supported equivalent.
        st.image(image, caption='Uploaded Image', use_container_width=True)

    # Main layout: one container holds the running conversation.
    chat_placeholder = st.container()

    with chat_placeholder:
        # Replay the stored chat history on every rerun.
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])

    # User input area at the bottom.
    prompt = st.chat_input("What do you want to know?")

    if prompt:
        inputs = [prompt]

        # Record and display the user's turn.
        st.session_state.messages.append({"role": "user", "content": prompt})
        with chat_placeholder:
            with st.chat_message("user"):
                st.markdown(prompt)

        # Include the uploaded image (if any) in the multimodal request.
        if image is not None:
            inputs.append(image)

        with st.spinner('Generating response...'):
            response = model.generate_content(inputs)

        # BUG FIX: persist the assistant's reply in the history — the original
        # only rendered it, so every model response vanished on the next rerun.
        st.session_state.messages.append({"role": "assistant", "content": response.text})

        # Display assistant response in the chat container.
        with chat_placeholder:
            with st.chat_message("assistant"):
                st.markdown(response.text)

    if uploaded_file and not prompt:
        st.warning("Please enter a text query to accompany the image.")
else:
    st.info("Provide a Google AI API key (GOOGLE_API_KEY env var or sidebar field) to start chatting.")