Docfile committed on
Commit
62e94e2
·
verified ·
1 Parent(s): a1445fb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -64
app.py CHANGED
@@ -1,66 +1,62 @@
1
- import os
2
  import streamlit as st
3
- import google.generativeai as genai
4
- from PIL import Image
5
-
6
- # Set up the Streamlit App
7
- st.set_page_config(page_title="Multimodal Chatbot with Gemini Flash", layout="wide")
8
- st.title("Multimodal Chatbot with Gemini Flash ⚡️")
9
- st.caption("Chat with Google's Gemini Flash model using image and text input to get lightning fast results. 🌟")
10
-
11
- # Get OpenAI API key from user
12
- api_key = "REDACTED_GOOGLE_AI_API_KEY"  # SECURITY: a real API key was committed here — revoke it immediately and load keys from environment variables or st.secrets instead
13
- # Set up the Gemini model
14
- genai.configure(api_key=api_key)
15
- model = genai.GenerativeModel(model_name="gemini-1.5-flash-latest")
16
-
17
- if api_key:
18
- # Initialize the chat history
19
- if "messages" not in st.session_state:
20
- st.session_state.messages = []
21
-
22
- # Sidebar for image upload
23
- with st.sidebar:
24
- st.title("Chat with Images")
25
- uploaded_file = st.file_uploader("Upload an image...", type=["jpg", "jpeg", "png"])
26
-
27
- if uploaded_file:
28
- image = Image.open(uploaded_file)
29
- st.image(image, caption='Uploaded Image', use_column_width=True)
30
-
31
- # Main layout
32
- chat_placeholder = st.container()
33
-
34
- with chat_placeholder:
35
- # Display the chat history
36
- for message in st.session_state.messages:
37
- with st.chat_message(message["role"]):
38
- st.markdown(message["content"])
39
-
40
- # User input area at the bottom
41
- prompt = st.chat_input("What do you want to know?")
42
-
43
- if prompt:
44
- inputs = [prompt]
45
-
46
- # Add user message to chat history
47
- st.session_state.messages.append({"role": "user", "content": prompt})
48
- # Display user message in chat message container
49
- with chat_placeholder:
50
- with st.chat_message("user"):
51
- st.markdown(prompt)
52
-
53
- if uploaded_file:
54
- inputs.append(image)
55
-
56
- with st.spinner('Generating response...'):
57
- # Generate response
58
- response = model.generate_content(inputs)
59
-
60
- # Display assistant response in chat message container
61
- with chat_placeholder:
62
- with st.chat_message("assistant"):
63
- st.markdown(response.text)
64
 
65
- if uploaded_file and not prompt:
66
- st.warning("Please enter a text query to accompany the image.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import streamlit as st
2
+ from phi.agent import Agent
3
+ from phi.model.google import Gemini
4
+ import tempfile
5
+ import os
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
 
7
def main():
    """Streamlit entry point: multimodal reasoning over an uploaded image.

    Renders an image uploader and a free-text task box, then sends both to a
    phi Agent backed by a Gemini "thinking" model and displays the markdown
    response. No parameters; no return value — all output goes to the
    Streamlit UI. Exceptions from file handling and from the model call are
    caught and surfaced via st.error rather than propagated.
    """
    # Reasoning agent; markdown=True so response.content renders directly
    # with st.markdown below.
    agent = Agent(
        model=Gemini(id="gemini-2.0-flash-thinking-exp-1219"),
        markdown=True,
    )

    st.title("Multimodal Reasoning AI Agent 🧠")
    st.write(
        "Upload an image and provide a reasoning-based task for the AI Agent. "
        "The AI Agent will analyze the image and respond based on your input."
    )

    uploaded_file = st.file_uploader("Upload Image", type=["jpg", "jpeg", "png"])

    if uploaded_file is not None:
        temp_path = None
        try:
            # Keep the real extension — the original hard-coded '.jpg', which
            # mislabeled PNG uploads for anything that inspects the path.
            suffix = os.path.splitext(uploaded_file.name)[1] or ".jpg"
            # agent.run expects a filesystem path, so persist the upload to a
            # temp file (delete=False: the path must outlive this `with`).
            with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp_file:
                tmp_file.write(uploaded_file.getvalue())
                temp_path = tmp_file.name

            st.image(uploaded_file, caption="Uploaded Image", use_container_width=True)

            task_input = st.text_area("Enter your task/question for the AI Agent:")

            # Only call the model when the button is pressed AND a task is given.
            if st.button("Analyze Image") and task_input:
                with st.spinner("AI is thinking... 🤖"):
                    try:
                        response = agent.run(task_input, images=[temp_path])
                        st.markdown("### AI Response:")
                        st.markdown(response.content)
                    except Exception as e:
                        st.error(f"An error occurred during analysis: {str(e)}")
        except Exception as e:
            st.error(f"An error occurred while processing the image: {str(e)}")
        finally:
            # Always clean up. The original deleted the temp file only inside
            # the "Analyze Image" branch, leaking one file per rerun whenever
            # the button was not clicked. Streamlit re-runs main() on every
            # interaction and recreates the file, so deleting at the end of
            # each run is safe.
            if temp_path and os.path.exists(temp_path):
                os.unlink(temp_path)


if __name__ == "__main__":
    main()