import gradio as gr
from transformers import BlipProcessor, BlipForConditionalGeneration
import torch
from PIL import Image
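
# Assumed dependencies for this Space (not pinned in the original; on Hugging
# Face Spaces these would normally live in requirements.txt):
#   pip install gradio transformers torch pillow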
# Load the BLIP image captioning model and processor
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
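
# Optional sketch (not in the original app): run inference on GPU when one is
# available, using the `torch` import above. BLIP also works on CPU, just more
# slowly. If enabled, the tensors produced by `processor(...)` below must be
# moved to the same device, e.g. `inputs = {k: v.to(device) for k, v in inputs.items()}`.
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model.to(device)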

# Function to generate a description for an image using BLIP
def describe_image(image: Image.Image):
    try:
        # Preprocess the image and pass it to the model
        inputs = processor(images=image, return_tensors="pt")
        # Generate a caption and decode it to plain text
        out = model.generate(**inputs)
        description = processor.decode(out[0], skip_special_tokens=True)
        return description
    except Exception as e:
        return f"Error describing the image: {e}"

# Define chatbot interaction function
def chat(user_input, chat_history, image):
    try:
        # Generate text response (placeholder echo for now; see the Groq sketch below)
        response = f"AI Response: {user_input}"
        # If an image is uploaded, describe it using the image captioning model
        if image is not None:
            image_description = describe_image(image)
            response += f"\n\n[Image Description]: {image_description}"
        # Update chat history with both user input and AI response
        chat_history.append(("User", user_input))
        chat_history.append(("AI", response))
        # Format chat history for display
        formatted_history = "\n".join(
            f"{role}: {message}" for role, message in chat_history
        )
        return formatted_history, chat_history
    except Exception as e:
        return f"Error: {e}", chat_history

# Create Gradio interface with updated light boundary design and image upload
with gr.Blocks(css="""
    body {
        background-color: #f7f7f7;
        color: #333;
        font-family: 'Roboto', sans-serif;
        display: flex;
        justify-content: center;
        align-items: center;
        height: 100vh;
        margin: 0;
    }
    .gradio-container {
        width: 70%;
        max-width: 500px;
        background-color: #ffffff;
        padding: 30px;
        border-radius: 15px;
        box-shadow: 0px 10px 30px rgba(0, 0, 0, 0.1);
        border: 1px solid #dcdcdc; /* Light border */
    }
    .gradio-container .textbox, .gradio-container .button {
        background-color: #eeeeee;
        color: #333;
        border: 1px solid #dcdcdc; /* Light border */
        border-radius: 10px;
        padding: 12px;
        font-size: 16px;
    }
    .gradio-container .textbox:focus, .gradio-container .button:focus {
        border-color: #007bff;
        box-shadow: 0px 0px 5px rgba(0, 123, 255, 0.5);
    }
    .gradio-container .button:hover {
        background-color: #007bff;
        color: #fff;
        cursor: pointer;
    }
    .textbox {
        margin-bottom: 20px;
    }
    #chatbox {
        height: 350px;
        overflow-y: auto;
        border: 1px solid #dcdcdc; /* Light border */
        padding: 20px;
        border-radius: 10px;
        background-color: #f9f9f9;
        color: #333;
        margin-bottom: 20px;
        font-size: 14px;
        line-height: 1.6;
        font-family: 'Arial', sans-serif;
    }
    .user-message {
        background-color: #e0f7fa;
        padding: 10px;
        border-radius: 10px;
        margin-bottom: 8px;
        max-width: 75%;
    }
    .ai-message {
        background-color: #e8f5e9;
        padding: 10px;
        border-radius: 10px;
        margin-bottom: 8px;
        max-width: 75%;
        margin-left: auto;
    }
""") as demo:
gr.Markdown("## 🤖 **Professional Groq Chatbot with Image Description**")
gr.Markdown("Type your message below or upload an image to get a description!")
# Define layout with vertical alignment
with gr.Column():
user_input = gr.Textbox(
label="Your Message",
placeholder="Ask me anything!",
lines=2,
interactive=True,
)
submit_button = gr.Button("Send")
clear_button = gr.Button("Clear Chat")
chatbot_output = gr.Textbox(
label="Chat History",
placeholder="AI's responses will appear here.",
lines=15,
interactive=False,
elem_id="chatbox",
)
# Image upload component
image_input = gr.Image(
label="Upload Image for Description",
type="pil", # PIL (Python Imaging Library) type for image input
interactive=True,
elem_id="image-upload",
)
# State to hold chat history
chat_history = gr.State([])
# Button functionalities
submit_button.click(
fn=chat,
inputs=[user_input, chat_history, image_input],
outputs=[chatbot_output, chat_history],
)
clear_button.click(
fn=lambda: ("", []),
inputs=[],
outputs=[chatbot_output, chat_history],
)
# Launch locally
if __name__ == "__main__":
demo.launch()