Spaces:
Sleeping
Sleeping
| import gradio as gr | |
| from utils import generate_response | |
| from transformers import BlipProcessor, BlipForConditionalGeneration | |
| import torch | |
| from PIL import Image | |
# Load the BLIP image-captioning processor (tokenizer + image transforms)
# and model weights from the Hugging Face Hub (downloaded on first run).
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
# Inference-only app: switch off dropout/batch-norm training behavior.
model.eval()
# Function to generate a description for an image using BLIP
def describe_image(image):
    """Generate a natural-language caption for *image* with BLIP.

    Args:
        image: A PIL image (anything ``BlipProcessor`` accepts as ``images``).

    Returns:
        str: The generated caption, or an error message string if
        captioning fails (the UI never crashes on a bad image).
    """
    try:
        # Preprocess into model-ready tensors (pixel values).
        inputs = processor(images=image, return_tensors="pt")
        # Pure inference: no_grad avoids building the autograd graph,
        # saving memory and time during generation.
        with torch.no_grad():
            out = model.generate(**inputs)
        # Decode token ids back to text, dropping BOS/EOS/pad tokens.
        return processor.decode(out[0], skip_special_tokens=True)
    except Exception as e:
        # Surface the failure as text so the caller can display it.
        return f"Error describing the image: {e}"
# Define chatbot interaction function
def chat(user_input, chat_history, image):
    """Handle one chat turn: text response plus optional image caption.

    Args:
        user_input: The user's message text.
        chat_history: Mutable list of ``(role, message)`` tuples
            accumulated so far (Gradio State); appended to in place.
        image: Optional PIL image; when present, its BLIP caption is
            appended to the response.

    Returns:
        tuple[str, list]: (formatted transcript for display,
        updated chat history).
    """
    try:
        # Generate the text response from the backend model.
        response = generate_response(user_input)
        # If an image was uploaded, append its caption to the reply.
        # (Removed a dead "No image uploaded." local that was never used.)
        if image is not None:
            response += f"\n\n[Image Description]: {describe_image(image)}"
        # Record both sides of this turn.
        chat_history.append(("User", user_input))
        chat_history.append(("AI", response))
        # Render as "Role: message" lines for the transcript box.
        formatted_history = "\n".join(
            f"{role}: {message}" for role, message in chat_history
        )
        return formatted_history, chat_history
    except Exception as e:
        # Keep the history intact and show the error in the transcript.
        return f"Error: {e}", chat_history
# Assemble the Gradio UI: a light-themed chat panel with image upload.
with gr.Blocks(css="""
body {
background-color: #f7f7f7;
color: #333;
font-family: 'Roboto', sans-serif;
display: flex;
justify-content: center;
align-items: center;
height: 100vh;
margin: 0;
}
.gradio-container {
width: 100%;
max-width: 500px;
background-color: #ffffff;
padding: 30px;
border-radius: 15px;
box-shadow: 0px 10px 30px rgba(0, 0, 0, 0.1);
border: 1px solid #dcdcdc; /* Light border */
}
.gradio-container .textbox, .gradio-container .button {
background-color: #eeeeee;
color: #333;
border: 1px solid #dcdcdc; /* Light border */
border-radius: 10px;
padding: 12px;
font-size: 16px;
}
.gradio-container .textbox:focus, .gradio-container .button:focus {
border-color: #007bff;
box-shadow: 0px 0px 5px rgba(0, 123, 255, 0.5);
}
.gradio-container .button:hover {
background-color: #007bff;
color: #fff;
cursor: pointer;
}
.textbox {
margin-bottom: 20px;
}
#chatbox {
height: 350px;
overflow-y: auto;
border: 1px solid #dcdcdc; /* Light border */
padding: 20px;
border-radius: 10px;
background-color: #f9f9f9;
color: #333;
margin-bottom: 20px;
font-size: 14px;
line-height: 1.6;
font-family: 'Arial', sans-serif;
}
.user-message {
background-color: #e0f7fa;
padding: 10px;
border-radius: 10px;
margin-bottom: 8px;
max-width: 75%;
}
.ai-message {
background-color: #e8f5e9;
padding: 10px;
border-radius: 10px;
margin-bottom: 8px;
max-width: 75%;
margin-left: auto;
}
""") as demo:
    # Page header and usage hint.
    gr.Markdown("## 🤖 **Professional Groq Chatbot with Image Description**")
    gr.Markdown("Type your message below or upload an image to get a description!")

    # Stack the controls vertically.
    with gr.Column():
        message_box = gr.Textbox(
            label="Your Message",
            placeholder="Ask me anything!",
            lines=2,
            interactive=True,
        )
        send_btn = gr.Button("Send")
        reset_btn = gr.Button("Clear Chat")
        transcript_box = gr.Textbox(
            label="Chat History",
            placeholder="AI's responses will appear here.",
            lines=15,
            interactive=False,
            elem_id="chatbox",
        )
        # Optional image input; delivered to callbacks as a PIL image.
        picture_box = gr.Image(
            label="Upload Image for Description",
            type="pil",
            interactive=True,
            elem_id="image-upload",
        )

    # Server-side conversation state: list of (role, message) tuples.
    history_state = gr.State([])

    # Send runs one chat turn; Clear empties both transcript and state.
    send_btn.click(
        fn=chat,
        inputs=[message_box, history_state, picture_box],
        outputs=[transcript_box, history_state],
    )
    reset_btn.click(
        fn=lambda: ("", []),
        inputs=[],
        outputs=[transcript_box, history_state],
    )

# Run the app locally when executed as a script.
if __name__ == "__main__":
    demo.launch()