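"""Gradio chat app backed by Google's Gemini models.

Text prompts go to `gemini-pro`; an optionally uploaded image is first described
by `gemini-pro-vision` and that description is folded into the prompt.
Assumes the API key is supplied via the GOOGLE_API_KEY environment variable.
"""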
import os
import textwrap

import gradio as gr
import google.generativeai as genai

def to_markdown(text):
    """Formats text as a Markdown blockquote, converting bullet characters to list markers."""
    text = text.replace('•', ' *')
    return textwrap.indent(text, '> ', predicate=lambda _: True)

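# Example: to_markdown("Hello\n• world") returns "> Hello\n>  * world".
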
def chat(message, history, img=None):
    """Streams a response to the user's message, optionally grounding it in an uploaded image."""
    genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))  # Read the key from the environment rather than hard-coding it
    text_model = genai.GenerativeModel('gemini-pro')
    vision_model = genai.GenerativeModel('gemini-pro-vision')
    try:
        if img is not None:
            # Describe the image with the vision model first
            image_response = vision_model.generate_content(img)
            image_response.resolve()  # Ensure the response is fully available
            image_text = to_markdown(image_response.text)
            # Combine the image description with the user's message into a single prompt
            prompt = f"{message}\n{image_text}"
            response = text_model.generate_content(prompt, stream=True)
        else:
            # Text-only request
            response = text_model.generate_content(message, stream=True)

        # Stream the reply to the chat UI, accumulating chunks as they arrive
        partial = ""
        for chunk in response:
            partial += chunk.text
            yield to_markdown(partial)
    except Exception as e:
        print(f"Error during generation: {e}")
        yield "An error occurred while generating the response. Please try again later."

chat_interface = gr.ChatInterface(
    fn=chat,
    title="Gemini Chat",
    description="Chat with an AI assistant powered by Gemini",
    theme="soft",
    textbox=gr.Textbox(lines=1, label="Enter your message"),
    additional_inputs=[gr.Image(type="pil", label="Optional image")],
)
chat_interface.launch()
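# Note: chat_interface.launch(share=True) would additionally expose a temporary public URL.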