CrispChat / app.py
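"""CrispChat: a small Gradio app for chatting with free OpenRouter models, with optional image input."""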
import gradio as gr
import requests
import json
import os
import base64
# Set OpenRouter API key in the Space's secrets as "OPENROUTER_API_KEY"
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")
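# Note: if the secret is not set, OPENROUTER_API_KEY is None and OpenRouter will reject requests as unauthorized.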
HEADERS = {
    "Authorization": f"Bearer {OPENROUTER_API_KEY}",
    "Content-Type": "application/json",
    "HTTP-Referer": "https://huggingface.co/spaces/YOUR_SPACE",  # Optional
    "X-Title": "CrispChat"  # Optional
}
# Free OpenRouter models offered in the UI: label -> (model ID, context window in tokens)
FREE_MODELS = {
"Google: Gemini Pro 2.5 Experimental (free)": ("google/gemini-2.5-pro-exp-03-25:free", 1000000),
"DeepSeek: DeepSeek V3 (free)": ("deepseek/deepseek-chat:free", 131072),
"Meta: Llama 3.2 11B Vision Instruct (free)": ("meta-llama/llama-3.2-11b-vision-instruct:free", 131072),
"Qwen: Qwen2.5 VL 72B Instruct (free)": ("qwen/qwen2.5-vl-72b-instruct:free", 131072),
}
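# The second tuple element is the model's context window in tokens; it is informational
# only and is not enforced anywhere in this app (chat_interface discards it).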
def query_openrouter_model(model_id, prompt, image=None):
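    """Send a single-turn chat request to OpenRouter.

    prompt is plain text; image, if given, is a local file path (as supplied by the
    Gradio Image component) that gets inlined as a base64 data URI. Returns the
    assistant's reply text, or an error string if the request fails.
    """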
messages = [{"role": "user", "content": prompt}]
    # If an image is provided, send multimodal content: a list containing a text part and a base64 image_url part
if image is not None:
with open(image, "rb") as f:
image_bytes = f.read()
base64_image = base64.b64encode(image_bytes).decode("utf-8")
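        # NOTE: the data URI below declares image/png. Gradio may save uploads in other
        # formats; if that matters, mimetypes.guess_type(image) could be used to set the
        # MIME type precisely.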
messages[0]["content"] = [
{"type": "text", "text": prompt},
{"type": "image_url", "image_url": {"url": f"data:image/png;base64,{base64_image}"}}
]
payload = {
"model": model_id,
"messages": messages
}
    response = requests.post(
        url="https://openrouter.ai/api/v1/chat/completions",
        headers=HEADERS,
        data=json.dumps(payload),
        timeout=120,  # fail rather than hang the UI if the API stalls
    )
try:
response.raise_for_status()
data = response.json()
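        # OpenRouter returns an OpenAI-style completion: the reply text lives at
        # choices[0].message.content.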
return data["choices"][0]["message"]["content"]
except Exception as e:
return f"Error: {str(e)}\n{response.text}"
def chat_interface(prompt, image, model_label):
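    """Gradio callback: resolve the selected model label to its ID and forward the query."""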
model_id, _ = FREE_MODELS[model_label]
return query_openrouter_model(model_id, prompt, image)
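# Gradio UI: prompt and optional image in, selected model's reply out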
with gr.Blocks(title="CrispChat") as demo:
gr.Markdown("""
# 🌟 CrispChat
Multi-modal chat with free OpenRouter models
""")
with gr.Row():
prompt = gr.Textbox(label="Enter your message", lines=4, placeholder="Ask me anything...")
image = gr.Image(type="filepath", label="Optional image input")
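        # type="filepath" gives query_openrouter_model a local path it can open and base64-encode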
model_choice = gr.Dropdown(
choices=list(FREE_MODELS.keys()),
value="Google: Gemini Pro 2.5 Experimental (free)",
label="Select model"
)
output = gr.Textbox(label="Response", lines=6)
submit = gr.Button("Submit")
submit.click(fn=chat_interface, inputs=[prompt, image, model_choice], outputs=output)
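# On Hugging Face Spaces the app starts automatically; to run locally, set the
# OPENROUTER_API_KEY environment variable and execute `python app.py`.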
if __name__ == "__main__":
demo.launch()