import os
import io
import json
import base64
import logging

import requests
import gradio as gr
from PIL import Image

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
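
# The OpenRouter API key is read from the environment; set OPENROUTER_API_KEY before launching.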
OPENROUTER_API_KEY = os.environ.get("OPENROUTER_API_KEY", "")
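
# Each entry is (display name, OpenRouter model ID, context window in tokens).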
MODELS = [
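    # Vision-capable models (accept image uploads)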
("Meta: Llama 3.2 11B Vision Instruct (free)", "meta-llama/llama-3.2-11b-vision-instruct:free", 131072), |
|
("Qwen: Qwen2.5 VL 72B Instruct (free)", "qwen/qwen2.5-vl-72b-instruct:free", 131072), |
|
("Qwen: Qwen2.5 VL 32B Instruct (free)", "qwen/qwen2.5-vl-32b-instruct:free", 8192), |
|
("Qwen: Qwen2.5 VL 7B Instruct (free)", "qwen/qwen-2.5-vl-7b-instruct:free", 64000), |
|
("Qwen: Qwen2.5 VL 3B Instruct (free)", "qwen/qwen2.5-vl-3b-instruct:free", 64000), |
    ("Google: Gemini Pro 2.0 Experimental (free)", "google/gemini-2.0-pro-exp-02-05:free", 2000000),
    ("Google: Gemini Pro 2.5 Experimental (free)", "google/gemini-2.5-pro-exp-03-25:free", 1000000),
    ("Google: Gemini 2.0 Flash Thinking Experimental 01-21 (free)", "google/gemini-2.0-flash-thinking-exp:free", 1048576),
    ("Google: Gemini Flash 2.0 Experimental (free)", "google/gemini-2.0-flash-exp:free", 1048576),
    ("Google: Gemini Flash 1.5 8B Experimental", "google/gemini-flash-1.5-8b-exp", 1000000),
    ("Google: Gemini 2.0 Flash Thinking Experimental (free)", "google/gemini-2.0-flash-thinking-exp-1219:free", 40000),
    ("Google: LearnLM 1.5 Pro Experimental (free)", "google/learnlm-1.5-pro-experimental:free", 40960),
    ("Meta: Llama 3.3 70B Instruct (free)", "meta-llama/llama-3.3-70b-instruct:free", 8000),
    ("Meta: Llama 3.2 3B Instruct (free)", "meta-llama/llama-3.2-3b-instruct:free", 20000),
    ("Meta: Llama 3.2 1B Instruct (free)", "meta-llama/llama-3.2-1b-instruct:free", 131072),
    ("Meta: Llama 3.1 8B Instruct (free)", "meta-llama/llama-3.1-8b-instruct:free", 131072),
    ("Meta: Llama 3 8B Instruct (free)", "meta-llama/llama-3-8b-instruct:free", 8192),
    ("NVIDIA: Llama 3.1 Nemotron 70B Instruct (free)", "nvidia/llama-3.1-nemotron-70b-instruct:free", 131072),
    ("DeepSeek: DeepSeek R1 Zero (free)", "deepseek/deepseek-r1-zero:free", 163840),
    ("DeepSeek: R1 (free)", "deepseek/deepseek-r1:free", 163840),
    ("DeepSeek: DeepSeek V3 Base (free)", "deepseek/deepseek-v3-base:free", 131072),
    ("DeepSeek: DeepSeek V3 0324 (free)", "deepseek/deepseek-v3-0324:free", 131072),
    ("DeepSeek: DeepSeek V3 (free)", "deepseek/deepseek-chat:free", 131072),
    ("DeepSeek: R1 Distill Qwen 14B (free)", "deepseek/deepseek-r1-distill-qwen-14b:free", 64000),
    ("DeepSeek: R1 Distill Qwen 32B (free)", "deepseek/deepseek-r1-distill-qwen-32b:free", 16000),
    ("DeepSeek: R1 Distill Llama 70B (free)", "deepseek/deepseek-r1-distill-llama-70b:free", 8192),
    ("Google: Gemma 3 27B (free)", "google/gemma-3-27b-it:free", 96000),
    ("Google: Gemma 3 12B (free)", "google/gemma-3-12b-it:free", 131072),
    ("Google: Gemma 3 4B (free)", "google/gemma-3-4b-it:free", 131072),
    ("Google: Gemma 3 1B (free)", "google/gemma-3-1b-it:free", 32768),
    ("Google: Gemma 2 9B (free)", "google/gemma-2-9b-it:free", 8192),
    ("Mistral: Mistral Nemo (free)", "mistralai/mistral-nemo:free", 128000),
    ("Mistral: Mistral Small 3.1 24B (free)", "mistralai/mistral-small-3.1-24b-instruct:free", 96000),
    ("Mistral: Mistral Small 3 (free)", "mistralai/mistral-small-24b-instruct-2501:free", 32768),
    ("Mistral: Mistral 7B Instruct (free)", "mistralai/mistral-7b-instruct:free", 8192),
    ("Qwen: Qwen2.5 72B Instruct (free)", "qwen/qwen-2.5-72b-instruct:free", 32768),
    ("Qwen: QwQ 32B (free)", "qwen/qwq-32b:free", 40000),
    ("Qwen: QwQ 32B Preview (free)", "qwen/qwq-32b-preview:free", 16384),
    ("Qwen2.5 Coder 32B Instruct (free)", "qwen/qwen-2.5-coder-32b-instruct:free", 32768),
    ("Qwen 2 7B Instruct (free)", "qwen/qwen-2-7b-instruct:free", 8192),
    ("Nous: DeepHermes 3 Llama 3 8B Preview (free)", "nousresearch/deephermes-3-llama-3-8b-preview:free", 131072),
    ("Moonshot AI: Moonlight 16B A3B Instruct (free)", "moonshotai/moonlight-16b-a3b-instruct:free", 8192),
    ("Microsoft: Phi-3 Mini 128K Instruct (free)", "microsoft/phi-3-mini-128k-instruct:free", 8192),
    ("Microsoft: Phi-3 Medium 128K Instruct (free)", "microsoft/phi-3-medium-128k-instruct:free", 8192),
    ("OpenChat 3.5 7B (free)", "openchat/openchat-7b:free", 8192),
    ("Reka: Flash 3 (free)", "rekaai/reka-flash-3:free", 32768),
    ("Dolphin3.0 R1 Mistral 24B (free)", "cognitivecomputations/dolphin3.0-r1-mistral-24b:free", 32768),
    ("Dolphin3.0 Mistral 24B (free)", "cognitivecomputations/dolphin3.0-mistral-24b:free", 32768),
    ("Bytedance: UI-TARS 72B (free)", "bytedance-research/ui-tars-72b:free", 32768),
    ("Qwerky 72b (free)", "featherless/qwerky-72b:free", 32768),
    ("OlympicCoder 7B (free)", "open-r1/olympiccoder-7b:free", 32768),
    ("OlympicCoder 32B (free)", "open-r1/olympiccoder-32b:free", 32768),
    ("Rogue Rose 103B v0.2 (free)", "sophosympatheia/rogue-rose-103b-v0.2:free", 4096),
    ("Toppy M 7B (free)", "undi95/toppy-m-7b:free", 4096),
    ("Hugging Face: Zephyr 7B (free)", "huggingfaceh4/zephyr-7b-beta:free", 4096),
    ("MythoMax 13B (free)", "gryphe/mythomax-l2-13b:free", 4096),
    ("AllenAI: Molmo 7B D (free)", "allenai/molmo-7b-d:free", 4096),
]


def format_to_message_dict(history):
    """Convert [user, assistant] chat history pairs into OpenAI-style message dicts."""
    messages = []
    for pair in history:
        if len(pair) == 2:
            human, ai = pair
            if human:
                messages.append({"role": "user", "content": human})
            if ai:
                messages.append({"role": "assistant", "content": ai})
    return messages
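
# e.g. [["Hi", "Hello!"]] becomes
# [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello!"}]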


def encode_image_to_base64(image_path):
    """Encode an image file path or PIL image as a base64 data URL."""
    try:
        if isinstance(image_path, str):
            with open(image_path, "rb") as image_file:
                encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
            # Derive the MIME type from the file extension.
            file_extension = image_path.split('.')[-1].lower()
            mime_type = f"image/{file_extension}"
            if file_extension in ("jpg", "jpeg"):
                mime_type = "image/jpeg"
            return f"data:{mime_type};base64,{encoded_string}"
        else:
            # Otherwise assume a PIL Image and serialize it as PNG.
            buffered = io.BytesIO()
            image_path.save(buffered, format="PNG")
            encoded_string = base64.b64encode(buffered.getvalue()).decode('utf-8')
            return f"data:image/png;base64,{encoded_string}"
    except Exception as e:
        logger.error(f"Error encoding image: {str(e)}")
        return None


def prepare_message_with_images(text, images):
    """Prepare a message with text and images"""
    if not images:
        return text

    content = [{"type": "text", "text": text}]

    for img in images:
        if img is None:
            continue

        encoded_image = encode_image_to_base64(img)
        if encoded_image:
            content.append({
                "type": "image_url",
                "image_url": {"url": encoded_image}
            })

    return content
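
# The returned list follows the OpenAI-style multimodal schema, e.g.:
# [{"type": "text", "text": "Describe this image"},
#  {"type": "image_url", "image_url": {"url": "data:image/png;base64,..."}}]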


def ask_ai(message, chatbot, model_choice, temperature, max_tokens, uploaded_files):
    """Enhanced AI query function with file upload support and detailed logging"""
    if not message.strip() and not uploaded_files:
        return chatbot, ""

    # Resolve the chosen display name to its model ID and context window.
    model_id = None
    context_size = 0
    for name, model_id_value, ctx_size in MODELS:
        if name == model_choice:
            model_id = model_id_value
            context_size = ctx_size
            break

    if model_id is None:
        logger.error(f"Model not found: {model_choice}")
        return chatbot + [[message, "Error: Model not found"]], ""

    # Rebuild the conversation so far, then append the new user turn.
    messages = format_to_message_dict(chatbot)

    if uploaded_files:
        content = prepare_message_with_images(message, uploaded_files)
    else:
        content = message

    messages.append({"role": "user", "content": content})

    try:
        logger.info(f"Sending request to model: {model_id}")
        # Note: this logs full base64 image data when images are attached.
        logger.info(f"Messages: {json.dumps(messages)}")

        payload = {
            "model": model_id,
            "messages": messages,
            "temperature": temperature,
            "max_tokens": max_tokens
        }

        logger.info(f"Request payload: {json.dumps(payload)}")

        response = requests.post(
            "https://openrouter.ai/api/v1/chat/completions",
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {OPENROUTER_API_KEY}",
                "HTTP-Referer": "https://huggingface.co/spaces"
            },
            json=payload,
            timeout=60
        )

        logger.info(f"Response status: {response.status_code}")
        logger.info(f"Response headers: {response.headers}")

        response_text = response.text
        logger.info(f"Response body: {response_text}")

        if response.status_code == 200:
            result = response.json()
            ai_response = result.get("choices", [{}])[0].get("message", {}).get("content", "")
            chatbot = chatbot + [[message, ai_response]]

            if "usage" in result:
                logger.info(f"Token usage: {result['usage']}")
        else:
            error_message = f"Error: Status code {response.status_code}\n\nResponse: {response_text}"
            chatbot = chatbot + [[message, error_message]]
    except Exception as e:
        logger.error(f"Exception during API call: {str(e)}")
        chatbot = chatbot + [[message, f"Error: {str(e)}"]]

    return chatbot, ""


def clear_chat():
    """Reset chat history, message box, uploads, and parameters to defaults."""
    return [], "", [], 0.7, 1000

with gr.Blocks(css="footer {visibility: hidden}") as demo:
    gr.Markdown("""
    # Enhanced AI Chat

    This interface allows you to chat with various free AI models from OpenRouter.
    You can upload images for vision-capable models and adjust parameters.
    """)

    with gr.Row():
        with gr.Column(scale=2):
            chatbot = gr.Chatbot(height=500, show_copy_button=True, show_label=False)

            with gr.Row():
                message = gr.Textbox(
                    placeholder="Type your message here...",
                    label="Message",
                    lines=2
                )

            with gr.Row():
                with gr.Column(scale=3):
                    submit_btn = gr.Button("Send", variant="primary")

                with gr.Column(scale=1):
                    clear_btn = gr.Button("Clear Chat", variant="secondary")

            with gr.Row():
                # Hidden until the user uploads images.
                uploaded_files = gr.Gallery(
                    label="Uploaded Images",
                    show_label=True,
                    elem_id="gallery",
                    columns=4,
                    height=150,
                    visible=False
                )

            with gr.Row():
                upload_btn = gr.UploadButton(
                    label="Upload Images (for vision models)",
                    file_types=["image"],
                    file_count="multiple"
                )

        with gr.Column(scale=1):
            with gr.Group():
                gr.Markdown("### Model Selection")
                model_names = [name for name, _, _ in MODELS]
                model_choice = gr.Radio(
                    model_names,
                    value=model_names[0],
                    label="Choose a Model"
                )

            with gr.Accordion("Model Context", open=False):
                context_info = gr.HTML(value="<p>Select a model to see its context window</p>")

            with gr.Accordion("Parameters", open=False):
                temperature = gr.Slider(
                    minimum=0.1,
                    maximum=2.0,
                    value=0.7,
                    step=0.1,
                    label="Temperature"
                )

                max_tokens = gr.Slider(
                    minimum=100,
                    maximum=4000,
                    value=1000,
                    step=100,
                    label="Max Tokens"
                )
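
    # Event handlers and wiring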

    def update_context_info(model_name):
        for name, _, ctx_size in MODELS:
            if name == model_name:
                return f"<p><b>Context window:</b> {ctx_size:,} tokens</p>"
        return "<p>Model information not found</p>"

    model_choice.change(
        fn=update_context_info,
        inputs=[model_choice],
        outputs=[context_info]
    )

    def process_uploaded_files(files):
        # Collect the temp-file paths and reveal the gallery in a single update,
        # rather than targeting the same component twice in the outputs list.
        file_paths = [file.name for file in files]
        return gr.update(value=file_paths, visible=True)

    upload_btn.upload(
        fn=process_uploaded_files,
        inputs=[upload_btn],
        outputs=[uploaded_files]
    )

    submit_btn.click(
        fn=ask_ai,
        inputs=[message, chatbot, model_choice, temperature, max_tokens, uploaded_files],
        outputs=[chatbot, message]
    )

    message.submit(
        fn=ask_ai,
        inputs=[message, chatbot, model_choice, temperature, max_tokens, uploaded_files],
        outputs=[chatbot, message]
    )

    clear_btn.click(
        fn=clear_chat,
        inputs=[],
        outputs=[chatbot, message, uploaded_files, temperature, max_tokens]
    )


if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)