import gradio as gr
from huggingface_hub import InferenceClient
import os
import json
import base64
from PIL import Image
import io
ACCESS_TOKEN = os.getenv("HF_TOKEN")
print("Access token loaded.")

# Function to encode image to base64
def encode_image(image_path):
    if not image_path:
        print("No image path provided")
        return None
    try:
        print(f"Encoding image from path: {image_path}")
        # If it's already a PIL Image
        if isinstance(image_path, Image.Image):
            image = image_path
        else:
            # Try to open the image file
            image = Image.open(image_path)
        # Convert to RGB if image has an alpha channel (RGBA)
        if image.mode == 'RGBA':
            image = image.convert('RGB')
        # Encode to base64
        buffered = io.BytesIO()
        image.save(buffered, format="JPEG")
        img_str = base64.b64encode(buffered.getvalue()).decode("utf-8")
        print("Image encoded successfully")
        return img_str
    except Exception as e:
        print(f"Error encoding image: {e}")
        return None
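
# Note: encode_image() returns a bare base64 string; to embed it in a chat message it
# would need a "data:image/jpeg;base64," prefix. respond() below currently sends local
# file paths as file:// URLs instead of calling this helper.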

def respond(
    message,
    image_files,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    frequency_penalty,
    seed,
    provider,
    custom_api_key,
    custom_model,
    model_search_term,
    selected_model
):
print(f"Received message: {message}")
print(f"Received {len(image_files) if image_files else 0} images")
print(f"History: {history}")
print(f"System message: {system_message}")
print(f"Max tokens: {max_tokens}, Temperature: {temperature}, Top-P: {top_p}")
print(f"Frequency Penalty: {frequency_penalty}, Seed: {seed}")
print(f"Selected provider: {provider}")
print(f"Custom API Key provided: {bool(custom_api_key.strip())}")
print(f"Selected model (custom_model): {custom_model}")
print(f"Model search term: {model_search_term}")
print(f"Selected model from radio: {selected_model}")
# Determine which token to use
token_to_use = custom_api_key if custom_api_key.strip() != "" else ACCESS_TOKEN
if custom_api_key.strip() != "":
print("USING CUSTOM API KEY: BYOK token provided by user is being used for authentication")
else:
print("USING DEFAULT API KEY: Environment variable HF_TOKEN is being used for authentication")
# Initialize the Inference Client with the provider and appropriate token
client = InferenceClient(token=token_to_use, provider=provider)
print(f"Hugging Face Inference Client initialized with {provider} provider.")
# Convert seed to None if -1 (meaning random)
if seed == -1:
seed = None

    # Build the content of the latest user message
    user_content = []

    # Add text if there is any
    if message and message.strip():
        user_content.append({
            "type": "text",
            "text": message
        })

    # Add images if any
    if image_files and len(image_files) > 0:
        for file_path in image_files:
            if not file_path:
                continue
            try:
                print(f"Processing image file: {file_path}")
                # Pass the local file path through as a file:// URL (no base64 encoding)
                user_content.append({
                    "type": "image_url",
                    "image_url": {
                        "url": f"file://{file_path}"
                    }
                })
            except Exception as e:
                print(f"Error processing image file: {e}")

    # If there is no content at all, fall back to an empty string
    if not user_content:
        user_content = ""

    # Prepare messages in the format expected by the API
    messages = [{"role": "system", "content": system_message}]
    print("Initial messages array constructed.")

    # Add conversation history to the context
    for val in history:
        user_msg = val[0]
        assistant_msg = val[1]

        # Process user message
        if user_msg:
            if isinstance(user_msg, dict) and "text" in user_msg:
                # This is a MultimodalTextbox message
                hist_text = user_msg.get("text", "")
                hist_files = user_msg.get("files", [])
                hist_content = []
                if hist_text:
                    hist_content.append({
                        "type": "text",
                        "text": hist_text
                    })
                for hist_file in hist_files:
                    if hist_file:
                        hist_content.append({
                            "type": "image_url",
                            "image_url": {
                                "url": f"file://{hist_file}"
                            }
                        })
                if hist_content:
                    messages.append({"role": "user", "content": hist_content})
            else:
                # Regular text message
                messages.append({"role": "user", "content": user_msg})

        # Process assistant message
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})

    # Append the latest user message
    messages.append({"role": "user", "content": user_content})
    print(f"Latest user message appended (content type: {type(user_content)})")

    # Determine which model to use, prioritizing custom_model if provided
    model_to_use = custom_model.strip() if custom_model.strip() != "" else selected_model
    print(f"Model selected for inference: {model_to_use}")

    # Start with an empty string to build the response as tokens stream in
    response = ""
    print(f"Sending request to {provider} provider.")

    # Prepare parameters for the chat completion request
    parameters = {
        "max_tokens": max_tokens,
        "temperature": temperature,
        "top_p": top_p,
        "frequency_penalty": frequency_penalty,
    }
    if seed is not None:
        parameters["seed"] = seed

    # Use the InferenceClient for making the request
    try:
        # Create a generator for the streaming response
        stream = client.chat_completion(
            model=model_to_use,
            messages=messages,
            stream=True,
            **parameters
        )
        print("Received tokens: ", end="", flush=True)

        # Process the streaming response
        for chunk in stream:
            if hasattr(chunk, 'choices') and len(chunk.choices) > 0:
                # Extract the content from the response
                if hasattr(chunk.choices[0], 'delta') and hasattr(chunk.choices[0].delta, 'content'):
                    token_text = chunk.choices[0].delta.content
                    if token_text:
                        print(token_text, end="", flush=True)
                        response += token_text
                        yield response
        print()
    except Exception as e:
        print(f"Error during inference: {e}")
        response += f"\nError: {str(e)}"
        yield response

    print("Completed response generation.")

# Function to validate provider selection based on BYOK:
# without a custom key, only the default "hf-inference" provider is allowed
def validate_provider(api_key, provider):
    if not api_key.strip() and provider != "hf-inference":
        return gr.update(value="hf-inference")
    return gr.update(value=provider)

# GRADIO UI
with gr.Blocks(theme="Nymbo/Nymbo_Theme") as demo:
    # Create the chatbot component
    chatbot = gr.Chatbot(
        height=600,
        show_copy_button=True,
        placeholder="Select a model and begin chatting",
        layout="panel"
    )
    print("Chatbot interface created.")

    # Multimodal textbox for messages (combines text and file uploads)
    msg = gr.MultimodalTextbox(
        placeholder="Type a message or upload images...",
        show_label=False,
        container=False,
        scale=12,
        file_types=["image"],
        file_count="multiple",
        sources=["upload"]
    )

    # No separate submit button is needed; MultimodalTextbox provides its own

    # Create accordion for settings
    with gr.Accordion("Settings", open=False):
        # System message
        system_message_box = gr.Textbox(
            value="You are a helpful AI assistant that can understand images and text.",
            placeholder="You are a helpful assistant.",
            label="System Prompt"
        )

        # Generation parameters
        with gr.Row():
            with gr.Column():
                max_tokens_slider = gr.Slider(
                    minimum=1,
                    maximum=4096,
                    value=512,
                    step=1,
                    label="Max tokens"
                )
                temperature_slider = gr.Slider(
                    minimum=0.1,
                    maximum=4.0,
                    value=0.7,
                    step=0.1,
                    label="Temperature"
                )
                top_p_slider = gr.Slider(
                    minimum=0.1,
                    maximum=1.0,
                    value=0.95,
                    step=0.05,
                    label="Top-P"
                )
            with gr.Column():
                frequency_penalty_slider = gr.Slider(
                    minimum=-2.0,
                    maximum=2.0,
                    value=0.0,
                    step=0.1,
                    label="Frequency Penalty"
                )
                seed_slider = gr.Slider(
                    minimum=-1,
                    maximum=65535,
                    value=-1,
                    step=1,
                    label="Seed (-1 for random)"
                )

        # Provider selection
        providers_list = [
            "hf-inference",   # Default Hugging Face Inference
            "cerebras",       # Cerebras provider
            "together",       # Together AI
            "sambanova",      # SambaNova
            "novita",         # Novita AI
            "cohere",         # Cohere
            "fireworks-ai",   # Fireworks AI
            "hyperbolic",     # Hyperbolic
            "nebius",         # Nebius
        ]

        provider_radio = gr.Radio(
            choices=providers_list,
            value="hf-inference",
            label="Inference Provider",
            info="[View all models here](https://huggingface.co/models?inference_provider=all&sort=trending)"
        )

        # BYOK textbox
        byok_textbox = gr.Textbox(
            value="",
            label="BYOK (Bring Your Own Key)",
            info="Enter a custom Hugging Face API key here. When empty, only the 'hf-inference' provider can be used.",
            placeholder="Enter your Hugging Face API token",
            type="password"  # Hide the API key for security
        )

        # Custom model box
        custom_model_box = gr.Textbox(
            value="",
            label="Custom Model",
            info="(Optional) Provide a custom Hugging Face model path. Overrides any selected featured model.",
            placeholder="meta-llama/Llama-3.3-70B-Instruct"
        )

        # Model search
        model_search_box = gr.Textbox(
            label="Filter Models",
            placeholder="Search for a featured model...",
            lines=1
        )

        # Featured models list (multimodal and standard text models)
        models_list = [
            # Multimodal models
            "meta-llama/Llama-3.3-70B-Vision",
            "Alibaba-NLP/NephilaV-16B-Chat",
            "mistralai/Mistral-Large-Vision-2407",
            "OpenGVLab/InternVL-Chat-V1-5",
            "microsoft/Phi-3.5-vision-instruct",
            "Qwen/Qwen2.5-VL-7B-Instruct",
            "liuhaotian/llava-v1.6-mistral-7b",
            # Standard text models
            "meta-llama/Llama-3.3-70B-Instruct",
            "meta-llama/Llama-3.1-70B-Instruct",
            "meta-llama/Llama-3.0-70B-Instruct",
            "meta-llama/Llama-3.2-3B-Instruct",
            "meta-llama/Llama-3.2-1B-Instruct",
            "meta-llama/Llama-3.1-8B-Instruct",
            "NousResearch/Hermes-3-Llama-3.1-8B",
            "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
            "mistralai/Mistral-Nemo-Instruct-2407",
            "mistralai/Mixtral-8x7B-Instruct-v0.1",
            "mistralai/Mistral-7B-Instruct-v0.3",
            "mistralai/Mistral-7B-Instruct-v0.2",
            "Qwen/Qwen3-235B-A22B",
            "Qwen/Qwen3-32B",
            "Qwen/Qwen2.5-72B-Instruct",
            "Qwen/Qwen2.5-3B-Instruct",
            "Qwen/Qwen2.5-0.5B-Instruct",
            "Qwen/QwQ-32B",
            "Qwen/Qwen2.5-Coder-32B-Instruct",
            "microsoft/Phi-3.5-mini-instruct",
            "microsoft/Phi-3-mini-128k-instruct",
            "microsoft/Phi-3-mini-4k-instruct",
        ]

        featured_model_radio = gr.Radio(
            label="Select a model below",
            choices=models_list,
            value="meta-llama/Llama-3.3-70B-Vision",  # Default to a multimodal model
            interactive=True
        )

        gr.Markdown("[View all multimodal models](https://huggingface.co/models?pipeline_tag=image-to-text&sort=trending)")

    # Chat history state
    chat_history = gr.State([])

    # Function to filter models
    def filter_models(search_term):
        print(f"Filtering models with search term: {search_term}")
        filtered = [m for m in models_list if search_term.lower() in m.lower()]
        print(f"Filtered models: {filtered}")
        return gr.update(choices=filtered)

    # Function to set custom model from radio
    def set_custom_model_from_radio(selected):
        print(f"Featured model selected: {selected}")
        return selected

    # Chat handler: append the user's MultimodalTextbox message to the history
    def user(user_message, history):
        # Debug logging for troubleshooting
        print(f"User message received: {user_message}")

        # Skip if message is empty (no text and no files)
        if not user_message or (not user_message.get("text") and not user_message.get("files")):
            print("Empty message, skipping")
            return history

        # Extract data from the MultimodalTextbox
        text_content = user_message.get("text", "").strip()
        file_paths = user_message.get("files", [])
        print(f"Text content: {text_content}")
        print(f"Files: {file_paths}")

        # Process the message
        if file_paths and len(file_paths) > 0:
            # There are files: keep the original multimodal dict so bot() can read both text and images
            file_path = file_paths[0]  # Logged for debugging; all files remain in the message dict
            print(f"Using file: {file_path}")
            history.append([user_message, None])
        else:
            # Text-only message
            history.append([{"text": text_content, "files": []}, None])
        return history

    # Bot handler: stream the assistant's reply into the last history entry
    def bot(history, system_msg, max_tokens, temperature, top_p, freq_penalty, seed, provider, api_key, custom_model, search_term, selected_model):
        # Check if history is valid
        if not history or len(history) == 0:
            print("No history to process")
            yield history
            return

        # Extract the last user message
        user_message = history[-1][0]
        print(f"Processing user message: {user_message}")

        # Get text and files from the message
        if isinstance(user_message, dict) and "text" in user_message:
            text_content = user_message.get("text", "")
            image_files = user_message.get("files", [])
        else:
            text_content = ""
            image_files = []

        # Stream the response from respond() into the last history entry
        history[-1][1] = ""
        for response in respond(
            text_content,
            image_files,
            history[:-1],
            system_msg,
            max_tokens,
            temperature,
            top_p,
            freq_penalty,
            seed,
            provider,
            api_key,
            custom_model,
            search_term,
            selected_model
        ):
            history[-1][1] = response
            yield history

    # Event handlers - only using the MultimodalTextbox's built-in submit functionality
    msg.submit(
        user,
        [msg, chatbot],
        [chatbot],
        queue=False
    ).then(
        bot,
        [chatbot, system_message_box, max_tokens_slider, temperature_slider, top_p_slider,
         frequency_penalty_slider, seed_slider, provider_radio, byok_textbox, custom_model_box,
         model_search_box, featured_model_radio],
        [chatbot]
    ).then(
        lambda: {"text": "", "files": []},  # Clear inputs after submission
        None,
        [msg]
    )

    # Connect the model filter to update the radio choices
    model_search_box.change(
        fn=filter_models,
        inputs=model_search_box,
        outputs=featured_model_radio
    )
    print("Model search box change event linked.")

    # Connect the featured model radio to update the custom model box
    featured_model_radio.change(
        fn=set_custom_model_from_radio,
        inputs=featured_model_radio,
        outputs=custom_model_box
    )
    print("Featured model radio button change event linked.")

    # Connect the BYOK textbox to validate provider selection
    byok_textbox.change(
        fn=validate_provider,
        inputs=[byok_textbox, provider_radio],
        outputs=provider_radio
    )
    print("BYOK textbox change event linked.")

    # Also validate provider when the radio changes to ensure consistency
    provider_radio.change(
        fn=validate_provider,
        inputs=[byok_textbox, provider_radio],
        outputs=provider_radio
    )
    print("Provider radio button change event linked.")
print("Gradio interface initialized.")

if __name__ == "__main__":
    print("Launching the demo application.")
    demo.launch(show_api=True)