# Hugging Face Space: FLUX.1 Kontext [dev] chat app (Spaces "Running on Zero").
# --- Imports: stdlib first, then third-party (PEP 8 grouping) ---
import io
import os
import random
import tempfile

import gradio as gr
import numpy as np
import pillow_heif  # registers HEIF/AVIF decoding support for PIL (see load_client)
from huggingface_hub import InferenceClient
from PIL import Image, ImageOps
# --- Constants ---
# Upper bound for user-visible seeds: largest 32-bit signed integer.
MAX_SEED = np.iinfo(np.int32).max

# --- Global client variable ---
# Shared InferenceClient instance, lazily created by load_client() on first use.
client = None
def load_client():
    """Lazily initialize and return the shared Hugging Face InferenceClient.

    On first call, registers the HEIF/AVIF opener with PIL (so uploaded
    HEIF/AVIF images can be decoded) and constructs an InferenceClient
    routed through the fal-ai provider. Subsequent calls return the
    cached instance.

    Returns:
        InferenceClient: the cached client instance.

    Raises:
        gr.Error: if the HF_TOKEN environment variable is not set.
    """
    global client
    if client is None:
        # Register HEIF opener with PIL for AVIF/HEIF support.
        pillow_heif.register_heif_opener()
        # Token must be provided via the Space's environment settings.
        hf_token = os.getenv("HF_TOKEN")
        if not hf_token:
            raise gr.Error(
                "HF_TOKEN environment variable not found. Please add your "
                "Hugging Face token to the Space settings."
            )
        client = InferenceClient(
            provider="fal-ai",
            api_key=hf_token,
            bill_to="huggingface",
        )
    return client
# --- Core Inference Function for ChatInterface ---
def chat_fn(message, chat_history, seed, randomize_seed, guidance_scale, steps, progress=gr.Progress()):
    """Generate or edit an image based on user input from the chat interface.

    If the message contains an uploaded file, the image is edited with
    ``image_to_image``; otherwise a new image is generated from the text
    prompt with ``text_to_image``.

    Args:
        message: MultimodalTextbox payload dict with "text" and "files" keys.
        chat_history: prior chat turns (unused; required by gr.ChatInterface).
        seed: base random seed for generation.
        randomize_seed: if True, replace ``seed`` with a random value.
        guidance_scale: classifier-free guidance strength.
        steps: number of diffusion inference steps.
        progress: Gradio progress tracker.

    Returns:
        gr.Image: component wrapping the generated/edited PIL image.

    Raises:
        gr.Error: on missing input, unreadable upload, or missing HF_TOKEN.
    """
    client = load_client()
    prompt = message["text"]
    files = message["files"]
    if not prompt and not files:
        raise gr.Error("Please provide a prompt and/or upload an image.")
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)

    if files:
        print(f"Received image: {files[0]}")
        try:
            input_image = Image.open(files[0])
            # Normalize mode (handles RGBA, P, etc.) before serializing.
            if input_image.mode != "RGB":
                input_image = input_image.convert("RGB")
            # Auto-orient the image based on EXIF data.
            input_image = ImageOps.exif_transpose(input_image)
            # Serialize to PNG bytes for the API call.
            buffer = io.BytesIO()
            input_image.save(buffer, format='PNG')
            input_image_bytes = buffer.getvalue()
        except Exception as e:
            raise gr.Error(f"Could not process the uploaded image: {str(e)}. Please try uploading a different image format (JPEG, PNG, WebP).")
        progress(0.1, desc="Processing image...")
        # Forward the UI controls to the API (previously accepted but ignored).
        # NOTE(review): fal-ai provider support for these kwargs on
        # image_to_image should be confirmed; `seed` is intentionally not
        # passed here as it is not a documented image_to_image parameter.
        image = client.image_to_image(
            input_image_bytes,
            prompt=prompt,
            model="black-forest-labs/FLUX.1-Kontext-dev",
            guidance_scale=guidance_scale,
            num_inference_steps=steps,
        )
        progress(1.0, desc="Complete!")
    else:
        print(f"Received prompt for text-to-image: {prompt}")
        progress(0.1, desc="Generating image...")
        # Forward seed/guidance/steps (previously accepted but ignored).
        image = client.text_to_image(
            prompt=prompt,
            model="black-forest-labs/FLUX.1-Kontext-dev",
            guidance_scale=guidance_scale,
            num_inference_steps=steps,
            seed=seed,
        )
        progress(1.0, desc="Complete!")

    # The client returns a PIL Image object; wrap it for the chat UI.
    return gr.Image(value=image)
# --- UI Definition using gr.ChatInterface ---
# Extra controls surfaced below the chat box; forwarded to chat_fn in order.
seed_slider = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
randomize_checkbox = gr.Checkbox(label="Randomize seed", value=False)
guidance_slider = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=2.5)
steps_slider = gr.Slider(label="Steps", minimum=1, maximum=30, value=28, step=1)

demo = gr.ChatInterface(
    fn=chat_fn,
    title="FLUX.1 Kontext [dev] - Inference Client",
    description="""<p style='text-align: center;'>
    A simple chat UI for the <b>FLUX.1 Kontext</b> model using Hugging Face Inference Client with fal-ai provider.
    <br>
    To edit an image, upload it and type your instructions (e.g., "Add a hat").
    <br>
    To generate an image, just type a prompt (e.g., "A photo of an astronaut on a horse").
    <br>
    Find the model on <a href='https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev' target='_blank'>Hugging Face</a>.
    </p>""",
    multimodal=True,
    textbox=gr.MultimodalTextbox(
        file_types=["image"],
        placeholder="Type a prompt and/or upload an image...",
        render=False
    ),
    additional_inputs=[
        seed_slider,
        randomize_checkbox,
        guidance_slider,
        steps_slider
    ],
    theme="soft"
)

if __name__ == "__main__":
    demo.launch()