import gradio as gr
from huggingface_hub import InferenceClient
from PIL import Image
import time
import os

# Get the Hugging Face token from the environment variable, or a Space secret if available.
HF_TOKEN = os.environ.get("HF_TOKEN")

# Check if HF_TOKEN is set; if not, record a configuration error (raised later, inside generate_image).
if not HF_TOKEN:
    HF_TOKEN_ERROR = "Hugging Face API token (HF_TOKEN) not found. Please set it as an environment variable or Gradio secret."
else:
    HF_TOKEN_ERROR = None  # No error if the token is found
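
# On Hugging Face Spaces, HF_TOKEN is normally added as a secret in the Space
# settings rather than hard-coded, so the token never appears in the repository.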

client = InferenceClient(token=HF_TOKEN)  # Authenticate the Inference API client with the token

def generate_image(prompt, progress=gr.Progress()):
    """Generates an image using the InferenceClient and provides progress updates."""
    if HF_TOKEN_ERROR:
        raise gr.Error(HF_TOKEN_ERROR)
    progress(0, desc="Sending request to Hugging Face...")
    try:
        # Call the hosted text-to-image endpoint; it returns a PIL Image.
        image = client.text_to_image(prompt, model="black-forest-labs/FLUX.1-schnell")
        if not isinstance(image, Image.Image):  # Basic type checking.
            raise TypeError(f"Expected a PIL Image, but got: {type(image)}")
        progress(0.8, desc="Processing image...")
        time.sleep(0.5)  # Simulate a short processing step so the progress bar is visible
        progress(1.0, desc="Done!")
        return image
    except Exception as e:  # Catch all exceptions from the API call
        # Best-effort rate-limit detection: inspect the error message, case-insensitively.
        if "rate limit" in str(e).lower():
            error_message = f"Rate limit exceeded. Please try again later. Error: {e}"
        else:
            error_message = f"An error occurred: {e}"  # Generic error message
        raise gr.Error(error_message)

# Gradio interface styling
css = """
.container {
    max-width: 800px;
    margin: auto;
    padding: 20px;
    border: 1px solid #ddd;
    border-radius: 10px;
    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
}
.title {
    text-align: center;
    font-size: 2.5em;
    margin-bottom: 0.5em;
    color: #333;
    font-family: 'Arial', sans-serif; /* More readable font */
}
.description {
    text-align: center;
    font-size: 1.1em;
    margin-bottom: 1.5em;
    color: #555;
}
.input-section, .output-section {
    margin-bottom: 1.5em;
}
/* Animation for the image appearance - subtle fade-in */
@keyframes fadeIn {
    from { opacity: 0; transform: translateY(20px); }
    to { opacity: 1; transform: translateY(0); }
}
/* Fade the generated image in when it appears */
.output-section img {
    animation: fadeIn 0.5s ease-in-out;
}
/* Improve button style */
.submit-button {
    display: block;
    margin: auto;
    padding: 10px 20px;
    font-size: 1.1em;
    color: white;
    background-color: #4CAF50;
    border: none;
    border-radius: 5px;
    cursor: pointer;
    transition: background-color 0.3s ease;
}
.submit-button:hover {
    background-color: #367c39;
}
/* Style the error messages */
.error-message {
    color: red;
    text-align: center;
    margin-top: 1em;
    font-weight: bold;
}
label {
    font-weight: bold; /* Make labels bold */
    display: block; /* Each label on its own line */
    margin-bottom: 0.5em; /* Space between label and input */
}
"""

with gr.Blocks(css=css) as demo:
    gr.Markdown(
        """
# Xylaria Iris Image Generator
Enter a text prompt and generate an image using the Xylaria Iris model!
        """,
        elem_classes="title"
    )
    with gr.Row():
        with gr.Column():
            with gr.Group(elem_classes="input-section"):
                prompt_input = gr.Textbox(label="Enter your prompt", placeholder="e.g., A beautiful landscape with a magical tree", lines=3)
                generate_button = gr.Button("Generate Image", elem_classes="submit-button")
        with gr.Column():
            with gr.Group(elem_classes="output-section") as output_group:
                image_output = gr.Image(label="Generated Image")

    def on_generate_click(prompt):
        # Reassigning output_group.elem_classes inside a handler does not update the
        # rendered page, so the fade-in animation is applied purely via the CSS above.
        return generate_image(prompt)

    generate_button.click(on_generate_click, inputs=prompt_input, outputs=image_output)
    prompt_input.submit(on_generate_click, inputs=prompt_input, outputs=image_output)

    gr.Examples(
        [["A futuristic cityscape at night"],
         ["A mystical forest with glowing mushrooms"],
         ["An astronaut exploring a new planet"],
         ["A cat wearing a top hat"]],
        inputs=prompt_input
    )

if __name__ == "__main__":
    demo.queue().launch()
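
# To try the app locally (a minimal sketch; the file name app.py is an assumption,
# following the Spaces convention, and package versions are not pinned):
#     pip install gradio huggingface_hub pillow
#     export HF_TOKEN=hf_xxxxxxxx   # placeholder value; use your own token
#     python app.py
# Gradio then serves the interface at the local URL it prints on startup.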