import gradio as gr
from huggingface_hub import InferenceClient
from PIL import Image
import time
import os

# Get the Hugging Face token from the environment variable, or a secret if available.
HF_TOKEN = os.environ.get("HF_TOKEN")
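# Note: on a Hugging Face Space this token is typically configured under
# Settings -> Variables and secrets; locally, export HF_TOKEN in your shell
# before launching the app.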

# If HF_TOKEN is missing, store an error message to raise when a generation request is made.
if not HF_TOKEN:
    HF_TOKEN_ERROR = "Hugging Face API token (HF_TOKEN) not found. Please set it as an environment variable or Gradio secret."
else:
    HF_TOKEN_ERROR = None  # No error if the token is found


client = InferenceClient(token=HF_TOKEN)  # Authenticated client for the Hugging Face Inference API

def generate_image(prompt, progress=gr.Progress()):
    """Generates an image using the InferenceClient and provides progress updates."""

    if HF_TOKEN_ERROR:
        raise gr.Error(HF_TOKEN_ERROR)

    progress(0, desc="Sending request to Hugging Face...")
    try:
        # Request the image from the Inference API; client.text_to_image returns a PIL.Image.
        image = client.text_to_image(prompt, model="black-forest-labs/FLUX.1-schnell")

        if not isinstance(image, Image.Image): # Basic type checking.
             raise Exception(f"Expected a PIL Image, but got: {type(image)}")

        progress(0.8, desc="Processing image...")
        time.sleep(0.5)  # Simulate some processing
        progress(1.0, desc="Done!")
        return image
    except Exception as e:  # Catch all exceptions from the API call
        # Best-effort rate-limit detection: inspect the error message, case-insensitively.
        if "rate limit" in str(e).lower():
            error_message = f"Rate limit exceeded. Please try again later. Error: {e}"
        else:
            error_message = f"An error occurred: {e}"  # Generic error message
        raise gr.Error(error_message)



# Gradio interface styling
css = """
.container {
    max-width: 800px;
    margin: auto;
    padding: 20px;
    border: 1px solid #ddd;
    border-radius: 10px;
    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
}
.title {
    text-align: center;
    font-size: 2.5em;
    margin-bottom: 0.5em;
    color: #333;
    font-family: 'Arial', sans-serif; /* More readable font */
}
.description {
    text-align: center;
    font-size: 1.1em;
    margin-bottom: 1.5em;
    color: #555;
}
.input-section, .output-section {
    margin-bottom: 1.5em;
}


/* Subtle fade-in applied to the generated image when it appears */
@keyframes fadeIn {
    from { opacity: 0; transform: translateY(20px); }
    to   { opacity: 1; transform: translateY(0); }
}
.output-section img {
    animation: fadeIn 0.6s ease-out;
}


/* Improve button style */
.submit-button {
    display: block;
    margin: auto;
    padding: 10px 20px;
    font-size: 1.1em;
    color: white;
    background-color: #4CAF50;
    border: none;
    border-radius: 5px;
    cursor: pointer;
    transition: background-color 0.3s ease;
}
.submit-button:hover {
    background-color: #367c39;
}

/* Style the error messages */
.error-message {
    color: red;
    text-align: center;
    margin-top: 1em;
    font-weight: bold;
}
label{
    font-weight: bold; /* Make labels bold */
    display: block;      /* Each label on its own line */
    margin-bottom: 0.5em; /* Space between label and input */
}
"""


with gr.Blocks(css=css) as demo:
    gr.Markdown("# Xylaria Iris Image Generator", elem_classes="title")
    gr.Markdown(
        "Enter a text prompt and generate an image using the Xylaria Iris model!",
        elem_classes="description"
    )


    with gr.Row():
        with gr.Column():
            with gr.Group(elem_classes="input-section"):
                prompt_input = gr.Textbox(label="Enter your prompt", placeholder="e.g., A beautiful landscape with a magical tree", lines=3)
                generate_button = gr.Button("Generate Image", elem_classes="submit-button")
        with gr.Column():
            with gr.Group(elem_classes="output-section") as output_group:
                image_output = gr.Image(label="Generated Image")


    # Wire the generator directly to the click/submit events. (Mutating output_group.elem_classes
    # inside a callback has no effect after the Blocks app is rendered, so the fade-in animation
    # is handled purely in the CSS above.)
    generate_button.click(generate_image, inputs=prompt_input, outputs=image_output)
    prompt_input.submit(generate_image, inputs=prompt_input, outputs=image_output)

    gr.Examples(
        [["A futuristic cityscape at night"],
         ["A mystical forest with glowing mushrooms"],
         ["An astronaut exploring a new planet"],
         ["A cat wearing a top hat"]],
        inputs=prompt_input
    )

if __name__ == "__main__":
    demo.queue().launch()  # queue() enables request queuing so gr.Progress updates reach the UI