Update app.py
app.py CHANGED
@@ -12,24 +12,12 @@ from diffusers import FluxKontextPipeline
 # --- Constants and Model Loading ---
 MAX_SEED = np.iinfo(np.int32).max
 
-# --- FIX 1: Handle Hugging Face Authentication ---
-# This is a gated model. You must have access on Hugging Face and provide a token.
-# 1. Visit https://huggingface.co/black-forest-labs/FLUX.1-Kontext-dev and accept the terms.
-# 2. Get an access token from https://huggingface.co/settings/tokens
-# 3. Add the token below or set it as an environment variable `HF_TOKEN`.
-HF_TOKEN = os.getenv("HF_TOKEN", "YOUR_HUGGING_FACE_TOKEN")  # Replace with your token
-
 # Load the pretrained model
 try:
-
-
-
-
-    pipe = FluxKontextPipeline.from_pretrained(
-        "black-forest-labs/FLUX.1-Kontext-dev",
-        torch_dtype=torch.bfloat16,
-        token=HF_TOKEN,  # Use the token for authentication
-    ).to("cuda")
+    pipe = FluxKontextPipeline.from_pretrained(
+        "black-forest-labs/FLUX.1-Kontext-dev",
+        torch_dtype=torch.bfloat16,
+    ).to("cuda")
 except Exception as e:
     pipe = None
     print(f"Warning: Could not load the model on CUDA. GPU is required. Error: {e}")
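Note on the removed FIX 1 block: the token no longer needs to be hardcoded, since recent versions of huggingface_hub pick up the HF_TOKEN environment variable (for example a Space secret) automatically when downloading gated models. A minimal sketch of non-hardcoded authentication, assuming HF_TOKEN is set in the environment:

import os
import torch
from diffusers import FluxKontextPipeline

# Read the token from the environment; never commit it to the repo.
# If huggingface_hub already reads HF_TOKEN on its own, passing it
# explicitly is redundant but harmless.
token = os.getenv("HF_TOKEN")
pipe = FluxKontextPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-Kontext-dev",
    torch_dtype=torch.bfloat16,
    token=token,
).to("cuda")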
@@ -42,7 +30,7 @@ def chat_fn(message, chat_history, seed, randomize_seed, guidance_scale, steps,
     Performs image generation or editing based on user input from the chat interface.
     """
     if pipe is None:
-        raise gr.Error("Model could not be loaded. This could be due to
+        raise gr.Error("Model could not be loaded. This could be due to no access to the model or no CUDA-enabled GPU.")
 
     prompt = message["text"]
     files = message["files"]
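For context, gr.ChatInterface with multimodal=True delivers the user turn as a dict with "text" and "files" keys, which is what chat_fn unpacks above. A sketch of the generation-versus-editing branch the docstring describes, assuming FluxKontextPipeline accepts an optional input image:

from PIL import Image

prompt = message["text"]   # the user's instruction
files = message["files"]   # paths of any uploaded images

if files:
    # Editing: condition the pipeline on the first uploaded image.
    init_image = Image.open(files[0]).convert("RGB")
    image = pipe(prompt=prompt, image=init_image).images[0]
else:
    # Generation: text-only prompt.
    image = pipe(prompt=prompt).images[0]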
@@ -84,19 +72,19 @@ randomize_checkbox = gr.Checkbox(label="Randomize seed", value=False)
 guidance_slider = gr.Slider(label="Guidance Scale", minimum=1.0, maximum=10.0, step=0.1, value=2.5)
 steps_slider = gr.Slider(label="Steps", minimum=1, maximum=30, value=28, step=1)
 
-# --- FIX 2:
-#
+# --- FIX 2: Remove examples with external URLs that cause 403 errors ---
+# Instead, provide text-only examples that work without external image dependencies
 examples = [
     [
-        {"text": "A cute robot reading a book", "files": []},
+        {"text": "A cute robot reading a book in a cozy library", "files": []},
         42, False, 2.5, 28
     ],
     [
-        {"text": "
+        {"text": "A majestic lion standing on a rocky cliff at sunset", "files": []},
         12345, False, 3.0, 25
     ],
     [
-        {"text": "
+        {"text": "A futuristic cityscape with flying cars and neon lights", "files": []},
         54321, False, 2.0, 30
     ],
 ]
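As a reminder of the row format: each example supplies the multimodal message plus one value per additional input, in the order the inputs are passed to ChatInterface. A sketch of the correspondence (the comments are assumptions based on the control names above):

example_row = [
    {"text": "A cute robot reading a book in a cozy library", "files": []},
    42,     # seed
    False,  # randomize_seed
    2.5,    # guidance_scale
    28,     # steps
]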
@@ -124,7 +112,7 @@ demo = gr.ChatInterface(
         guidance_slider,
         steps_slider
     ],
-    examples=examples,
+    examples=examples,
     theme="soft"
 )
 
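On the 403s that FIX 2 works around: when example rows reference remote image URLs, Gradio fetches those files at startup, and hotlink-protected hosts answer with 403. With text-only examples nothing is fetched; if remote files were ever reintroduced, disabling example caching is a possible fallback. A hedged sketch (the seed_slider name is assumed, since the seed input is not shown in this diff):

demo = gr.ChatInterface(
    fn=chat_fn,
    multimodal=True,
    additional_inputs=[seed_slider, randomize_checkbox, guidance_slider, steps_slider],
    examples=examples,
    cache_examples=False,  # don't pre-run chat_fn on the examples at startup
    theme="soft",
)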