ZennyKenny committed on
Commit
678ea0a
·
verified ·
1 Parent(s): c0d5436

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -30
app.py CHANGED
@@ -11,39 +11,37 @@ from diffusers import DiffusionPipeline, AutoencoderTiny
11
  from huggingface_hub import login
12
  from live_preview_helpers import flux_pipe_call_that_returns_an_iterable_of_images
13
 
14
- # Authenticate for gated repo access
15
- hf_token = os.environ.get("HF_TOKEN")
16
- if hf_token:
17
- login(token=hf_token)
 
18
 
19
  dtype = torch.bfloat16
20
  device = "cuda" if torch.cuda.is_available() else "cpu"
21
 
22
- # ✅ DO NOT CHANGE: Working pipeline using taef1
23
  taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
24
 
25
  pipe = DiffusionPipeline.from_pretrained(
26
  "black-forest-labs/FLUX.1-dev",
27
  torch_dtype=dtype,
28
- token=hf_token,
29
  vae=taef1
30
  ).to(device)
31
 
32
  pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
33
  pipe.load_lora_weights("ZennyKenny/flux_lora_natalie-diffusion")
34
 
35
- # Ensure image_preview dir exists
36
- os.makedirs("image_preview", exist_ok=True)
37
-
38
- MAX_SEED = np.iinfo(np.int32).max
39
- MAX_IMAGE_SIZE = 2048
40
 
41
  def sanitize_filename(name):
42
  return re.sub(r"[^a-zA-Z0-9_-]", "_", name)[:80]
43
 
 
44
  @spaces.GPU(duration=75)
45
- def infer(user_token, prompt, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
46
- login(token=user_token) # Authenticate for this call only
 
 
47
 
48
  if randomize_seed:
49
  seed = random.randint(0, MAX_SEED)
@@ -60,14 +58,16 @@ def infer(user_token, prompt, seed=42, randomize_seed=False, width=1024, height=
60
  generator=generator,
61
  output_type="pil",
62
  ):
63
- # Save image to /image_preview
64
  safe_name = sanitize_filename(prompt)
65
  img_path = f"image_preview/{safe_name}_{seed}.jpg"
66
  img.convert("RGB").save(img_path, "JPEG", quality=60)
67
 
 
68
  previews = [f"image_preview/{f}" for f in sorted(os.listdir("image_preview")) if f.endswith(".jpg")]
69
  return img, seed, previews
70
 
 
71
  examples = [
72
  "a man walking in the forest",
73
  "a viking ship sailing down a river",
@@ -86,13 +86,13 @@ Generate images in the surreal style of artist [Natalie Kav](https://www.behance
86
  > This space is designed for prototyping concept art for a forthcoming game called **ХТОНЬ**. All outputs are generated locally in the browser using GPU acceleration.
87
  """)
88
 
89
- with gr.Row():
90
- hf_token_input = gr.Textbox(
91
- label="Your Hugging Face API Token",
92
- placeholder="Paste your token here",
93
- type="password",
94
- )
95
 
 
96
  prompt = gr.Text(
97
  label="Prompt",
98
  show_label=False,
@@ -116,19 +116,17 @@ Generate images in the surreal style of artist [Natalie Kav](https://www.behance
116
 
117
  result_example = gr.Image(visible=False)
118
 
119
- gr.Examples(
120
- examples=examples,
121
- fn=infer,
122
- inputs=[hf_token_input, prompt],
123
- outputs=[result_example, seed, gr.Gallery(visible=False)],
124
- cache_examples=False, # Don't cache examples with real tokens!
125
- )
126
-
127
 
128
  with gr.Column(scale=1, elem_id="right-column"):
129
  result = gr.Image(label="", show_label=False, elem_id="generated-image")
130
 
131
- gr_state = gr.State([]) # internal list of previews
132
  with gr.Column():
133
  gr.Markdown("<h3 style='text-align:center;'>Generated Images Preview</h3>")
134
  gallery = gr.Gallery(label="", columns=4, height="auto", object_fit="cover")
@@ -140,6 +138,5 @@ Generate images in the surreal style of artist [Natalie Kav](https://www.behance
140
  outputs=[result, seed, gallery],
141
  )
142
 
143
-
144
  if __name__ == "__main__":
145
  natalie_diffusion.launch()
 
11
  from huggingface_hub import login
12
  from live_preview_helpers import flux_pipe_call_that_returns_an_iterable_of_images
13
 
14
+ # Ensure image_preview dir exists
15
+ os.makedirs("image_preview", exist_ok=True)
16
+
17
+ MAX_SEED = np.iinfo(np.int32).max
18
+ MAX_IMAGE_SIZE = 2048
19
 
20
  dtype = torch.bfloat16
21
  device = "cuda" if torch.cuda.is_available() else "cpu"
22
 
23
+ # ✅ Load model only once
24
  taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
25
 
26
  pipe = DiffusionPipeline.from_pretrained(
27
  "black-forest-labs/FLUX.1-dev",
28
  torch_dtype=dtype,
 
29
  vae=taef1
30
  ).to(device)
31
 
32
  pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
33
  pipe.load_lora_weights("ZennyKenny/flux_lora_natalie-diffusion")
34
 
 
 
 
 
 
35
 
36
  def sanitize_filename(name):
37
  return re.sub(r"[^a-zA-Z0-9_-]", "_", name)[:80]
38
 
39
+
40
  @spaces.GPU(duration=75)
41
+ def infer(user_token, prompt, seed=42, randomize_seed=False, width=1024, height=1024,
42
+ guidance_scale=3.5, num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
43
+ # Authenticate using user's token for this session
44
+ login(token=user_token)
45
 
46
  if randomize_seed:
47
  seed = random.randint(0, MAX_SEED)
 
58
  generator=generator,
59
  output_type="pil",
60
  ):
61
+ # Save low-quality JPG
62
  safe_name = sanitize_filename(prompt)
63
  img_path = f"image_preview/{safe_name}_{seed}.jpg"
64
  img.convert("RGB").save(img_path, "JPEG", quality=60)
65
 
66
+ # Collect previews
67
  previews = [f"image_preview/{f}" for f in sorted(os.listdir("image_preview")) if f.endswith(".jpg")]
68
  return img, seed, previews
69
 
70
+
71
  examples = [
72
  "a man walking in the forest",
73
  "a viking ship sailing down a river",
 
86
  > This space is designed for prototyping concept art for a forthcoming game called **ХТОНЬ**. All outputs are generated locally in the browser using GPU acceleration.
87
  """)
88
 
89
+ hf_token_input = gr.Textbox(
90
+ label="Your Hugging Face API Token",
91
+ placeholder="Paste your token here",
92
+ type="password"
93
+ )
 
94
 
95
+ with gr.Row():
96
  prompt = gr.Text(
97
  label="Prompt",
98
  show_label=False,
 
116
 
117
  result_example = gr.Image(visible=False)
118
 
119
+ gr.Examples(
120
+ examples=examples,
121
+ fn=infer,
122
+ inputs=[hf_token_input, prompt],
123
+ outputs=[result_example, seed, gr.Gallery(visible=False)],
124
+ cache_examples=False,
125
+ )
 
126
 
127
  with gr.Column(scale=1, elem_id="right-column"):
128
  result = gr.Image(label="", show_label=False, elem_id="generated-image")
129
 
 
130
  with gr.Column():
131
  gr.Markdown("<h3 style='text-align:center;'>Generated Images Preview</h3>")
132
  gallery = gr.Gallery(label="", columns=4, height="auto", object_fit="cover")
 
138
  outputs=[result, seed, gallery],
139
  )
140
 
 
141
  if __name__ == "__main__":
142
  natalie_diffusion.launch()