danhtran2mind committed
Commit 716d084 · verified · 1 Parent(s): ecfa625

Update app.py

Files changed (1)
  1. app.py +20 -4
app.py CHANGED
@@ -1,3 +1,17 @@
+# Copyright (c) 2025 xAI and/or its affiliates. All rights reserved.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 import dataclasses
 import json
 from pathlib import Path
@@ -41,7 +55,7 @@ def get_examples(examples_dir: str = "assets/examples") -> list:
 
     if not ans:
         ans = [
-            ["a serene landscape in Ghibli style", 64, 64, 50, 3.5, 42, None]
+            ["a serene landscape in Ghibli style", 64, 64, 50, 3.5, 42, None]
         ]
     return ans
 
@@ -49,8 +63,9 @@ def create_demo(
     model_name: str = "danhtran2mind/ghibli-fine-tuned-sd-2.1",
     device: str = "cuda" if torch.cuda.is_available() else "cpu",
 ):
-    # Set device and dtype
-    dtype = torch.float16 if torch.cuda.is_available() else torch.float32
+    # Convert device string to torch.device
+    device = torch.device(device)
+    dtype = torch.float16 if device.type == "cuda" else torch.float32
 
     # Load models with consistent dtype
     vae = AutoencoderKL.from_pretrained(model_name, subfolder="vae", torch_dtype=dtype).to(device)
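Note on the device handling in this hunk: deriving dtype from device.type rather than torch.cuda.is_available() means an explicit device="cpu" argument now gets float32 weights even on a machine that has CUDA. A minimal sketch of the new selection logic (the helper name is illustrative, not part of app.py):

    import torch

    def pick_device_and_dtype(device: str = "cuda" if torch.cuda.is_available() else "cpu"):
        # Normalize "cuda", "cuda:0", or "cpu" into a torch.device object.
        device = torch.device(device)
        # Half precision is only worthwhile on CUDA; CPU inference stays in float32.
        dtype = torch.float16 if device.type == "cuda" else torch.float32
        return device, dtype

    device, dtype = pick_device_and_dtype("cpu")
    assert dtype == torch.float32  # an explicit CPU request no longer ends up with float16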
@@ -114,7 +129,7 @@ def create_demo(
 
         noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
         noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
-        latents = scheduler.step(noise_pred, t, latents).prev_sample
+        latents = scheduler.step(noise_pred, t, latents).prev_sample
 
     with torch.no_grad():
         latents = latents / vae.config.scaling_factor
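The surrounding loop is the classifier-free guidance update: the UNet's batched prediction is split into unconditional and text-conditioned halves, recombined with guidance_scale, and the scheduler advances the latents one step before the final latents are rescaled by vae.config.scaling_factor and decoded. A self-contained sketch, assuming a diffusers DDIMScheduler (the diff does not show which scheduler app.py constructs) and random tensors in place of the UNet output:

    import torch
    from diffusers import DDIMScheduler  # assumed scheduler class, for illustration only

    guidance_scale = 3.5
    scheduler = DDIMScheduler()
    scheduler.set_timesteps(50)

    latents = torch.randn(1, 4, 64, 64)  # dummy initial latents
    for t in scheduler.timesteps:
        # Stand-in for the UNet prediction on the doubled (uncond + text) latent batch.
        noise_pred = torch.randn(2, 4, 64, 64)
        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
        # Advance the latents by one denoising step.
        latents = scheduler.step(noise_pred, t, latents).prev_sample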
@@ -193,6 +208,7 @@ if __name__ == "__main__":
         model_name: str = "danhtran2mind/ghibli-fine-tuned-sd-2.1"
         device: str = "cuda" if torch.cuda.is_available() else "cpu"
         # port: int = 7860
+        # share: bool = False  # Set to True for public sharing (Hugging Face Spaces)
 
     parser = HfArgumentParser([AppArgs])
     args_tuple = parser.parse_args_into_dataclasses()
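For context on this last hunk: the CLI options are a dataclass that HfArgumentParser turns into command-line flags, and the new share field stays commented out, so it documents an option rather than enabling one. A runnable sketch of the same pattern, with defaults copied from the diff and an explicit args= list so it runs without a real command line:

    import dataclasses
    import torch
    from transformers import HfArgumentParser

    @dataclasses.dataclass
    class AppArgs:
        model_name: str = "danhtran2mind/ghibli-fine-tuned-sd-2.1"
        device: str = "cuda" if torch.cuda.is_available() else "cpu"
        # port: int = 7860
        # share: bool = False  # would add a --share flag once uncommented

    parser = HfArgumentParser([AppArgs])
    (args,) = parser.parse_args_into_dataclasses(args=["--device", "cpu"])
    print(args.model_name, args.device)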
 