mobenta committed
Commit cae7a7e · verified · 1 Parent(s): 7a29b29

Update app.py

Files changed (1):
  app.py (+12 -5)
app.py CHANGED

@@ -1,11 +1,17 @@
 import os
 import random
 import numpy as np
-import spaces # Import spaces before torch
+import spaces
 import torch
 from diffusers import FluxPipeline
 import gradio as gr
 
+# Access the Hugging Face token from environment variables
+hf_token = os.getenv("HF_TOKEN")
+
+if hf_token is None:
+    raise ValueError("Hugging Face token is not set. Please set the HF_TOKEN environment variable.")
+
 # Check if GPU is available
 if torch.cuda.is_available():
     device = "cuda"
@@ -14,14 +20,15 @@ else:
     device = "cpu"
     print("Using CPU")
 
-# login hf token
-HF_TOKEN = os.getenv("HF_TOKEN")
-
 MAX_SEED = np.iinfo(np.int32).max
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "0") == "1"
 
 # Initialize the pipeline and download the model
-pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
+pipe = FluxPipeline.from_pretrained(
+    "black-forest-labs/FLUX.1-dev",
+    torch_dtype=torch.bfloat16,
+    use_auth_token=hf_token  # Use the token from the environment variable
+)
 pipe.to(device)
 
 # Enable memory optimizations
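
A side note on the use_auth_token= argument introduced here: recent diffusers releases deprecate it in favor of token= (forwarded to huggingface_hub), so on a newer install the same load can be written as in the sketch below. This is a minimal sketch under that assumption, reusing the model id, dtype, and HF_TOKEN environment-variable contract from the commit; it is not part of the commit itself.

import os

import torch
from diffusers import FluxPipeline

# Fail fast if the token is missing, mirroring the check added in this commit.
hf_token = os.getenv("HF_TOKEN")
if hf_token is None:
    raise ValueError("Hugging Face token is not set. Please set the HF_TOKEN environment variable.")

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16,
    token=hf_token,  # assumed newer keyword; replaces the deprecated use_auth_token=
)
pipe.to("cuda" if torch.cuda.is_available() else "cpu")

Alternatively, calling huggingface_hub.login(token=hf_token) once at startup authenticates all subsequent hub requests, which avoids threading a token argument through every from_pretrained call.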