Ephemeral182 committed on
Commit 044ef1c · verified · 1 Parent(s): 603626a

Update app.py

Files changed (1): app.py (+14, -16)
app.py CHANGED
@@ -5,21 +5,19 @@ import numpy as np
 import gradio as gr
 import spaces
 import torch
-from diffusers import FluxPipeline, FluxTransformer2DModel
-from transformers import AutoModelForCausalLM, AutoTokenizer
 from huggingface_hub import login, whoami
 
 # ------------------------------------------------------------------
 # 1. Authentication and Global Configuration
 # ------------------------------------------------------------------
 # Authenticate with HF token
-hf_token = os.getenv("HF_TOKEN")
+HF_TOKEN = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACEHUB_API_TOKEN")
 auth_status = "🔴 Not Authenticated"
 
-if hf_token:
+if HF_TOKEN:
     try:
-        login(token=hf_token, add_to_git_credential=True)
-        user_info = whoami(hf_token)
+        login(token=HF_TOKEN, add_to_git_credential=True)
+        user_info = whoami(HF_TOKEN)
         auth_status = f"✅ Authenticated as {user_info['name']}"
         logging.info(f"Successfully authenticated with Hugging Face as {user_info['name']}")
     except Exception as e:
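The hunk above widens the token lookup: with `or`, the Space picks up the secret whether it is stored as `HF_TOKEN` or under the older `HUGGINGFACEHUB_API_TOKEN` name. A minimal standalone sketch of the resulting auth flow (the `except` body is cut off by the hunk boundary, so the warning below is an assumption):

```python
import logging
import os

from huggingface_hub import login, whoami

# Accept either env var name; the second is the older convention.
HF_TOKEN = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACEHUB_API_TOKEN")
auth_status = "🔴 Not Authenticated"

if HF_TOKEN:
    try:
        login(token=HF_TOKEN, add_to_git_credential=True)
        user_info = whoami(HF_TOKEN)  # dict describing the account; has a "name" key
        auth_status = f"✅ Authenticated as {user_info['name']}"
    except Exception as e:
        # Assumption: the real except body is outside the hunk.
        logging.warning(f"Hugging Face authentication failed: {e}")
```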
@@ -64,8 +62,8 @@ def download_model_weights(target_dir, repo_id, subdir=None):
         "local_dir_use_symlinks": False,
     }
 
-    if hf_token:
-        download_kwargs["token"] = hf_token
+    if HF_TOKEN:
+        download_kwargs["token"] = HF_TOKEN
 
     if subdir:
         download_kwargs["allow_patterns"] = os.path.join(subdir, "**")
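Only the kwargs plumbing of `download_model_weights` is visible here, but `local_dir_use_symlinks` and `allow_patterns` strongly suggest the dict feeds `huggingface_hub.snapshot_download`. A hedged reconstruction under that assumption, with the module-level `HF_TOKEN` passed in as a parameter for illustration:

```python
import os

from huggingface_hub import snapshot_download

def download_model_weights(target_dir, repo_id, subdir=None, token=None):
    download_kwargs = {
        "repo_id": repo_id,
        "local_dir": target_dir,
        "local_dir_use_symlinks": False,
    }
    if token:
        # Attach credentials only when present, so public repos
        # still download without any token configured.
        download_kwargs["token"] = token
    if subdir:
        # Restrict the snapshot to a single folder of the repo.
        download_kwargs["allow_patterns"] = os.path.join(subdir, "**")
    return snapshot_download(**download_kwargs)
```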
@@ -129,7 +127,7 @@ class QwenRecapAgent:
         try:
             self.tokenizer = AutoTokenizer.from_pretrained(
                 model_path,
-                token=hf_token,
+                token=HF_TOKEN,
                 use_fast=True,  # force the fast tokenizer
                 trust_remote_code=True
             )
@@ -138,7 +136,7 @@ class QwenRecapAgent:
             logging.warning(f"Fast tokenizer failed, falling back to slow: {e}")
             self.tokenizer = AutoTokenizer.from_pretrained(
                 model_path,
-                token=hf_token,
+                token=HF_TOKEN,
                 use_fast=False,
                 trust_remote_code=True
             )
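These two hunks only rename the token variable, but the surrounding pattern is the useful part: try the fast (Rust) tokenizer first and fall back to the slow Python implementation if it cannot load. Extracted into a self-contained sketch, with the class context stripped:

```python
import logging

from transformers import AutoTokenizer

def load_tokenizer(model_path, token=None):
    try:
        # Prefer the Rust-backed fast tokenizer.
        return AutoTokenizer.from_pretrained(
            model_path, token=token, use_fast=True, trust_remote_code=True
        )
    except Exception as e:
        # Some repos only ship slow (Python/SentencePiece) tokenizers.
        logging.warning(f"Fast tokenizer failed, falling back to slow: {e}")
        return AutoTokenizer.from_pretrained(
            model_path, token=token, use_fast=False, trust_remote_code=True
        )
```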
@@ -148,8 +146,8 @@ class QwenRecapAgent:
             "device_map": device_map if device_map == "auto" else None,
             "trust_remote_code": True
         }
-        if hf_token:
-            model_kwargs["token"] = hf_token
+        if HF_TOKEN:
+            model_kwargs["token"] = HF_TOKEN
 
         self.model = AutoModelForCausalLM.from_pretrained(model_path, **model_kwargs)
         if device_map != "auto":
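Same rename in the model loader. The logic around it: `device_map` is forwarded only when it is `"auto"` (delegating placement to accelerate); otherwise it is nulled out, and the trailing `if device_map != "auto":` presumably moves the model by hand. A sketch under that assumption; the branch body and any dtype argument are not shown in the diff:

```python
import torch
from transformers import AutoModelForCausalLM

def load_model(model_path, device_map="auto", token=None):
    model_kwargs = {
        # "auto" lets accelerate shard and place the weights; any other
        # value is treated here as a plain device string.
        "device_map": device_map if device_map == "auto" else None,
        "trust_remote_code": True,
    }
    if token:
        model_kwargs["token"] = token
    model = AutoModelForCausalLM.from_pretrained(model_path, **model_kwargs)
    if device_map != "auto":
        # Assumption: the real branch body is outside the hunk; a manual
        # .to() is the usual counterpart to device_map=None.
        model = model.to(torch.device(device_map))
    return model
```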
@@ -257,7 +255,7 @@ class PosterGenerator:
         self.pipeline = FluxPipeline.from_pretrained(
             self.pipeline_path,
             torch_dtype=torch.bfloat16,
-            token=hf_token
+            token=HF_TOKEN
         )
 
         # Load custom weights
@@ -267,7 +265,7 @@
             transformer = FluxTransformer2DModel.from_pretrained(
                 custom_weights_local,
                 torch_dtype=torch.bfloat16,
-                token=hf_token
+                token=HF_TOKEN
             )
             self.pipeline.transformer = transformer
         elif self.custom_weights_path and os.path.exists(self.custom_weights_path):
@@ -275,7 +273,7 @@
             transformer = FluxTransformer2DModel.from_pretrained(
                 self.custom_weights_path,
                 torch_dtype=torch.bfloat16,
-                token=hf_token
+                token=HF_TOKEN
             )
             self.pipeline.transformer = transformer
 
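The three `PosterGenerator` hunks above are the same one-line token change applied to the base pipeline load and to the two alternative sources of custom transformer weights. Condensed into a single sketch; the repo id and weights path are placeholders, since the real `pipeline_path` and `custom_weights_path` are set outside the diff:

```python
import os

import torch
from diffusers import FluxPipeline, FluxTransformer2DModel

HF_TOKEN = os.getenv("HF_TOKEN")                 # module-level token from the first hunk
PIPELINE_PATH = "black-forest-labs/FLUX.1-dev"   # placeholder
CUSTOM_WEIGHTS = "./weights/custom_transformer"  # placeholder

pipeline = FluxPipeline.from_pretrained(
    PIPELINE_PATH,
    torch_dtype=torch.bfloat16,
    token=HF_TOKEN,
)
# Swap in fine-tuned transformer weights while keeping the rest of the
# pipeline (VAE, text encoders, scheduler) from the base checkpoint.
pipeline.transformer = FluxTransformer2DModel.from_pretrained(
    CUSTOM_WEIGHTS,
    torch_dtype=torch.bfloat16,
    token=HF_TOKEN,
)
```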
@@ -319,7 +317,7 @@ def generate_image_interface(
         return None, "❌ Prompt cannot be empty!", ""
 
     try:
-        if not hf_token:
+        if not HF_TOKEN:
             return None, "❌ Error: HF_TOKEN not found. Please configure authentication.", ""
 
         device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
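The last hunk swaps the guard over to the module-level `HF_TOKEN`. The convention it relies on is worth noting: the Gradio handler returns an `(image, status, extra)` tuple, so a missing token becomes a visible UI message instead of a 401 deep in the download path. Sketched below; the generation body and the real `except` clause sit outside the hunk:

```python
import os

import torch

HF_TOKEN = os.getenv("HF_TOKEN")  # module-level token from the first hunk

def generate_image_interface(prompt, *args):
    if not prompt:
        return None, "❌ Prompt cannot be empty!", ""
    try:
        if not HF_TOKEN:
            return None, "❌ Error: HF_TOKEN not found. Please configure authentication.", ""
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        ...  # generation pipeline elided; not part of this hunk
    except Exception as e:
        # Assumption: failures are reported through the same tuple shape.
        return None, f"❌ Error: {e}", ""
```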
 