prithivMLmods committed on
Commit
8ee92ca
·
verified ·
1 Parent(s): 0426da1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -13
app.py CHANGED
@@ -32,18 +32,6 @@ MAX_IMAGE_SIZE = 2048
32
  dtype = torch.bfloat16
33
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
34
 
35
- # ---- CUDA Check ----
36
- print("CUDA_VISIBLE_DEVICES=", os.environ.get("CUDA_VISIBLE_DEVICES"))
37
- print("torch.__version__ =", torch.__version__)
38
- print("torch.version.cuda =", torch.version.cuda)
39
- print("cuda available:", torch.cuda.is_available())
40
- print("cuda device count:", torch.cuda.device_count())
41
- if torch.cuda.is_available():
42
- print("current device:", torch.cuda.current_device())
43
- print("device name:", torch.cuda.get_device_name(torch.cuda.current_device()))
44
-
45
- print("Using device:", device)
46
-
47
  # --- Model Loading ---
48
  pipe_qwen = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=dtype).to(device)
49
 
@@ -290,4 +278,4 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
290
  )
291
 
292
  if __name__ == "__main__":
293
- demo.queue(max_size=50).launch(share=True, mcp_server=True, ssr_mode=False, show_error=True)
 
32
  dtype = torch.bfloat16
33
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
34
 
 
 
 
 
 
 
 
 
 
 
 
 
35
  # --- Model Loading ---
36
  pipe_qwen = DiffusionPipeline.from_pretrained("Qwen/Qwen-Image", torch_dtype=dtype).to(device)
37
 
 
278
  )
279
 
280
  if __name__ == "__main__":
281
+ demo.queue(max_size=50).launch(share=False, mcp_server=True, ssr_mode=False, show_error=True)