skallewag committed on
Commit
8ccd13a
·
verified ·
1 Parent(s): d782dc9

Create bootstrap.py

Browse files
Files changed (1) hide show
  1. bootstrap.py +50 -0
bootstrap.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/usr/bin/env python3
"""Bootstrap for SEEM on Hugging Face Spaces.

Installs detectron2, rewrites app.py so the demo runs on CPU when no
GPU is present, and launches the patched demo.

NOTE(review): the patch snippets below are matched verbatim against
app.py; their exact whitespace must mirror the deployed app.py or the
str.replace calls silently do nothing — confirm against that file.
"""

import os
import subprocess
import sys

# --- verbatim snippets matched / substituted in app.py ----------------------

# 1. Model construction: hard-coded .cuda() -> device-agnostic .to(device).
_CUDA_MODEL = (
    "model = BaseModel(opt, build_model(opt))"
    ".from_pretrained(pretrained_pth).eval().cuda()"
)
_DEVICE_MODEL = (
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n"
    "print(f\"Using device: {device}\")\n"
    "model = BaseModel(opt, build_model(opt))"
    ".from_pretrained(pretrained_pth).eval().to(device)"
)

# 2. inference(): only enter CUDA autocast when a GPU is actually available.
_CUDA_INFER = (
    "@torch.no_grad()\n"
    "def inference(image, task, *args, **kwargs):\n"
    "    with torch.autocast(device_type='cuda', dtype=torch.float16):"
)
_GUARDED_INFER = (
    "@torch.no_grad()\n"
    "def inference(image, task, *args, **kwargs):\n"
    "    if torch.cuda.is_available():\n"
    "        with torch.autocast(device_type='cuda', dtype=torch.float16):"
)

# 3. inference() body: re-indent the GPU branch under the new guard and add
#    a CPU fallback that runs the same dispatch without autocast.
_GPU_BODY = (
    "        if 'Video' in task:\n"
    "            return interactive_infer_video(model, audio, image, task, *args, **kwargs)\n"
    "        else:\n"
    "            return interactive_infer_image(model, audio, image, task, *args, **kwargs)"
)
_GPU_CPU_BODY = (
    "            if 'Video' in task:\n"
    "                return interactive_infer_video(model, audio, image, task, *args, **kwargs)\n"
    "            else:\n"
    "                return interactive_infer_image(model, audio, image, task, *args, **kwargs)\n"
    "    else:\n"
    "        # Run without autocast on CPU\n"
    "        if 'Video' in task:\n"
    "            return interactive_infer_video(model, audio, image, task, *args, **kwargs)\n"
    "        else:\n"
    "            return interactive_infer_image(model, audio, image, task, *args, **kwargs)"
)


def patch_app_code(app_code: str) -> str:
    """Return *app_code* with the three CPU/GPU patches applied.

    Each substitution is a plain ``str.replace``; an unmatched snippet
    leaves the code untouched, so a warning is printed instead of
    failing silently.
    """
    for old, new in (
        (_CUDA_MODEL, _DEVICE_MODEL),
        (_CUDA_INFER, _GUARDED_INFER),
        (_GPU_BODY, _GPU_CPU_BODY),
    ):
        patched = app_code.replace(old, new)
        if patched == app_code:
            # Surface silent no-op replaces instead of hiding them.
            print(f"WARNING: patch snippet not found in app.py: {old[:60]!r}...")
        app_code = patched
    return app_code


def _install_detectron2() -> None:
    """Install detectron2 from the maintained fork before other imports need it."""
    print("Installing detectron2...")
    # subprocess.run with an argument list (shell=False) replaces os.system:
    # no shell string parsing, and the return code is not silently discarded.
    result = subprocess.run(
        [sys.executable, "-m", "pip", "install", "-q",
         "git+https://github.com/MaureenZOU/detectron2-xyz.git"],
        check=False,
    )
    if result.returncode != 0:
        print("WARNING: detectron2 install failed; continuing anyway.")


def main() -> int:
    """Set up the environment, patch app.py, and run the demo.

    Returns the patched demo's exit status so callers/CI can observe
    failures (the original os.system call discarded it).
    """
    print("Setting up SEEM environment...")
    _install_detectron2()

    # Make the repository root importable by the demo subprocess.
    os.environ["PYTHONPATH"] = os.getcwd()
    print(f"Set PYTHONPATH to: {os.getcwd()}")

    app_path = "app.py"
    app_patched_path = "app_patched.py"

    with open(app_path, "r") as f:
        app_code = f.read()

    with open(app_patched_path, "w") as f:
        f.write(patch_app_code(app_code))

    print("Starting SEEM demo...")
    return subprocess.run([sys.executable, app_patched_path], check=False).returncode


if __name__ == "__main__":
    sys.exit(main())