# Source: Hugging Face Spaces file "SEEM / bootstrap.py" (author: skallewag,
# revision f7a08f8, 2.57 kB). Page chrome from the scrape ("raw / history /
# blame") removed so the file is valid Python.
#!/usr/bin/env python3
# Bootstrap file for SEEM on Hugging Face Spaces.
# Installs the detectron2 fork, exposes the repo root on PYTHONPATH, then
# (below) patches and launches the upstream app.py.
import os
import sys
import subprocess

print("Setting up SEEM environment...")

# Install detectron2 first (before other imports). Use the *current*
# interpreter's pip via subprocess list form rather than os.system("pip ..."),
# which would run whatever `pip` is first on PATH — possibly a different
# Python installation than the one executing this script.
print("Installing detectron2...")
subprocess.run(
    [sys.executable, "-m", "pip", "install", "-q",
     "git+https://github.com/MaureenZOU/detectron2-xyz.git"],
    check=False,  # original os.system ignored the exit status; stay best-effort
)

# Make the repository root importable by the child app process. Prepend to
# any PYTHONPATH already set by the environment instead of clobbering it.
repo_root = os.getcwd()
existing_pythonpath = os.environ.get("PYTHONPATH")
if existing_pythonpath:
    os.environ["PYTHONPATH"] = repo_root + os.pathsep + existing_pythonpath
else:
    os.environ["PYTHONPATH"] = repo_root
print(f"Set PYTHONPATH to: {os.environ['PYTHONPATH']}")
# Create a patched version of app.py with CPU support.
#
# NOTE(review): every str.replace below silently no-ops if upstream app.py no
# longer contains the exact literal being searched for — after a SEEM update,
# verify the patched file actually changed. Also, interior indentation inside
# these multi-line search strings (e.g. "\n with") must match app.py
# byte-for-byte; confirm it was not mangled in transit.
app_path = "app.py"
app_patched_path = "app_patched.py"

# Read the upstream demo entry point as plain text.
with open(app_path, "r") as f:
    app_code = f.read()

# Replace the device setup code to work with either CPU or GPU:
# swap the hard-coded .cuda() for a .to(device) chosen at runtime from
# torch.cuda.is_available().
app_code = app_code.replace(
    "model = BaseModel(opt, build_model(opt)).from_pretrained(pretrained_pth).eval().cuda()",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n"
    "print(f\"Using device: {device}\")\n"
    "model = BaseModel(opt, build_model(opt)).from_pretrained(pretrained_pth).eval().to(device)"
)

# Replace the inference function to handle both CPU and GPU: wrap the
# existing fp16 autocast context in an `if torch.cuda.is_available():` guard
# so it is only entered when a GPU is present.
app_code = app_code.replace(
    "@torch.no_grad()\ndef inference(image, task, *args, **kwargs):\n with torch.autocast(device_type='cuda', dtype=torch.float16):",
    "@torch.no_grad()\ndef inference(image, task, *args, **kwargs):\n if torch.cuda.is_available():\n with torch.autocast(device_type='cuda', dtype=torch.float16):"
)

# Add CPU fallback to the inference function: append an `else:` branch that
# duplicates the Video/image dispatch without the autocast context, so the
# guard added above still covers both code paths on CPU-only machines.
app_code = app_code.replace(
    " if 'Video' in task:\n return interactive_infer_video(model, audio, image, task, *args, **kwargs)\n else:\n return interactive_infer_image(model, audio, image, task, *args, **kwargs)",
    " if 'Video' in task:\n return interactive_infer_video(model, audio, image, task, *args, **kwargs)\n else:\n return interactive_infer_image(model, audio, image, task, *args, **kwargs)\n else:\n # Run without autocast on CPU\n if 'Video' in task:\n return interactive_infer_video(model, audio, image, task, *args, **kwargs)\n else:\n return interactive_infer_image(model, audio, image, task, *args, **kwargs)"
)

# Also fix example paths if they include the demo/seem prefix — in this Space
# the examples directory sits at the repo root, not under demo/seem/.
if "demo/seem/examples/" in app_code:
    app_code = app_code.replace(
        "\"demo/seem/examples/",
        "\"examples/"
    )

# Write the patched source next to the original; the launcher below runs it.
with open(app_patched_path, "w") as f:
    f.write(app_code)
# Run the patched app with the same interpreter executing this bootstrap.
# The original os.system(f"python ...") resolved a bare `python` from PATH,
# which can be a different installation than the one detectron2 was just
# installed into; subprocess list form also avoids involving a shell.
print("Starting SEEM demo...")
subprocess.run([sys.executable, app_patched_path])