Daniel-F committed on
Commit ccb7bbe · 1 Parent(s): 50f6f96
Files changed (41)
  1. app.py +94 -0
  2. checkpoints/sam2_hiera_s.yaml +116 -0
  3. checkpoints/sam2_hiera_small.pt +3 -0
  4. install.sh +8 -0
  5. requirements.txt +274 -0
  6. sam_utils.py +227 -0
  7. segment-anything-2/.clang-format +85 -0
  8. segment-anything-2/.gitignore +24 -0
  9. segment-anything-2/LICENSE +20 -0
  10. segment-anything-2/README.md +3 -0
  11. segment-anything-2/pyproject.toml +6 -0
  12. segment-anything-2/sam2/__init__.py +9 -0
  13. segment-anything-2/sam2/automatic_mask_generator.py +454 -0
  14. segment-anything-2/sam2/build_sam.py +132 -0
  15. segment-anything-2/sam2/csrc/connected_components.cu +289 -0
  16. segment-anything-2/sam2/modeling/__init__.py +5 -0
  17. segment-anything-2/sam2/modeling/backbones/__init__.py +5 -0
  18. segment-anything-2/sam2/modeling/backbones/hieradet.py +291 -0
  19. segment-anything-2/sam2/modeling/backbones/image_encoder.py +133 -0
  20. segment-anything-2/sam2/modeling/backbones/utils.py +95 -0
  21. segment-anything-2/sam2/modeling/memory_attention.py +169 -0
  22. segment-anything-2/sam2/modeling/memory_encoder.py +181 -0
  23. segment-anything-2/sam2/modeling/position_encoding.py +221 -0
  24. segment-anything-2/sam2/modeling/sam/__init__.py +5 -0
  25. segment-anything-2/sam2/modeling/sam/mask_decoder.py +295 -0
  26. segment-anything-2/sam2/modeling/sam/prompt_encoder.py +182 -0
  27. segment-anything-2/sam2/modeling/sam/transformer.py +360 -0
  28. segment-anything-2/sam2/modeling/sam2_base.py +829 -0
  29. segment-anything-2/sam2/modeling/sam2_utils.py +149 -0
  30. segment-anything-2/sam2/sam2_image_predictor.py +466 -0
  31. segment-anything-2/sam2/sam2_video_predictor.py +983 -0
  32. segment-anything-2/sam2/utils/__init__.py +5 -0
  33. segment-anything-2/sam2/utils/amg.py +348 -0
  34. segment-anything-2/sam2/utils/misc.py +297 -0
  35. segment-anything-2/sam2/utils/transforms.py +118 -0
  36. segment-anything-2/sam2_configs/__init__.py +5 -0
  37. segment-anything-2/sam2_configs/sam2_hiera_b+.yaml +113 -0
  38. segment-anything-2/sam2_configs/sam2_hiera_l.yaml +117 -0
  39. segment-anything-2/sam2_configs/sam2_hiera_s.yaml +116 -0
  40. segment-anything-2/sam2_configs/sam2_hiera_t.yaml +118 -0
  41. segment-anything-2/setup.py +146 -0
app.py ADDED
@@ -0,0 +1,94 @@
1
+ import gradio as gr
2
+ import numpy as np
3
+ from PIL import Image
4
+ import sam_utils
5
+ import matplotlib.pyplot as plt
6
+ from io import BytesIO
7
+
8
+ from sam2.sam2_image_predictor import SAM2ImagePredictor
9
+
10
+ # SAM2 inference helpers used by the demo
11
+ def segment_reference(image, click):
12
+ # click: list of [x, y] points clicked on the reference image
13
+ # Runs the SAM2 image predictor with the clicked points as positive prompts
14
+ # Returns binary masks as a numpy array of shape [num_masks, H, W] (values 0 or 1)
15
+ print(f"Segmenting reference at point: {click}")
16
+ width, height = image.size
17
+ click = np.array(click)
18
+ input_label = np.array([1 for _ in range(len(click))])
19
+ sam2_img.set_image(image)
20
+
21
+ masks, _, _ = sam2_img.predict(
22
+ point_coords=click,
23
+ point_labels=input_label,
24
+ multimask_output=False,
25
+ )
26
+
27
+ return masks
28
+
29
+ def segment_target(target_image, ref_image, ref_mask):
30
+ target_image = np.array(target_image)
31
+ ref_image = np.array(ref_image)
32
+ state = sam_utils.load_masks(sam2_vid, [target_image], ref_image, ref_mask)
33
+ out = sam_utils.propagate_masks(sam2_vid, state)[-1]['segmentation']
34
+ return out # masks for the target image, propagated from the reference mask
35
+
36
+ def visualize_segmentation(image, masks, target_image, target_mask):
37
+ # Visualize the segmentation result
38
+ fig, ax = plt.subplots(1, 2, figsize=(12, 6))
39
+ ax[0].imshow(image.convert("L"), cmap='gray')
40
+ for i, mask in enumerate(masks):
41
+ sam_utils.show_mask(mask, ax[0], obj_id=i, alpha=0.75)
42
+ ax[0].axis('off')
43
+ ax[0].set_title("Reference Image with Expert Segmentation")
44
+ ax[1].imshow(target_image.convert("L"), cmap='gray')
45
+ for i, mask in enumerate(target_mask):
46
+ sam_utils.show_mask(mask, ax[1], obj_id=i, alpha=0.75)
47
+ ax[1].axis('off')
48
+ ax[1].set_title("Target Image with Inferred Segmentation")
49
+ # save it to buffer
50
+ plt.tight_layout()
51
+ buf = BytesIO()
52
+ plt.savefig(buf, format='png')
53
+ buf.seek(0)
54
+ vis = Image.open(buf).copy()
55
+ plt.close(fig)
56
+ buf.close()
57
+ return vis
58
+
59
+ # Store click coords globally (can be improved with state)
60
+ click_coords = []
61
+
62
+ def record_click(img, evt: gr.SelectData):
63
+ global click_coords
64
+ click_coords.append([evt.index[0], evt.index[1]])
65
+ return f"Clicked at: {click_coords}"
66
+
67
+ def generate(reference_image, target_image):
68
+ if not click_coords:
69
+ return None, "Click on the reference image first!"
70
+ ref_mask = segment_reference(reference_image, click_coords)
71
+ tgt_mask = segment_target(target_image, reference_image, ref_mask)
72
+ vis = visualize_segmentation(reference_image, ref_mask, target_image, tgt_mask)
73
+ return vis, "Done!"
74
+
75
+ with gr.Blocks() as demo:
76
+ gr.Markdown("### SST Demo: Label-Efficient Trait Segmentation")
77
+
78
+ with gr.Row():
79
+ reference_img = gr.Image(type="pil", label="Reference Image")
80
+ target_img = gr.Image(type="pil", label="Target Image")
81
+
82
+ click_info = gr.Textbox(label="Click Info")
83
+ generate_btn = gr.Button("Generate")
84
+ output_mask = gr.Image(type="pil", label="Generated Mask")
85
+
86
+ reference_img.select(fn=record_click, inputs=[reference_img], outputs=[click_info])
87
+ generate_btn.click(fn=generate, inputs=[reference_img, target_img], outputs=[output_mask, click_info])
88
+
89
+ global sam2_img
90
+ sam2_img = sam_utils.load_SAM2(ckpt_path="checkpoints/sam2_hiera_small.pt", model_cfg_path="checkpoints/sam2_hiera_s.yaml")
91
+ sam2_img = SAM2ImagePredictor(sam2_img)
92
+ global sam2_vid
93
+ sam2_vid = sam_utils.build_sam2_predictor(checkpoint="checkpoints/sam2_hiera_small.pt", model_cfg="checkpoints/sam2_hiera_s.yaml")
94
+ demo.launch()
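For reference, the same pipeline can be exercised without the Gradio UI. The following is a minimal, hedged sketch that assumes the functions and predictors defined above are in scope (for example, pasted at the bottom of this script); the image paths and the click coordinate are placeholders, not files shipped with this commit.

# Sketch: run the demo pipeline headlessly (paths and click point are hypothetical).
from PIL import Image

reference_image = Image.open("reference.jpg").convert("RGB")   # placeholder path
target_image = Image.open("target.jpg").convert("RGB")         # placeholder path
click_coords = [[256, 256]]                                     # one positive point prompt

ref_mask = segment_reference(reference_image, click_coords)     # (num_masks, H, W) binary masks
tgt_mask = segment_target(target_image, reference_image, ref_mask)
vis = visualize_segmentation(reference_image, ref_mask, target_image, tgt_mask)
vis.save("sst_demo_result.png")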
checkpoints/sam2_hiera_s.yaml ADDED
@@ -0,0 +1,116 @@
1
+ # @package _global_
2
+
3
+ # Model
4
+ model:
5
+ _target_: sam2.modeling.sam2_base.SAM2Base
6
+ image_encoder:
7
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
8
+ scalp: 1
9
+ trunk:
10
+ _target_: sam2.modeling.backbones.hieradet.Hiera
11
+ embed_dim: 96
12
+ num_heads: 1
13
+ stages: [1, 2, 11, 2]
14
+ global_att_blocks: [7, 10, 13]
15
+ window_pos_embed_bkg_spatial_size: [7, 7]
16
+ neck:
17
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
18
+ position_encoding:
19
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
20
+ num_pos_feats: 256
21
+ normalize: true
22
+ scale: null
23
+ temperature: 10000
24
+ d_model: 256
25
+ backbone_channel_list: [768, 384, 192, 96]
26
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
27
+ fpn_interp_model: nearest
28
+
29
+ memory_attention:
30
+ _target_: sam2.modeling.memory_attention.MemoryAttention
31
+ d_model: 256
32
+ pos_enc_at_input: true
33
+ layer:
34
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
35
+ activation: relu
36
+ dim_feedforward: 2048
37
+ dropout: 0.1
38
+ pos_enc_at_attn: false
39
+ self_attention:
40
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
41
+ rope_theta: 10000.0
42
+ feat_sizes: [32, 32]
43
+ embedding_dim: 256
44
+ num_heads: 1
45
+ downsample_rate: 1
46
+ dropout: 0.1
47
+ d_model: 256
48
+ pos_enc_at_cross_attn_keys: true
49
+ pos_enc_at_cross_attn_queries: false
50
+ cross_attention:
51
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
52
+ rope_theta: 10000.0
53
+ feat_sizes: [32, 32]
54
+ rope_k_repeat: True
55
+ embedding_dim: 256
56
+ num_heads: 1
57
+ downsample_rate: 1
58
+ dropout: 0.1
59
+ kv_in_dim: 64
60
+ num_layers: 4
61
+
62
+ memory_encoder:
63
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
64
+ out_dim: 64
65
+ position_encoding:
66
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
67
+ num_pos_feats: 64
68
+ normalize: true
69
+ scale: null
70
+ temperature: 10000
71
+ mask_downsampler:
72
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
73
+ kernel_size: 3
74
+ stride: 2
75
+ padding: 1
76
+ fuser:
77
+ _target_: sam2.modeling.memory_encoder.Fuser
78
+ layer:
79
+ _target_: sam2.modeling.memory_encoder.CXBlock
80
+ dim: 256
81
+ kernel_size: 7
82
+ padding: 3
83
+ layer_scale_init_value: 1e-6
84
+ use_dwconv: True # depth-wise convs
85
+ num_layers: 2
86
+
87
+ num_maskmem: 7
88
+ image_size: 1024
89
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
90
+ sigmoid_scale_for_mem_enc: 20.0
91
+ sigmoid_bias_for_mem_enc: -10.0
92
+ use_mask_input_as_output_without_sam: true
93
+ # Memory
94
+ directly_add_no_mem_embed: true
95
+ # use high-resolution feature map in the SAM mask decoder
96
+ use_high_res_features_in_sam: true
97
+ # output 3 masks on the first click on initial conditioning frames
98
+ multimask_output_in_sam: true
99
+ # SAM heads
100
+ iou_prediction_use_sigmoid: True
101
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
102
+ use_obj_ptrs_in_encoder: true
103
+ add_tpos_enc_to_obj_ptrs: false
104
+ only_obj_ptrs_in_the_past_for_eval: true
105
+ # object occlusion prediction
106
+ pred_obj_scores: true
107
+ pred_obj_scores_mlp: true
108
+ fixed_no_obj_ptr: true
109
+ # multimask tracking settings
110
+ multimask_output_for_tracking: true
111
+ use_multimask_token_for_obj_ptr: true
112
+ multimask_min_pt_num: 0
113
+ multimask_max_pt_num: 1
114
+ use_mlp_for_obj_ptr_proj: true
115
+ # Compilation flag
116
+ compile_image_encoder: False
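This config is consumed through Hydra/OmegaConf instantiation: build_sam2 in segment-anything-2/sam2/build_sam.py (added later in this commit) loads the YAML and recursively builds every _target_ entry. A minimal sketch of that path, assuming the file sits at checkpoints/sam2_hiera_s.yaml as it does here:

# Sketch: turn the _target_ entries above into a model (mirrors build_sam2 in this commit).
from omegaconf import OmegaConf
from hydra.utils import instantiate

cfg = OmegaConf.load("checkpoints/sam2_hiera_s.yaml")
OmegaConf.resolve(cfg)
model = instantiate(cfg.model, _recursive_=True)  # SAM2Base with the Hiera-S trunk
print(type(model).__name__)  # expected: SAM2Base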
checkpoints/sam2_hiera_small.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:95949964d4e548409021d47b22712d5f1abf2564cc0c3c765ba599a24ac7dce3
3
+ size 184309650
install.sh ADDED
@@ -0,0 +1,8 @@
1
+ #!/bin/bash
2
+
3
+ # conda create -y --name sst python=3.10.14
4
+ # source ~/anaconda3/etc/profile.d/conda.sh
5
+ # conda activate sst
6
+ pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
7
+ (cd segment-anything-2 && pip install -e .)
8
+ pip install -r requirements.txt --no-deps
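After install.sh finishes, a quick smoke test is to load the bundled checkpoint on CPU (the torch wheel above is the CPU build). A hedged sketch, assuming it is run from the repository root:

# Smoke test for the installation (run from the repo root; CPU-only torch expected).
import sam_utils
from sam2.sam2_image_predictor import SAM2ImagePredictor

model = sam_utils.load_SAM2(
    ckpt_path="checkpoints/sam2_hiera_small.pt",
    model_cfg_path="checkpoints/sam2_hiera_s.yaml",
)
predictor = SAM2ImagePredictor(model)
print("SAM2 small loaded on", next(model.parameters()).device)

The Gradio demo itself is then started with "python app.py".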
requirements.txt ADDED
@@ -0,0 +1,274 @@
1
+ absl-py==2.1.0
2
+ accelerate==0.23.0
3
+ addict==2.4.0
4
+ aiofiles==23.2.1
5
+ aiohappyeyeballs==2.4.0
6
+ aiohttp==3.10.5
7
+ aiosignal==1.3.1
8
+ altair==5.4.1
9
+ antlr4-python3-runtime==4.9.3
10
+ anyio==4.4.0
11
+ appdirs==1.4.4
12
+ argcomplete==3.5.0
13
+ argon2-cffi==23.1.0
14
+ argon2-cffi-bindings==21.2.0
15
+ arrow==1.3.0
16
+ astunparse==1.6.3
17
+ async-lru==2.0.4
18
+ async-timeout==4.0.3
19
+ attrs==24.2.0
20
+ av==12.3.0
21
+ av2==0.2.1
22
+ babel==2.16.0
23
+ backcall==0.2.0
24
+ beautifulsoup4==4.12.3
25
+ black==21.4b2
26
+ bleach==6.1.0
27
+ cachetools==5.5.0
28
+ certifi==2024.7.4
29
+ cffi==1.17.0
30
+ charset-normalizer==3.3.2
31
+ click==8.1.7
32
+ cloudpickle==3.0.0
33
+ colored==2.2.4
34
+ coloredlogs==15.0.1
35
+ colorlog==6.8.2
36
+ contourpy==1.2.1
37
+ cycler==0.12.1
38
+ Cython==3.0.2
39
+ datasets==3.0.2
40
+ debugpy==1.6.0
41
+ deepspeed==0.10.3
42
+ defusedxml==0.7.1
43
+ descartes==1.1.0
44
+ diffdist==0.1
45
+ dill==0.3.8
46
+ distlib==0.3.8
47
+ docker-pycreds==0.4.0
48
+ easydict==1.13
49
+ einops==0.7.0
50
+ exceptiongroup
51
+ fairscale==0.4.13
52
+ fastapi==0.115.0
53
+ fastjsonschema==2.20.0
54
+ ffmpeg-python==0.2.0
55
+ ffmpy==0.4.0
56
+ filelock==3.15.4
57
+ fire==0.6.0
58
+ flatbuffers==24.3.25
59
+ fonttools==4.53.1
60
+ fqdn==1.5.1
61
+ frozenlist==1.4.1
62
+ fsspec==2024.6.1
63
+ ftfy==6.1.1
64
+ future==0.18.2
65
+ fvcore==0.1.5.post20221221
66
+ gast==0.6.0
67
+ gdown==5.2.0
68
+ gitdb==4.0.11
69
+ GitPython==3.1.43
70
+ google-pasta==0.2.0
71
+ gradio==3.32.0
72
+ gradio_client==0.2.5
73
+ grpcio==1.66.1
74
+ h11==0.14.0
75
+ h5py==3.11.0
76
+ hjson==3.1.0
77
+ httpcore==1.0.5
78
+ httpx==0.27.0
79
+ huggingface-hub==0.26.2
80
+ humanfriendly==10.0
81
+ hydra-core==1.3.2
82
+ idna==3.7
83
+ imageio==2.35.1
84
+ importlib_resources==6.4.5
85
+ infinibatch==0.1.1
86
+ iopath==0.1.9
87
+ ipython==8.16.1
88
+ ipywidgets==8.1.5
89
+ isoduration==20.11.0
90
+ Jinja2==3.1.4
91
+ joblib==1.4.2
92
+ json-tricks==3.17.3
93
+ json5==0.9.25
94
+ jsonpatch==1.33
95
+ jsonpointer==3.0.0
96
+ jsonschema==4.23.0
97
+ jsonschema-specifications==2023.12.1
98
+ jupyter==1.0.0
99
+ jupyter-console==6.6.3
100
+ jupyter-events==0.10.0
101
+ jupyter-lsp==2.2.5
102
+ jupyter_server==2.14.2
103
+ jupyter_server_terminals==0.5.3
104
+ jupyterlab==4.2.4
105
+ jupyterlab_pygments==0.3.0
106
+ jupyterlab_server==2.27.3
107
+ jupyterlab_widgets==3.0.13
108
+ jupytext==1.16.4
109
+ kagglehub==0.3.4
110
+ keras==3.6.0
111
+ kiwisolver==1.4.5
112
+ kornia==0.6.4
113
+ lazy_loader==0.4
114
+ libclang==18.1.1
115
+ linkify-it-py==2.0.3
116
+ llvmlite==0.43.0
117
+ Markdown==3.7
118
+ markdown-it-py==2.2.0
119
+ MarkupSafe==2.1.5
120
+ matplotlib==3.6
121
+ mdit-py-plugins==0.3.3
122
+ mdurl==0.1.2
123
+ mistune==3.0.2
124
+ ml-dtypes==0.4.1
125
+ more-itertools==10.5.0
126
+ mpmath==1.3.0
127
+ multidict==6.1.0
128
+ multiprocess==0.70.16
129
+ MultiScaleDeformableAttention==1.0
130
+ mup==1.0.0
131
+ mypy-extensions==1.0.0
132
+ namex==0.0.8
133
+ narwhals==1.8.1
134
+ nbclient==0.10.0
135
+ nbconvert==7.16.4
136
+ nbformat==5.10.4
137
+ networkx==3.3
138
+ ninja==1.11.1.1
139
+ nltk==3.8.1
140
+ notebook==7.2.1
141
+ notebook_shim==0.2.4
142
+ nox==2024.4.15
143
+ numba==0.60.0
144
+ numpy==1.23.1
145
+ nvidia-cublas-cu11==11.10.3.66
146
+ nvidia-cublas-cu12==12.1.3.1
147
+ nvidia-cuda-cupti-cu12==12.1.105
148
+ nvidia-cuda-nvrtc-cu11==11.7.99
149
+ nvidia-cuda-nvrtc-cu12==12.1.105
150
+ nvidia-cuda-runtime-cu11==11.7.99
151
+ nvidia-cuda-runtime-cu12==12.1.105
152
+ nvidia-cudnn-cu11==8.5.0.96
153
+ nvidia-cudnn-cu12==9.1.0.70
154
+ nvidia-cufft-cu12==11.0.2.54
155
+ nvidia-curand-cu12==10.3.2.106
156
+ nvidia-cusolver-cu12==11.4.5.107
157
+ nvidia-cusparse-cu12==12.1.0.106
158
+ nvidia-nccl-cu12==2.21.5
159
+ nvidia-nvjitlink-cu12==12.4.127
160
+ nvidia-nvtx-cu12==12.1.105
161
+ omegaconf==2.3.0
162
+ openai-whisper==20230306
163
+ opencv-python==4.6.0.66
164
+ opencv-python-headless==4.10.0.84
165
+ opt_einsum==3.4.0
166
+ optree==0.13.0
167
+ orjson==3.10.7
168
+ overrides==7.7.0
169
+ pandas==2.0.3
170
+ pandocfilters==1.5.1
171
+ panopticapi @ git+https://github.com/cocodataset/panopticapi.git@7bb4655548f98f3fedc07bf37e9040a992b054b0
172
+ pathspec==0.12.1
173
+ pathtools==0.1.2
174
+ peft==0.12.0
175
+ Pillow==9.4.0
176
+ plotly==5.23.0
177
+ portalocker==2.10.1
178
+ POT==0.9.0
179
+ progressbar==2.5
180
+ prometheus_client==0.20.0
181
+ protobuf==4.25.5
182
+ py-cpuinfo==9.0.0
183
+ pyarrow==18.1.0
184
+ pycocotools==2.0.7
185
+ pycparser==2.22
186
+ pydantic==1.10.18
187
+ pydot==3.0.1
188
+ pydub==0.25.1
189
+ pyparsing==3.1.2
190
+ pyproj==3.6.1
191
+ pyquaternion==0.9.9
192
+ PySocks==1.7.1
193
+ python-dateutil
194
+ python-json-logger==2.0.7
195
+ python-multipart==0.0.9
196
+ pytz==2024.1
197
+ PyWavelets==1.7.0
198
+ PyYAML==6.0.1
199
+ qtconsole==5.5.2
200
+ QtPy==2.4.1
201
+ referencing==0.35.1
202
+ regex==2023.10.3
203
+ requests==2.32.3
204
+ rfc3339-validator==0.1.4
205
+ rfc3986-validator==0.1.1
206
+ rich==13.8.0
207
+ rpds-py==0.20.0
208
+ safetensors==0.4.5
209
+ scann==1.3.4
210
+ scikit-image==0.21.0
211
+ scikit-learn==1.3.1
212
+ scipy==1.11.4
213
+ seaborn==0.13.2
214
+ segmentation-refinement==0.6
215
+ semantic-version==2.10.0
216
+ Send2Trash==1.8.3
217
+ sentencepiece==0.1.99
218
+ sentry-sdk==2.14.0
219
+ setproctitle==1.3.3
220
+ Shapely==1.8.0
221
+ slugify==0.0.1
222
+ smmap==5.0.1
223
+ sniffio==1.3.1
224
+ soupsieve==2.6
225
+ starlette==0.38.5
226
+ submitit==1.5.1
227
+ supervision==0.23.0
228
+ sympy==1.13.1
229
+ tabulate==0.9.0
230
+ tenacity==9.0.0
231
+ tensorboard==2.17.1
232
+ tensorboard-data-server==0.7.2
233
+ tensorboardX==1.8
234
+ tensorflow==2.17.1
235
+ tensorflow-io-gcs-filesystem==0.37.1
236
+ termcolor==2.4.0
237
+ terminado==0.18.1
238
+ thop==0.1.1.post2209072238
239
+ threadpoolctl==3.5.0
240
+ tifffile==2024.8.30
241
+ timm==0.6.12
242
+ tinycss2==1.3.0
243
+ tokenizers==0.14.1
244
+ toml==0.10.2
245
+ tomli==2.0.1
246
+ torchmetrics==0.11.0
247
+ torchshow==0.5.0
248
+ tqdm==4.66.3
249
+ transformers==4.34.0
250
+ triton==3.1.0
251
+ types-python-dateutil==2.9.0.20240821
252
+ typing==3.7.4.3
253
+ tzdata==2024.1
254
+ uc-micro-py==1.0.3
255
+ ultralytics==8.0.196
256
+ uri-template==1.3.0
257
+ urllib3==2.2.2
258
+ uvicorn==0.30.6
259
+ virtualenv==20.26.3
260
+ visdom==0.2.4
261
+ vision-datasets==0.2.2
262
+ wandb==0.18.6
263
+ webcolors==24.8.0
264
+ webencodings==0.5.1
265
+ websocket-client==1.8.0
266
+ websockets==11.0.3
267
+ Werkzeug==3.0.4
268
+ widgetsnbextension==4.0.13
269
+ wrapt==1.16.0
270
+ xformers==0.0.28.post3
271
+ xxhash==3.5.0
272
+ yacs==0.1.8
273
+ yapf==0.40.2
274
+ yarl==1.11.1
sam_utils.py ADDED
@@ -0,0 +1,227 @@
1
+ import cv2
2
+ import torch
3
+
4
+ from sam2.automatic_mask_generator import SAM2AutomaticMaskGenerator
5
+ from sam2.build_sam import build_sam2
6
+ from sam2.build_sam import build_sam2_video_predictor
7
+ import sam2
8
+ from PIL import Image
9
+ import os
10
+ import numpy as np
11
+ import matplotlib.pyplot as plt
12
+
13
+ import argparse
14
+
15
+ def area(mask):
16
+ if mask.size == 0: return 0
17
+ return np.count_nonzero(mask) / mask.size
18
+
19
+ def show_mask(mask, ax, obj_id=None, random_color=False, borders = True, alpha=0.5):
20
+ if random_color:
21
+ color = np.concatenate([np.random.random(3), np.array([alpha])], axis=0)
22
+ else:
23
+ color = np.array([30/255, 144/255, 255/255, alpha])
24
+ if not random_color and obj_id is not None:
25
+ color = np.array([*plt.get_cmap("tab10")(obj_id)[:3], alpha])
26
+ h, w = mask.shape[-2:]
27
+ mask = mask.astype(np.uint8)
28
+ mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)
29
+ if borders:
30
+ import cv2
31
+ contours, _ = cv2.findContours(mask,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
32
+ # Try to smooth contours
33
+ contours = [cv2.approxPolyDP(contour, epsilon=0.01, closed=True) for contour in contours]
34
+ mask_image = cv2.drawContours(mask_image, contours, -1, (1, 1, 1, 0.5), thickness=2)
35
+ ax.imshow(mask_image)
36
+
40
+
41
+ def nms_bbox_removal(boxes_xyxy, iou_thresh=0.25 ):
42
+ remove_indices = []
43
+ for i, box in enumerate(boxes_xyxy):
44
+ for j in range(i+1, len(boxes_xyxy)):
45
+ box2 = boxes_xyxy[j]
46
+ iou1 = compute_iou(box, box2)
47
+ iou2 = compute_iou(box2, box)
48
+ if iou1 > iou_thresh or iou2 > iou_thresh:
49
+ if iou1 > iou2:
50
+ remove_indices.append(j)
51
+ else:
52
+ remove_indices.append(i)
53
+ return [box for i, box in enumerate(boxes_xyxy) if i not in remove_indices]
54
+
55
+ def load_SAM2(ckpt_path, model_cfg_path):
56
+ if torch.cuda.is_available():
57
+ print("Using CUDA")
58
+ device = "cuda"
59
+ else:
60
+ print("CUDA device not found, using CPU instead")
61
+ device = "cpu"
62
+ sam2 = build_sam2(model_cfg_path, ckpt_path, device=device, apply_postprocessing=False)
63
+ return sam2
64
+
65
+ def compute_iou(box1, box2):
66
+ # Note: despite the name, this returns intersection over the area of box1 (an overlap ratio), not a symmetric IoU
67
+ x1, y1, x2, y2 = box1
68
+ x3, y3, x4, y4 = box2
69
+ x5, y5 = max(x1, x3), max(y1, y3)
70
+ x6, y6 = min(x2, x4), min(y2, y4)
71
+ if x5 >= x6 or y5 >= y6:
72
+ return 0
73
+ intersection = (x6 - x5) * (y6 - y5)
74
+ area1 = (x2 - x1) * (y2 - y1) # area of box1
75
+ return intersection / area1
76
+
77
+ def show_anns(anns, color=None, borders=True):
78
+ if len(anns) == 0:
79
+ return
80
+ sorted_anns = sorted(anns, key=(lambda x: x['area']), reverse=True)
81
+ ax = plt.gca()
82
+ ax.set_autoscale_on(False)
83
+
84
+ img = np.ones((sorted_anns[0]['segmentation'].squeeze().shape[0], sorted_anns[0]['segmentation'].squeeze().shape[1], 4))
85
+ img[:, :, 3] = 0
86
+ for ann in sorted_anns:
87
+ m = ann['segmentation'].squeeze()
88
+ if color is None:
89
+ color_mask = np.concatenate([np.random.random(3), [0.75]])
90
+ else:
91
+ color_mask = color
92
+ img[m] = color_mask
93
+ if borders:
94
+ import cv2
95
+ contours, _ = cv2.findContours(m.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
96
+ # Try to smooth contours
97
+ contours = [cv2.approxPolyDP(contour, epsilon=0.01, closed=True) for contour in contours]
98
+ cv2.drawContours(img, contours, -1, (0, 0, 1, 0.4), thickness=2)
99
+
100
+ ax.imshow(img)
101
+
102
+ def build_sam2_predictor(checkpoint="checkpoints/sam2_hiera_large.pt", model_cfg="sam2_hiera_l"):
103
+ device = "cuda" if torch.cuda.is_available() else "cpu"
104
+ video_predictor = build_sam2_video_predictor(model_cfg, checkpoint, device=device, apply_postprocessing=False)
105
+ return video_predictor
106
+
107
+ def load_masks(video_predictor, query_images, support_image, support_masks, offload_video_to_cpu=True, offload_state_to_cpu=True, verbose=False):
108
+ '''
109
+ video_predictor: sam2 predictor
110
+ query_images: list of np.array of shape (H, W, 3)
111
+ support_image: np.array of shape (H, W, 3)
112
+ support_masks: list of np.array of shape (H, W)
113
+ offload_video_to_cpu: for long video sequences, offload the video to the CPU to save GPU memory
114
+ offload_state_to_cpu: save GPU memory by offloading the state to the CPU
115
+ '''
116
+ query_images.insert(0, support_image)
117
+ with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
118
+ state = video_predictor.init_state(None, image_inputs=query_images, async_loading_frames=False, offload_video_to_cpu=offload_video_to_cpu, offload_state_to_cpu=offload_state_to_cpu, verbose=verbose)
119
+ video_predictor.reset_state(state)
120
+ for i, patch_mask in enumerate(support_masks):
121
+ ann_frame_idx = 0
122
+ ann_obj_id = i # give a unique id to each object we interact with
123
+ patch_mask = np.array(patch_mask, dtype=np.uint8)
124
+ patch_mask = cv2.resize(patch_mask, (1024, 1024))
125
+ _, _, _ = video_predictor.add_new_mask(
126
+ inference_state=state,
127
+ frame_idx=ann_frame_idx,
128
+ obj_id=ann_obj_id,
129
+ mask=patch_mask,
130
+ )
131
+ return state
132
+
133
+ def propagate_masks(video_predictor, state, verbose=False):
134
+ """
135
+ returns: list[dict] with keys 'obj_ids', 'segmentation', 'area'
136
+ list['segmentation']: np.array of shape (H, W) with dtype bool
137
+ """
138
+ frame_info = []
139
+ # run propagation throughout the video and collect the results in a dict
140
+ with torch.inference_mode(), torch.autocast("cuda", dtype=torch.bfloat16):
141
+ for _, out_obj_ids, out_mask_logits in video_predictor.propagate_in_video(state, verbose=verbose):
142
+ out_mask_logits = (out_mask_logits>0).cpu().numpy().squeeze()
143
+ if out_mask_logits.ndim == 2:
144
+ out_mask_logits = np.expand_dims(out_mask_logits, axis=0)
145
+ frame_info.append({'obj_ids': out_obj_ids, 'segmentation': out_mask_logits, 'area': area(out_mask_logits)})
146
+ return frame_info
147
+
148
+ def show_video_masks(image, frame_info):
149
+ img_resized = cv2.resize(image, (1024, 1024))
150
+ plt.imshow(img_resized)
151
+ for obj_ids, mask in zip(frame_info['obj_ids'], frame_info['segmentation']):
152
+ mask = cv2.resize(mask.astype(np.uint8), (1024, 1024))
153
+ show_mask(mask, plt.gca(), obj_id=obj_ids, borders=True, alpha=0.75)
154
+ plt.axis('off')
155
+ plt.show()
156
+
157
+ def get_parser(inputs):
158
+ parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
159
+ parser.add_argument(
160
+ "--config-file",
161
+ default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
162
+ metavar="FILE",
163
+ help="path to config file",
164
+ )
165
+ parser.add_argument(
166
+ "--opts",
167
+ help="Modify config options using the command-line 'KEY VALUE' pairs",
168
+ default=[],
169
+ nargs=argparse.REMAINDER,
170
+ )
171
+ args = parser.parse_args(inputs)
172
+ return args
173
+
174
+ def auto_segment_SAM(boxes_xyxy, img, iou_thresh=0.9, stability_score_thresh=0.95, min_mask_region_area=10000, verbose=False):
175
+ checkpoint = "../../checkpoints/sam2_hiera_large.pt"
176
+ model_cfg = "../../sam2_configs/sam2_hiera_l.yaml"
177
+ sam2 = load_SAM2(checkpoint, model_cfg)
178
+ auto_mask_predictor = SAM2AutomaticMaskGenerator(sam2,
179
+ points_per_batch=128,
180
+ pred_iou_thresh=iou_thresh,
181
+ stability_score_thresh=stability_score_thresh,
182
+ min_mask_region_area=min_mask_region_area,
183
+ multimask_output=True)
184
+ masks_list = []
185
+ for box_xyxy in boxes_xyxy:
186
+ wing = img[int(box_xyxy[1]):int(box_xyxy[3]), int(box_xyxy[0]):int(box_xyxy[2])]
187
+ mask = auto_mask_predictor.generate(wing)
188
+ # for mask_dict in mask:
+ # mask_dict['segmentation'] = np.bitwise_not(mask_dict['segmentation'])
191
+ if verbose:
192
+ plt.imshow(wing)
193
+ show_anns(mask)
194
+ # remove axis
195
+ plt.axis('off')
196
+ plt.show()
197
+ # translate the mask to the original image
198
+ binary_masks = [e['segmentation'] for e in mask]
199
+
200
+ for e in binary_masks:
201
+ new_mask = np.zeros((img.shape[0], img.shape[1]), dtype=bool)
202
+ new_mask[int(box_xyxy[1]):int(box_xyxy[3]), int(box_xyxy[0]):int(box_xyxy[2])] = e
203
+ new_mask_dict = {
204
+ 'segmentation': new_mask,
205
+ 'area': area(new_mask)
206
+ }
207
+ masks_list.append(new_mask_dict)
208
+ return masks_list
209
+
210
+ def show_masks(masks_list, img, verbose=True, imshow=True, grey=False):
211
+ if imshow:
212
+ if grey:
213
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
214
+ plt.imshow(img, cmap='gray')
215
+ else:
216
+ plt.imshow(img)
217
+ plt.axis('off')
218
+ show_anns(masks_list)
219
+ if verbose:
220
+ plt.show()
221
+
222
+ def show_individual_masks(masks_list, img):
223
+ for mask in masks_list:
224
+ plt.imshow(img)
225
+ plt.axis('off')
226
+ show_anns([mask])
227
+ plt.show()
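Taken together, build_sam2_predictor, load_masks, and propagate_masks implement the one-shot segmentation-by-tracking step: the annotated reference image is treated as frame 0 of a short "video" and its masks are propagated to the query images. A minimal sketch of that workflow, assuming the checkpoint paths from this commit and placeholder arrays for the images and masks:

# Sketch: one-shot mask propagation with the helpers above (arrays are placeholders).
import numpy as np
import sam_utils

predictor = sam_utils.build_sam2_predictor(
    checkpoint="checkpoints/sam2_hiera_small.pt",
    model_cfg="checkpoints/sam2_hiera_s.yaml",
)

support_image = np.zeros((512, 512, 3), dtype=np.uint8)   # annotated reference image (H, W, 3)
support_masks = [np.zeros((512, 512), dtype=np.uint8)]    # one binary mask per object
query_images = [np.zeros((512, 512, 3), dtype=np.uint8)]  # images to segment

state = sam_utils.load_masks(predictor, query_images, support_image, support_masks)
frames = sam_utils.propagate_masks(predictor, state)
# frames[0] is the reference frame; later entries hold the propagated masks.
print(frames[-1]['obj_ids'], frames[-1]['segmentation'].shape, frames[-1]['area'])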
segment-anything-2/.clang-format ADDED
@@ -0,0 +1,85 @@
1
+ AccessModifierOffset: -1
2
+ AlignAfterOpenBracket: AlwaysBreak
3
+ AlignConsecutiveAssignments: false
4
+ AlignConsecutiveDeclarations: false
5
+ AlignEscapedNewlinesLeft: true
6
+ AlignOperands: false
7
+ AlignTrailingComments: false
8
+ AllowAllParametersOfDeclarationOnNextLine: false
9
+ AllowShortBlocksOnASingleLine: false
10
+ AllowShortCaseLabelsOnASingleLine: false
11
+ AllowShortFunctionsOnASingleLine: Empty
12
+ AllowShortIfStatementsOnASingleLine: false
13
+ AllowShortLoopsOnASingleLine: false
14
+ AlwaysBreakAfterReturnType: None
15
+ AlwaysBreakBeforeMultilineStrings: true
16
+ AlwaysBreakTemplateDeclarations: true
17
+ BinPackArguments: false
18
+ BinPackParameters: false
19
+ BraceWrapping:
20
+ AfterClass: false
21
+ AfterControlStatement: false
22
+ AfterEnum: false
23
+ AfterFunction: false
24
+ AfterNamespace: false
25
+ AfterObjCDeclaration: false
26
+ AfterStruct: false
27
+ AfterUnion: false
28
+ BeforeCatch: false
29
+ BeforeElse: false
30
+ IndentBraces: false
31
+ BreakBeforeBinaryOperators: None
32
+ BreakBeforeBraces: Attach
33
+ BreakBeforeTernaryOperators: true
34
+ BreakConstructorInitializersBeforeComma: false
35
+ BreakAfterJavaFieldAnnotations: false
36
+ BreakStringLiterals: false
37
+ ColumnLimit: 80
38
+ CommentPragmas: '^ IWYU pragma:'
39
+ ConstructorInitializerAllOnOneLineOrOnePerLine: true
40
+ ConstructorInitializerIndentWidth: 4
41
+ ContinuationIndentWidth: 4
42
+ Cpp11BracedListStyle: true
43
+ DerivePointerAlignment: false
44
+ DisableFormat: false
45
+ ForEachMacros: [ FOR_EACH, FOR_EACH_R, FOR_EACH_RANGE, ]
46
+ IncludeCategories:
47
+ - Regex: '^<.*\.h(pp)?>'
48
+ Priority: 1
49
+ - Regex: '^<.*'
50
+ Priority: 2
51
+ - Regex: '.*'
52
+ Priority: 3
53
+ IndentCaseLabels: true
54
+ IndentWidth: 2
55
+ IndentWrappedFunctionNames: false
56
+ KeepEmptyLinesAtTheStartOfBlocks: false
57
+ MacroBlockBegin: ''
58
+ MacroBlockEnd: ''
59
+ MaxEmptyLinesToKeep: 1
60
+ NamespaceIndentation: None
61
+ ObjCBlockIndentWidth: 2
62
+ ObjCSpaceAfterProperty: false
63
+ ObjCSpaceBeforeProtocolList: false
64
+ PenaltyBreakBeforeFirstCallParameter: 1
65
+ PenaltyBreakComment: 300
66
+ PenaltyBreakFirstLessLess: 120
67
+ PenaltyBreakString: 1000
68
+ PenaltyExcessCharacter: 1000000
69
+ PenaltyReturnTypeOnItsOwnLine: 200
70
+ PointerAlignment: Left
71
+ ReflowComments: true
72
+ SortIncludes: true
73
+ SpaceAfterCStyleCast: false
74
+ SpaceBeforeAssignmentOperators: true
75
+ SpaceBeforeParens: ControlStatements
76
+ SpaceInEmptyParentheses: false
77
+ SpacesBeforeTrailingComments: 1
78
+ SpacesInAngles: false
79
+ SpacesInContainerLiterals: true
80
+ SpacesInCStyleCastParentheses: false
81
+ SpacesInParentheses: false
82
+ SpacesInSquareBrackets: false
83
+ Standard: Cpp11
84
+ TabWidth: 8
85
+ UseTab: Never
segment-anything-2/.gitignore ADDED
@@ -0,0 +1,24 @@
1
+ .vscode/
2
+ .DS_Store
3
+ __pycache__/
4
+ *-checkpoint.ipynb
5
+ .venv
6
+ *.egg*
7
+ build/*
8
+ _C.*
9
+ outputs/*
10
+ *.pt
11
+ *.sh
12
+ data/*
13
+ *.jpg
14
+ *.png
15
+ results/*
16
+ sav_dataset/*
17
+ *.gz
18
+ *.zip
19
+ *.out
20
+ *.log
21
+ *.pkl
22
+ *.pdf
23
+ *.gif
24
+
segment-anything-2/LICENSE ADDED
@@ -0,0 +1,20 @@
1
+ Copyright (c) 2023-2024 Imageomics Institute
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining
4
+ a copy of this software and associated documentation files (the
5
+ "Software"), to deal in the Software without restriction, including
6
+ without limitation the rights to use, copy, modify, merge, publish,
7
+ distribute, sublicense, and/or sell copies of the Software, and to
8
+ permit persons to whom the Software is furnished to do so, subject to
9
+ the following conditions:
10
+
11
+ The above copyright notice and this permission notice shall be
12
+ included in all copies or substantial portions of the Software.
13
+
14
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
15
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
16
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
17
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
18
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
19
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
20
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
segment-anything-2/README.md ADDED
@@ -0,0 +1,3 @@
1
+ # SST
2
+
3
+ Repository for the paper Static Segmentation by Tracking: A Frustratingly Label-Efficient Approach to Fine-Grained Segmentation.
segment-anything-2/pyproject.toml ADDED
@@ -0,0 +1,6 @@
1
+ [build-system]
2
+ requires = [
3
+ "setuptools>=61.0",
4
+ "torch>=2.3.1",
5
+ ]
6
+ build-backend = "setuptools.build_meta"
segment-anything-2/sam2/__init__.py ADDED
@@ -0,0 +1,9 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from hydra import initialize_config_module
8
+
9
+ initialize_config_module("sam2_configs", version_base="1.2")
segment-anything-2/sam2/automatic_mask_generator.py ADDED
@@ -0,0 +1,454 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # Adapted from https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/automatic_mask_generator.py
8
+ from typing import Any, Dict, List, Optional, Tuple
9
+
10
+ import numpy as np
11
+ import torch
12
+ from torchvision.ops.boxes import batched_nms, box_area # type: ignore
13
+
14
+ from sam2.modeling.sam2_base import SAM2Base
15
+ from sam2.sam2_image_predictor import SAM2ImagePredictor
16
+ from sam2.utils.amg import (
17
+ area_from_rle,
18
+ batch_iterator,
19
+ batched_mask_to_box,
20
+ box_xyxy_to_xywh,
21
+ build_all_layer_point_grids,
22
+ calculate_stability_score,
23
+ coco_encode_rle,
24
+ generate_crop_boxes,
25
+ is_box_near_crop_edge,
26
+ mask_to_rle_pytorch,
27
+ MaskData,
28
+ remove_small_regions,
29
+ rle_to_mask,
30
+ uncrop_boxes_xyxy,
31
+ uncrop_masks,
32
+ uncrop_points,
33
+ )
34
+
35
+
36
+ class SAM2AutomaticMaskGenerator:
37
+ def __init__(
38
+ self,
39
+ model: SAM2Base,
40
+ points_per_side: Optional[int] = 32,
41
+ points_per_batch: int = 64,
42
+ pred_iou_thresh: float = 0.8,
43
+ stability_score_thresh: float = 0.95,
44
+ stability_score_offset: float = 1.0,
45
+ mask_threshold: float = 0.0,
46
+ box_nms_thresh: float = 0.7,
47
+ crop_n_layers: int = 0,
48
+ crop_nms_thresh: float = 0.7,
49
+ crop_overlap_ratio: float = 512 / 1500,
50
+ crop_n_points_downscale_factor: int = 1,
51
+ point_grids: Optional[List[np.ndarray]] = None,
52
+ min_mask_region_area: int = 0,
53
+ output_mode: str = "binary_mask",
54
+ use_m2m: bool = False,
55
+ multimask_output: bool = True,
56
+ **kwargs,
57
+ ) -> None:
58
+ """
59
+ Using a SAM 2 model, generates masks for the entire image.
60
+ Generates a grid of point prompts over the image, then filters
61
+ low quality and duplicate masks. The default settings are chosen
62
+ for SAM 2 with a HieraL backbone.
63
+
64
+ Arguments:
65
+ model (Sam): The SAM 2 model to use for mask prediction.
66
+ points_per_side (int or None): The number of points to be sampled
67
+ along one side of the image. The total number of points is
68
+ points_per_side**2. If None, 'point_grids' must provide explicit
69
+ point sampling.
70
+ points_per_batch (int): Sets the number of points run simultaneously
71
+ by the model. Higher numbers may be faster but use more GPU memory.
72
+ pred_iou_thresh (float): A filtering threshold in [0,1], using the
73
+ model's predicted mask quality.
74
+ stability_score_thresh (float): A filtering threshold in [0,1], using
75
+ the stability of the mask under changes to the cutoff used to binarize
76
+ the model's mask predictions.
77
+ stability_score_offset (float): The amount to shift the cutoff when
78
+ calculating the stability score.
79
+ mask_threshold (float): Threshold for binarizing the mask logits
80
+ box_nms_thresh (float): The box IoU cutoff used by non-maximal
81
+ suppression to filter duplicate masks.
82
+ crop_n_layers (int): If >0, mask prediction will be run again on
83
+ crops of the image. Sets the number of layers to run, where each
84
+ layer has 2**i_layer number of image crops.
85
+ crop_nms_thresh (float): The box IoU cutoff used by non-maximal
86
+ suppression to filter duplicate masks between different crops.
87
+ crop_overlap_ratio (float): Sets the degree to which crops overlap.
88
+ In the first crop layer, crops will overlap by this fraction of
89
+ the image length. Later layers with more crops scale down this overlap.
90
+ crop_n_points_downscale_factor (int): The number of points-per-side
91
+ sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
92
+ point_grids (list(np.ndarray) or None): A list over explicit grids
93
+ of points used for sampling, normalized to [0,1]. The nth grid in the
94
+ list is used in the nth crop layer. Exclusive with points_per_side.
95
+ min_mask_region_area (int): If >0, postprocessing will be applied
96
+ to remove disconnected regions and holes in masks with area smaller
97
+ than min_mask_region_area. Requires opencv.
98
+ output_mode (str): The form masks are returned in. Can be 'binary_mask',
99
+ 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
100
+ For large resolutions, 'binary_mask' may consume large amounts of
101
+ memory.
102
+ use_m2m (bool): Whether to add a one step refinement using previous mask predictions.
103
+ multimask_output (bool): Whether to output multimask at each point of the grid.
104
+ """
105
+
106
+ assert (points_per_side is None) != (
107
+ point_grids is None
108
+ ), "Exactly one of points_per_side or point_grid must be provided."
109
+ if points_per_side is not None:
110
+ self.point_grids = build_all_layer_point_grids(
111
+ points_per_side,
112
+ crop_n_layers,
113
+ crop_n_points_downscale_factor,
114
+ )
115
+ elif point_grids is not None:
116
+ self.point_grids = point_grids
117
+ else:
118
+ raise ValueError("Can't have both points_per_side and point_grid be None.")
119
+
120
+ assert output_mode in [
121
+ "binary_mask",
122
+ "uncompressed_rle",
123
+ "coco_rle",
124
+ ], f"Unknown output_mode {output_mode}."
125
+ if output_mode == "coco_rle":
126
+ try:
127
+ from pycocotools import mask as mask_utils # type: ignore # noqa: F401
128
+ except ImportError as e:
129
+ print("Please install pycocotools")
130
+ raise e
131
+
132
+ self.predictor = SAM2ImagePredictor(
133
+ model,
134
+ max_hole_area=min_mask_region_area,
135
+ max_sprinkle_area=min_mask_region_area,
136
+ )
137
+ self.points_per_batch = points_per_batch
138
+ self.pred_iou_thresh = pred_iou_thresh
139
+ self.stability_score_thresh = stability_score_thresh
140
+ self.stability_score_offset = stability_score_offset
141
+ self.mask_threshold = mask_threshold
142
+ self.box_nms_thresh = box_nms_thresh
143
+ self.crop_n_layers = crop_n_layers
144
+ self.crop_nms_thresh = crop_nms_thresh
145
+ self.crop_overlap_ratio = crop_overlap_ratio
146
+ self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
147
+ self.min_mask_region_area = min_mask_region_area
148
+ self.output_mode = output_mode
149
+ self.use_m2m = use_m2m
150
+ self.multimask_output = multimask_output
151
+
152
+ @classmethod
153
+ def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2AutomaticMaskGenerator":
154
+ """
155
+ Load a pretrained model from the Hugging Face hub.
156
+
157
+ Arguments:
158
+ model_id (str): The Hugging Face repository ID.
159
+ **kwargs: Additional arguments to pass to the model constructor.
160
+
161
+ Returns:
162
+ (SAM2AutomaticMaskGenerator): The loaded model.
163
+ """
164
+ from sam2.build_sam import build_sam2_hf
165
+
166
+ sam_model = build_sam2_hf(model_id, **kwargs)
167
+ return cls(sam_model, **kwargs)
168
+
169
+ @torch.no_grad()
170
+ def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
171
+ """
172
+ Generates masks for the given image.
173
+
174
+ Arguments:
175
+ image (np.ndarray): The image to generate masks for, in HWC uint8 format.
176
+
177
+ Returns:
178
+ list(dict(str, any)): A list over records for masks. Each record is
179
+ a dict containing the following keys:
180
+ segmentation (dict(str, any) or np.ndarray): The mask. If
181
+ output_mode='binary_mask', is an array of shape HW. Otherwise,
182
+ is a dictionary containing the RLE.
183
+ bbox (list(float)): The box around the mask, in XYWH format.
184
+ area (int): The area in pixels of the mask.
185
+ predicted_iou (float): The model's own prediction of the mask's
186
+ quality. This is filtered by the pred_iou_thresh parameter.
187
+ point_coords (list(list(float))): The point coordinates input
188
+ to the model to generate this mask.
189
+ stability_score (float): A measure of the mask's quality. This
190
+ is filtered on using the stability_score_thresh parameter.
191
+ crop_box (list(float)): The crop of the image used to generate
192
+ the mask, given in XYWH format.
193
+ """
194
+
195
+ # Generate masks
196
+ mask_data = self._generate_masks(image)
197
+
198
+ # Encode masks
199
+ if self.output_mode == "coco_rle":
200
+ mask_data["segmentations"] = [
201
+ coco_encode_rle(rle) for rle in mask_data["rles"]
202
+ ]
203
+ elif self.output_mode == "binary_mask":
204
+ mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
205
+ else:
206
+ mask_data["segmentations"] = mask_data["rles"]
207
+
208
+ # Write mask records
209
+ curr_anns = []
210
+ for idx in range(len(mask_data["segmentations"])):
211
+ ann = {
212
+ "segmentation": mask_data["segmentations"][idx],
213
+ "area": area_from_rle(mask_data["rles"][idx]),
214
+ "bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
215
+ "predicted_iou": mask_data["iou_preds"][idx].item(),
216
+ "point_coords": [mask_data["points"][idx].tolist()],
217
+ "stability_score": mask_data["stability_score"][idx].item(),
218
+ "crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
219
+ }
220
+ curr_anns.append(ann)
221
+
222
+ return curr_anns
223
+
224
+ def _generate_masks(self, image: np.ndarray) -> MaskData:
225
+ orig_size = image.shape[:2]
226
+ crop_boxes, layer_idxs = generate_crop_boxes(
227
+ orig_size, self.crop_n_layers, self.crop_overlap_ratio
228
+ )
229
+
230
+ # Iterate over image crops
231
+ data = MaskData()
232
+ for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
233
+ crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
234
+ data.cat(crop_data)
235
+
236
+ # Remove duplicate masks between crops
237
+ if len(crop_boxes) > 1:
238
+ # Prefer masks from smaller crops
239
+ scores = 1 / box_area(data["crop_boxes"])
240
+ scores = scores.to(data["boxes"].device)
241
+ keep_by_nms = batched_nms(
242
+ data["boxes"].float(),
243
+ scores,
244
+ torch.zeros_like(data["boxes"][:, 0]), # categories
245
+ iou_threshold=self.crop_nms_thresh,
246
+ )
247
+ data.filter(keep_by_nms)
248
+ data.to_numpy()
249
+ return data
250
+
251
+ def _process_crop(
252
+ self,
253
+ image: np.ndarray,
254
+ crop_box: List[int],
255
+ crop_layer_idx: int,
256
+ orig_size: Tuple[int, ...],
257
+ ) -> MaskData:
258
+ # Crop the image and calculate embeddings
259
+ x0, y0, x1, y1 = crop_box
260
+ cropped_im = image[y0:y1, x0:x1, :]
261
+ cropped_im_size = cropped_im.shape[:2]
262
+ self.predictor.set_image(cropped_im)
263
+
264
+ # Get points for this crop
265
+ points_scale = np.array(cropped_im_size)[None, ::-1]
266
+ points_for_image = self.point_grids[crop_layer_idx] * points_scale
267
+
268
+ # Generate masks for this crop in batches
269
+ data = MaskData()
270
+ for (points,) in batch_iterator(self.points_per_batch, points_for_image):
271
+ batch_data = self._process_batch(
272
+ points, cropped_im_size, crop_box, orig_size, normalize=True
273
+ )
274
+ data.cat(batch_data)
275
+ del batch_data
276
+ self.predictor.reset_predictor()
277
+
278
+ # Remove duplicates within this crop.
279
+ keep_by_nms = batched_nms(
280
+ data["boxes"].float(),
281
+ data["iou_preds"],
282
+ torch.zeros_like(data["boxes"][:, 0]), # categories
283
+ iou_threshold=self.box_nms_thresh,
284
+ )
285
+ data.filter(keep_by_nms)
286
+
287
+ # Return to the original image frame
288
+ data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
289
+ data["points"] = uncrop_points(data["points"], crop_box)
290
+ data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])
291
+
292
+ return data
293
+
294
+ def _process_batch(
295
+ self,
296
+ points: np.ndarray,
297
+ im_size: Tuple[int, ...],
298
+ crop_box: List[int],
299
+ orig_size: Tuple[int, ...],
300
+ normalize=False,
301
+ ) -> MaskData:
302
+ orig_h, orig_w = orig_size
303
+
304
+ # Run model on this batch
305
+ points = torch.as_tensor(
306
+ points, dtype=torch.float32, device=self.predictor.device
307
+ )
308
+ in_points = self.predictor._transforms.transform_coords(
309
+ points, normalize=normalize, orig_hw=im_size
310
+ )
311
+ in_labels = torch.ones(
312
+ in_points.shape[0], dtype=torch.int, device=in_points.device
313
+ )
314
+ masks, iou_preds, low_res_masks = self.predictor._predict(
315
+ in_points[:, None, :],
316
+ in_labels[:, None],
317
+ multimask_output=self.multimask_output,
318
+ return_logits=True,
319
+ )
320
+
321
+ # Serialize predictions and store in MaskData
322
+ data = MaskData(
323
+ masks=masks.flatten(0, 1),
324
+ iou_preds=iou_preds.flatten(0, 1),
325
+ points=points.repeat_interleave(masks.shape[1], dim=0),
326
+ low_res_masks=low_res_masks.flatten(0, 1),
327
+ )
328
+ del masks
329
+
330
+ if not self.use_m2m:
331
+ # Filter by predicted IoU
332
+ if self.pred_iou_thresh > 0.0:
333
+ keep_mask = data["iou_preds"] > self.pred_iou_thresh
334
+ data.filter(keep_mask)
335
+
336
+ # Calculate and filter by stability score
337
+ data["stability_score"] = calculate_stability_score(
338
+ data["masks"], self.mask_threshold, self.stability_score_offset
339
+ )
340
+ if self.stability_score_thresh > 0.0:
341
+ keep_mask = data["stability_score"] >= self.stability_score_thresh
342
+ data.filter(keep_mask)
343
+ else:
344
+ # One step refinement using previous mask predictions
345
+ in_points = self.predictor._transforms.transform_coords(
346
+ data["points"], normalize=normalize, orig_hw=im_size
347
+ )
348
+ labels = torch.ones(
349
+ in_points.shape[0], dtype=torch.int, device=in_points.device
350
+ )
351
+ masks, ious = self.refine_with_m2m(
352
+ in_points, labels, data["low_res_masks"], self.points_per_batch
353
+ )
354
+ data["masks"] = masks.squeeze(1)
355
+ data["iou_preds"] = ious.squeeze(1)
356
+
357
+ if self.pred_iou_thresh > 0.0:
358
+ keep_mask = data["iou_preds"] > self.pred_iou_thresh
359
+ data.filter(keep_mask)
360
+
361
+ data["stability_score"] = calculate_stability_score(
362
+ data["masks"], self.mask_threshold, self.stability_score_offset
363
+ )
364
+ if self.stability_score_thresh > 0.0:
365
+ keep_mask = data["stability_score"] >= self.stability_score_thresh
366
+ data.filter(keep_mask)
367
+
368
+ # Threshold masks and calculate boxes
369
+ data["masks"] = data["masks"] > self.mask_threshold
370
+ data["boxes"] = batched_mask_to_box(data["masks"])
371
+
372
+ # Filter boxes that touch crop boundaries
373
+ keep_mask = ~is_box_near_crop_edge(
374
+ data["boxes"], crop_box, [0, 0, orig_w, orig_h]
375
+ )
376
+ if not torch.all(keep_mask):
377
+ data.filter(keep_mask)
378
+
379
+ # Compress to RLE
380
+ data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
381
+ data["rles"] = mask_to_rle_pytorch(data["masks"])
382
+ del data["masks"]
383
+
384
+ return data
385
+
386
+ @staticmethod
387
+ def postprocess_small_regions(
388
+ mask_data: MaskData, min_area: int, nms_thresh: float
389
+ ) -> MaskData:
390
+ """
391
+ Removes small disconnected regions and holes in masks, then reruns
392
+ box NMS to remove any new duplicates.
393
+
394
+ Edits mask_data in place.
395
+
396
+ Requires open-cv as a dependency.
397
+ """
398
+ if len(mask_data["rles"]) == 0:
399
+ return mask_data
400
+
401
+ # Filter small disconnected regions and holes
402
+ new_masks = []
403
+ scores = []
404
+ for rle in mask_data["rles"]:
405
+ mask = rle_to_mask(rle)
406
+
407
+ mask, changed = remove_small_regions(mask, min_area, mode="holes")
408
+ unchanged = not changed
409
+ mask, changed = remove_small_regions(mask, min_area, mode="islands")
410
+ unchanged = unchanged and not changed
411
+
412
+ new_masks.append(torch.as_tensor(mask).unsqueeze(0))
413
+ # Give score=0 to changed masks and score=1 to unchanged masks
414
+ # so NMS will prefer ones that didn't need postprocessing
415
+ scores.append(float(unchanged))
416
+
417
+ # Recalculate boxes and remove any new duplicates
418
+ masks = torch.cat(new_masks, dim=0)
419
+ boxes = batched_mask_to_box(masks)
420
+ keep_by_nms = batched_nms(
421
+ boxes.float(),
422
+ torch.as_tensor(scores),
423
+ torch.zeros_like(boxes[:, 0]), # categories
424
+ iou_threshold=nms_thresh,
425
+ )
426
+
427
+ # Only recalculate RLEs for masks that have changed
428
+ for i_mask in keep_by_nms:
429
+ if scores[i_mask] == 0.0:
430
+ mask_torch = masks[i_mask].unsqueeze(0)
431
+ mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0]
432
+ mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly
433
+ mask_data.filter(keep_by_nms)
434
+
435
+ return mask_data
436
+
437
+ def refine_with_m2m(self, points, point_labels, low_res_masks, points_per_batch):
438
+ new_masks = []
439
+ new_iou_preds = []
440
+
441
+ for cur_points, cur_point_labels, low_res_mask in batch_iterator(
442
+ points_per_batch, points, point_labels, low_res_masks
443
+ ):
444
+ best_masks, best_iou_preds, _ = self.predictor._predict(
445
+ cur_points[:, None, :],
446
+ cur_point_labels[:, None],
447
+ mask_input=low_res_mask[:, None, :],
448
+ multimask_output=False,
449
+ return_logits=True,
450
+ )
451
+ new_masks.append(best_masks)
452
+ new_iou_preds.append(best_iou_preds)
453
+ masks = torch.cat(new_masks, dim=0)
454
+ return masks, torch.cat(new_iou_preds, dim=0)
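As the generate() docstring notes, the generator consumes an HWC uint8 image and returns one record per mask. A hedged usage sketch with the small checkpoint shipped in this commit (the input image is a placeholder):

# Sketch: automatic mask generation on one image.
import numpy as np
from sam2.build_sam import build_sam2
from sam2.automatic_mask_generator import SAM2AutomaticMaskGenerator

model = build_sam2("checkpoints/sam2_hiera_s.yaml", "checkpoints/sam2_hiera_small.pt",
                   device="cpu", apply_postprocessing=False)
generator = SAM2AutomaticMaskGenerator(model, points_per_side=16, pred_iou_thresh=0.8)

image = np.zeros((512, 512, 3), dtype=np.uint8)  # placeholder HWC uint8 image
for rec in generator.generate(image):
    print(rec["bbox"], rec["area"], rec["predicted_iou"], rec["segmentation"].shape)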
segment-anything-2/sam2/build_sam.py ADDED
@@ -0,0 +1,132 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import logging
8
+
9
+ import torch
10
+ from hydra import compose
11
+ from hydra.utils import instantiate
12
+ from omegaconf import OmegaConf
13
+
14
+
15
+ def build_sam2(
16
+ config_file,
17
+ ckpt_path=None,
18
+ device="cuda",
19
+ mode="eval",
20
+ hydra_overrides_extra=[],
21
+ apply_postprocessing=True,
22
+ **kwargs,
23
+ ):
24
+
25
+ cfg = OmegaConf.load(config_file)
26
+
27
+ # Apply any overrides manually if needed
28
+ if apply_postprocessing:
29
+ cfg.model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability = True
30
+ cfg.model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta = 0.05
31
+ cfg.model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh = 0.98
32
+ OmegaConf.resolve(cfg)
33
+ model = instantiate(cfg.model, _recursive_=True)
34
+ _load_checkpoint(model, ckpt_path)
35
+ model = model.to(device)
36
+ if mode == "eval":
37
+ model.eval()
38
+ return model
39
+
40
+
41
+ def build_sam2_video_predictor(
42
+ config_file,
43
+ ckpt_path=None,
44
+ device="cuda",
45
+ mode="eval",
46
+ hydra_overrides_extra=[],
47
+ apply_postprocessing=True,
48
+ **kwargs,
49
+ ):
50
+ hydra_overrides = [
51
+ "++model._target_=sam2.sam2_video_predictor.SAM2VideoPredictor",
52
+ ]
53
+ if apply_postprocessing:
54
+ hydra_overrides_extra = hydra_overrides_extra.copy()
55
+ hydra_overrides_extra += [
56
+ # dynamically fall back to multi-mask if the single mask is not stable
57
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_via_stability=true",
58
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_delta=0.05",
59
+ "++model.sam_mask_decoder_extra_args.dynamic_multimask_stability_thresh=0.98",
60
+ # binarize the sigmoid mask logits on interacted frames with clicks in the memory encoder so that the encoded masks are exactly as what users see from clicking
61
+ "++model.binarize_mask_from_pts_for_mem_enc=true",
62
+ # fill small holes in the low-res masks up to `fill_hole_area` (before resizing them to the original video resolution)
63
+ "++model.fill_hole_area=8",
64
+ ]
65
+ hydra_overrides.extend(hydra_overrides_extra)
66
+
67
+ cfg = OmegaConf.load(config_file)
68
+
69
+ # Manually apply overrides
70
+ OmegaConf.set_struct(cfg, False) # allow setting new keys if needed
71
+ for override in hydra_overrides:
72
+ key, value = override.split("=")
73
+ parsed = {"true": True, "false": False}.get(value.lower(), value)
+ OmegaConf.update(cfg, key.strip("+"), parsed)
74
+
75
+ OmegaConf.resolve(cfg)
76
+ model = instantiate(cfg.model, _recursive_=True)
77
+ _load_checkpoint(model, ckpt_path)
78
+ model = model.to(device)
79
+ if mode == "eval":
80
+ model.eval()
81
+ return model
82
+
83
+
84
+ def build_sam2_hf(model_id, **kwargs):
85
+
86
+ from huggingface_hub import hf_hub_download
87
+
88
+ model_id_to_filenames = {
89
+ "facebook/sam2-hiera-tiny": ("sam2_hiera_t.yaml", "sam2_hiera_tiny.pt"),
90
+ "facebook/sam2-hiera-small": ("sam2_hiera_s.yaml", "sam2_hiera_small.pt"),
91
+ "facebook/sam2-hiera-base-plus": (
92
+ "sam2_hiera_b+.yaml",
93
+ "sam2_hiera_base_plus.pt",
94
+ ),
95
+ "facebook/sam2-hiera-large": ("sam2_hiera_l.yaml", "sam2_hiera_large.pt"),
96
+ }
97
+ config_name, checkpoint_name = model_id_to_filenames[model_id]
98
+ ckpt_path = hf_hub_download(repo_id=model_id, filename=checkpoint_name)
99
+ return build_sam2(config_file=config_name, ckpt_path=ckpt_path, **kwargs)
100
+
101
+
102
+ def build_sam2_video_predictor_hf(model_id, **kwargs):
103
+
104
+ from huggingface_hub import hf_hub_download
105
+
106
+ model_id_to_filenames = {
107
+ "facebook/sam2-hiera-tiny": ("sam2_hiera_t.yaml", "sam2_hiera_tiny.pt"),
108
+ "facebook/sam2-hiera-small": ("sam2_hiera_s.yaml", "sam2_hiera_small.pt"),
109
+ "facebook/sam2-hiera-base-plus": (
110
+ "sam2_hiera_b+.yaml",
111
+ "sam2_hiera_base_plus.pt",
112
+ ),
113
+ "facebook/sam2-hiera-large": ("sam2_hiera_l.yaml", "sam2_hiera_large.pt"),
114
+ }
115
+ config_name, checkpoint_name = model_id_to_filenames[model_id]
116
+ ckpt_path = hf_hub_download(repo_id=model_id, filename=checkpoint_name)
117
+ return build_sam2_video_predictor(
118
+ config_file=config_name, ckpt_path=ckpt_path, **kwargs
119
+ )
120
+
121
+
122
+ def _load_checkpoint(model, ckpt_path):
123
+ if ckpt_path is not None:
124
+ sd = torch.load(ckpt_path, map_location="cpu")["model"]
125
+ missing_keys, unexpected_keys = model.load_state_dict(sd)
126
+ if missing_keys:
127
+ logging.error(missing_keys)
128
+ raise RuntimeError()
129
+ if unexpected_keys:
130
+ logging.error(unexpected_keys)
131
+ raise RuntimeError()
132
+ logging.info("Loaded checkpoint successfully")
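Note that this build_sam.py differs from upstream SAM 2: configs appear to be loaded directly with OmegaConf rather than composed through Hydra, so config_file is expected to be a filesystem path. A hedged sketch of the two entry points this Space actually uses:

# Sketch: build the image model and the video predictor from local files (as app.py and sam_utils.py do).
from sam2.build_sam import build_sam2, build_sam2_video_predictor

image_model = build_sam2(
    config_file="checkpoints/sam2_hiera_s.yaml",
    ckpt_path="checkpoints/sam2_hiera_small.pt",
    device="cpu",
    apply_postprocessing=False,
)
video_predictor = build_sam2_video_predictor(
    config_file="checkpoints/sam2_hiera_s.yaml",
    ckpt_path="checkpoints/sam2_hiera_small.pt",
    device="cpu",
    apply_postprocessing=False,
)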
segment-anything-2/sam2/csrc/connected_components.cu ADDED
@@ -0,0 +1,289 @@
1
+ // Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ // All rights reserved.
3
+
4
+ // This source code is licensed under the license found in the
5
+ // LICENSE file in the root directory of this source tree.
6
+
7
+ // adapted from https://github.com/zsef123/Connected_components_PyTorch
8
+ // with license found in the LICENSE_cctorch file in the root directory.
9
+ #include <ATen/cuda/CUDAContext.h>
10
+ #include <cuda.h>
11
+ #include <cuda_runtime.h>
12
+ #include <torch/extension.h>
13
+ #include <torch/script.h>
14
+ #include <vector>
15
+
16
+ // 2d
17
+ #define BLOCK_ROWS 16
18
+ #define BLOCK_COLS 16
19
+
20
+ namespace cc2d {
21
+
22
+ template <typename T>
23
+ __device__ __forceinline__ unsigned char hasBit(T bitmap, unsigned char pos) {
24
+ return (bitmap >> pos) & 1;
25
+ }
26
+
27
+ __device__ int32_t find(const int32_t* s_buf, int32_t n) {
28
+ while (s_buf[n] != n)
29
+ n = s_buf[n];
30
+ return n;
31
+ }
32
+
33
+ __device__ int32_t find_n_compress(int32_t* s_buf, int32_t n) {
34
+ const int32_t id = n;
35
+ while (s_buf[n] != n) {
36
+ n = s_buf[n];
37
+ s_buf[id] = n;
38
+ }
39
+ return n;
40
+ }
41
+
42
+ __device__ void union_(int32_t* s_buf, int32_t a, int32_t b) {
43
+ bool done;
44
+ do {
45
+ a = find(s_buf, a);
46
+ b = find(s_buf, b);
47
+
48
+ if (a < b) {
49
+ int32_t old = atomicMin(s_buf + b, a);
50
+ done = (old == b);
51
+ b = old;
52
+ } else if (b < a) {
53
+ int32_t old = atomicMin(s_buf + a, b);
54
+ done = (old == a);
55
+ a = old;
56
+ } else
57
+ done = true;
58
+
59
+ } while (!done);
60
+ }
61
+
62
+ __global__ void
63
+ init_labeling(int32_t* label, const uint32_t W, const uint32_t H) {
64
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
65
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
66
+ const uint32_t idx = row * W + col;
67
+
68
+ if (row < H && col < W)
69
+ label[idx] = idx;
70
+ }
71
+
72
+ __global__ void
73
+ merge(uint8_t* img, int32_t* label, const uint32_t W, const uint32_t H) {
74
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
75
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
76
+ const uint32_t idx = row * W + col;
77
+
78
+ if (row >= H || col >= W)
79
+ return;
80
+
81
+ uint32_t P = 0;
82
+
83
+ if (img[idx])
84
+ P |= 0x777;
85
+ if (row + 1 < H && img[idx + W])
86
+ P |= 0x777 << 4;
87
+ if (col + 1 < W && img[idx + 1])
88
+ P |= 0x777 << 1;
89
+
90
+ if (col == 0)
91
+ P &= 0xEEEE;
92
+ if (col + 1 >= W)
93
+ P &= 0x3333;
94
+ else if (col + 2 >= W)
95
+ P &= 0x7777;
96
+
97
+ if (row == 0)
98
+ P &= 0xFFF0;
99
+ if (row + 1 >= H)
100
+ P &= 0xFF;
101
+
102
+ if (P > 0) {
103
+ // If the first flag bit is set, check the top-left pixel; when it is
+ // foreground, merge this block with the top-left block
105
+ if (hasBit(P, 0) && img[idx - W - 1]) {
106
+ union_(label, idx, idx - 2 * W - 2); // top left block
107
+ }
108
+
109
+ if ((hasBit(P, 1) && img[idx - W]) || (hasBit(P, 2) && img[idx - W + 1]))
110
+ union_(label, idx, idx - 2 * W); // top bottom block
111
+
112
+ if (hasBit(P, 3) && img[idx + 2 - W])
113
+ union_(label, idx, idx - 2 * W + 2); // top right block
114
+
115
+ if ((hasBit(P, 4) && img[idx - 1]) || (hasBit(P, 8) && img[idx + W - 1]))
116
+ union_(label, idx, idx - 2); // just left block
117
+ }
118
+ }
119
+
120
+ __global__ void compression(int32_t* label, const int32_t W, const int32_t H) {
121
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
122
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
123
+ const uint32_t idx = row * W + col;
124
+
125
+ if (row < H && col < W)
126
+ find_n_compress(label, idx);
127
+ }
128
+
129
+ __global__ void final_labeling(
130
+ const uint8_t* img,
131
+ int32_t* label,
132
+ const int32_t W,
133
+ const int32_t H) {
134
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
135
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
136
+ const uint32_t idx = row * W + col;
137
+
138
+ if (row >= H || col >= W)
139
+ return;
140
+
141
+ int32_t y = label[idx] + 1;
142
+
143
+ if (img[idx])
144
+ label[idx] = y;
145
+ else
146
+ label[idx] = 0;
147
+
148
+ if (col + 1 < W) {
149
+ if (img[idx + 1])
150
+ label[idx + 1] = y;
151
+ else
152
+ label[idx + 1] = 0;
153
+
154
+ if (row + 1 < H) {
155
+ if (img[idx + W + 1])
156
+ label[idx + W + 1] = y;
157
+ else
158
+ label[idx + W + 1] = 0;
159
+ }
160
+ }
161
+
162
+ if (row + 1 < H) {
163
+ if (img[idx + W])
164
+ label[idx + W] = y;
165
+ else
166
+ label[idx + W] = 0;
167
+ }
168
+ }
169
+
170
+ __global__ void init_counting(
171
+ const int32_t* label,
172
+ int32_t* count_init,
173
+ const int32_t W,
174
+ const int32_t H) {
175
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y);
176
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x);
177
+ const uint32_t idx = row * W + col;
178
+
179
+ if (row >= H || col >= W)
180
+ return;
181
+
182
+ int32_t y = label[idx];
183
+ if (y > 0) {
184
+ int32_t count_idx = y - 1;
185
+ atomicAdd(count_init + count_idx, 1);
186
+ }
187
+ }
188
+
189
+ __global__ void final_counting(
190
+ const int32_t* label,
191
+ const int32_t* count_init,
192
+ int32_t* count_final,
193
+ const int32_t W,
194
+ const int32_t H) {
195
+ const uint32_t row = (blockIdx.y * blockDim.y + threadIdx.y);
196
+ const uint32_t col = (blockIdx.x * blockDim.x + threadIdx.x);
197
+ const uint32_t idx = row * W + col;
198
+
199
+ if (row >= H || col >= W)
200
+ return;
201
+
202
+ int32_t y = label[idx];
203
+ if (y > 0) {
204
+ int32_t count_idx = y - 1;
205
+ count_final[idx] = count_init[count_idx];
206
+ } else {
207
+ count_final[idx] = 0;
208
+ }
209
+ }
210
+
211
+ } // namespace cc2d
212
+
213
+ std::vector<torch::Tensor> get_connected_componnets(
214
+ const torch::Tensor& inputs) {
215
+ AT_ASSERTM(inputs.is_cuda(), "inputs must be a CUDA tensor");
216
+ AT_ASSERTM(inputs.ndimension() == 4, "inputs must be [N, 1, H, W] shape");
217
+ AT_ASSERTM(
218
+ inputs.scalar_type() == torch::kUInt8, "inputs must be a uint8 type");
219
+
220
+ const uint32_t N = inputs.size(0);
221
+ const uint32_t C = inputs.size(1);
222
+ const uint32_t H = inputs.size(2);
223
+ const uint32_t W = inputs.size(3);
224
+
225
+ AT_ASSERTM(C == 1, "inputs must be [N, 1, H, W] shape");
226
+ AT_ASSERTM((H % 2) == 0, "height must be an even number");
227
+ AT_ASSERTM((W % 2) == 0, "width must be an even number");
228
+
229
+ // label must be uint32_t
230
+ auto label_options =
231
+ torch::TensorOptions().dtype(torch::kInt32).device(inputs.device());
232
+ torch::Tensor labels = torch::zeros({N, C, H, W}, label_options);
233
+ torch::Tensor counts_init = torch::zeros({N, C, H, W}, label_options);
234
+ torch::Tensor counts_final = torch::zeros({N, C, H, W}, label_options);
235
+
236
+ dim3 grid = dim3(
237
+ ((W + 1) / 2 + BLOCK_COLS - 1) / BLOCK_COLS,
238
+ ((H + 1) / 2 + BLOCK_ROWS - 1) / BLOCK_ROWS);
239
+ dim3 block = dim3(BLOCK_COLS, BLOCK_ROWS);
240
+ dim3 grid_count =
241
+ dim3((W + BLOCK_COLS) / BLOCK_COLS, (H + BLOCK_ROWS) / BLOCK_ROWS);
242
+ dim3 block_count = dim3(BLOCK_COLS, BLOCK_ROWS);
243
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();
244
+
245
+ for (int n = 0; n < N; n++) {
246
+ uint32_t offset = n * H * W;
247
+
248
+ cc2d::init_labeling<<<grid, block, 0, stream>>>(
249
+ labels.data_ptr<int32_t>() + offset, W, H);
250
+ cc2d::merge<<<grid, block, 0, stream>>>(
251
+ inputs.data_ptr<uint8_t>() + offset,
252
+ labels.data_ptr<int32_t>() + offset,
253
+ W,
254
+ H);
255
+ cc2d::compression<<<grid, block, 0, stream>>>(
256
+ labels.data_ptr<int32_t>() + offset, W, H);
257
+ cc2d::final_labeling<<<grid, block, 0, stream>>>(
258
+ inputs.data_ptr<uint8_t>() + offset,
259
+ labels.data_ptr<int32_t>() + offset,
260
+ W,
261
+ H);
262
+
263
+ // get the counting of each pixel
264
+ cc2d::init_counting<<<grid_count, block_count, 0, stream>>>(
265
+ labels.data_ptr<int32_t>() + offset,
266
+ counts_init.data_ptr<int32_t>() + offset,
267
+ W,
268
+ H);
269
+ cc2d::final_counting<<<grid_count, block_count, 0, stream>>>(
270
+ labels.data_ptr<int32_t>() + offset,
271
+ counts_init.data_ptr<int32_t>() + offset,
272
+ counts_final.data_ptr<int32_t>() + offset,
273
+ W,
274
+ H);
275
+ }
276
+
277
+ // returned values are [labels, counts]
278
+ std::vector<torch::Tensor> outputs;
279
+ outputs.push_back(labels);
280
+ outputs.push_back(counts_final);
281
+ return outputs;
282
+ }
283
+
284
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
285
+ m.def(
286
+ "get_connected_componnets",
287
+ &get_connected_componnets,
288
+ "get_connected_componnets");
289
+ }
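A sketch of how this kernel is reached from Python, assuming the extension builds into a module importable as sam2._C (the name used by setup.py); it needs a CUDA device and uint8 masks of shape [N, 1, H, W] with even height and width.

    import torch
    from sam2 import _C  # compiled from connected_components.cu (assumes the build succeeded)

    masks = (torch.rand(2, 1, 64, 64, device="cuda") > 0.5).to(torch.uint8)
    labels, counts = _C.get_connected_componnets(masks)
    # labels: per-pixel component id (0 = background)
    # counts: area of the component that each pixel belongs to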
segment-anything-2/sam2/modeling/__init__.py ADDED
@@ -0,0 +1,5 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
segment-anything-2/sam2/modeling/backbones/__init__.py ADDED
@@ -0,0 +1,5 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
segment-anything-2/sam2/modeling/backbones/hieradet.py ADDED
@@ -0,0 +1,291 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from functools import partial
8
+ from typing import List, Tuple, Union
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+ import torch.nn.functional as F
13
+
14
+ from sam2.modeling.backbones.utils import (
15
+ PatchEmbed,
16
+ window_partition,
17
+ window_unpartition,
18
+ )
19
+
20
+ from sam2.modeling.sam2_utils import DropPath, MLP
21
+
22
+
23
+ def do_pool(x: torch.Tensor, pool: nn.Module, norm: nn.Module = None) -> torch.Tensor:
24
+ if pool is None:
25
+ return x
26
+ # (B, H, W, C) -> (B, C, H, W)
27
+ x = x.permute(0, 3, 1, 2)
28
+ x = pool(x)
29
+ # (B, C, H', W') -> (B, H', W', C)
30
+ x = x.permute(0, 2, 3, 1)
31
+ if norm:
32
+ x = norm(x)
33
+
34
+ return x
35
+
36
+
37
+ class MultiScaleAttention(nn.Module):
38
+ def __init__(
39
+ self,
40
+ dim: int,
41
+ dim_out: int,
42
+ num_heads: int,
43
+ q_pool: nn.Module = None,
44
+ ):
45
+ super().__init__()
46
+
47
+ self.dim = dim
48
+ self.dim_out = dim_out
49
+ self.num_heads = num_heads
50
+ self.q_pool = q_pool
51
+ self.qkv = nn.Linear(dim, dim_out * 3)
52
+ self.proj = nn.Linear(dim_out, dim_out)
53
+
54
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
55
+ B, H, W, _ = x.shape
56
+ # qkv with shape (B, H * W, 3, nHead, C)
57
+ qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1)
58
+ # q, k, v with shape (B, H * W, nheads, C)
59
+ q, k, v = torch.unbind(qkv, 2)
60
+
61
+ # Q pooling (for downsample at stage changes)
62
+ if self.q_pool:
63
+ q = do_pool(q.reshape(B, H, W, -1), self.q_pool)
64
+ H, W = q.shape[1:3] # downsampled shape
65
+ q = q.reshape(B, H * W, self.num_heads, -1)
66
+
67
+ # Torch's SDPA expects [B, nheads, H*W, C] so we transpose
68
+ x = F.scaled_dot_product_attention(
69
+ q.transpose(1, 2),
70
+ k.transpose(1, 2),
71
+ v.transpose(1, 2),
72
+ )
73
+ # Transpose back
74
+ x = x.transpose(1, 2)
75
+ x = x.reshape(B, H, W, -1)
76
+
77
+ x = self.proj(x)
78
+
79
+ return x
80
+
81
+
82
+ class MultiScaleBlock(nn.Module):
83
+ def __init__(
84
+ self,
85
+ dim: int,
86
+ dim_out: int,
87
+ num_heads: int,
88
+ mlp_ratio: float = 4.0,
89
+ drop_path: float = 0.0,
90
+ norm_layer: Union[nn.Module, str] = "LayerNorm",
91
+ q_stride: Tuple[int, int] = None,
92
+ act_layer: nn.Module = nn.GELU,
93
+ window_size: int = 0,
94
+ ):
95
+ super().__init__()
96
+
97
+ if isinstance(norm_layer, str):
98
+ norm_layer = partial(getattr(nn, norm_layer), eps=1e-6)
99
+
100
+ self.dim = dim
101
+ self.dim_out = dim_out
102
+ self.norm1 = norm_layer(dim)
103
+
104
+ self.window_size = window_size
105
+
106
+ self.pool, self.q_stride = None, q_stride
107
+ if self.q_stride:
108
+ self.pool = nn.MaxPool2d(
109
+ kernel_size=q_stride, stride=q_stride, ceil_mode=False
110
+ )
111
+
112
+ self.attn = MultiScaleAttention(
113
+ dim,
114
+ dim_out,
115
+ num_heads=num_heads,
116
+ q_pool=self.pool,
117
+ )
118
+ self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
119
+
120
+ self.norm2 = norm_layer(dim_out)
121
+ self.mlp = MLP(
122
+ dim_out,
123
+ int(dim_out * mlp_ratio),
124
+ dim_out,
125
+ num_layers=2,
126
+ activation=act_layer,
127
+ )
128
+
129
+ if dim != dim_out:
130
+ self.proj = nn.Linear(dim, dim_out)
131
+
132
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
133
+ shortcut = x # B, H, W, C
134
+ x = self.norm1(x)
135
+
136
+ # Skip connection
137
+ if self.dim != self.dim_out:
138
+ shortcut = do_pool(self.proj(x), self.pool)
139
+
140
+ # Window partition
141
+ window_size = self.window_size
142
+ if window_size > 0:
143
+ H, W = x.shape[1], x.shape[2]
144
+ x, pad_hw = window_partition(x, window_size)
145
+
146
+ # Window Attention + Q Pooling (if stage change)
147
+ x = self.attn(x)
148
+ if self.q_stride:
149
+ # Shapes have changed due to Q pooling
150
+ window_size = self.window_size // self.q_stride[0]
151
+ H, W = shortcut.shape[1:3]
152
+
153
+ pad_h = (window_size - H % window_size) % window_size
154
+ pad_w = (window_size - W % window_size) % window_size
155
+ pad_hw = (H + pad_h, W + pad_w)
156
+
157
+ # Reverse window partition
158
+ if self.window_size > 0:
159
+ x = window_unpartition(x, window_size, pad_hw, (H, W))
160
+
161
+ x = shortcut + self.drop_path(x)
162
+ # MLP
163
+ x = x + self.drop_path(self.mlp(self.norm2(x)))
164
+ return x
165
+
166
+
167
+ class Hiera(nn.Module):
168
+ """
169
+ Reference: https://arxiv.org/abs/2306.00989
170
+ """
171
+
172
+ def __init__(
173
+ self,
174
+ embed_dim: int = 96, # initial embed dim
175
+ num_heads: int = 1, # initial number of heads
176
+ drop_path_rate: float = 0.0, # stochastic depth
177
+ q_pool: int = 3, # number of q_pool stages
178
+ q_stride: Tuple[int, int] = (2, 2), # downsample stride bet. stages
179
+ stages: Tuple[int, ...] = (2, 3, 16, 3), # blocks per stage
180
+ dim_mul: float = 2.0, # dim_mul factor at stage shift
181
+ head_mul: float = 2.0, # head_mul factor at stage shift
182
+ window_pos_embed_bkg_spatial_size: Tuple[int, int] = (14, 14),
183
+ # window size per stage, when not using global att.
184
+ window_spec: Tuple[int, ...] = (
185
+ 8,
186
+ 4,
187
+ 14,
188
+ 7,
189
+ ),
190
+ # global attn in these blocks
191
+ global_att_blocks: Tuple[int, ...] = (
192
+ 12,
193
+ 16,
194
+ 20,
195
+ ),
196
+ return_interm_layers=True, # return feats from every stage
197
+ ):
198
+ super().__init__()
199
+
200
+ assert len(stages) == len(window_spec)
201
+ self.window_spec = window_spec
202
+
203
+ depth = sum(stages)
204
+ self.q_stride = q_stride
205
+ self.stage_ends = [sum(stages[:i]) - 1 for i in range(1, len(stages) + 1)]
206
+ assert 0 <= q_pool <= len(self.stage_ends[:-1])
207
+ self.q_pool_blocks = [x + 1 for x in self.stage_ends[:-1]][:q_pool]
208
+ self.return_interm_layers = return_interm_layers
209
+
210
+ self.patch_embed = PatchEmbed(
211
+ embed_dim=embed_dim,
212
+ )
213
+ # Which blocks have global att?
214
+ self.global_att_blocks = global_att_blocks
215
+
216
+ # Windowed positional embedding (https://arxiv.org/abs/2311.05613)
217
+ self.window_pos_embed_bkg_spatial_size = window_pos_embed_bkg_spatial_size
218
+ self.pos_embed = nn.Parameter(
219
+ torch.zeros(1, embed_dim, *self.window_pos_embed_bkg_spatial_size)
220
+ )
221
+ self.pos_embed_window = nn.Parameter(
222
+ torch.zeros(1, embed_dim, self.window_spec[0], self.window_spec[0])
223
+ )
224
+
225
+ dpr = [
226
+ x.item() for x in torch.linspace(0, drop_path_rate, depth)
227
+ ] # stochastic depth decay rule
228
+
229
+ cur_stage = 1
230
+ self.blocks = nn.ModuleList()
231
+
232
+ for i in range(depth):
233
+ dim_out = embed_dim
234
+ # lags by a block, so first block of
235
+ # next stage uses an initial window size
236
+ # of previous stage and final window size of current stage
237
+ window_size = self.window_spec[cur_stage - 1]
238
+
239
+ if self.global_att_blocks is not None:
240
+ window_size = 0 if i in self.global_att_blocks else window_size
241
+
242
+ if i - 1 in self.stage_ends:
243
+ dim_out = int(embed_dim * dim_mul)
244
+ num_heads = int(num_heads * head_mul)
245
+ cur_stage += 1
246
+
247
+ block = MultiScaleBlock(
248
+ dim=embed_dim,
249
+ dim_out=dim_out,
250
+ num_heads=num_heads,
251
+ drop_path=dpr[i],
252
+ q_stride=self.q_stride if i in self.q_pool_blocks else None,
253
+ window_size=window_size,
254
+ )
255
+
256
+ embed_dim = dim_out
257
+ self.blocks.append(block)
258
+
259
+ self.channel_list = (
260
+ [self.blocks[i].dim_out for i in self.stage_ends[::-1]]
261
+ if return_interm_layers
262
+ else [self.blocks[-1].dim_out]
263
+ )
264
+
265
+ def _get_pos_embed(self, hw: Tuple[int, int]) -> torch.Tensor:
266
+ h, w = hw
267
+ window_embed = self.pos_embed_window
268
+ pos_embed = F.interpolate(self.pos_embed, size=(h, w), mode="bicubic")
269
+ pos_embed = pos_embed + window_embed.tile(
270
+ [x // y for x, y in zip(pos_embed.shape, window_embed.shape)]
271
+ )
272
+ pos_embed = pos_embed.permute(0, 2, 3, 1)
273
+ return pos_embed
274
+
275
+ def forward(self, x: torch.Tensor) -> List[torch.Tensor]:
276
+ x = self.patch_embed(x)
277
+ # x: (B, H, W, C)
278
+
279
+ # Add pos embed
280
+ x = x + self._get_pos_embed(x.shape[1:3])
281
+
282
+ outputs = []
283
+ for i, blk in enumerate(self.blocks):
284
+ x = blk(x)
285
+ if (i == self.stage_ends[-1]) or (
286
+ i in self.stage_ends and self.return_interm_layers
287
+ ):
288
+ feats = x.permute(0, 3, 1, 2)
289
+ outputs.append(feats)
290
+
291
+ return outputs
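A quick shape check for the Hiera trunk (a sketch using the class defaults, which do not correspond to the small checkpoint shipped with this Space).

    import torch
    from sam2.modeling.backbones.hieradet import Hiera

    trunk = Hiera()  # embed_dim=96, stages=(2, 3, 16, 3), interm layers returned
    feats = trunk(torch.randn(1, 3, 512, 512))
    for f in feats:
        print(f.shape)
    # (1, 96, 128, 128), (1, 192, 64, 64), (1, 384, 32, 32), (1, 768, 16, 16):
    # one (B, C, H, W) map per stage, at strides 4, 8, 16 and 32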
segment-anything-2/sam2/modeling/backbones/image_encoder.py ADDED
@@ -0,0 +1,133 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from typing import List, Optional
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F
12
+
13
+
14
+ class ImageEncoder(nn.Module):
15
+ def __init__(
16
+ self,
17
+ trunk: nn.Module,
18
+ neck: nn.Module,
19
+ scalp: int = 0,
20
+ ):
21
+ super().__init__()
22
+ self.trunk = trunk
23
+ self.neck = neck
24
+ self.scalp = scalp
25
+ assert (
26
+ self.trunk.channel_list == self.neck.backbone_channel_list
27
+ ), f"Channel dims of trunk and neck do not match. Trunk: {self.trunk.channel_list}, neck: {self.neck.backbone_channel_list}"
28
+
29
+ def forward(self, sample: torch.Tensor):
30
+ # Forward through backbone
31
+ features, pos = self.neck(self.trunk(sample))
32
+ if self.scalp > 0:
33
+ # Discard the lowest resolution features
34
+ features, pos = features[: -self.scalp], pos[: -self.scalp]
35
+
36
+ src = features[-1]
37
+ output = {
38
+ "vision_features": src,
39
+ "vision_pos_enc": pos,
40
+ "backbone_fpn": features,
41
+ }
42
+ return output
43
+
44
+
45
+ class FpnNeck(nn.Module):
46
+ """
47
+ A modified variant of Feature Pyramid Network (FPN) neck
48
+ (we remove output conv and also do bicubic interpolation similar to ViT
49
+ pos embed interpolation)
50
+ """
51
+
52
+ def __init__(
53
+ self,
54
+ position_encoding: nn.Module,
55
+ d_model: int,
56
+ backbone_channel_list: List[int],
57
+ kernel_size: int = 1,
58
+ stride: int = 1,
59
+ padding: int = 0,
60
+ fpn_interp_model: str = "bilinear",
61
+ fuse_type: str = "sum",
62
+ fpn_top_down_levels: Optional[List[int]] = None,
63
+ ):
64
+ """Initialize the neck
65
+ :param trunk: the backbone
66
+ :param position_encoding: the positional encoding to use
67
+ :param d_model: the dimension of the model
68
+ :param neck_norm: the normalization to use
69
+ """
70
+ super().__init__()
71
+ self.position_encoding = position_encoding
72
+ self.convs = nn.ModuleList()
73
+ self.backbone_channel_list = backbone_channel_list
74
+ for dim in backbone_channel_list:
75
+ current = nn.Sequential()
76
+ current.add_module(
77
+ "conv",
78
+ nn.Conv2d(
79
+ in_channels=dim,
80
+ out_channels=d_model,
81
+ kernel_size=kernel_size,
82
+ stride=stride,
83
+ padding=padding,
84
+ ),
85
+ )
86
+
87
+ self.convs.append(current)
88
+ self.fpn_interp_model = fpn_interp_model
89
+ assert fuse_type in ["sum", "avg"]
90
+ self.fuse_type = fuse_type
91
+
92
+ # levels to have top-down features in its outputs
93
+ # e.g. if fpn_top_down_levels is [2, 3], then only outputs of level 2 and 3
94
+ # have top-down propagation, while outputs of level 0 and level 1 have only
95
+ # lateral features from the same backbone level.
96
+ if fpn_top_down_levels is None:
97
+ # default is to have top-down features on all levels
98
+ fpn_top_down_levels = range(len(self.convs))
99
+ self.fpn_top_down_levels = list(fpn_top_down_levels)
100
+
101
+ def forward(self, xs: List[torch.Tensor]):
102
+
103
+ out = [None] * len(self.convs)
104
+ pos = [None] * len(self.convs)
105
+ assert len(xs) == len(self.convs)
106
+ # fpn forward pass
107
+ # see https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/fpn.py
108
+ prev_features = None
109
+ # forward in top-down order (from low to high resolution)
110
+ n = len(self.convs) - 1
111
+ for i in range(n, -1, -1):
112
+ x = xs[i]
113
+ lateral_features = self.convs[n - i](x)
114
+ if i in self.fpn_top_down_levels and prev_features is not None:
115
+ top_down_features = F.interpolate(
116
+ prev_features.to(dtype=torch.float32),
117
+ scale_factor=2.0,
118
+ mode=self.fpn_interp_model,
119
+ align_corners=(
120
+ None if self.fpn_interp_model == "nearest" else False
121
+ ),
122
+ antialias=False,
123
+ )
124
+ prev_features = lateral_features + top_down_features
125
+ if self.fuse_type == "avg":
126
+ prev_features /= 2
127
+ else:
128
+ prev_features = lateral_features
129
+ x_out = prev_features
130
+ out[i] = x_out
131
+ pos[i] = self.position_encoding(x_out).to(x_out.dtype)
132
+
133
+ return out, pos
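A small sketch of the FPN neck on dummy multi-scale features matching the Hiera shapes above, assuming the sine position encoding defined later in this commit (position_encoding.py).

    import torch
    from sam2.modeling.backbones.image_encoder import FpnNeck
    from sam2.modeling.position_encoding import PositionEmbeddingSine

    neck = FpnNeck(
        position_encoding=PositionEmbeddingSine(num_pos_feats=256),
        d_model=256,
        backbone_channel_list=[768, 384, 192, 96],
    )
    # Trunk outputs are ordered high-res -> low-res (strides 4, 8, 16, 32).
    xs = [
        torch.randn(1, 96, 128, 128),
        torch.randn(1, 192, 64, 64),
        torch.randn(1, 384, 32, 32),
        torch.randn(1, 768, 16, 16),
    ]
    feats, pos = neck(xs)
    print([tuple(f.shape) for f in feats])  # every level projected to 256 channels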
segment-anything-2/sam2/modeling/backbones/utils.py ADDED
@@ -0,0 +1,95 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Some utilities for backbones, in particular for windowing"""
8
+
9
+ from typing import Tuple
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ import torch.nn.functional as F
14
+
15
+
16
+ def window_partition(x, window_size):
17
+ """
18
+ Partition into non-overlapping windows with padding if needed.
19
+ Args:
20
+ x (tensor): input tokens with [B, H, W, C].
21
+ window_size (int): window size.
22
+ Returns:
23
+ windows: windows after partition with [B * num_windows, window_size, window_size, C].
24
+ (Hp, Wp): padded height and width before partition
25
+ """
26
+ B, H, W, C = x.shape
27
+
28
+ pad_h = (window_size - H % window_size) % window_size
29
+ pad_w = (window_size - W % window_size) % window_size
30
+ if pad_h > 0 or pad_w > 0:
31
+ x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
32
+ Hp, Wp = H + pad_h, W + pad_w
33
+
34
+ x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
35
+ windows = (
36
+ x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
37
+ )
38
+ return windows, (Hp, Wp)
39
+
40
+
41
+ def window_unpartition(windows, window_size, pad_hw, hw):
42
+ """
43
+ Window unpartition into original sequences and removing padding.
44
+ Args:
45
+ x (tensor): input tokens with [B * num_windows, window_size, window_size, C].
46
+ window_size (int): window size.
47
+ pad_hw (Tuple): padded height and width (Hp, Wp).
48
+ hw (Tuple): original height and width (H, W) before padding.
49
+ Returns:
50
+ x: unpartitioned sequences with [B, H, W, C].
51
+ """
52
+ Hp, Wp = pad_hw
53
+ H, W = hw
54
+ B = windows.shape[0] // (Hp * Wp // window_size // window_size)
55
+ x = windows.view(
56
+ B, Hp // window_size, Wp // window_size, window_size, window_size, -1
57
+ )
58
+ x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
59
+
60
+ if Hp > H or Wp > W:
61
+ x = x[:, :H, :W, :].contiguous()
62
+ return x
63
+
64
+
65
+ class PatchEmbed(nn.Module):
66
+ """
67
+ Image to Patch Embedding.
68
+ """
69
+
70
+ def __init__(
71
+ self,
72
+ kernel_size: Tuple[int, ...] = (7, 7),
73
+ stride: Tuple[int, ...] = (4, 4),
74
+ padding: Tuple[int, ...] = (3, 3),
75
+ in_chans: int = 3,
76
+ embed_dim: int = 768,
77
+ ):
78
+ """
79
+ Args:
80
+ kernel_size (Tuple): kernel size of the projection layer.
81
+ stride (Tuple): stride of the projection layer.
82
+ padding (Tuple): padding size of the projection layer.
83
+ in_chans (int): Number of input image channels.
84
+ embed_dim (int): embed_dim (int): Patch embedding dimension.
85
+ """
86
+ super().__init__()
87
+ self.proj = nn.Conv2d(
88
+ in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
89
+ )
90
+
91
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
92
+ x = self.proj(x)
93
+ # B C H W -> B H W C
94
+ x = x.permute(0, 2, 3, 1)
95
+ return x
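A round-trip sanity check for the windowing helpers (a sketch).

    import torch
    from sam2.modeling.backbones.utils import window_partition, window_unpartition

    x = torch.randn(2, 50, 70, 96)                          # (B, H, W, C), not a window multiple
    windows, pad_hw = window_partition(x, window_size=8)    # pads to 56 x 72
    y = window_unpartition(windows, 8, pad_hw, (50, 70))    # padding removed again
    assert torch.equal(x, y)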
segment-anything-2/sam2/modeling/memory_attention.py ADDED
@@ -0,0 +1,169 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from typing import Optional
8
+
9
+ import torch
10
+ from torch import nn, Tensor
11
+
12
+ from sam2.modeling.sam.transformer import RoPEAttention
13
+
14
+ from sam2.modeling.sam2_utils import get_activation_fn, get_clones
15
+
16
+
17
+ class MemoryAttentionLayer(nn.Module):
18
+
19
+ def __init__(
20
+ self,
21
+ activation: str,
22
+ cross_attention: nn.Module,
23
+ d_model: int,
24
+ dim_feedforward: int,
25
+ dropout: float,
26
+ pos_enc_at_attn: bool,
27
+ pos_enc_at_cross_attn_keys: bool,
28
+ pos_enc_at_cross_attn_queries: bool,
29
+ self_attention: nn.Module,
30
+ ):
31
+ super().__init__()
32
+ self.d_model = d_model
33
+ self.dim_feedforward = dim_feedforward
34
+ self.dropout_value = dropout
35
+ self.self_attn = self_attention
36
+ self.cross_attn_image = cross_attention
37
+
38
+ # Implementation of Feedforward model
39
+ self.linear1 = nn.Linear(d_model, dim_feedforward)
40
+ self.dropout = nn.Dropout(dropout)
41
+ self.linear2 = nn.Linear(dim_feedforward, d_model)
42
+
43
+ self.norm1 = nn.LayerNorm(d_model)
44
+ self.norm2 = nn.LayerNorm(d_model)
45
+ self.norm3 = nn.LayerNorm(d_model)
46
+ self.dropout1 = nn.Dropout(dropout)
47
+ self.dropout2 = nn.Dropout(dropout)
48
+ self.dropout3 = nn.Dropout(dropout)
49
+
50
+ self.activation_str = activation
51
+ self.activation = get_activation_fn(activation)
52
+
53
+ # Where to add pos enc
54
+ self.pos_enc_at_attn = pos_enc_at_attn
55
+ self.pos_enc_at_cross_attn_queries = pos_enc_at_cross_attn_queries
56
+ self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys
57
+
58
+ def _forward_sa(self, tgt, query_pos):
59
+ # Self-Attention
60
+ tgt2 = self.norm1(tgt)
61
+ q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2
62
+ tgt2 = self.self_attn(q, k, v=tgt2)
63
+ tgt = tgt + self.dropout1(tgt2)
64
+ return tgt
65
+
66
+ def _forward_ca(self, tgt, memory, query_pos, pos, num_k_exclude_rope=0):
67
+ kwds = {}
68
+ if num_k_exclude_rope > 0:
69
+ assert isinstance(self.cross_attn_image, RoPEAttention)
70
+ kwds = {"num_k_exclude_rope": num_k_exclude_rope}
71
+
72
+ # Cross-Attention
73
+ tgt2 = self.norm2(tgt)
74
+ tgt2 = self.cross_attn_image(
75
+ q=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2,
76
+ k=memory + pos if self.pos_enc_at_cross_attn_keys else memory,
77
+ v=memory,
78
+ **kwds,
79
+ )
80
+ tgt = tgt + self.dropout2(tgt2)
81
+ return tgt
82
+
83
+ def forward(
84
+ self,
85
+ tgt,
86
+ memory,
87
+ pos: Optional[Tensor] = None,
88
+ query_pos: Optional[Tensor] = None,
89
+ num_k_exclude_rope: int = 0,
90
+ ) -> torch.Tensor:
91
+
92
+ # Self-Attn, Cross-Attn
93
+ tgt = self._forward_sa(tgt, query_pos)
94
+ tgt = self._forward_ca(tgt, memory, query_pos, pos, num_k_exclude_rope)
95
+ # MLP
96
+ tgt2 = self.norm3(tgt)
97
+ tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
98
+ tgt = tgt + self.dropout3(tgt2)
99
+ return tgt
100
+
101
+
102
+ class MemoryAttention(nn.Module):
103
+ def __init__(
104
+ self,
105
+ d_model: int,
106
+ pos_enc_at_input: bool,
107
+ layer: nn.Module,
108
+ num_layers: int,
109
+ batch_first: bool = True, # Do layers expect batch first input?
110
+ ):
111
+ super().__init__()
112
+ self.d_model = d_model
113
+ self.layers = get_clones(layer, num_layers)
114
+ self.num_layers = num_layers
115
+ self.norm = nn.LayerNorm(d_model)
116
+ self.pos_enc_at_input = pos_enc_at_input
117
+ self.batch_first = batch_first
118
+
119
+ def forward(
120
+ self,
121
+ curr: torch.Tensor, # self-attention inputs
122
+ memory: torch.Tensor, # cross-attention inputs
123
+ curr_pos: Optional[Tensor] = None, # pos_enc for self-attention inputs
124
+ memory_pos: Optional[Tensor] = None, # pos_enc for cross-attention inputs
125
+ num_obj_ptr_tokens: int = 0, # number of object pointer *tokens*
126
+ ):
127
+ if isinstance(curr, list):
128
+ assert isinstance(curr_pos, list)
129
+ assert len(curr) == len(curr_pos) == 1
130
+ curr, curr_pos = (
131
+ curr[0],
132
+ curr_pos[0],
133
+ )
134
+
135
+ assert (
136
+ curr.shape[1] == memory.shape[1]
137
+ ), "Batch size must be the same for curr and memory"
138
+
139
+ output = curr
140
+ if self.pos_enc_at_input and curr_pos is not None:
141
+ output = output + 0.1 * curr_pos
142
+
143
+ if self.batch_first:
144
+ # Convert to batch first
145
+ output = output.transpose(0, 1)
146
+ curr_pos = curr_pos.transpose(0, 1)
147
+ memory = memory.transpose(0, 1)
148
+ memory_pos = memory_pos.transpose(0, 1)
149
+
150
+ for layer in self.layers:
151
+ kwds = {}
152
+ if isinstance(layer.cross_attn_image, RoPEAttention):
153
+ kwds = {"num_k_exclude_rope": num_obj_ptr_tokens}
154
+
155
+ output = layer(
156
+ tgt=output,
157
+ memory=memory,
158
+ pos=memory_pos,
159
+ query_pos=curr_pos,
160
+ **kwds,
161
+ )
162
+ normed_output = self.norm(output)
163
+
164
+ if self.batch_first:
165
+ # Convert back to seq first
166
+ normed_output = normed_output.transpose(0, 1)
167
+ curr_pos = curr_pos.transpose(0, 1)
168
+
169
+ return normed_output
segment-anything-2/sam2/modeling/memory_encoder.py ADDED
@@ -0,0 +1,181 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import math
8
+ from typing import Tuple
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+ import torch.nn.functional as F
13
+
14
+ from sam2.modeling.sam2_utils import DropPath, get_clones, LayerNorm2d
15
+
16
+
17
+ class MaskDownSampler(nn.Module):
18
+ """
19
+ Progressively downsample a mask by total_stride, each time by stride.
20
+ Note that LayerNorm is applied per *token*, like in ViT.
21
+
22
+ With each downsample (by a factor stride**2), channel capacity increases by the same factor.
23
+ In the end, we linearly project to embed_dim channels.
24
+ """
25
+
26
+ def __init__(
27
+ self,
28
+ embed_dim=256,
29
+ kernel_size=4,
30
+ stride=4,
31
+ padding=0,
32
+ total_stride=16,
33
+ activation=nn.GELU,
34
+ ):
35
+ super().__init__()
36
+ num_layers = int(math.log2(total_stride) // math.log2(stride))
37
+ assert stride**num_layers == total_stride
38
+ self.encoder = nn.Sequential()
39
+ mask_in_chans, mask_out_chans = 1, 1
40
+ for _ in range(num_layers):
41
+ mask_out_chans = mask_in_chans * (stride**2)
42
+ self.encoder.append(
43
+ nn.Conv2d(
44
+ mask_in_chans,
45
+ mask_out_chans,
46
+ kernel_size=kernel_size,
47
+ stride=stride,
48
+ padding=padding,
49
+ )
50
+ )
51
+ self.encoder.append(LayerNorm2d(mask_out_chans))
52
+ self.encoder.append(activation())
53
+ mask_in_chans = mask_out_chans
54
+
55
+ self.encoder.append(nn.Conv2d(mask_out_chans, embed_dim, kernel_size=1))
56
+
57
+ def forward(self, x):
58
+ return self.encoder(x)
59
+
60
+
61
+ # Lightly adapted from ConvNext (https://github.com/facebookresearch/ConvNeXt)
62
+ class CXBlock(nn.Module):
63
+ r"""ConvNeXt Block. There are two equivalent implementations:
64
+ (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)
65
+ (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back
66
+ We use (2) as we find it slightly faster in PyTorch
67
+
68
+ Args:
69
+ dim (int): Number of input channels.
70
+ drop_path (float): Stochastic depth rate. Default: 0.0
71
+ layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
72
+ """
73
+
74
+ def __init__(
75
+ self,
76
+ dim,
77
+ kernel_size=7,
78
+ padding=3,
79
+ drop_path=0.0,
80
+ layer_scale_init_value=1e-6,
81
+ use_dwconv=True,
82
+ ):
83
+ super().__init__()
84
+ self.dwconv = nn.Conv2d(
85
+ dim,
86
+ dim,
87
+ kernel_size=kernel_size,
88
+ padding=padding,
89
+ groups=dim if use_dwconv else 1,
90
+ ) # depthwise conv
91
+ self.norm = LayerNorm2d(dim, eps=1e-6)
92
+ self.pwconv1 = nn.Linear(
93
+ dim, 4 * dim
94
+ ) # pointwise/1x1 convs, implemented with linear layers
95
+ self.act = nn.GELU()
96
+ self.pwconv2 = nn.Linear(4 * dim, dim)
97
+ self.gamma = (
98
+ nn.Parameter(layer_scale_init_value * torch.ones((dim)), requires_grad=True)
99
+ if layer_scale_init_value > 0
100
+ else None
101
+ )
102
+ self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
103
+
104
+ def forward(self, x):
105
+ input = x
106
+ x = self.dwconv(x)
107
+ x = self.norm(x)
108
+ x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)
109
+ x = self.pwconv1(x)
110
+ x = self.act(x)
111
+ x = self.pwconv2(x)
112
+ if self.gamma is not None:
113
+ x = self.gamma * x
114
+ x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)
115
+
116
+ x = input + self.drop_path(x)
117
+ return x
118
+
119
+
120
+ class Fuser(nn.Module):
121
+ def __init__(self, layer, num_layers, dim=None, input_projection=False):
122
+ super().__init__()
123
+ self.proj = nn.Identity()
124
+ self.layers = get_clones(layer, num_layers)
125
+
126
+ if input_projection:
127
+ assert dim is not None
128
+ self.proj = nn.Conv2d(dim, dim, kernel_size=1)
129
+
130
+ def forward(self, x):
131
+ # normally x: (N, C, H, W)
132
+ x = self.proj(x)
133
+ for layer in self.layers:
134
+ x = layer(x)
135
+ return x
136
+
137
+
138
+ class MemoryEncoder(nn.Module):
139
+ def __init__(
140
+ self,
141
+ out_dim,
142
+ mask_downsampler,
143
+ fuser,
144
+ position_encoding,
145
+ in_dim=256, # in_dim of pix_feats
146
+ ):
147
+ super().__init__()
148
+
149
+ self.mask_downsampler = mask_downsampler
150
+
151
+ self.pix_feat_proj = nn.Conv2d(in_dim, in_dim, kernel_size=1)
152
+ self.fuser = fuser
153
+ self.position_encoding = position_encoding
154
+ self.out_proj = nn.Identity()
155
+ if out_dim != in_dim:
156
+ self.out_proj = nn.Conv2d(in_dim, out_dim, kernel_size=1)
157
+
158
+ def forward(
159
+ self,
160
+ pix_feat: torch.Tensor,
161
+ masks: torch.Tensor,
162
+ skip_mask_sigmoid: bool = False,
163
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
164
+ ## Process masks
165
+ # sigmoid, so that less domain shift from gt masks which are bool
166
+ if not skip_mask_sigmoid:
167
+ masks = F.sigmoid(masks)
168
+ masks = self.mask_downsampler(masks)
169
+
170
+ ## Fuse pix_feats and downsampled masks
171
+ # in case the visual features are on CPU, cast them to CUDA
172
+ pix_feat = pix_feat.to(masks.device)
173
+
174
+ x = self.pix_feat_proj(pix_feat)
175
+ x = x + masks
176
+ x = self.fuser(x)
177
+ x = self.out_proj(x)
178
+
179
+ pos = self.position_encoding(x).to(x.dtype)
180
+
181
+ return {"vision_features": x, "vision_pos_enc": [pos]}
segment-anything-2/sam2/modeling/position_encoding.py ADDED
@@ -0,0 +1,221 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import math
8
+ from typing import Any, Optional, Tuple
9
+
10
+ import numpy as np
11
+
12
+ import torch
13
+ from torch import nn
14
+
15
+
16
+ class PositionEmbeddingSine(nn.Module):
17
+ """
18
+ This is a more standard version of the position embedding, very similar to the one
19
+ used by the Attention Is All You Need paper, generalized to work on images.
20
+ """
21
+
22
+ def __init__(
23
+ self,
24
+ num_pos_feats,
25
+ temperature: int = 10000,
26
+ normalize: bool = True,
27
+ scale: Optional[float] = None,
28
+ ):
29
+ super().__init__()
30
+ assert num_pos_feats % 2 == 0, "Expecting even model width"
31
+ self.num_pos_feats = num_pos_feats // 2
32
+ self.temperature = temperature
33
+ self.normalize = normalize
34
+ if scale is not None and normalize is False:
35
+ raise ValueError("normalize should be True if scale is passed")
36
+ if scale is None:
37
+ scale = 2 * math.pi
38
+ self.scale = scale
39
+
40
+ self.cache = {}
41
+
42
+ def _encode_xy(self, x, y):
43
+ # The positions are expected to be normalized
44
+ assert len(x) == len(y) and x.ndim == y.ndim == 1
45
+ x_embed = x * self.scale
46
+ y_embed = y * self.scale
47
+
48
+ dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
49
+ dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
50
+
51
+ pos_x = x_embed[:, None] / dim_t
52
+ pos_y = y_embed[:, None] / dim_t
53
+ pos_x = torch.stack(
54
+ (pos_x[:, 0::2].sin(), pos_x[:, 1::2].cos()), dim=2
55
+ ).flatten(1)
56
+ pos_y = torch.stack(
57
+ (pos_y[:, 0::2].sin(), pos_y[:, 1::2].cos()), dim=2
58
+ ).flatten(1)
59
+ return pos_x, pos_y
60
+
61
+ @torch.no_grad()
62
+ def encode_boxes(self, x, y, w, h):
63
+ pos_x, pos_y = self._encode_xy(x, y)
64
+ pos = torch.cat((pos_y, pos_x, h[:, None], w[:, None]), dim=1)
65
+ return pos
66
+
67
+ encode = encode_boxes # Backwards compatibility
68
+
69
+ @torch.no_grad()
70
+ def encode_points(self, x, y, labels):
71
+ (bx, nx), (by, ny), (bl, nl) = x.shape, y.shape, labels.shape
72
+ assert bx == by and nx == ny and bx == bl and nx == nl
73
+ pos_x, pos_y = self._encode_xy(x.flatten(), y.flatten())
74
+ pos_x, pos_y = pos_x.reshape(bx, nx, -1), pos_y.reshape(by, ny, -1)
75
+ pos = torch.cat((pos_y, pos_x, labels[:, :, None]), dim=2)
76
+ return pos
77
+
78
+ @torch.no_grad()
79
+ def forward(self, x: torch.Tensor):
80
+ cache_key = (x.shape[-2], x.shape[-1])
81
+ if cache_key in self.cache:
82
+ return self.cache[cache_key][None].repeat(x.shape[0], 1, 1, 1)
83
+ y_embed = (
84
+ torch.arange(1, x.shape[-2] + 1, dtype=torch.float32, device=x.device)
85
+ .view(1, -1, 1)
86
+ .repeat(x.shape[0], 1, x.shape[-1])
87
+ )
88
+ x_embed = (
89
+ torch.arange(1, x.shape[-1] + 1, dtype=torch.float32, device=x.device)
90
+ .view(1, 1, -1)
91
+ .repeat(x.shape[0], x.shape[-2], 1)
92
+ )
93
+
94
+ if self.normalize:
95
+ eps = 1e-6
96
+ y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
97
+ x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
98
+
99
+ dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
100
+ dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
101
+
102
+ pos_x = x_embed[:, :, :, None] / dim_t
103
+ pos_y = y_embed[:, :, :, None] / dim_t
104
+ pos_x = torch.stack(
105
+ (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
106
+ ).flatten(3)
107
+ pos_y = torch.stack(
108
+ (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
109
+ ).flatten(3)
110
+ pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
111
+ self.cache[cache_key] = pos[0]
112
+ return pos
113
+
114
+
115
+ class PositionEmbeddingRandom(nn.Module):
116
+ """
117
+ Positional encoding using random spatial frequencies.
118
+ """
119
+
120
+ def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
121
+ super().__init__()
122
+ if scale is None or scale <= 0.0:
123
+ scale = 1.0
124
+ self.register_buffer(
125
+ "positional_encoding_gaussian_matrix",
126
+ scale * torch.randn((2, num_pos_feats)),
127
+ )
128
+
129
+ def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
130
+ """Positionally encode points that are normalized to [0,1]."""
131
+ # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
132
+ coords = 2 * coords - 1
133
+ coords = coords @ self.positional_encoding_gaussian_matrix
134
+ coords = 2 * np.pi * coords
135
+ # outputs d_1 x ... x d_n x C shape
136
+ return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
137
+
138
+ def forward(self, size: Tuple[int, int]) -> torch.Tensor:
139
+ """Generate positional encoding for a grid of the specified size."""
140
+ h, w = size
141
+ device: Any = self.positional_encoding_gaussian_matrix.device
142
+ grid = torch.ones((h, w), device=device, dtype=torch.float32)
143
+ y_embed = grid.cumsum(dim=0) - 0.5
144
+ x_embed = grid.cumsum(dim=1) - 0.5
145
+ y_embed = y_embed / h
146
+ x_embed = x_embed / w
147
+
148
+ pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
149
+ return pe.permute(2, 0, 1) # C x H x W
150
+
151
+ def forward_with_coords(
152
+ self, coords_input: torch.Tensor, image_size: Tuple[int, int]
153
+ ) -> torch.Tensor:
154
+ """Positionally encode points that are not normalized to [0,1]."""
155
+ coords = coords_input.clone()
156
+ coords[:, :, 0] = coords[:, :, 0] / image_size[1]
157
+ coords[:, :, 1] = coords[:, :, 1] / image_size[0]
158
+ return self._pe_encoding(coords.to(torch.float)) # B x N x C
159
+
160
+
161
+ # Rotary Positional Encoding, adapted from:
162
+ # 1. https://github.com/meta-llama/codellama/blob/main/llama/model.py
163
+ # 2. https://github.com/naver-ai/rope-vit
164
+ # 3. https://github.com/lucidrains/rotary-embedding-torch
165
+
166
+
167
+ def init_t_xy(end_x: int, end_y: int):
168
+ t = torch.arange(end_x * end_y, dtype=torch.float32)
169
+ t_x = (t % end_x).float()
170
+ t_y = torch.div(t, end_x, rounding_mode="floor").float()
171
+ return t_x, t_y
172
+
173
+
174
+ def compute_axial_cis(dim: int, end_x: int, end_y: int, theta: float = 10000.0):
175
+ freqs_x = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
176
+ freqs_y = 1.0 / (theta ** (torch.arange(0, dim, 4)[: (dim // 4)].float() / dim))
177
+
178
+ t_x, t_y = init_t_xy(end_x, end_y)
179
+ freqs_x = torch.outer(t_x, freqs_x)
180
+ freqs_y = torch.outer(t_y, freqs_y)
181
+ freqs_cis_x = torch.polar(torch.ones_like(freqs_x), freqs_x)
182
+ freqs_cis_y = torch.polar(torch.ones_like(freqs_y), freqs_y)
183
+ return torch.cat([freqs_cis_x, freqs_cis_y], dim=-1)
184
+
185
+
186
+ def reshape_for_broadcast(freqs_cis: torch.Tensor, x: torch.Tensor):
187
+ ndim = x.ndim
188
+ assert 0 <= 1 < ndim
189
+ assert freqs_cis.shape == (x.shape[-2], x.shape[-1])
190
+ shape = [d if i >= ndim - 2 else 1 for i, d in enumerate(x.shape)]
191
+ return freqs_cis.view(*shape)
192
+
193
+
194
+ def apply_rotary_enc(
195
+ xq: torch.Tensor,
196
+ xk: torch.Tensor,
197
+ freqs_cis: torch.Tensor,
198
+ repeat_freqs_k: bool = False,
199
+ ):
200
+ xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
201
+ xk_ = (
202
+ torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
203
+ if xk.shape[-2] != 0
204
+ else None
205
+ )
206
+ freqs_cis = reshape_for_broadcast(freqs_cis, xq_)
207
+ xq_out = torch.view_as_real(xq_ * freqs_cis).flatten(3)
208
+ if xk_ is None:
209
+ # no keys to rotate, due to dropout
210
+ return xq_out.type_as(xq).to(xq.device), xk
211
+ # repeat freqs along seq_len dim to match k seq_len
212
+ if repeat_freqs_k:
213
+ r = xk_.shape[-2] // xq_.shape[-2]
214
+ if freqs_cis.is_cuda:
215
+ freqs_cis = freqs_cis.repeat(*([1] * (freqs_cis.ndim - 2)), r, 1)
216
+ else:
217
+ # torch.repeat on complex numbers may not be supported on non-CUDA devices
218
+ # (freqs_cis has 4 dims and we repeat on dim 2) so we use expand + flatten
219
+ freqs_cis = freqs_cis.unsqueeze(2).expand(-1, -1, r, -1, -1).flatten(2, 3)
220
+ xk_out = torch.view_as_real(xk_ * freqs_cis).flatten(3)
221
+ return xq_out.type_as(xq).to(xq.device), xk_out.type_as(xk).to(xk.device)
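A small sketch of the sine position embedding; the output is computed from the spatial shape of the input and cached per (H, W).

    import torch
    from sam2.modeling.position_encoding import PositionEmbeddingSine

    pe = PositionEmbeddingSine(num_pos_feats=256)
    feat = torch.randn(2, 256, 32, 32)
    print(pe(feat).shape)  # torch.Size([2, 256, 32, 32]); repeated 32x32 calls reuse the cache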
segment-anything-2/sam2/modeling/sam/__init__.py ADDED
@@ -0,0 +1,5 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ # All rights reserved.
+
+ # This source code is licensed under the license found in the
+ # LICENSE file in the root directory of this source tree.
segment-anything-2/sam2/modeling/sam/mask_decoder.py ADDED
@@ -0,0 +1,295 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from typing import List, Optional, Tuple, Type
8
+
9
+ import torch
10
+ from torch import nn
11
+
12
+ from sam2.modeling.sam2_utils import LayerNorm2d, MLP
13
+
14
+
15
+ class MaskDecoder(nn.Module):
16
+ def __init__(
17
+ self,
18
+ *,
19
+ transformer_dim: int,
20
+ transformer: nn.Module,
21
+ num_multimask_outputs: int = 3,
22
+ activation: Type[nn.Module] = nn.GELU,
23
+ iou_head_depth: int = 3,
24
+ iou_head_hidden_dim: int = 256,
25
+ use_high_res_features: bool = False,
26
+ iou_prediction_use_sigmoid=False,
27
+ dynamic_multimask_via_stability=False,
28
+ dynamic_multimask_stability_delta=0.05,
29
+ dynamic_multimask_stability_thresh=0.98,
30
+ pred_obj_scores: bool = False,
31
+ pred_obj_scores_mlp: bool = False,
32
+ use_multimask_token_for_obj_ptr: bool = False,
33
+ ) -> None:
34
+ """
35
+ Predicts masks given an image and prompt embeddings, using a
36
+ transformer architecture.
37
+
38
+ Arguments:
39
+ transformer_dim (int): the channel dimension of the transformer
40
+ transformer (nn.Module): the transformer used to predict masks
41
+ num_multimask_outputs (int): the number of masks to predict
42
+ when disambiguating masks
43
+ activation (nn.Module): the type of activation to use when
44
+ upscaling masks
45
+ iou_head_depth (int): the depth of the MLP used to predict
46
+ mask quality
47
+ iou_head_hidden_dim (int): the hidden dimension of the MLP
48
+ used to predict mask quality
49
+ """
50
+ super().__init__()
51
+ self.transformer_dim = transformer_dim
52
+ self.transformer = transformer
53
+
54
+ self.num_multimask_outputs = num_multimask_outputs
55
+
56
+ self.iou_token = nn.Embedding(1, transformer_dim)
57
+ self.num_mask_tokens = num_multimask_outputs + 1
58
+ self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
59
+
60
+ self.pred_obj_scores = pred_obj_scores
61
+ if self.pred_obj_scores:
62
+ self.obj_score_token = nn.Embedding(1, transformer_dim)
63
+ self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr
64
+
65
+ self.output_upscaling = nn.Sequential(
66
+ nn.ConvTranspose2d(
67
+ transformer_dim, transformer_dim // 4, kernel_size=2, stride=2
68
+ ),
69
+ LayerNorm2d(transformer_dim // 4),
70
+ activation(),
71
+ nn.ConvTranspose2d(
72
+ transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2
73
+ ),
74
+ activation(),
75
+ )
76
+ self.use_high_res_features = use_high_res_features
77
+ if use_high_res_features:
78
+ self.conv_s0 = nn.Conv2d(
79
+ transformer_dim, transformer_dim // 8, kernel_size=1, stride=1
80
+ )
81
+ self.conv_s1 = nn.Conv2d(
82
+ transformer_dim, transformer_dim // 4, kernel_size=1, stride=1
83
+ )
84
+
85
+ self.output_hypernetworks_mlps = nn.ModuleList(
86
+ [
87
+ MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
88
+ for i in range(self.num_mask_tokens)
89
+ ]
90
+ )
91
+
92
+ self.iou_prediction_head = MLP(
93
+ transformer_dim,
94
+ iou_head_hidden_dim,
95
+ self.num_mask_tokens,
96
+ iou_head_depth,
97
+ sigmoid_output=iou_prediction_use_sigmoid,
98
+ )
99
+ if self.pred_obj_scores:
100
+ self.pred_obj_score_head = nn.Linear(transformer_dim, 1)
101
+ if pred_obj_scores_mlp:
102
+ self.pred_obj_score_head = MLP(transformer_dim, transformer_dim, 1, 3)
103
+
104
+ # When outputting a single mask, optionally we can dynamically fall back to the best
105
+ # multimask output token if the single mask output token gives low stability scores.
106
+ self.dynamic_multimask_via_stability = dynamic_multimask_via_stability
107
+ self.dynamic_multimask_stability_delta = dynamic_multimask_stability_delta
108
+ self.dynamic_multimask_stability_thresh = dynamic_multimask_stability_thresh
109
+
110
+ def forward(
111
+ self,
112
+ image_embeddings: torch.Tensor,
113
+ image_pe: torch.Tensor,
114
+ sparse_prompt_embeddings: torch.Tensor,
115
+ dense_prompt_embeddings: torch.Tensor,
116
+ multimask_output: bool,
117
+ repeat_image: bool,
118
+ high_res_features: Optional[List[torch.Tensor]] = None,
119
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
120
+ """
121
+ Predict masks given image and prompt embeddings.
122
+
123
+ Arguments:
124
+ image_embeddings (torch.Tensor): the embeddings from the image encoder
125
+ image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
126
+ sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
127
+ dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
128
+ multimask_output (bool): Whether to return multiple masks or a single
129
+ mask.
130
+
131
+ Returns:
132
+ torch.Tensor: batched predicted masks
133
+ torch.Tensor: batched predictions of mask quality
134
+ torch.Tensor: batched SAM token for mask output
135
+ """
136
+ masks, iou_pred, mask_tokens_out, object_score_logits = self.predict_masks(
137
+ image_embeddings=image_embeddings,
138
+ image_pe=image_pe,
139
+ sparse_prompt_embeddings=sparse_prompt_embeddings,
140
+ dense_prompt_embeddings=dense_prompt_embeddings,
141
+ repeat_image=repeat_image,
142
+ high_res_features=high_res_features,
143
+ )
144
+
145
+ # Select the correct mask or masks for output
146
+ if multimask_output:
147
+ masks = masks[:, 1:, :, :]
148
+ iou_pred = iou_pred[:, 1:]
149
+ elif self.dynamic_multimask_via_stability and not self.training:
150
+ masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred)
151
+ else:
152
+ masks = masks[:, 0:1, :, :]
153
+ iou_pred = iou_pred[:, 0:1]
154
+
155
+ if multimask_output and self.use_multimask_token_for_obj_ptr:
156
+ sam_tokens_out = mask_tokens_out[:, 1:] # [b, 3, c] shape
157
+ else:
158
+ # Take the mask output token. Here we *always* use the token for single mask output.
159
+ # At test time, even if we track after 1-click (and using multimask_output=True),
160
+ # we still take the single mask token here. The rationale is that we always track
161
+ # after multiple clicks during training, so the past tokens seen during training
162
+ # are always the single mask token (and we'll let it be the object-memory token).
163
+ sam_tokens_out = mask_tokens_out[:, 0:1] # [b, 1, c] shape
164
+
165
+ # Prepare output
166
+ return masks, iou_pred, sam_tokens_out, object_score_logits
167
+
168
+ def predict_masks(
169
+ self,
170
+ image_embeddings: torch.Tensor,
171
+ image_pe: torch.Tensor,
172
+ sparse_prompt_embeddings: torch.Tensor,
173
+ dense_prompt_embeddings: torch.Tensor,
174
+ repeat_image: bool,
175
+ high_res_features: Optional[List[torch.Tensor]] = None,
176
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
177
+ """Predicts masks. See 'forward' for more details."""
178
+ # Concatenate output tokens
179
+ s = 0
180
+ if self.pred_obj_scores:
181
+ output_tokens = torch.cat(
182
+ [
183
+ self.obj_score_token.weight,
184
+ self.iou_token.weight,
185
+ self.mask_tokens.weight,
186
+ ],
187
+ dim=0,
188
+ )
189
+ s = 1
190
+ else:
191
+ output_tokens = torch.cat(
192
+ [self.iou_token.weight, self.mask_tokens.weight], dim=0
193
+ )
194
+ output_tokens = output_tokens.unsqueeze(0).expand(
195
+ sparse_prompt_embeddings.size(0), -1, -1
196
+ )
197
+ tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
198
+
199
+ # Expand per-image data in batch direction to be per-mask
200
+ if repeat_image:
201
+ src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
202
+ else:
203
+ assert image_embeddings.shape[0] == tokens.shape[0]
204
+ src = image_embeddings
205
+ src = src + dense_prompt_embeddings
206
+ assert (
207
+ image_pe.size(0) == 1
208
+ ), "image_pe should have size 1 in batch dim (from `get_dense_pe()`)"
209
+ pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
210
+ b, c, h, w = src.shape
211
+
212
+ # Run the transformer
213
+ hs, src = self.transformer(src, pos_src, tokens)
214
+ iou_token_out = hs[:, s, :]
215
+ mask_tokens_out = hs[:, s + 1 : (s + 1 + self.num_mask_tokens), :]
216
+
217
+ # Upscale mask embeddings and predict masks using the mask tokens
218
+ src = src.transpose(1, 2).view(b, c, h, w)
219
+ if not self.use_high_res_features:
220
+ upscaled_embedding = self.output_upscaling(src)
221
+ else:
222
+ dc1, ln1, act1, dc2, act2 = self.output_upscaling
223
+ feat_s0, feat_s1 = high_res_features
224
+ upscaled_embedding = act1(ln1(dc1(src) + feat_s1))
225
+ upscaled_embedding = act2(dc2(upscaled_embedding) + feat_s0)
226
+
227
+ hyper_in_list: List[torch.Tensor] = []
228
+ for i in range(self.num_mask_tokens):
229
+ hyper_in_list.append(
230
+ self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :])
231
+ )
232
+ hyper_in = torch.stack(hyper_in_list, dim=1)
233
+ b, c, h, w = upscaled_embedding.shape
234
+ masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
235
+
236
+ # Generate mask quality predictions
237
+ iou_pred = self.iou_prediction_head(iou_token_out)
238
+ if self.pred_obj_scores:
239
+ assert s == 1
240
+ object_score_logits = self.pred_obj_score_head(hs[:, 0, :])
241
+ else:
242
+ # Obj scores logits - default to 10.0, i.e. assuming the object is present, sigmoid(10)=1
243
+ object_score_logits = 10.0 * iou_pred.new_ones(iou_pred.shape[0], 1)
244
+
245
+ return masks, iou_pred, mask_tokens_out, object_score_logits
246
+
247
+ def _get_stability_scores(self, mask_logits):
248
+ """
249
+ Compute stability scores of the mask logits based on the IoU between upper and
250
+ lower thresholds, similar to https://github.com/fairinternal/onevision/pull/568.
251
+ """
252
+ mask_logits = mask_logits.flatten(-2)
253
+ stability_delta = self.dynamic_multimask_stability_delta
254
+ area_i = torch.sum(mask_logits > stability_delta, dim=-1).float()
255
+ area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float()
256
+ stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0)
257
+ return stability_scores
258
+
259
+ def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
260
+ """
261
+ When outputting a single mask, if the stability score from the current single-mask
262
+ output (based on output token 0) falls below a threshold, we instead select from
263
+ multi-mask outputs (based on output token 1~3) the mask with the highest predicted
264
+ IoU score. This is intended to ensure a valid mask for both clicking and tracking.
265
+ """
266
+ # The best mask from multimask output tokens (1~3)
267
+ multimask_logits = all_mask_logits[:, 1:, :, :]
268
+ multimask_iou_scores = all_iou_scores[:, 1:]
269
+ best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1)
270
+ batch_inds = torch.arange(
271
+ multimask_iou_scores.size(0), device=all_iou_scores.device
272
+ )
273
+ best_multimask_logits = multimask_logits[batch_inds, best_scores_inds]
274
+ best_multimask_logits = best_multimask_logits.unsqueeze(1)
275
+ best_multimask_iou_scores = multimask_iou_scores[batch_inds, best_scores_inds]
276
+ best_multimask_iou_scores = best_multimask_iou_scores.unsqueeze(1)
277
+
278
+ # The mask from singlemask output token 0 and its stability score
279
+ singlemask_logits = all_mask_logits[:, 0:1, :, :]
280
+ singlemask_iou_scores = all_iou_scores[:, 0:1]
281
+ stability_scores = self._get_stability_scores(singlemask_logits)
282
+ is_stable = stability_scores >= self.dynamic_multimask_stability_thresh
283
+
284
+ # Dynamically fall back to best multimask output upon low stability scores.
285
+ mask_logits_out = torch.where(
286
+ is_stable[..., None, None].expand_as(singlemask_logits),
287
+ singlemask_logits,
288
+ best_multimask_logits,
289
+ )
290
+ iou_scores_out = torch.where(
291
+ is_stable.expand_as(singlemask_iou_scores),
292
+ singlemask_iou_scores,
293
+ best_multimask_iou_scores,
294
+ )
295
+ return mask_logits_out, iou_scores_out
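Editor's note: an illustrative sketch (not part of this commit) of the stability-score fallback implemented by `_get_stability_scores` / `_dynamic_multimask_via_stability` above. The single-mask output (token 0) is kept only when the IoU between its mask thresholded at +delta and at -delta is high enough; otherwise the multimask output (tokens 1~3) with the highest predicted IoU is used instead. The batch size, spatial size, delta, and the 0.98 threshold below are assumed values chosen for demonstration, not values fixed by this file.

import torch

def stability_scores(mask_logits, delta=0.05):
    # IoU between the mask thresholded at +delta and the mask thresholded at -delta
    flat = mask_logits.flatten(-2)
    area_i = (flat > delta).sum(dim=-1).float()
    area_u = (flat > -delta).sum(dim=-1).float()
    return torch.where(area_u > 0, area_i / area_u, torch.ones_like(area_u))

logits = torch.randn(2, 4, 64, 64)   # [B, 4, H, W]: token 0 = single-mask output, tokens 1~3 = multimask
iou = torch.rand(2, 4)               # predicted IoU for each of the 4 masks
stable = stability_scores(logits[:, 0:1]).squeeze(1) >= 0.98   # [B] bool, per-element stability check
best_multi = iou[:, 1:].argmax(dim=-1) + 1                     # best of tokens 1~3
chosen = torch.where(stable, torch.zeros_like(best_multi), best_multi)
print(chosen)   # per batch element: 0 if the single-mask output is kept, else the fallback token index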
segment-anything-2/sam2/modeling/sam/prompt_encoder.py ADDED
@@ -0,0 +1,182 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ from typing import Optional, Tuple, Type
8
+
9
+ import torch
10
+ from torch import nn
11
+
12
+ from sam2.modeling.position_encoding import PositionEmbeddingRandom
13
+
14
+ from sam2.modeling.sam2_utils import LayerNorm2d
15
+
16
+
17
+ class PromptEncoder(nn.Module):
18
+ def __init__(
19
+ self,
20
+ embed_dim: int,
21
+ image_embedding_size: Tuple[int, int],
22
+ input_image_size: Tuple[int, int],
23
+ mask_in_chans: int,
24
+ activation: Type[nn.Module] = nn.GELU,
25
+ ) -> None:
26
+ """
27
+ Encodes prompts for input to SAM's mask decoder.
28
+
29
+ Arguments:
30
+ embed_dim (int): The prompts' embedding dimension
31
+ image_embedding_size (tuple(int, int)): The spatial size of the
32
+ image embedding, as (H, W).
33
+ input_image_size (int): The padded size of the image as input
34
+ to the image encoder, as (H, W).
35
+ mask_in_chans (int): The number of hidden channels used for
36
+ encoding input masks.
37
+ activation (nn.Module): The activation to use when encoding
38
+ input masks.
39
+ """
40
+ super().__init__()
41
+ self.embed_dim = embed_dim
42
+ self.input_image_size = input_image_size
43
+ self.image_embedding_size = image_embedding_size
44
+ self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
45
+
46
+ self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
47
+ point_embeddings = [
48
+ nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)
49
+ ]
50
+ self.point_embeddings = nn.ModuleList(point_embeddings)
51
+ self.not_a_point_embed = nn.Embedding(1, embed_dim)
52
+
53
+ self.mask_input_size = (
54
+ 4 * image_embedding_size[0],
55
+ 4 * image_embedding_size[1],
56
+ )
57
+ self.mask_downscaling = nn.Sequential(
58
+ nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
59
+ LayerNorm2d(mask_in_chans // 4),
60
+ activation(),
61
+ nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
62
+ LayerNorm2d(mask_in_chans),
63
+ activation(),
64
+ nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
65
+ )
66
+ self.no_mask_embed = nn.Embedding(1, embed_dim)
67
+
68
+ def get_dense_pe(self) -> torch.Tensor:
69
+ """
70
+ Returns the positional encoding used to encode point prompts,
71
+ applied to a dense set of points the shape of the image encoding.
72
+
73
+ Returns:
74
+ torch.Tensor: Positional encoding with shape
75
+ 1x(embed_dim)x(embedding_h)x(embedding_w)
76
+ """
77
+ return self.pe_layer(self.image_embedding_size).unsqueeze(0)
78
+
79
+ def _embed_points(
80
+ self,
81
+ points: torch.Tensor,
82
+ labels: torch.Tensor,
83
+ pad: bool,
84
+ ) -> torch.Tensor:
85
+ """Embeds point prompts."""
86
+ points = points + 0.5 # Shift to center of pixel
87
+ if pad:
88
+ padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
89
+ padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
90
+ points = torch.cat([points, padding_point], dim=1)
91
+ labels = torch.cat([labels, padding_label], dim=1)
92
+ point_embedding = self.pe_layer.forward_with_coords(
93
+ points, self.input_image_size
94
+ )
95
+ point_embedding[labels == -1] = 0.0
96
+ point_embedding[labels == -1] += self.not_a_point_embed.weight
97
+ point_embedding[labels == 0] += self.point_embeddings[0].weight
98
+ point_embedding[labels == 1] += self.point_embeddings[1].weight
99
+ point_embedding[labels == 2] += self.point_embeddings[2].weight
100
+ point_embedding[labels == 3] += self.point_embeddings[3].weight
101
+ return point_embedding
102
+
103
+ def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
104
+ """Embeds box prompts."""
105
+ boxes = boxes + 0.5 # Shift to center of pixel
106
+ coords = boxes.reshape(-1, 2, 2)
107
+ corner_embedding = self.pe_layer.forward_with_coords(
108
+ coords, self.input_image_size
109
+ )
110
+ corner_embedding[:, 0, :] += self.point_embeddings[2].weight
111
+ corner_embedding[:, 1, :] += self.point_embeddings[3].weight
112
+ return corner_embedding
113
+
114
+ def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
115
+ """Embeds mask inputs."""
116
+ mask_embedding = self.mask_downscaling(masks)
117
+ return mask_embedding
118
+
119
+ def _get_batch_size(
120
+ self,
121
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
122
+ boxes: Optional[torch.Tensor],
123
+ masks: Optional[torch.Tensor],
124
+ ) -> int:
125
+ """
126
+ Gets the batch size of the output given the batch size of the input prompts.
127
+ """
128
+ if points is not None:
129
+ return points[0].shape[0]
130
+ elif boxes is not None:
131
+ return boxes.shape[0]
132
+ elif masks is not None:
133
+ return masks.shape[0]
134
+ else:
135
+ return 1
136
+
137
+ def _get_device(self) -> torch.device:
138
+ return self.point_embeddings[0].weight.device
139
+
140
+ def forward(
141
+ self,
142
+ points: Optional[Tuple[torch.Tensor, torch.Tensor]],
143
+ boxes: Optional[torch.Tensor],
144
+ masks: Optional[torch.Tensor],
145
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
146
+ """
147
+ Embeds different types of prompts, returning both sparse and dense
148
+ embeddings.
149
+
150
+ Arguments:
151
+ points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
152
+ and labels to embed.
153
+ boxes (torch.Tensor or none): boxes to embed
154
+ masks (torch.Tensor or none): masks to embed
155
+
156
+ Returns:
157
+ torch.Tensor: sparse embeddings for the points and boxes, with shape
158
+ BxNx(embed_dim), where N is determined by the number of input points
159
+ and boxes.
160
+ torch.Tensor: dense embeddings for the masks, in the shape
161
+ Bx(embed_dim)x(embed_H)x(embed_W)
162
+ """
163
+ bs = self._get_batch_size(points, boxes, masks)
164
+ sparse_embeddings = torch.empty(
165
+ (bs, 0, self.embed_dim), device=self._get_device()
166
+ )
167
+ if points is not None:
168
+ coords, labels = points
169
+ point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
170
+ sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
171
+ if boxes is not None:
172
+ box_embeddings = self._embed_boxes(boxes)
173
+ sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
174
+
175
+ if masks is not None:
176
+ dense_embeddings = self._embed_masks(masks)
177
+ else:
178
+ dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
179
+ bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
180
+ )
181
+
182
+ return sparse_embeddings, dense_embeddings
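Editor's note: a minimal usage sketch (not part of this commit) showing how the `PromptEncoder` above turns a single positive click into sparse and dense embeddings. The 256-dim embedding, 64x64 feature grid, and 1024-px input size are assumed values for illustration of a typical SAM-style configuration, not values fixed by this file.

import torch
from sam2.modeling.sam.prompt_encoder import PromptEncoder

encoder = PromptEncoder(
    embed_dim=256,
    image_embedding_size=(64, 64),
    input_image_size=(1024, 1024),
    mask_in_chans=16,
)
coords = torch.tensor([[[512.0, 384.0]]])        # [B=1, N=1, 2] click in (x, y) pixel coordinates
labels = torch.tensor([[1]], dtype=torch.int32)  # 1 = positive click
sparse, dense = encoder(points=(coords, labels), boxes=None, masks=None)
print(sparse.shape)   # torch.Size([1, 2, 256])      -> the click plus a padding point (no box given)
print(dense.shape)    # torch.Size([1, 256, 64, 64]) -> learned no-mask embedding broadcast over the grid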
segment-anything-2/sam2/modeling/sam/transformer.py ADDED
@@ -0,0 +1,360 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import contextlib
8
+ import math
9
+ import warnings
10
+ from functools import partial
11
+ from typing import Tuple, Type
12
+
13
+ import torch
14
+ import torch.nn.functional as F
15
+ from torch import nn, Tensor
16
+
17
+ from sam2.modeling.position_encoding import apply_rotary_enc, compute_axial_cis
18
+ from sam2.modeling.sam2_utils import MLP
19
+ from sam2.utils.misc import get_sdpa_settings
20
+
21
+ warnings.simplefilter(action="ignore", category=FutureWarning)
22
+ # Check whether Flash Attention is available (and use it by default)
23
+ OLD_GPU, USE_FLASH_ATTN, MATH_KERNEL_ON = get_sdpa_settings()
24
+ # A fallback setting to allow all available kernels if Flash Attention fails
25
+ ALLOW_ALL_KERNELS = False
26
+
27
+
28
+ def sdp_kernel_context(dropout_p):
29
+ """
30
+ Get the context for the attention scaled dot-product kernel. We use Flash Attention
31
+ by default, but fall back to all available kernels if Flash Attention fails.
32
+ """
33
+ if ALLOW_ALL_KERNELS:
34
+ return contextlib.nullcontext()
35
+
36
+ return torch.backends.cuda.sdp_kernel(
37
+ enable_flash=USE_FLASH_ATTN,
38
+ # if Flash attention kernel is off, then math kernel needs to be enabled
39
+ enable_math=(OLD_GPU and dropout_p > 0.0) or MATH_KERNEL_ON,
40
+ enable_mem_efficient=OLD_GPU,
41
+ )
42
+
43
+
44
+ class TwoWayTransformer(nn.Module):
45
+ def __init__(
46
+ self,
47
+ depth: int,
48
+ embedding_dim: int,
49
+ num_heads: int,
50
+ mlp_dim: int,
51
+ activation: Type[nn.Module] = nn.ReLU,
52
+ attention_downsample_rate: int = 2,
53
+ ) -> None:
54
+ """
55
+ A transformer decoder that attends to an input image using
56
+ queries whose positional embedding is supplied.
57
+
58
+ Args:
59
+ depth (int): number of layers in the transformer
60
+ embedding_dim (int): the channel dimension for the input embeddings
61
+ num_heads (int): the number of heads for multihead attention. Must
62
+ divide embedding_dim
63
+ mlp_dim (int): the channel dimension internal to the MLP block
64
+ activation (nn.Module): the activation to use in the MLP block
65
+ """
66
+ super().__init__()
67
+ self.depth = depth
68
+ self.embedding_dim = embedding_dim
69
+ self.num_heads = num_heads
70
+ self.mlp_dim = mlp_dim
71
+ self.layers = nn.ModuleList()
72
+
73
+ for i in range(depth):
74
+ self.layers.append(
75
+ TwoWayAttentionBlock(
76
+ embedding_dim=embedding_dim,
77
+ num_heads=num_heads,
78
+ mlp_dim=mlp_dim,
79
+ activation=activation,
80
+ attention_downsample_rate=attention_downsample_rate,
81
+ skip_first_layer_pe=(i == 0),
82
+ )
83
+ )
84
+
85
+ self.final_attn_token_to_image = Attention(
86
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
87
+ )
88
+ self.norm_final_attn = nn.LayerNorm(embedding_dim)
89
+
90
+ def forward(
91
+ self,
92
+ image_embedding: Tensor,
93
+ image_pe: Tensor,
94
+ point_embedding: Tensor,
95
+ ) -> Tuple[Tensor, Tensor]:
96
+ """
97
+ Args:
98
+ image_embedding (torch.Tensor): image to attend to. Should be shape
99
+ B x embedding_dim x h x w for any h and w.
100
+ image_pe (torch.Tensor): the positional encoding to add to the image. Must
101
+ have the same shape as image_embedding.
102
+ point_embedding (torch.Tensor): the embedding to add to the query points.
103
+ Must have shape B x N_points x embedding_dim for any N_points.
104
+
105
+ Returns:
106
+ torch.Tensor: the processed point_embedding
107
+ torch.Tensor: the processed image_embedding
108
+ """
109
+ # BxCxHxW -> BxHWxC == B x N_image_tokens x C
110
+ bs, c, h, w = image_embedding.shape
111
+ image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
112
+ image_pe = image_pe.flatten(2).permute(0, 2, 1)
113
+
114
+ # Prepare queries
115
+ queries = point_embedding
116
+ keys = image_embedding
117
+
118
+ # Apply transformer blocks and final layernorm
119
+ for layer in self.layers:
120
+ queries, keys = layer(
121
+ queries=queries,
122
+ keys=keys,
123
+ query_pe=point_embedding,
124
+ key_pe=image_pe,
125
+ )
126
+
127
+ # Apply the final attention layer from the points to the image
128
+ q = queries + point_embedding
129
+ k = keys + image_pe
130
+ attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
131
+ queries = queries + attn_out
132
+ queries = self.norm_final_attn(queries)
133
+
134
+ return queries, keys
135
+
136
+
137
+ class TwoWayAttentionBlock(nn.Module):
138
+ def __init__(
139
+ self,
140
+ embedding_dim: int,
141
+ num_heads: int,
142
+ mlp_dim: int = 2048,
143
+ activation: Type[nn.Module] = nn.ReLU,
144
+ attention_downsample_rate: int = 2,
145
+ skip_first_layer_pe: bool = False,
146
+ ) -> None:
147
+ """
148
+ A transformer block with four layers: (1) self-attention of sparse
149
+ inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
150
+ block on sparse inputs, and (4) cross attention of dense inputs to sparse
151
+ inputs.
152
+
153
+ Arguments:
154
+ embedding_dim (int): the channel dimension of the embeddings
155
+ num_heads (int): the number of heads in the attention layers
156
+ mlp_dim (int): the hidden dimension of the mlp block
157
+ activation (nn.Module): the activation of the mlp block
158
+ skip_first_layer_pe (bool): skip the PE on the first layer
159
+ """
160
+ super().__init__()
161
+ self.self_attn = Attention(embedding_dim, num_heads)
162
+ self.norm1 = nn.LayerNorm(embedding_dim)
163
+
164
+ self.cross_attn_token_to_image = Attention(
165
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
166
+ )
167
+ self.norm2 = nn.LayerNorm(embedding_dim)
168
+
169
+ self.mlp = MLP(
170
+ embedding_dim, mlp_dim, embedding_dim, num_layers=2, activation=activation
171
+ )
172
+ self.norm3 = nn.LayerNorm(embedding_dim)
173
+
174
+ self.norm4 = nn.LayerNorm(embedding_dim)
175
+ self.cross_attn_image_to_token = Attention(
176
+ embedding_dim, num_heads, downsample_rate=attention_downsample_rate
177
+ )
178
+
179
+ self.skip_first_layer_pe = skip_first_layer_pe
180
+
181
+ def forward(
182
+ self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor
183
+ ) -> Tuple[Tensor, Tensor]:
184
+ # Self attention block
185
+ if self.skip_first_layer_pe:
186
+ queries = self.self_attn(q=queries, k=queries, v=queries)
187
+ else:
188
+ q = queries + query_pe
189
+ attn_out = self.self_attn(q=q, k=q, v=queries)
190
+ queries = queries + attn_out
191
+ queries = self.norm1(queries)
192
+
193
+ # Cross attention block, tokens attending to image embedding
194
+ q = queries + query_pe
195
+ k = keys + key_pe
196
+ attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
197
+ queries = queries + attn_out
198
+ queries = self.norm2(queries)
199
+
200
+ # MLP block
201
+ mlp_out = self.mlp(queries)
202
+ queries = queries + mlp_out
203
+ queries = self.norm3(queries)
204
+
205
+ # Cross attention block, image embedding attending to tokens
206
+ q = queries + query_pe
207
+ k = keys + key_pe
208
+ attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
209
+ keys = keys + attn_out
210
+ keys = self.norm4(keys)
211
+
212
+ return queries, keys
213
+
214
+
215
+ class Attention(nn.Module):
216
+ """
217
+ An attention layer that allows for downscaling the size of the embedding
218
+ after projection to queries, keys, and values.
219
+ """
220
+
221
+ def __init__(
222
+ self,
223
+ embedding_dim: int,
224
+ num_heads: int,
225
+ downsample_rate: int = 1,
226
+ dropout: float = 0.0,
227
+ kv_in_dim: int = None,
228
+ ) -> None:
229
+ super().__init__()
230
+ self.embedding_dim = embedding_dim
231
+ self.kv_in_dim = kv_in_dim if kv_in_dim is not None else embedding_dim
232
+ self.internal_dim = embedding_dim // downsample_rate
233
+ self.num_heads = num_heads
234
+ assert (
235
+ self.internal_dim % num_heads == 0
236
+ ), "num_heads must divide embedding_dim."
237
+
238
+ self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
239
+ self.k_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
240
+ self.v_proj = nn.Linear(self.kv_in_dim, self.internal_dim)
241
+ self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
242
+
243
+ self.dropout_p = dropout
244
+
245
+ def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
246
+ b, n, c = x.shape
247
+ x = x.reshape(b, n, num_heads, c // num_heads)
248
+ return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head
249
+
250
+ def _recombine_heads(self, x: Tensor) -> Tensor:
251
+ b, n_heads, n_tokens, c_per_head = x.shape
252
+ x = x.transpose(1, 2)
253
+ return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C
254
+
255
+ def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
256
+ # Input projections
257
+ q = self.q_proj(q)
258
+ k = self.k_proj(k)
259
+ v = self.v_proj(v)
260
+
261
+ # Separate into heads
262
+ q = self._separate_heads(q, self.num_heads)
263
+ k = self._separate_heads(k, self.num_heads)
264
+ v = self._separate_heads(v, self.num_heads)
265
+
266
+ dropout_p = self.dropout_p if self.training else 0.0
267
+ # Attention
268
+ try:
269
+ with sdp_kernel_context(dropout_p):
270
+ out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
271
+ except Exception as e:
272
+ # Fall back to all kernels if the Flash attention kernel fails
273
+ warnings.warn(
274
+ f"Flash Attention kernel failed due to: {e}\nFalling back to all available "
275
+ f"kernels for scaled_dot_product_attention (which may have a slower speed).",
276
+ category=UserWarning,
277
+ stacklevel=2,
278
+ )
279
+ global ALLOW_ALL_KERNELS
280
+ ALLOW_ALL_KERNELS = True
281
+ out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
282
+
283
+ out = self._recombine_heads(out)
284
+ out = self.out_proj(out)
285
+
286
+ return out
287
+
288
+
289
+ class RoPEAttention(Attention):
290
+ """Attention with rotary position encoding."""
291
+
292
+ def __init__(
293
+ self,
294
+ *args,
295
+ rope_theta=10000.0,
296
+ # whether to repeat q rope to match k length
297
+ # this is needed for cross-attention to memories
298
+ rope_k_repeat=False,
299
+ feat_sizes=(32, 32), # [w, h] for stride 16 feats at 512 resolution
300
+ **kwargs,
301
+ ):
302
+ super().__init__(*args, **kwargs)
303
+
304
+ self.compute_cis = partial(
305
+ compute_axial_cis, dim=self.internal_dim // self.num_heads, theta=rope_theta
306
+ )
307
+ freqs_cis = self.compute_cis(end_x=feat_sizes[0], end_y=feat_sizes[1])
308
+ self.freqs_cis = freqs_cis
309
+ self.rope_k_repeat = rope_k_repeat
310
+
311
+ def forward(
312
+ self, q: Tensor, k: Tensor, v: Tensor, num_k_exclude_rope: int = 0
313
+ ) -> Tensor:
314
+ # Input projections
315
+ q = self.q_proj(q)
316
+ k = self.k_proj(k)
317
+ v = self.v_proj(v)
318
+
319
+ # Separate into heads
320
+ q = self._separate_heads(q, self.num_heads)
321
+ k = self._separate_heads(k, self.num_heads)
322
+ v = self._separate_heads(v, self.num_heads)
323
+
324
+ # Apply rotary position encoding
325
+ w = h = math.sqrt(q.shape[-2])
326
+ self.freqs_cis = self.freqs_cis.to(q.device)
327
+ if self.freqs_cis.shape[0] != q.shape[-2]:
328
+ self.freqs_cis = self.compute_cis(end_x=w, end_y=h).to(q.device)
329
+ if q.shape[-2] != k.shape[-2]:
330
+ assert self.rope_k_repeat
331
+
332
+ num_k_rope = k.size(-2) - num_k_exclude_rope
333
+ q, k[:, :, :num_k_rope] = apply_rotary_enc(
334
+ q,
335
+ k[:, :, :num_k_rope],
336
+ freqs_cis=self.freqs_cis,
337
+ repeat_freqs_k=self.rope_k_repeat,
338
+ )
339
+
340
+ dropout_p = self.dropout_p if self.training else 0.0
341
+ # Attention
342
+ try:
343
+ with sdp_kernel_context(dropout_p):
344
+ out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
345
+ except Exception as e:
346
+ # Fall back to all kernels if the Flash attention kernel fails
347
+ warnings.warn(
348
+ f"Flash Attention kernel failed due to: {e}\nFalling back to all available "
349
+ f"kernels for scaled_dot_product_attention (which may have a slower speed).",
350
+ category=UserWarning,
351
+ stacklevel=2,
352
+ )
353
+ global ALLOW_ALL_KERNELS
354
+ ALLOW_ALL_KERNELS = True
355
+ out = F.scaled_dot_product_attention(q, k, v, dropout_p=dropout_p)
356
+
357
+ out = self._recombine_heads(out)
358
+ out = self.out_proj(out)
359
+
360
+ return out
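Editor's note: an illustrative shape check (not part of this commit) for the `TwoWayTransformer` above, run on random tensors. The depth=2, num_heads=8, mlp_dim=2048 hyperparameters mirror the ones later passed in by `SAM2Base._build_sam_heads`; the 256-channel, 64x64 feature map is an assumed size chosen for demonstration.

import torch
from sam2.modeling.sam.transformer import TwoWayTransformer

transformer = TwoWayTransformer(depth=2, embedding_dim=256, num_heads=8, mlp_dim=2048)
image_embedding = torch.randn(1, 256, 64, 64)   # B x C x H x W image features
image_pe = torch.randn(1, 256, 64, 64)          # dense positional encoding, same shape
point_embedding = torch.randn(1, 6, 256)        # B x N_tokens x C prompt/output tokens

queries, keys = transformer(image_embedding, image_pe, point_embedding)
print(queries.shape)   # torch.Size([1, 6, 256])     refined prompt tokens
print(keys.shape)      # torch.Size([1, 4096, 256])  refined image tokens (64 * 64 flattened)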
segment-anything-2/sam2/modeling/sam2_base.py ADDED
@@ -0,0 +1,829 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import torch
8
+ import torch.distributed
9
+ import torch.nn.functional as F
10
+
11
+ from torch.nn.init import trunc_normal_
12
+
13
+ from sam2.modeling.sam.mask_decoder import MaskDecoder
14
+ from sam2.modeling.sam.prompt_encoder import PromptEncoder
15
+ from sam2.modeling.sam.transformer import TwoWayTransformer
16
+ from sam2.modeling.sam2_utils import get_1d_sine_pe, MLP, select_closest_cond_frames
17
+
18
+ # a large negative value as a placeholder score for missing objects
19
+ NO_OBJ_SCORE = -1024.0
20
+
21
+
22
+ class SAM2Base(torch.nn.Module):
23
+ def __init__(
24
+ self,
25
+ image_encoder,
26
+ memory_attention,
27
+ memory_encoder,
28
+ num_maskmem=7, # default 1 input frame + 6 previous frames
29
+ image_size=512,
30
+ backbone_stride=16, # stride of the image backbone output
31
+ sigmoid_scale_for_mem_enc=1.0, # scale factor for mask sigmoid prob
32
+ sigmoid_bias_for_mem_enc=0.0, # bias factor for mask sigmoid prob
33
+ # During evaluation, whether to binarize the sigmoid mask logits on interacted frames with clicks
34
+ binarize_mask_from_pts_for_mem_enc=False,
35
+ use_mask_input_as_output_without_sam=False, # on frames with mask input, whether to directly output the input mask without using a SAM prompt encoder + mask decoder
36
+ # The maximum number of conditioning frames to participate in the memory attention (-1 means no limit; if there are more conditioning frames than this limit,
37
+ # we only cross-attend to the temporally closest `max_cond_frames_in_attn` conditioning frames in the encoder when tracking each frame). This gives the model
38
+ # a temporal locality when handling a large number of annotated frames (since closer frames should be more important) and also avoids GPU OOM.
39
+ max_cond_frames_in_attn=-1,
40
+ # on the first frame, whether to directly add the no-memory embedding to the image feature
41
+ # (instead of using the transformer encoder)
42
+ directly_add_no_mem_embed=False,
43
+ # whether to use high-resolution feature maps in the SAM mask decoder
44
+ use_high_res_features_in_sam=False,
45
+ # whether to output multiple (3) masks for the first click on initial conditioning frames
46
+ multimask_output_in_sam=False,
47
+ # the minimum and maximum number of clicks to use multimask_output_in_sam (only relevant when `multimask_output_in_sam=True`;
48
+ # default is 1 for both, meaning that only the first click gives multimask output; also note that a box counts as two points)
49
+ multimask_min_pt_num=1,
50
+ multimask_max_pt_num=1,
51
+ # whether to also use multimask output for tracking (not just for the first click on initial conditioning frames; only relevant when `multimask_output_in_sam=True`)
52
+ multimask_output_for_tracking=False,
53
+ # Whether to use multimask tokens for obj ptr; Only relevant when both
54
+ # use_obj_ptrs_in_encoder=True and multimask_output_for_tracking=True
55
+ use_multimask_token_for_obj_ptr: bool = False,
56
+ # whether to use sigmoid to restrict ious prediction to [0-1]
57
+ iou_prediction_use_sigmoid=False,
58
+ # The memory bank's temporal stride during evaluation (i.e. the `r` parameter in XMem and Cutie; XMem and Cutie use r=5).
59
+ # For r>1, the (self.num_maskmem - 1) non-conditioning memory frames consist of
60
+ # (self.num_maskmem - 2) nearest frames from every r-th frames, plus the last frame.
61
+ memory_temporal_stride_for_eval=1,
62
+ # if `add_all_frames_to_correct_as_cond` is True, we also append to the conditioning frame list any frame that receives a later correction click
63
+ # if `add_all_frames_to_correct_as_cond` is False, we restrict the conditioning frame list to only the initial conditioning frames
+ add_all_frames_to_correct_as_cond=False,
65
+ # whether to apply non-overlapping constraints on the object masks in the memory encoder during evaluation (to avoid/alleviate superposing masks)
66
+ non_overlap_masks_for_mem_enc=False,
67
+ # whether to cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
68
+ use_obj_ptrs_in_encoder=False,
69
+ # the maximum number of object pointers from other frames in encoder cross attention (only relevant when `use_obj_ptrs_in_encoder=True`)
70
+ max_obj_ptrs_in_encoder=16,
71
+ # whether to add temporal positional encoding to the object pointers in the encoder (only relevant when `use_obj_ptrs_in_encoder=True`)
72
+ add_tpos_enc_to_obj_ptrs=True,
73
+ # whether to add an extra linear projection layer for the temporal positional encoding in the object pointers to avoid potential interference
74
+ # with spatial positional encoding (only relevant when both `use_obj_ptrs_in_encoder=True` and `add_tpos_enc_to_obj_ptrs=True`)
75
+ proj_tpos_enc_in_obj_ptrs=False,
76
+ # whether to only attend to object pointers in the past (before the current frame) in the encoder during evaluation
77
+ # (only relevant when `use_obj_ptrs_in_encoder=True`; this might avoid pointer information too far in the future to distract the initial tracking)
78
+ only_obj_ptrs_in_the_past_for_eval=False,
79
+ # Whether to predict if there is an object in the frame
80
+ pred_obj_scores: bool = False,
81
+ # Whether to use an MLP to predict object scores
82
+ pred_obj_scores_mlp: bool = False,
83
+ # Only relevant if pred_obj_scores=True and use_obj_ptrs_in_encoder=True;
84
+ # Whether to have a fixed no obj pointer when there is no object present
85
+ # or to use it as an additive embedding with obj_ptr produced by decoder
86
+ fixed_no_obj_ptr: bool = False,
87
+ # Soft no object, i.e. mix in no_obj_ptr softly,
88
+ # hope to make recovery easier if there is a mistake and mitigate accumulation of errors
89
+ soft_no_obj_ptr: bool = False,
90
+ use_mlp_for_obj_ptr_proj: bool = False,
91
+ # extra arguments used to construct the SAM mask decoder; if not None, it should be a dict of kwargs to be passed into `MaskDecoder` class.
92
+ sam_mask_decoder_extra_args=None,
93
+ compile_image_encoder: bool = False,
94
+ ):
95
+ super().__init__()
96
+
97
+ # Part 1: the image backbone
98
+ self.image_encoder = image_encoder
99
+ # Use level 0, 1, 2 for high-res setting, or just level 2 for the default setting
100
+ self.use_high_res_features_in_sam = use_high_res_features_in_sam
101
+ self.num_feature_levels = 3 if use_high_res_features_in_sam else 1
102
+ self.use_obj_ptrs_in_encoder = use_obj_ptrs_in_encoder
103
+ self.max_obj_ptrs_in_encoder = max_obj_ptrs_in_encoder
104
+ if use_obj_ptrs_in_encoder:
105
+ # A conv layer to downsample the mask prompt to stride 4 (the same stride as
106
+ # low-res SAM mask logits) and to change its scales from 0~1 to SAM logit scale,
107
+ # so that it can be fed into the SAM mask decoder to generate a pointer.
108
+ self.mask_downsample = torch.nn.Conv2d(1, 1, kernel_size=4, stride=4)
109
+ self.add_tpos_enc_to_obj_ptrs = add_tpos_enc_to_obj_ptrs
110
+ if proj_tpos_enc_in_obj_ptrs:
111
+ assert add_tpos_enc_to_obj_ptrs # these options need to be used together
112
+ self.proj_tpos_enc_in_obj_ptrs = proj_tpos_enc_in_obj_ptrs
113
+ self.only_obj_ptrs_in_the_past_for_eval = only_obj_ptrs_in_the_past_for_eval
114
+
115
+ # Part 2: memory attention to condition current frame's visual features
116
+ # with memories (and obj ptrs) from past frames
117
+ self.memory_attention = memory_attention
118
+ self.hidden_dim = memory_attention.d_model
119
+
120
+ # Part 3: memory encoder for the previous frame's outputs
121
+ self.memory_encoder = memory_encoder
122
+ self.mem_dim = self.hidden_dim
123
+ if hasattr(self.memory_encoder, "out_proj") and hasattr(
124
+ self.memory_encoder.out_proj, "weight"
125
+ ):
126
+ # if there is compression of memories along channel dim
127
+ self.mem_dim = self.memory_encoder.out_proj.weight.shape[0]
128
+ self.num_maskmem = num_maskmem # Number of memories accessible
129
+ # Temporal encoding of the memories
130
+ self.maskmem_tpos_enc = torch.nn.Parameter(
131
+ torch.zeros(num_maskmem, 1, 1, self.mem_dim)
132
+ )
133
+ trunc_normal_(self.maskmem_tpos_enc, std=0.02)
134
+ # a single token to indicate no memory embedding from previous frames
135
+ self.no_mem_embed = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
136
+ self.no_mem_pos_enc = torch.nn.Parameter(torch.zeros(1, 1, self.hidden_dim))
137
+ trunc_normal_(self.no_mem_embed, std=0.02)
138
+ trunc_normal_(self.no_mem_pos_enc, std=0.02)
139
+ self.directly_add_no_mem_embed = directly_add_no_mem_embed
140
+ # Apply sigmoid to the output raw mask logits (to turn them from
141
+ # range (-inf, +inf) to range (0, 1)) before feeding them into the memory encoder
142
+ self.sigmoid_scale_for_mem_enc = sigmoid_scale_for_mem_enc
143
+ self.sigmoid_bias_for_mem_enc = sigmoid_bias_for_mem_enc
144
+ self.binarize_mask_from_pts_for_mem_enc = binarize_mask_from_pts_for_mem_enc
145
+ self.non_overlap_masks_for_mem_enc = non_overlap_masks_for_mem_enc
146
+ self.memory_temporal_stride_for_eval = memory_temporal_stride_for_eval
147
+ # On frames with mask input, whether to directly output the input mask without
148
+ # using a SAM prompt encoder + mask decoder
149
+ self.use_mask_input_as_output_without_sam = use_mask_input_as_output_without_sam
150
+ self.multimask_output_in_sam = multimask_output_in_sam
151
+ self.multimask_min_pt_num = multimask_min_pt_num
152
+ self.multimask_max_pt_num = multimask_max_pt_num
153
+ self.multimask_output_for_tracking = multimask_output_for_tracking
154
+ self.use_multimask_token_for_obj_ptr = use_multimask_token_for_obj_ptr
155
+ self.iou_prediction_use_sigmoid = iou_prediction_use_sigmoid
156
+
157
+ # Part 4: SAM-style prompt encoder (for both mask and point inputs)
158
+ # and SAM-style mask decoder for the final mask output
159
+ self.image_size = image_size
160
+ self.backbone_stride = backbone_stride
161
+ self.sam_mask_decoder_extra_args = sam_mask_decoder_extra_args
162
+ self.pred_obj_scores = pred_obj_scores
163
+ self.pred_obj_scores_mlp = pred_obj_scores_mlp
164
+ self.fixed_no_obj_ptr = fixed_no_obj_ptr
165
+ self.soft_no_obj_ptr = soft_no_obj_ptr
166
+ if self.fixed_no_obj_ptr:
167
+ assert self.pred_obj_scores
168
+ assert self.use_obj_ptrs_in_encoder
169
+ if self.pred_obj_scores and self.use_obj_ptrs_in_encoder:
170
+ self.no_obj_ptr = torch.nn.Parameter(torch.zeros(1, self.hidden_dim))
171
+ trunc_normal_(self.no_obj_ptr, std=0.02)
172
+ self.use_mlp_for_obj_ptr_proj = use_mlp_for_obj_ptr_proj
173
+
174
+ self._build_sam_heads()
175
+ self.add_all_frames_to_correct_as_cond = add_all_frames_to_correct_as_cond
176
+ self.max_cond_frames_in_attn = max_cond_frames_in_attn
177
+
178
+ # Model compilation
179
+ if compile_image_encoder:
180
+ # Compile the forward function (not the full module) to allow loading checkpoints.
181
+ print(
182
+ "Image encoder compilation is enabled. First forward pass will be slow."
183
+ )
184
+ self.image_encoder.forward = torch.compile(
185
+ self.image_encoder.forward,
186
+ mode="max-autotune",
187
+ fullgraph=True,
188
+ dynamic=False,
189
+ )
190
+
191
+ @property
192
+ def device(self):
193
+ return next(self.parameters()).device
194
+
195
+ def forward(self, *args, **kwargs):
196
+ raise NotImplementedError(
197
+ "Please use the corresponding methods in SAM2VideoPredictor for inference."
198
+ "See notebooks/video_predictor_example.ipynb for an example."
199
+ )
200
+
201
+ def _build_sam_heads(self):
202
+ """Build SAM-style prompt encoder and mask decoder."""
203
+ self.sam_prompt_embed_dim = self.hidden_dim
204
+ self.sam_image_embedding_size = self.image_size // self.backbone_stride
205
+
206
+ # build PromptEncoder and MaskDecoder from SAM
207
+ # (their hyperparameters like `mask_in_chans=16` are from SAM code)
208
+ self.sam_prompt_encoder = PromptEncoder(
209
+ embed_dim=self.sam_prompt_embed_dim,
210
+ image_embedding_size=(
211
+ self.sam_image_embedding_size,
212
+ self.sam_image_embedding_size,
213
+ ),
214
+ input_image_size=(self.image_size, self.image_size),
215
+ mask_in_chans=16,
216
+ )
217
+ self.sam_mask_decoder = MaskDecoder(
218
+ num_multimask_outputs=3,
219
+ transformer=TwoWayTransformer(
220
+ depth=2,
221
+ embedding_dim=self.sam_prompt_embed_dim,
222
+ mlp_dim=2048,
223
+ num_heads=8,
224
+ ),
225
+ transformer_dim=self.sam_prompt_embed_dim,
226
+ iou_head_depth=3,
227
+ iou_head_hidden_dim=256,
228
+ use_high_res_features=self.use_high_res_features_in_sam,
229
+ iou_prediction_use_sigmoid=self.iou_prediction_use_sigmoid,
230
+ pred_obj_scores=self.pred_obj_scores,
231
+ pred_obj_scores_mlp=self.pred_obj_scores_mlp,
232
+ use_multimask_token_for_obj_ptr=self.use_multimask_token_for_obj_ptr,
233
+ **(self.sam_mask_decoder_extra_args or {}),
234
+ )
235
+ if self.use_obj_ptrs_in_encoder:
236
+ # a linear projection on SAM output tokens to turn them into object pointers
237
+ self.obj_ptr_proj = torch.nn.Linear(self.hidden_dim, self.hidden_dim)
238
+ if self.use_mlp_for_obj_ptr_proj:
239
+ self.obj_ptr_proj = MLP(
240
+ self.hidden_dim, self.hidden_dim, self.hidden_dim, 3
241
+ )
242
+ else:
243
+ self.obj_ptr_proj = torch.nn.Identity()
244
+ if self.proj_tpos_enc_in_obj_ptrs:
245
+ # a linear projection on temporal positional encoding in object pointers to
246
+ # avoid potential interference with spatial positional encoding
247
+ self.obj_ptr_tpos_proj = torch.nn.Linear(self.hidden_dim, self.mem_dim)
248
+ else:
249
+ self.obj_ptr_tpos_proj = torch.nn.Identity()
250
+
251
+ def _forward_sam_heads(
252
+ self,
253
+ backbone_features,
254
+ point_inputs=None,
255
+ mask_inputs=None,
256
+ high_res_features=None,
257
+ multimask_output=False,
258
+ ):
259
+ """
260
+ Forward SAM prompt encoders and mask heads.
261
+
262
+ Inputs:
263
+ - backbone_features: image features of [B, C, H, W] shape
264
+ - point_inputs: a dictionary with "point_coords" and "point_labels", where
265
+ 1) "point_coords" has [B, P, 2] shape and float32 dtype and contains the
266
+ absolute pixel-unit coordinate in (x, y) format of the P input points
267
+ 2) "point_labels" has shape [B, P] and int32 dtype, where 1 means
268
+ positive clicks, 0 means negative clicks, and -1 means padding
269
+ - mask_inputs: a mask of [B, 1, H*16, W*16] shape, float or bool, with the
270
+ same spatial size as the image.
271
+ - high_res_features: either 1) None or 2) a list of length 2 containing
+ two feature maps of [B, C, 4*H, 4*W] and [B, C, 2*H, 2*W] shapes respectively,
273
+ which will be used as high-resolution feature maps for SAM decoder.
274
+ - multimask_output: if it's True, we output 3 candidate masks and their 3
275
+ corresponding IoU estimates, and if it's False, we output only 1 mask and
276
+ its corresponding IoU estimate.
277
+
278
+ Outputs:
279
+ - low_res_multimasks: [B, M, H*4, W*4] shape (where M = 3 if
280
+ `multimask_output=True` and M = 1 if `multimask_output=False`), the SAM
281
+ output mask logits (before sigmoid) for the low-resolution masks, with 4x
282
+ the resolution (1/4 stride) of the input backbone_features.
283
+ - high_res_multimasks: [B, M, H*16, W*16] shape (where M = 3
284
+ if `multimask_output=True` and M = 1 if `multimask_output=False`),
285
+ upsampled from the low-resolution masks, with the same size as the image
+ (stride is 1 pixel).
287
+ - ious: [B, M] shape (where M = 3 if `multimask_output=True` and M = 1
+ if `multimask_output=False`), the estimated IoU of each output mask.
289
+ - low_res_masks: [B, 1, H*4, W*4] shape, the best mask in `low_res_multimasks`.
290
+ If `multimask_output=True`, it's the mask with the highest IoU estimate.
291
+ If `multimask_output=False`, it's the same as `low_res_multimasks`.
292
+ - high_res_masks: [B, 1, H*16, W*16] shape, the best mask in `high_res_multimasks`.
293
+ If `multimask_output=True`, it's the mask with the highest IoU estimate.
294
+ If `multimask_output=False`, it's the same as `high_res_multimasks`.
295
+ - obj_ptr: [B, C] shape, the object pointer vector for the output mask, extracted
296
+ based on the output token from the SAM mask decoder.
297
+ """
298
+ B = backbone_features.size(0)
299
+ device = backbone_features.device
300
+ assert backbone_features.size(1) == self.sam_prompt_embed_dim
301
+ assert backbone_features.size(2) == self.sam_image_embedding_size
302
+ assert backbone_features.size(3) == self.sam_image_embedding_size
303
+
304
+ # a) Handle point prompts
305
+ if point_inputs is not None:
306
+ sam_point_coords = point_inputs["point_coords"]
307
+ sam_point_labels = point_inputs["point_labels"]
308
+ assert sam_point_coords.size(0) == B and sam_point_labels.size(0) == B
309
+ else:
310
+ # If no points are provided, pad with an empty point (with label -1)
+ sam_point_coords = torch.zeros(B, 1, 2, device=device)
312
+ sam_point_labels = -torch.ones(B, 1, dtype=torch.int32, device=device)
313
+
314
+ # b) Handle mask prompts
315
+ if mask_inputs is not None:
316
+ # If mask_inputs is provided, downsize it into low-res mask input if needed
317
+ # and feed it as a dense mask prompt into the SAM mask encoder
318
+ assert len(mask_inputs.shape) == 4 and mask_inputs.shape[:2] == (B, 1)
319
+ if mask_inputs.shape[-2:] != self.sam_prompt_encoder.mask_input_size:
320
+ sam_mask_prompt = F.interpolate(
321
+ mask_inputs.float(),
322
+ size=self.sam_prompt_encoder.mask_input_size,
323
+ align_corners=False,
324
+ mode="bilinear",
325
+ antialias=True, # use antialias for downsampling
326
+ )
327
+ else:
328
+ sam_mask_prompt = mask_inputs
329
+ else:
330
+ # Otherwise, simply feed None (and SAM's prompt encoder will add
331
+ # a learned `no_mask_embed` to indicate no mask input in this case).
332
+ sam_mask_prompt = None
333
+
334
+ sparse_embeddings, dense_embeddings = self.sam_prompt_encoder(
335
+ points=(sam_point_coords, sam_point_labels),
336
+ boxes=None,
337
+ masks=sam_mask_prompt,
338
+ )
339
+ (
340
+ low_res_multimasks,
341
+ ious,
342
+ sam_output_tokens,
343
+ object_score_logits,
344
+ ) = self.sam_mask_decoder(
345
+ image_embeddings=backbone_features,
346
+ image_pe=self.sam_prompt_encoder.get_dense_pe(),
347
+ sparse_prompt_embeddings=sparse_embeddings,
348
+ dense_prompt_embeddings=dense_embeddings,
349
+ multimask_output=multimask_output,
350
+ repeat_image=False, # the image is already batched
351
+ high_res_features=high_res_features,
352
+ )
353
+ if self.pred_obj_scores:
354
+ is_obj_appearing = object_score_logits > 0
355
+
356
+ # Mask used for spatial memories is always a *hard* choice between obj and no obj,
357
+ # consistent with the actual mask prediction
358
+ low_res_multimasks = torch.where(
359
+ is_obj_appearing[:, None, None],
360
+ low_res_multimasks,
361
+ NO_OBJ_SCORE,
362
+ )
363
+
364
+ # convert masks from possibly bfloat16 (or float16) to float32
365
+ # (older PyTorch versions before 2.1 don't support `interpolate` on bf16)
366
+ low_res_multimasks = low_res_multimasks.float()
367
+ high_res_multimasks = F.interpolate(
368
+ low_res_multimasks,
369
+ size=(self.image_size, self.image_size),
370
+ mode="bilinear",
371
+ align_corners=False,
372
+ )
373
+
374
+ sam_output_token = sam_output_tokens[:, 0]
375
+ if multimask_output:
376
+ # take the best mask prediction (with the highest IoU estimation)
377
+ best_iou_inds = torch.argmax(ious, dim=-1)
378
+ batch_inds = torch.arange(B, device=device)
379
+ low_res_masks = low_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
380
+ high_res_masks = high_res_multimasks[batch_inds, best_iou_inds].unsqueeze(1)
381
+ if sam_output_tokens.size(1) > 1:
382
+ sam_output_token = sam_output_tokens[batch_inds, best_iou_inds]
383
+ else:
384
+ low_res_masks, high_res_masks = low_res_multimasks, high_res_multimasks
385
+
386
+ # Extract object pointer from the SAM output token (with occlusion handling)
387
+ obj_ptr = self.obj_ptr_proj(sam_output_token)
388
+ if self.pred_obj_scores:
389
+ # Allow *soft* no obj ptr, unlike for masks
390
+ if self.soft_no_obj_ptr:
391
+ # Only hard possible with gt
392
+ assert not self.teacher_force_obj_scores_for_mem
393
+ lambda_is_obj_appearing = object_score_logits.sigmoid()
394
+ else:
395
+ lambda_is_obj_appearing = is_obj_appearing.float()
396
+
397
+ if self.fixed_no_obj_ptr:
398
+ obj_ptr = lambda_is_obj_appearing * obj_ptr
399
+ obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr
400
+
401
+ return (
402
+ low_res_multimasks,
403
+ high_res_multimasks,
404
+ ious,
405
+ low_res_masks,
406
+ high_res_masks,
407
+ obj_ptr,
408
+ object_score_logits,
409
+ )
410
+
411
+ def _use_mask_as_output(self, backbone_features, high_res_features, mask_inputs):
412
+ """
413
+ Directly turn binary `mask_inputs` into output mask logits without using SAM.
+ (same input and output shapes as in _forward_sam_heads above).
415
+ """
416
+ # Use -10/+10 as logits for neg/pos pixels (very close to 0/1 in prob after sigmoid).
417
+ out_scale, out_bias = 20.0, -10.0 # sigmoid(-10.0)=4.5398e-05
418
+ mask_inputs_float = mask_inputs.float()
419
+ high_res_masks = mask_inputs_float * out_scale + out_bias
420
+ low_res_masks = F.interpolate(
421
+ high_res_masks,
422
+ size=(high_res_masks.size(-2) // 4, high_res_masks.size(-1) // 4),
423
+ align_corners=False,
424
+ mode="bilinear",
425
+ antialias=True, # use antialias for downsampling
426
+ )
427
+ # a dummy IoU prediction of all 1's under mask input
428
+ ious = mask_inputs.new_ones(mask_inputs.size(0), 1).float()
429
+ if not self.use_obj_ptrs_in_encoder:
430
+ # all zeros as a dummy object pointer (of shape [B, C])
431
+ obj_ptr = torch.zeros(
432
+ mask_inputs.size(0), self.hidden_dim, device=mask_inputs.device
433
+ )
434
+ else:
435
+ # produce an object pointer using the SAM decoder from the mask input
436
+ _, _, _, _, _, obj_ptr, _ = self._forward_sam_heads(
437
+ backbone_features=backbone_features,
438
+ mask_inputs=self.mask_downsample(mask_inputs_float),
439
+ high_res_features=high_res_features,
440
+ )
441
+ # In this method, we are treating mask_input as output, e.g. using it directly to create spatial mem;
442
+ # Below, we follow the same design axiom to use mask_input to decide if obj appears or not instead of relying
443
+ # on the object_scores from the SAM decoder.
444
+ is_obj_appearing = torch.any(mask_inputs.flatten(1).float() > 0.0, dim=1)
445
+ is_obj_appearing = is_obj_appearing[..., None]
446
+ lambda_is_obj_appearing = is_obj_appearing.float()
447
+ object_score_logits = out_scale * lambda_is_obj_appearing + out_bias
448
+ if self.pred_obj_scores:
449
+ if self.fixed_no_obj_ptr:
450
+ obj_ptr = lambda_is_obj_appearing * obj_ptr
451
+ obj_ptr = obj_ptr + (1 - lambda_is_obj_appearing) * self.no_obj_ptr
452
+
453
+ return (
454
+ low_res_masks,
455
+ high_res_masks,
456
+ ious,
457
+ low_res_masks,
458
+ high_res_masks,
459
+ obj_ptr,
460
+ object_score_logits,
461
+ )
462
+
463
+ def forward_image(self, img_batch: torch.Tensor):
464
+ """Get the image feature on the input batch."""
465
+ backbone_out = self.image_encoder(img_batch)
466
+ if self.use_high_res_features_in_sam:
467
+ # precompute projected level 0 and level 1 features in SAM decoder
468
+ # to avoid running it again on every SAM click
469
+ backbone_out["backbone_fpn"][0] = self.sam_mask_decoder.conv_s0(
470
+ backbone_out["backbone_fpn"][0]
471
+ )
472
+ backbone_out["backbone_fpn"][1] = self.sam_mask_decoder.conv_s1(
473
+ backbone_out["backbone_fpn"][1]
474
+ )
475
+ return backbone_out
476
+
477
+ def _prepare_backbone_features(self, backbone_out):
478
+ """Prepare and flatten visual features."""
479
+ backbone_out = backbone_out.copy()
480
+ assert len(backbone_out["backbone_fpn"]) == len(backbone_out["vision_pos_enc"])
481
+ assert len(backbone_out["backbone_fpn"]) >= self.num_feature_levels
482
+
483
+ feature_maps = backbone_out["backbone_fpn"][-self.num_feature_levels :]
484
+ vision_pos_embeds = backbone_out["vision_pos_enc"][-self.num_feature_levels :]
485
+
486
+ feat_sizes = [(x.shape[-2], x.shape[-1]) for x in vision_pos_embeds]
487
+ # flatten NxCxHxW to HWxNxC
488
+ vision_feats = [x.flatten(2).permute(2, 0, 1) for x in feature_maps]
489
+ vision_pos_embeds = [x.flatten(2).permute(2, 0, 1) for x in vision_pos_embeds]
490
+
491
+ return backbone_out, vision_feats, vision_pos_embeds, feat_sizes
492
+
493
+ def _prepare_memory_conditioned_features(
494
+ self,
495
+ frame_idx,
496
+ is_init_cond_frame,
497
+ current_vision_feats,
498
+ current_vision_pos_embeds,
499
+ feat_sizes,
500
+ output_dict,
501
+ num_frames,
502
+ track_in_reverse=False, # tracking in reverse time order (for demo usage)
503
+ ):
504
+ """Fuse the current frame's visual feature map with previous memory."""
505
+ B = current_vision_feats[-1].size(1) # batch size on this frame
506
+ C = self.hidden_dim
507
+ H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
508
+ device = current_vision_feats[-1].device
509
+ # The case of `self.num_maskmem == 0` below is primarily used for reproducing SAM on images.
510
+ # In this case, we skip the fusion with any memory.
511
+ if self.num_maskmem == 0: # Disable memory and skip fusion
512
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
513
+ return pix_feat
514
+
515
+ num_obj_ptr_tokens = 0
516
+ # Step 1: condition the visual features of the current frame on previous memories
517
+ if not is_init_cond_frame:
518
+ # Retrieve the memories encoded with the maskmem backbone
519
+ to_cat_memory, to_cat_memory_pos_embed = [], []
520
+ # Add conditioning frames's output first (all cond frames have t_pos=0 for
521
+ # when getting temporal positional embedding below)
522
+ assert len(output_dict["cond_frame_outputs"]) > 0
523
+ # Select a maximum number of temporally closest cond frames for cross attention
524
+ cond_outputs = output_dict["cond_frame_outputs"]
525
+ selected_cond_outputs, unselected_cond_outputs = select_closest_cond_frames(
526
+ frame_idx, cond_outputs, self.max_cond_frames_in_attn
527
+ )
528
+ t_pos_and_prevs = [(0, out) for out in selected_cond_outputs.values()]
529
+ # Add last (self.num_maskmem - 1) frames before current frame for non-conditioning memory
530
+ # the earliest one has t_pos=1 and the latest one has t_pos=self.num_maskmem-1
531
+ # We also allow taking the memory frame non-consecutively (with r>1), in which case
532
+ # we take (self.num_maskmem - 2) frames among every r-th frames plus the last frame.
533
+ r = self.memory_temporal_stride_for_eval
534
+ for t_pos in range(1, self.num_maskmem):
535
+ t_rel = self.num_maskmem - t_pos # how many frames before current frame
536
+ if t_rel == 1:
537
+ # for t_rel == 1, we take the last frame (regardless of r)
538
+ if not track_in_reverse:
539
+ # the frame immediately before this frame (i.e. frame_idx - 1)
540
+ prev_frame_idx = frame_idx - t_rel
541
+ else:
542
+ # the frame immediately after this frame (i.e. frame_idx + 1)
543
+ prev_frame_idx = frame_idx + t_rel
544
+ else:
545
+ # for t_rel >= 2, we take the memory frame from every r-th frames
546
+ if not track_in_reverse:
547
+ # first find the nearest frame among every r-th frames before this frame
548
+ # for r=1, this would be (frame_idx - 2)
549
+ prev_frame_idx = ((frame_idx - 2) // r) * r
550
+ # then seek further among every r-th frames
551
+ prev_frame_idx = prev_frame_idx - (t_rel - 2) * r
552
+ else:
553
+ # first find the nearest frame among every r-th frames after this frame
554
+ # for r=1, this would be (frame_idx + 2)
555
+ prev_frame_idx = -(-(frame_idx + 2) // r) * r
556
+ # then seek further among every r-th frames
557
+ prev_frame_idx = prev_frame_idx + (t_rel - 2) * r
558
+ out = output_dict["non_cond_frame_outputs"].get(prev_frame_idx, None)
559
+ if out is None:
560
+ # If an unselected conditioning frame is among the last (self.num_maskmem - 1)
561
+ # frames, we still attend to it as if it's a non-conditioning frame.
562
+ out = unselected_cond_outputs.get(prev_frame_idx, None)
563
+ t_pos_and_prevs.append((t_pos, out))
564
+
565
+ for t_pos, prev in t_pos_and_prevs:
566
+ if prev is None:
567
+ continue # skip padding frames
568
+ # "maskmem_features" might have been offloaded to CPU in demo use cases,
569
+ # so we load it back to GPU (it's a no-op if it's already on GPU).
570
+ feats = prev["maskmem_features"].to(device, non_blocking=True)
571
+ to_cat_memory.append(feats.flatten(2).permute(2, 0, 1))
572
+ # Spatial positional encoding (it might have been offloaded to CPU in eval)
573
+ maskmem_enc = prev["maskmem_pos_enc"][-1].to(device)
574
+ maskmem_enc = maskmem_enc.flatten(2).permute(2, 0, 1)
575
+ # Temporal positional encoding
576
+ maskmem_enc = (
577
+ maskmem_enc + self.maskmem_tpos_enc[self.num_maskmem - t_pos - 1]
578
+ )
579
+ to_cat_memory_pos_embed.append(maskmem_enc)
580
+
581
+ # Construct the list of past object pointers
582
+ if self.use_obj_ptrs_in_encoder:
583
+ max_obj_ptrs_in_encoder = min(num_frames, self.max_obj_ptrs_in_encoder)
584
+ # First add those object pointers from selected conditioning frames
585
+ # (optionally, only include object pointers in the past during evaluation)
586
+ if not self.training and self.only_obj_ptrs_in_the_past_for_eval:
587
+ ptr_cond_outputs = {
588
+ t: out
589
+ for t, out in selected_cond_outputs.items()
590
+ if (t >= frame_idx if track_in_reverse else t <= frame_idx)
591
+ }
592
+ else:
593
+ ptr_cond_outputs = selected_cond_outputs
594
+ pos_and_ptrs = [
595
+ # Temporal pos encoding contains how far away each pointer is from current frame
596
+ (abs(frame_idx - t), out["obj_ptr"])
597
+ for t, out in ptr_cond_outputs.items()
598
+ ]
599
+ # Add up to (max_obj_ptrs_in_encoder - 1) non-conditioning frames before current frame
600
+ for t_diff in range(1, max_obj_ptrs_in_encoder):
601
+ t = frame_idx + t_diff if track_in_reverse else frame_idx - t_diff
602
+ if t < 0 or (num_frames is not None and t >= num_frames):
603
+ break
604
+ out = output_dict["non_cond_frame_outputs"].get(
605
+ t, unselected_cond_outputs.get(t, None)
606
+ )
607
+ if out is not None:
608
+ pos_and_ptrs.append((t_diff, out["obj_ptr"]))
609
+ # If we have at least one object pointer, add them to the cross attention
+ if len(pos_and_ptrs) > 0:
611
+ pos_list, ptrs_list = zip(*pos_and_ptrs)
612
+ # stack object pointers along dim=0 into [ptr_seq_len, B, C] shape
613
+ obj_ptrs = torch.stack(ptrs_list, dim=0)
614
+ # a temporal positional embedding based on how far each object pointer is from
615
+ # the current frame (sine embedding normalized by the max pointer num).
616
+ if self.add_tpos_enc_to_obj_ptrs:
617
+ t_diff_max = max_obj_ptrs_in_encoder - 1
618
+ tpos_dim = C if self.proj_tpos_enc_in_obj_ptrs else self.mem_dim
619
+ obj_pos = torch.tensor(pos_list, device=device)
620
+ obj_pos = get_1d_sine_pe(obj_pos / t_diff_max, dim=tpos_dim)
621
+ obj_pos = self.obj_ptr_tpos_proj(obj_pos)
622
+ obj_pos = obj_pos.unsqueeze(1).expand(-1, B, self.mem_dim)
623
+ else:
624
+ obj_pos = obj_ptrs.new_zeros(len(pos_list), B, self.mem_dim)
625
+ if self.mem_dim < C:
626
+ # split a pointer into (C // self.mem_dim) tokens for self.mem_dim < C
627
+ obj_ptrs = obj_ptrs.reshape(
628
+ -1, B, C // self.mem_dim, self.mem_dim
629
+ )
630
+ obj_ptrs = obj_ptrs.permute(0, 2, 1, 3).flatten(0, 1)
631
+ obj_pos = obj_pos.repeat_interleave(C // self.mem_dim, dim=0)
632
+ to_cat_memory.append(obj_ptrs)
633
+ to_cat_memory_pos_embed.append(obj_pos)
634
+ num_obj_ptr_tokens = obj_ptrs.shape[0]
635
+ else:
636
+ num_obj_ptr_tokens = 0
637
+ else:
638
+ # for initial conditioning frames, encode them without using any previous memory
639
+ if self.directly_add_no_mem_embed:
640
+ # directly add no-mem embedding (instead of using the transformer encoder)
641
+ pix_feat_with_mem = current_vision_feats[-1] + self.no_mem_embed
642
+ pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W)
643
+ return pix_feat_with_mem
644
+
645
+ # Use a dummy token on the first frame (to avoid empty memory input to transformer encoder)
+ to_cat_memory = [self.no_mem_embed.expand(1, B, self.mem_dim)]
647
+ to_cat_memory_pos_embed = [self.no_mem_pos_enc.expand(1, B, self.mem_dim)]
648
+
649
+ # Step 2: Concatenate the memories and forward through the transformer encoder
650
+ memory = torch.cat(to_cat_memory, dim=0)
651
+ memory_pos_embed = torch.cat(to_cat_memory_pos_embed, dim=0)
652
+
653
+ pix_feat_with_mem = self.memory_attention(
654
+ curr=current_vision_feats,
655
+ curr_pos=current_vision_pos_embeds,
656
+ memory=memory,
657
+ memory_pos=memory_pos_embed,
658
+ num_obj_ptr_tokens=num_obj_ptr_tokens,
659
+ )
660
+ # reshape the output (HW)BC => BCHW
661
+ pix_feat_with_mem = pix_feat_with_mem.permute(1, 2, 0).view(B, C, H, W)
662
+ return pix_feat_with_mem
663
+
664
+ def _encode_new_memory(
665
+ self,
666
+ current_vision_feats,
667
+ feat_sizes,
668
+ pred_masks_high_res,
669
+ is_mask_from_pts,
670
+ ):
671
+ """Encode the current image and its prediction into a memory feature."""
672
+ B = current_vision_feats[-1].size(1) # batch size on this frame
673
+ C = self.hidden_dim
674
+ H, W = feat_sizes[-1] # top-level (lowest-resolution) feature size
675
+ # top-level feature, (HW)BC => BCHW
676
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0).view(B, C, H, W)
677
+ if self.non_overlap_masks_for_mem_enc and not self.training:
678
+ # optionally, apply non-overlapping constraints to the masks (it's applied
679
+ # in the batch dimension and should only be used during eval, where all
680
+ # the objects come from the same video under batch size 1).
681
+ pred_masks_high_res = self._apply_non_overlapping_constraints(
682
+ pred_masks_high_res
683
+ )
684
+ # scale the raw mask logits with a temperature before applying sigmoid
685
+ binarize = self.binarize_mask_from_pts_for_mem_enc and is_mask_from_pts
686
+ if binarize and not self.training:
687
+ mask_for_mem = (pred_masks_high_res > 0).float()
688
+ else:
689
+ # apply sigmoid on the raw mask logits to turn them into range (0, 1)
690
+ mask_for_mem = torch.sigmoid(pred_masks_high_res)
691
+ # apply scale and bias terms to the sigmoid probabilities
692
+ if self.sigmoid_scale_for_mem_enc != 1.0:
693
+ mask_for_mem = mask_for_mem * self.sigmoid_scale_for_mem_enc
694
+ if self.sigmoid_bias_for_mem_enc != 0.0:
695
+ mask_for_mem = mask_for_mem + self.sigmoid_bias_for_mem_enc
696
+ maskmem_out = self.memory_encoder(
697
+ pix_feat, mask_for_mem, skip_mask_sigmoid=True # sigmoid already applied
698
+ )
699
+ maskmem_features = maskmem_out["vision_features"]
700
+ maskmem_pos_enc = maskmem_out["vision_pos_enc"]
701
+
702
+ return maskmem_features, maskmem_pos_enc
703
+
704
+ def track_step(
705
+ self,
706
+ frame_idx,
707
+ is_init_cond_frame,
708
+ current_vision_feats,
709
+ current_vision_pos_embeds,
710
+ feat_sizes,
711
+ point_inputs,
712
+ mask_inputs,
713
+ output_dict,
714
+ num_frames,
715
+ track_in_reverse=False, # tracking in reverse time order (for demo usage)
716
+ # Whether to run the memory encoder on the predicted masks. Sometimes we might want
717
+ # to skip the memory encoder with `run_mem_encoder=False`. For example,
718
+ # in demo we might call `track_step` multiple times for each user click,
719
+ # and only encode the memory when the user finalizes their clicks. And in ablation
720
+ # settings like SAM training on static images, we don't need the memory encoder.
721
+ run_mem_encoder=True,
722
+ # The previously predicted SAM mask logits (which can be fed together with new clicks in demo).
723
+ prev_sam_mask_logits=None,
724
+ ):
725
+ current_out = {"point_inputs": point_inputs, "mask_inputs": mask_inputs}
726
+ # High-resolution feature maps for the SAM head, reshape (HW)BC => BCHW
727
+ if len(current_vision_feats) > 1:
728
+ high_res_features = [
729
+ x.permute(1, 2, 0).view(x.size(1), x.size(2), *s)
730
+ for x, s in zip(current_vision_feats[:-1], feat_sizes[:-1])
731
+ ]
732
+ else:
733
+ high_res_features = None
734
+ if mask_inputs is not None and self.use_mask_input_as_output_without_sam:
735
+ # When use_mask_input_as_output_without_sam=True, we directly output the mask input
736
+ # (see it as a GT mask) without using a SAM prompt encoder + mask decoder.
737
+ pix_feat = current_vision_feats[-1].permute(1, 2, 0)
738
+ pix_feat = pix_feat.view(-1, self.hidden_dim, *feat_sizes[-1])
739
+ sam_outputs = self._use_mask_as_output(
740
+ pix_feat, high_res_features, mask_inputs
741
+ )
742
+ else:
743
+ # fuse the visual feature with previous memory features in the memory bank
744
+ pix_feat_with_mem = self._prepare_memory_conditioned_features(
745
+ frame_idx=frame_idx,
746
+ is_init_cond_frame=is_init_cond_frame,
747
+ current_vision_feats=current_vision_feats[-1:],
748
+ current_vision_pos_embeds=current_vision_pos_embeds[-1:],
749
+ feat_sizes=feat_sizes[-1:],
750
+ output_dict=output_dict,
751
+ num_frames=num_frames,
752
+ track_in_reverse=track_in_reverse,
753
+ )
754
+ # apply SAM-style segmentation head
755
+ # here we might feed previously predicted low-res SAM mask logits into the SAM mask decoder,
756
+ # e.g. in demo where such logits come from earlier interaction instead of correction sampling
757
+ # (in this case, any `mask_inputs` shouldn't reach here as they are sent to _use_mask_as_output instead)
758
+ if prev_sam_mask_logits is not None:
759
+ assert point_inputs is not None and mask_inputs is None
760
+ mask_inputs = prev_sam_mask_logits
761
+ multimask_output = self._use_multimask(is_init_cond_frame, point_inputs)
762
+ sam_outputs = self._forward_sam_heads(
763
+ backbone_features=pix_feat_with_mem,
764
+ point_inputs=point_inputs,
765
+ mask_inputs=mask_inputs,
766
+ high_res_features=high_res_features,
767
+ multimask_output=multimask_output,
768
+ )
769
+ (
770
+ _,
771
+ _,
772
+ _,
773
+ low_res_masks,
774
+ high_res_masks,
775
+ obj_ptr,
776
+ _,
777
+ ) = sam_outputs
778
+
779
+ current_out["pred_masks"] = low_res_masks
780
+ current_out["pred_masks_high_res"] = high_res_masks
781
+ current_out["obj_ptr"] = obj_ptr
782
+
783
+ # Finally run the memory encoder on the predicted mask to encode
784
+ # it into a new memory feature (that can be used in future frames)
785
+ if run_mem_encoder and self.num_maskmem > 0:
786
+ high_res_masks_for_mem_enc = high_res_masks
787
+ maskmem_features, maskmem_pos_enc = self._encode_new_memory(
788
+ current_vision_feats=current_vision_feats,
789
+ feat_sizes=feat_sizes,
790
+ pred_masks_high_res=high_res_masks_for_mem_enc,
791
+ is_mask_from_pts=(point_inputs is not None),
792
+ )
793
+ current_out["maskmem_features"] = maskmem_features
794
+ current_out["maskmem_pos_enc"] = maskmem_pos_enc
795
+ else:
796
+ current_out["maskmem_features"] = None
797
+ current_out["maskmem_pos_enc"] = None
798
+
799
+ return current_out
800
+
801
+ def _use_multimask(self, is_init_cond_frame, point_inputs):
802
+ """Whether to use multimask output in the SAM head."""
803
+ num_pts = 0 if point_inputs is None else point_inputs["point_labels"].size(1)
804
+ multimask_output = (
805
+ self.multimask_output_in_sam
806
+ and (is_init_cond_frame or self.multimask_output_for_tracking)
807
+ and (self.multimask_min_pt_num <= num_pts <= self.multimask_max_pt_num)
808
+ )
809
+ return multimask_output
810
+
811
+ def _apply_non_overlapping_constraints(self, pred_masks):
812
+ """
813
+ Apply non-overlapping constraints to the object scores in pred_masks. Here we
814
+ keep only the highest scoring object at each spatial location in pred_masks.
815
+ """
816
+ batch_size = pred_masks.size(0)
817
+ if batch_size == 1:
818
+ return pred_masks
819
+
820
+ device = pred_masks.device
821
+ # "max_obj_inds": object index of the object with the highest score at each location
822
+ max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
823
+ # "batch_obj_inds": object index of each object slice (along dim 0) in `pred_masks`
824
+ batch_obj_inds = torch.arange(batch_size, device=device)[:, None, None, None]
825
+ keep = max_obj_inds == batch_obj_inds
826
+ # suppress overlapping regions' scores below -10.0 so that the foreground regions
827
+ # don't overlap (here sigmoid(-10.0)=4.5398e-05)
828
+ pred_masks = torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))
829
+ return pred_masks
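
The `_apply_non_overlapping_constraints` method above keeps only the highest-scoring object at each pixel and clamps every other object's logits to at most -10.0, so their sigmoid scores become effectively zero. A minimal standalone sketch of that logic (written here purely for illustration, not part of this commit; the function name is ours):

import torch

def apply_non_overlapping_constraints(pred_masks: torch.Tensor) -> torch.Tensor:
    # pred_masks: [num_objects, 1, H, W] mask logits, one slice per object
    if pred_masks.size(0) == 1:
        return pred_masks
    # index of the winning object at each spatial location
    max_obj_inds = torch.argmax(pred_masks, dim=0, keepdim=True)
    batch_obj_inds = torch.arange(pred_masks.size(0), device=pred_masks.device)[:, None, None, None]
    keep = max_obj_inds == batch_obj_inds
    # losing objects are clamped to at most -10.0 (sigmoid(-10.0) ~= 4.5e-05)
    return torch.where(keep, pred_masks, torch.clamp(pred_masks, max=-10.0))

scores = torch.randn(3, 1, 4, 4)  # three toy objects on a 4x4 grid
out = apply_non_overlapping_constraints(scores)
print(out.shape)  # torch.Size([3, 1, 4, 4])
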
segment-anything-2/sam2/modeling/sam2_utils.py ADDED
@@ -0,0 +1,149 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+
8
+ import copy
9
+
10
+ import torch
11
+ import torch.nn as nn
12
+ import torch.nn.functional as F
13
+
14
+
15
+ def select_closest_cond_frames(frame_idx, cond_frame_outputs, max_cond_frame_num):
16
+ """
17
+ Select up to `max_cond_frame_num` conditioning frames from `cond_frame_outputs`
18
+ that are temporally closest to the current frame at `frame_idx`. Here, we take
19
+ - a) the closest conditioning frame before `frame_idx` (if any);
20
+ - b) the closest conditioning frame after `frame_idx` (if any);
21
+ - c) any other temporally closest conditioning frames until reaching a total
22
+ of `max_cond_frame_num` conditioning frames.
23
+
24
+ Outputs:
25
+ - selected_outputs: selected items (keys & values) from `cond_frame_outputs`.
26
+ - unselected_outputs: items (keys & values) not selected in `cond_frame_outputs`.
27
+ """
28
+ if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num:
29
+ selected_outputs = cond_frame_outputs
30
+ unselected_outputs = {}
31
+ else:
32
+ assert max_cond_frame_num >= 2, "we should allow using 2+ conditioning frames"
33
+ selected_outputs = {}
34
+
35
+ # the closest conditioning frame before `frame_idx` (if any)
36
+ idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None)
37
+ if idx_before is not None:
38
+ selected_outputs[idx_before] = cond_frame_outputs[idx_before]
39
+
40
+ # the closest conditioning frame after `frame_idx` (if any)
41
+ idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None)
42
+ if idx_after is not None:
43
+ selected_outputs[idx_after] = cond_frame_outputs[idx_after]
44
+
45
+ # add other temporally closest conditioning frames until reaching a total
46
+ # of `max_cond_frame_num` conditioning frames.
47
+ num_remain = max_cond_frame_num - len(selected_outputs)
48
+ inds_remain = sorted(
49
+ (t for t in cond_frame_outputs if t not in selected_outputs),
50
+ key=lambda x: abs(x - frame_idx),
51
+ )[:num_remain]
52
+ selected_outputs.update((t, cond_frame_outputs[t]) for t in inds_remain)
53
+ unselected_outputs = {
54
+ t: v for t, v in cond_frame_outputs.items() if t not in selected_outputs
55
+ }
56
+
57
+ return selected_outputs, unselected_outputs
58
+
59
+
60
+ def get_1d_sine_pe(pos_inds, dim, temperature=10000):
61
+ """
62
+ Get 1D sine positional embedding as in the original Transformer paper.
63
+ """
64
+ pe_dim = dim // 2
65
+ dim_t = torch.arange(pe_dim, dtype=torch.float32, device=pos_inds.device)
66
+ dim_t = temperature ** (2 * (dim_t // 2) / pe_dim)
67
+
68
+ pos_embed = pos_inds.unsqueeze(-1) / dim_t
69
+ pos_embed = torch.cat([pos_embed.sin(), pos_embed.cos()], dim=-1)
70
+ return pos_embed
71
+
72
+
73
+ def get_activation_fn(activation):
74
+ """Return an activation function given a string"""
75
+ if activation == "relu":
76
+ return F.relu
77
+ if activation == "gelu":
78
+ return F.gelu
79
+ if activation == "glu":
80
+ return F.glu
81
+ raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
82
+
83
+
84
+ def get_clones(module, N):
85
+ return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
86
+
87
+
88
+ class DropPath(nn.Module):
89
+ # adapted from https://github.com/huggingface/pytorch-image-models/blob/main/timm/layers/drop.py
90
+ def __init__(self, drop_prob=0.0, scale_by_keep=True):
91
+ super(DropPath, self).__init__()
92
+ self.drop_prob = drop_prob
93
+ self.scale_by_keep = scale_by_keep
94
+
95
+ def forward(self, x):
96
+ if self.drop_prob == 0.0 or not self.training:
97
+ return x
98
+ keep_prob = 1 - self.drop_prob
99
+ shape = (x.shape[0],) + (1,) * (x.ndim - 1)
100
+ random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
101
+ if keep_prob > 0.0 and self.scale_by_keep:
102
+ random_tensor.div_(keep_prob)
103
+ return x * random_tensor
104
+
105
+
106
+ # Lightly adapted from
107
+ # https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
108
+ class MLP(nn.Module):
109
+ def __init__(
110
+ self,
111
+ input_dim: int,
112
+ hidden_dim: int,
113
+ output_dim: int,
114
+ num_layers: int,
115
+ activation: nn.Module = nn.ReLU,
116
+ sigmoid_output: bool = False,
117
+ ) -> None:
118
+ super().__init__()
119
+ self.num_layers = num_layers
120
+ h = [hidden_dim] * (num_layers - 1)
121
+ self.layers = nn.ModuleList(
122
+ nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
123
+ )
124
+ self.sigmoid_output = sigmoid_output
125
+ self.act = activation()
126
+
127
+ def forward(self, x):
128
+ for i, layer in enumerate(self.layers):
129
+ x = self.act(layer(x)) if i < self.num_layers - 1 else layer(x)
130
+ if self.sigmoid_output:
131
+ x = F.sigmoid(x)
132
+ return x
133
+
134
+
135
+ # From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
136
+ # Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
137
+ class LayerNorm2d(nn.Module):
138
+ def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
139
+ super().__init__()
140
+ self.weight = nn.Parameter(torch.ones(num_channels))
141
+ self.bias = nn.Parameter(torch.zeros(num_channels))
142
+ self.eps = eps
143
+
144
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
145
+ u = x.mean(1, keepdim=True)
146
+ s = (x - u).pow(2).mean(1, keepdim=True)
147
+ x = (x - u) / torch.sqrt(s + self.eps)
148
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
149
+ return x
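
A quick usage sketch for two of the helpers above, `get_1d_sine_pe` and `select_closest_cond_frames` (this snippet is illustrative and not part of the commit; it assumes the sam2 package from this repo is installed, and the frame indices and distances are toy values):

import torch
from sam2.modeling.sam2_utils import get_1d_sine_pe, select_closest_cond_frames

# Sine temporal embedding for three pointer distances, normalized by a max of 15.
pos = torch.tensor([0.0, 3.0, 7.0]) / 15
print(get_1d_sine_pe(pos, dim=64).shape)  # torch.Size([3, 64])

# Keep at most two conditioning frames closest to frame 10: the closest frame
# before (8) and after (25) are picked first, so frame 0 is left unselected.
selected, unselected = select_closest_cond_frames(
    frame_idx=10, cond_frame_outputs={0: "a", 8: "b", 25: "c"}, max_cond_frame_num=2
)
print(sorted(selected), sorted(unselected))  # [8, 25] [0]
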
segment-anything-2/sam2/sam2_image_predictor.py ADDED
@@ -0,0 +1,466 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import logging
8
+
9
+ from typing import List, Optional, Tuple, Union
10
+
11
+ import numpy as np
12
+ import torch
13
+ from PIL.Image import Image
14
+
15
+ from sam2.modeling.sam2_base import SAM2Base
16
+
17
+ from sam2.utils.transforms import SAM2Transforms
18
+
19
+
20
+ class SAM2ImagePredictor:
21
+ def __init__(
22
+ self,
23
+ sam_model: SAM2Base,
24
+ mask_threshold=0.0,
25
+ max_hole_area=0.0,
26
+ max_sprinkle_area=0.0,
27
+ **kwargs,
28
+ ) -> None:
29
+ """
30
+ Uses SAM-2 to calculate the image embedding for an image, and then
31
+ allows repeated, efficient mask prediction given prompts.
32
+
33
+ Arguments:
34
+ sam_model (Sam-2): The model to use for mask prediction.
35
+ mask_threshold (float): The threshold to use when converting mask logits
36
+ to binary masks. Masks are thresholded at 0 by default.
37
+ max_hole_area (int): If max_hole_area > 0, we fill small holes with an area up to
38
+ max_hole_area in low_res_masks.
39
+ max_sprinkle_area (int): If max_sprinkle_area > 0, we remove small sprinkles up to
40
+ the maximum area of max_sprinkle_area in low_res_masks.
41
+ """
42
+ super().__init__()
43
+ self.model = sam_model
44
+ self._transforms = SAM2Transforms(
45
+ resolution=self.model.image_size,
46
+ mask_threshold=mask_threshold,
47
+ max_hole_area=max_hole_area,
48
+ max_sprinkle_area=max_sprinkle_area,
49
+ )
50
+
51
+ # Predictor state
52
+ self._is_image_set = False
53
+ self._features = None
54
+ self._orig_hw = None
55
+ # Whether the predictor is set for single image or a batch of images
56
+ self._is_batch = False
57
+
58
+ # Predictor config
59
+ self.mask_threshold = mask_threshold
60
+
61
+ # Spatial dim for backbone feature maps
62
+ self._bb_feat_sizes = [
63
+ (256, 256),
64
+ (128, 128),
65
+ (64, 64),
66
+ ]
67
+
68
+ @classmethod
69
+ def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2ImagePredictor":
70
+ """
71
+ Load a pretrained model from the Hugging Face hub.
72
+
73
+ Arguments:
74
+ model_id (str): The Hugging Face repository ID.
75
+ **kwargs: Additional arguments to pass to the model constructor.
76
+
77
+ Returns:
78
+ (SAM2ImagePredictor): The loaded model.
79
+ """
80
+ from sam2.build_sam import build_sam2_hf
81
+
82
+ sam_model = build_sam2_hf(model_id, **kwargs)
83
+ return cls(sam_model, **kwargs)
84
+
85
+ @torch.no_grad()
86
+ def set_image(
87
+ self,
88
+ image: Union[np.ndarray, Image],
89
+ ) -> None:
90
+ """
91
+ Calculates the image embeddings for the provided image, allowing
92
+ masks to be predicted with the 'predict' method.
93
+
94
+ Arguments:
95
+ image (np.ndarray or PIL Image): The input image to embed in RGB format. The image should be in HWC format if np.ndarray, or WHC format if PIL Image
96
+ with pixel values in [0, 255].
97
+ image_format (str): The color format of the image, in ['RGB', 'BGR'].
98
+ """
99
+ self.reset_predictor()
100
+ # Transform the image to the form expected by the model
101
+ if isinstance(image, np.ndarray):
102
+ logging.info("For numpy array image, we assume (HxWxC) format")
103
+ self._orig_hw = [image.shape[:2]]
104
+ elif isinstance(image, Image):
105
+ w, h = image.size
106
+ self._orig_hw = [(h, w)]
107
+ else:
108
+ raise NotImplementedError("Image format not supported")
109
+
110
+ input_image = self._transforms(image)
111
+ input_image = input_image[None, ...].to(self.device)
112
+
113
+ assert (
114
+ len(input_image.shape) == 4 and input_image.shape[1] == 3
115
+ ), f"input_image must be of size 1x3xHxW, got {input_image.shape}"
116
+ logging.info("Computing image embeddings for the provided image...")
117
+ backbone_out = self.model.forward_image(input_image)
118
+ _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out)
119
+ # Add no_mem_embed, which is added to the lowest-res feature map during training on videos
120
+ if self.model.directly_add_no_mem_embed:
121
+ vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed
122
+
123
+ feats = [
124
+ feat.permute(1, 2, 0).view(1, -1, *feat_size)
125
+ for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])
126
+ ][::-1]
127
+ self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]}
128
+ self._is_image_set = True
129
+ logging.info("Image embeddings computed.")
130
+
131
+ @torch.no_grad()
132
+ def set_image_batch(
133
+ self,
134
+ image_list: List[np.ndarray],
135
+ ) -> None:
136
+ """
137
+ Calculates the image embeddings for the provided image batch, allowing
138
+ masks to be predicted with the 'predict_batch' method.
139
+
140
+ Arguments:
141
+ image_list (List[np.ndarray]): The input images to embed in RGB format. The image should be in HWC format if np.ndarray
142
+ with pixel values in [0, 255].
143
+ """
144
+ self.reset_predictor()
145
+ assert isinstance(image_list, list)
146
+ self._orig_hw = []
147
+ for image in image_list:
148
+ assert isinstance(
149
+ image, np.ndarray
150
+ ), "Images are expected to be an np.ndarray in RGB format, and of shape HWC"
151
+ self._orig_hw.append(image.shape[:2])
152
+ # Transform the image to the form expected by the model
153
+ img_batch = self._transforms.forward_batch(image_list)
154
+ img_batch = img_batch.to(self.device)
155
+ batch_size = img_batch.shape[0]
156
+ assert (
157
+ len(img_batch.shape) == 4 and img_batch.shape[1] == 3
158
+ ), f"img_batch must be of size Bx3xHxW, got {img_batch.shape}"
159
+ logging.info("Computing image embeddings for the provided images...")
160
+ backbone_out = self.model.forward_image(img_batch)
161
+ _, vision_feats, _, _ = self.model._prepare_backbone_features(backbone_out)
162
+ # Add no_mem_embed, which is added to the lowest-res feature map during training on videos
163
+ if self.model.directly_add_no_mem_embed:
164
+ vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed
165
+
166
+ feats = [
167
+ feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
168
+ for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])
169
+ ][::-1]
170
+ self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]}
171
+ self._is_image_set = True
172
+ self._is_batch = True
173
+ logging.info("Image embeddings computed.")
174
+
175
+ def predict_batch(
176
+ self,
177
+ point_coords_batch: List[np.ndarray] = None,
178
+ point_labels_batch: List[np.ndarray] = None,
179
+ box_batch: List[np.ndarray] = None,
180
+ mask_input_batch: List[np.ndarray] = None,
181
+ multimask_output: bool = True,
182
+ return_logits: bool = False,
183
+ normalize_coords=True,
184
+ ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]:
185
+ """This function is very similar to predict(...), however it is used for batched mode, when the model is expected to generate predictions on multiple images.
186
+ It returns a tuple of lists of masks, ious, and low_res_masks_logits.
187
+ """
188
+ assert self._is_batch, "This function should only be used when in batched mode"
189
+ if not self._is_image_set:
190
+ raise RuntimeError(
191
+ "An image must be set with .set_image_batch(...) before mask prediction."
192
+ )
193
+ num_images = len(self._features["image_embed"])
194
+ all_masks = []
195
+ all_ious = []
196
+ all_low_res_masks = []
197
+ for img_idx in range(num_images):
198
+ # Transform input prompts
199
+ point_coords = (
200
+ point_coords_batch[img_idx] if point_coords_batch is not None else None
201
+ )
202
+ point_labels = (
203
+ point_labels_batch[img_idx] if point_labels_batch is not None else None
204
+ )
205
+ box = box_batch[img_idx] if box_batch is not None else None
206
+ mask_input = (
207
+ mask_input_batch[img_idx] if mask_input_batch is not None else None
208
+ )
209
+ mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts(
210
+ point_coords,
211
+ point_labels,
212
+ box,
213
+ mask_input,
214
+ normalize_coords,
215
+ img_idx=img_idx,
216
+ )
217
+ masks, iou_predictions, low_res_masks = self._predict(
218
+ unnorm_coords,
219
+ labels,
220
+ unnorm_box,
221
+ mask_input,
222
+ multimask_output,
223
+ return_logits=return_logits,
224
+ img_idx=img_idx,
225
+ )
226
+ masks_np = masks.squeeze(0).float().detach().cpu().numpy()
227
+ iou_predictions_np = (
228
+ iou_predictions.squeeze(0).float().detach().cpu().numpy()
229
+ )
230
+ low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy()
231
+ all_masks.append(masks_np)
232
+ all_ious.append(iou_predictions_np)
233
+ all_low_res_masks.append(low_res_masks_np)
234
+
235
+ return all_masks, all_ious, all_low_res_masks
236
+
237
+ def predict(
238
+ self,
239
+ point_coords: Optional[np.ndarray] = None,
240
+ point_labels: Optional[np.ndarray] = None,
241
+ box: Optional[np.ndarray] = None,
242
+ mask_input: Optional[np.ndarray] = None,
243
+ multimask_output: bool = True,
244
+ return_logits: bool = False,
245
+ normalize_coords=True,
246
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
247
+ """
248
+ Predict masks for the given input prompts, using the currently set image.
249
+
250
+ Arguments:
251
+ point_coords (np.ndarray or None): A Nx2 array of point prompts to the
252
+ model. Each point is in (X,Y) in pixels.
253
+ point_labels (np.ndarray or None): A length N array of labels for the
254
+ point prompts. 1 indicates a foreground point and 0 indicates a
255
+ background point.
256
+ box (np.ndarray or None): A length 4 array given a box prompt to the
257
+ model, in XYXY format.
258
+ mask_input (np.ndarray): A low resolution mask input to the model, typically
259
+ coming from a previous prediction iteration. Has form 1xHxW, where
260
+ for SAM, H=W=256.
261
+ multimask_output (bool): If true, the model will return three masks.
262
+ For ambiguous input prompts (such as a single click), this will often
263
+ produce better masks than a single prediction. If only a single
264
+ mask is needed, the model's predicted quality score can be used
265
+ to select the best mask. For non-ambiguous prompts, such as multiple
266
+ input prompts, multimask_output=False can give better results.
267
+ return_logits (bool): If true, returns un-thresholded masks logits
268
+ instead of a binary mask.
269
+ normalize_coords (bool): If true, the point coordinates will be normalized to the range [0,1] and point_coords is expected to be wrt. image dimensions.
270
+
271
+ Returns:
272
+ (np.ndarray): The output masks in CxHxW format, where C is the
273
+ number of masks, and (H, W) is the original image size.
274
+ (np.ndarray): An array of length C containing the model's
275
+ predictions for the quality of each mask.
276
+ (np.ndarray): An array of shape CxHxW, where C is the number
277
+ of masks and H=W=256. These low resolution logits can be passed to
278
+ a subsequent iteration as mask input.
279
+ """
280
+ if not self._is_image_set:
281
+ raise RuntimeError(
282
+ "An image must be set with .set_image(...) before mask prediction."
283
+ )
284
+
285
+ # Transform input prompts
286
+
287
+ mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts(
288
+ point_coords, point_labels, box, mask_input, normalize_coords
289
+ )
290
+
291
+ masks, iou_predictions, low_res_masks = self._predict(
292
+ unnorm_coords,
293
+ labels,
294
+ unnorm_box,
295
+ mask_input,
296
+ multimask_output,
297
+ return_logits=return_logits,
298
+ )
299
+
300
+ masks_np = masks.squeeze(0).float().detach().cpu().numpy()
301
+ iou_predictions_np = iou_predictions.squeeze(0).float().detach().cpu().numpy()
302
+ low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy()
303
+ return masks_np, iou_predictions_np, low_res_masks_np
304
+
305
+ def _prep_prompts(
306
+ self, point_coords, point_labels, box, mask_logits, normalize_coords, img_idx=-1
307
+ ):
308
+
309
+ unnorm_coords, labels, unnorm_box, mask_input = None, None, None, None
310
+ if point_coords is not None:
311
+ assert (
312
+ point_labels is not None
313
+ ), "point_labels must be supplied if point_coords is supplied."
314
+ point_coords = torch.as_tensor(
315
+ point_coords, dtype=torch.float, device=self.device
316
+ )
317
+ unnorm_coords = self._transforms.transform_coords(
318
+ point_coords, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx]
319
+ )
320
+ labels = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)
321
+ if len(unnorm_coords.shape) == 2:
322
+ unnorm_coords, labels = unnorm_coords[None, ...], labels[None, ...]
323
+ if box is not None:
324
+ box = torch.as_tensor(box, dtype=torch.float, device=self.device)
325
+ unnorm_box = self._transforms.transform_boxes(
326
+ box, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx]
327
+ ) # Bx2x2
328
+ if mask_logits is not None:
329
+ mask_input = torch.as_tensor(
330
+ mask_logits, dtype=torch.float, device=self.device
331
+ )
332
+ if len(mask_input.shape) == 3:
333
+ mask_input = mask_input[None, :, :, :]
334
+ return mask_input, unnorm_coords, labels, unnorm_box
335
+
336
+ @torch.no_grad()
337
+ def _predict(
338
+ self,
339
+ point_coords: Optional[torch.Tensor],
340
+ point_labels: Optional[torch.Tensor],
341
+ boxes: Optional[torch.Tensor] = None,
342
+ mask_input: Optional[torch.Tensor] = None,
343
+ multimask_output: bool = True,
344
+ return_logits: bool = False,
345
+ img_idx: int = -1,
346
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
347
+ """
348
+ Predict masks for the given input prompts, using the currently set image.
349
+ Input prompts are batched torch tensors and are expected to already be
350
+ transformed to the input frame using SAM2Transforms.
351
+
352
+ Arguments:
353
+ point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
354
+ model. Each point is in (X,Y) in pixels.
355
+ point_labels (torch.Tensor or None): A BxN array of labels for the
356
+ point prompts. 1 indicates a foreground point and 0 indicates a
357
+ background point.
358
+ boxes (np.ndarray or None): A Bx4 array given a box prompt to the
359
+ model, in XYXY format.
360
+ mask_input (np.ndarray): A low resolution mask input to the model, typically
361
+ coming from a previous prediction iteration. Has form Bx1xHxW, where
362
+ for SAM, H=W=256. Masks returned by a previous iteration of the
363
+ predict method do not need further transformation.
364
+ multimask_output (bool): If true, the model will return three masks.
365
+ For ambiguous input prompts (such as a single click), this will often
366
+ produce better masks than a single prediction. If only a single
367
+ mask is needed, the model's predicted quality score can be used
368
+ to select the best mask. For non-ambiguous prompts, such as multiple
369
+ input prompts, multimask_output=False can give better results.
370
+ return_logits (bool): If true, returns un-thresholded masks logits
371
+ instead of a binary mask.
372
+
373
+ Returns:
374
+ (torch.Tensor): The output masks in BxCxHxW format, where C is the
375
+ number of masks, and (H, W) is the original image size.
376
+ (torch.Tensor): An array of shape BxC containing the model's
377
+ predictions for the quality of each mask.
378
+ (torch.Tensor): An array of shape BxCxHxW, where C is the number
379
+ of masks and H=W=256. These low res logits can be passed to
380
+ a subsequent iteration as mask input.
381
+ """
382
+ if not self._is_image_set:
383
+ raise RuntimeError(
384
+ "An image must be set with .set_image(...) before mask prediction."
385
+ )
386
+
387
+ if point_coords is not None:
388
+ concat_points = (point_coords, point_labels)
389
+ else:
390
+ concat_points = None
391
+
392
+ # Embed prompts
393
+ if boxes is not None:
394
+ box_coords = boxes.reshape(-1, 2, 2)
395
+ box_labels = torch.tensor([[2, 3]], dtype=torch.int, device=boxes.device)
396
+ box_labels = box_labels.repeat(boxes.size(0), 1)
397
+ # we merge "boxes" and "points" into a single "concat_points" input (where
398
+ # boxes are added at the beginning) to sam_prompt_encoder
399
+ if concat_points is not None:
400
+ concat_coords = torch.cat([box_coords, concat_points[0]], dim=1)
401
+ concat_labels = torch.cat([box_labels, concat_points[1]], dim=1)
402
+ concat_points = (concat_coords, concat_labels)
403
+ else:
404
+ concat_points = (box_coords, box_labels)
405
+
406
+ sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder(
407
+ points=concat_points,
408
+ boxes=None,
409
+ masks=mask_input,
410
+ )
411
+
412
+ # Predict masks
413
+ batched_mode = (
414
+ concat_points is not None and concat_points[0].shape[0] > 1
415
+ ) # multi object prediction
416
+ high_res_features = [
417
+ feat_level[img_idx].unsqueeze(0)
418
+ for feat_level in self._features["high_res_feats"]
419
+ ]
420
+ low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder(
421
+ image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0),
422
+ image_pe=self.model.sam_prompt_encoder.get_dense_pe(),
423
+ sparse_prompt_embeddings=sparse_embeddings,
424
+ dense_prompt_embeddings=dense_embeddings,
425
+ multimask_output=multimask_output,
426
+ repeat_image=batched_mode,
427
+ high_res_features=high_res_features,
428
+ )
429
+
430
+ # Upscale the masks to the original image resolution
431
+ masks = self._transforms.postprocess_masks(
432
+ low_res_masks, self._orig_hw[img_idx]
433
+ )
434
+ low_res_masks = torch.clamp(low_res_masks, -32.0, 32.0)
435
+ if not return_logits:
436
+ masks = masks > self.mask_threshold
437
+
438
+ return masks, iou_predictions, low_res_masks
439
+
440
+ def get_image_embedding(self) -> torch.Tensor:
441
+ """
442
+ Returns the image embeddings for the currently set image, with
443
+ shape 1xCxHxW, where C is the embedding dimension and (H,W) are
444
+ the embedding spatial dimension of SAM (typically C=256, H=W=64).
445
+ """
446
+ if not self._is_image_set:
447
+ raise RuntimeError(
448
+ "An image must be set with .set_image(...) to generate an embedding."
449
+ )
450
+ assert (
451
+ self._features is not None
452
+ ), "Features must exist if an image has been set."
453
+ return self._features["image_embed"]
454
+
455
+ @property
456
+ def device(self) -> torch.device:
457
+ return self.model.device
458
+
459
+ def reset_predictor(self) -> None:
460
+ """
461
+ Resets the image embeddings and other state variables.
462
+ """
463
+ self._is_image_set = False
464
+ self._features = None
465
+ self._orig_hw = None
466
+ self._is_batch = False
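
A minimal usage sketch of the image predictor defined above (illustrative only, not part of the commit: the config/checkpoint names mirror the files added in this commit, and a dummy image with a single click prompt stands in for real inputs; building the model assumes a CUDA device by default):

import numpy as np
from sam2.build_sam import build_sam2
from sam2.sam2_image_predictor import SAM2ImagePredictor

model = build_sam2("sam2_hiera_s.yaml", "checkpoints/sam2_hiera_small.pt")
predictor = SAM2ImagePredictor(model)

image = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy RGB image in HWC format
predictor.set_image(image)
masks, scores, low_res_logits = predictor.predict(
    point_coords=np.array([[320, 240]]),  # one foreground click, (x, y) in pixels
    point_labels=np.array([1]),
    multimask_output=False,
)
print(masks.shape, scores.shape)  # (1, 480, 640) (1,)
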
segment-anything-2/sam2/sam2_video_predictor.py ADDED
@@ -0,0 +1,983 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import warnings
8
+ from collections import OrderedDict
9
+
10
+ import torch
11
+
12
+ from tqdm import tqdm
13
+
14
+ from sam2.modeling.sam2_base import NO_OBJ_SCORE, SAM2Base
15
+ from sam2.utils.misc import concat_points, fill_holes_in_mask_scores, load_video_frames
16
+
17
+
18
+ class SAM2VideoPredictor(SAM2Base):
19
+ """The predictor class to handle user interactions and manage inference states."""
20
+
21
+ def __init__(
22
+ self,
23
+ fill_hole_area=0,
24
+ # whether to apply non-overlapping constraints on the output object masks
25
+ non_overlap_masks=False,
26
+ # whether to clear non-conditioning memory of the surrounding frames (which may contain outdated information) after adding correction clicks;
27
+ # note that this would only apply to *single-object tracking* unless `clear_non_cond_mem_for_multi_obj` is also set to True
28
+ clear_non_cond_mem_around_input=False,
29
+ # whether to also clear non-conditioning memory of the surrounding frames (only effective when `clear_non_cond_mem_around_input` is True).
30
+ clear_non_cond_mem_for_multi_obj=False,
31
+ **kwargs,
32
+ ):
33
+ super().__init__(**kwargs)
34
+ self.fill_hole_area = fill_hole_area
35
+ self.non_overlap_masks = non_overlap_masks
36
+ self.clear_non_cond_mem_around_input = clear_non_cond_mem_around_input
37
+ self.clear_non_cond_mem_for_multi_obj = clear_non_cond_mem_for_multi_obj
38
+
39
+ @torch.inference_mode()
40
+ def init_state(
41
+ self,
42
+ video_path,
43
+ image_inputs=None,
44
+ offload_video_to_cpu=False,
45
+ offload_state_to_cpu=False,
46
+ async_loading_frames=False,
47
+ verbose=True,
48
+ ):
49
+ """Initialize an inference state."""
50
+ compute_device = self.device # device of the model
51
+ images, video_height, video_width = load_video_frames(
52
+ video_path=video_path,
53
+ image_size=self.image_size,
54
+ offload_video_to_cpu=offload_video_to_cpu,
55
+ async_loading_frames=async_loading_frames,
56
+ compute_device=compute_device,
57
+ image_inputs=image_inputs,
58
+ verbose=verbose
59
+ )
60
+ inference_state = {}
61
+ inference_state["images"] = images
62
+ inference_state["num_frames"] = len(images)
63
+ # whether to offload the video frames to CPU memory
64
+ # turning on this option saves the GPU memory with only a very small overhead
65
+ inference_state["offload_video_to_cpu"] = offload_video_to_cpu
66
+ # whether to offload the inference state to CPU memory
67
+ # turning on this option saves the GPU memory at the cost of a lower tracking fps
68
+ # (e.g. in a test case of 768x768 model, fps dropped from 27 to 24 when tracking one object
69
+ # and from 24 to 21 when tracking two objects)
70
+ inference_state["offload_state_to_cpu"] = offload_state_to_cpu
71
+ # the original video height and width, used for resizing final output scores
72
+ inference_state["video_height"] = video_height
73
+ inference_state["video_width"] = video_width
74
+ inference_state["device"] = compute_device
75
+ if offload_state_to_cpu:
76
+ inference_state["storage_device"] = torch.device("cpu")
77
+ else:
78
+ inference_state["storage_device"] = compute_device
79
+ # inputs on each frame
80
+ inference_state["point_inputs_per_obj"] = {}
81
+ inference_state["mask_inputs_per_obj"] = {}
82
+ # visual features on a small number of recently visited frames for quick interactions
83
+ inference_state["cached_features"] = {}
84
+ # values that don't change across frames (so we only need to hold one copy of them)
85
+ inference_state["constants"] = {}
86
+ # mapping between client-side object id and model-side object index
87
+ inference_state["obj_id_to_idx"] = OrderedDict()
88
+ inference_state["obj_idx_to_id"] = OrderedDict()
89
+ inference_state["obj_ids"] = []
90
+ # A storage to hold the model's tracking results and states on each frame
91
+ inference_state["output_dict"] = {
92
+ "cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
93
+ "non_cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
94
+ }
95
+ # Slice (view) of each object tracking results, sharing the same memory with "output_dict"
96
+ inference_state["output_dict_per_obj"] = {}
97
+ # A temporary storage to hold new outputs when the user interacts with a frame
98
+ # to add clicks or mask (it's merged into "output_dict" before propagation starts)
99
+ inference_state["temp_output_dict_per_obj"] = {}
100
+ # Frames that already hold consolidated outputs from click or mask inputs
101
+ # (we directly use their consolidated outputs during tracking)
102
+ inference_state["consolidated_frame_inds"] = {
103
+ "cond_frame_outputs": set(), # set containing frame indices
104
+ "non_cond_frame_outputs": set(), # set containing frame indices
105
+ }
106
+ # metadata for each tracking frame (e.g. which direction it's tracked)
107
+ inference_state["tracking_has_started"] = False
108
+ inference_state["frames_already_tracked"] = {}
109
+ # Warm up the visual backbone and cache the image feature on frame 0
110
+ self._get_image_feature(inference_state, frame_idx=0, batch_size=1)
111
+ return inference_state
112
+
113
+ @classmethod
114
+ def from_pretrained(cls, model_id: str, **kwargs) -> "SAM2VideoPredictor":
115
+ """
116
+ Load a pretrained model from the Hugging Face hub.
117
+
118
+ Arguments:
119
+ model_id (str): The Hugging Face repository ID.
120
+ **kwargs: Additional arguments to pass to the model constructor.
121
+
122
+ Returns:
123
+ (SAM2VideoPredictor): The loaded model.
124
+ """
125
+ from sam2.build_sam import build_sam2_video_predictor_hf
126
+
127
+ sam_model = build_sam2_video_predictor_hf(model_id, **kwargs)
128
+ return sam_model
129
+
130
+ def _obj_id_to_idx(self, inference_state, obj_id):
131
+ """Map client-side object id to model-side object index."""
132
+ obj_idx = inference_state["obj_id_to_idx"].get(obj_id, None)
133
+ if obj_idx is not None:
134
+ return obj_idx
135
+
136
+ # This is a new object id not sent to the server before. We only allow adding
137
+ # new objects *before* the tracking starts.
138
+ allow_new_object = not inference_state["tracking_has_started"]
139
+ if allow_new_object:
140
+ # get the next object slot
141
+ obj_idx = len(inference_state["obj_id_to_idx"])
142
+ inference_state["obj_id_to_idx"][obj_id] = obj_idx
143
+ inference_state["obj_idx_to_id"][obj_idx] = obj_id
144
+ inference_state["obj_ids"] = list(inference_state["obj_id_to_idx"])
145
+ # set up input and output structures for this object
146
+ inference_state["point_inputs_per_obj"][obj_idx] = {}
147
+ inference_state["mask_inputs_per_obj"][obj_idx] = {}
148
+ inference_state["output_dict_per_obj"][obj_idx] = {
149
+ "cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
150
+ "non_cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
151
+ }
152
+ inference_state["temp_output_dict_per_obj"][obj_idx] = {
153
+ "cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
154
+ "non_cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
155
+ }
156
+ return obj_idx
157
+ else:
158
+ raise RuntimeError(
159
+ f"Cannot add new object id {obj_id} after tracking starts. "
160
+ f"All existing object ids: {inference_state['obj_ids']}. "
161
+ f"Please call 'reset_state' to restart from scratch."
162
+ )
163
+
164
+ def _obj_idx_to_id(self, inference_state, obj_idx):
165
+ """Map model-side object index to client-side object id."""
166
+ return inference_state["obj_idx_to_id"][obj_idx]
167
+
168
+ def _get_obj_num(self, inference_state):
169
+ """Get the total number of unique object ids received so far in this session."""
170
+ return len(inference_state["obj_idx_to_id"])
171
+
172
+ @torch.inference_mode()
173
+ def add_new_points_or_box(
174
+ self,
175
+ inference_state,
176
+ frame_idx,
177
+ obj_id,
178
+ points=None,
179
+ labels=None,
180
+ mask=None,
181
+ clear_old_points=True,
182
+ normalize_coords=True,
183
+ box=None,
184
+ ):
185
+ """Add new points to a frame."""
186
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
187
+ point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx]
188
+ mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx]
189
+
190
+ if (points is not None) != (labels is not None):
191
+ raise ValueError("points and labels must be provided together")
192
+ if points is None and box is None:
193
+ raise ValueError("at least one of points or box must be provided as input")
194
+
195
+ if points is None:
196
+ points = torch.zeros(0, 2, dtype=torch.float32)
197
+ elif not isinstance(points, torch.Tensor):
198
+ points = torch.tensor(points, dtype=torch.float32)
199
+ if labels is None:
200
+ labels = torch.zeros(0, dtype=torch.int32)
201
+ elif not isinstance(labels, torch.Tensor):
202
+ labels = torch.tensor(labels, dtype=torch.int32)
203
+ if points.dim() == 2:
204
+ points = points.unsqueeze(0) # add batch dimension
205
+ if labels.dim() == 1:
206
+ labels = labels.unsqueeze(0) # add batch dimension
207
+
208
+ # If `box` is provided, we add it as the first two points with labels 2 and 3
209
+ # along with the user-provided points (consistent with how SAM 2 is trained).
210
+ if box is not None:
211
+ if not clear_old_points:
212
+ raise ValueError(
213
+ "cannot add box without clearing old points, since "
214
+ "box prompt must be provided before any point prompt "
215
+ "(please use clear_old_points=True instead)"
216
+ )
217
+ if inference_state["tracking_has_started"]:
218
+ warnings.warn(
219
+ "You are adding a box after tracking starts. SAM 2 may not always be "
220
+ "able to incorporate a box prompt for *refinement*. If you intend to "
221
+ "use box prompt as an *initial* input before tracking, please call "
222
+ "'reset_state' on the inference state to restart from scratch.",
223
+ category=UserWarning,
224
+ stacklevel=2,
225
+ )
226
+ if not isinstance(box, torch.Tensor):
227
+ box = torch.tensor(box, dtype=torch.float32, device=points.device)
228
+ box_coords = box.reshape(1, 2, 2)
229
+ box_labels = torch.tensor([2, 3], dtype=torch.int32, device=labels.device)
230
+ box_labels = box_labels.reshape(1, 2)
231
+ points = torch.cat([box_coords, points], dim=1)
232
+ labels = torch.cat([box_labels, labels], dim=1)
233
+
234
+ if normalize_coords:
235
+ video_H = inference_state["video_height"]
236
+ video_W = inference_state["video_width"]
237
+ points = points / torch.tensor([video_W, video_H]).to(points.device)
238
+ # scale the (normalized) coordinates by the model's internal image size
239
+ points = points * self.image_size
240
+ points = points.to(inference_state["device"])
241
+ labels = labels.to(inference_state["device"])
242
+
243
+ if not clear_old_points:
244
+ point_inputs = point_inputs_per_frame.get(frame_idx, None)
245
+ else:
246
+ point_inputs = None
247
+ point_inputs = concat_points(point_inputs, points, labels)
248
+
249
+ point_inputs_per_frame[frame_idx] = point_inputs
250
+ mask_inputs_per_frame.pop(frame_idx, None)
251
+ # If this frame hasn't been tracked before, we treat it as an initial conditioning
252
+ # frame, meaning that the input points are used to generate segments on this frame without
253
+ # using any memory from other frames, like in SAM. Otherwise (if it has been tracked),
254
+ # the input points will be used to correct the already tracked masks.
255
+ is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"]
256
+ # whether to track in reverse time order
257
+ if is_init_cond_frame:
258
+ reverse = False
259
+ else:
260
+ reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"]
261
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
262
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
263
+ # Add a frame to conditioning output if it's an initial conditioning frame or
264
+ # if the model sees all frames receiving clicks/mask as conditioning frames.
265
+ is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
266
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
267
+
268
+ # Get any previously predicted mask logits on this object and feed it along with
269
+ # the new clicks into the SAM mask decoder.
270
+ prev_sam_mask_logits = None
271
+ # lookup temporary output dict first, which contains the most recent output
272
+ # (if not found, then lookup conditioning and non-conditioning frame output)
273
+ prev_out = obj_temp_output_dict[storage_key].get(frame_idx)
274
+ if prev_out is None:
275
+ prev_out = obj_output_dict["cond_frame_outputs"].get(frame_idx)
276
+ if prev_out is None:
277
+ prev_out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx)
278
+
279
+ if prev_out is not None and prev_out["pred_masks"] is not None:
280
+ device = inference_state["device"]
281
+ prev_sam_mask_logits = prev_out["pred_masks"].to(device, non_blocking=True)
282
+ # Clamp the scale of prev_sam_mask_logits to avoid rare numerical issues.
283
+ prev_sam_mask_logits = torch.clamp(prev_sam_mask_logits, -32.0, 32.0)
284
+ if mask is None:
285
+ current_out, _ = self._run_single_frame_inference(
286
+ inference_state=inference_state,
287
+ output_dict=obj_output_dict, # run on the slice of a single object
288
+ frame_idx=frame_idx,
289
+ batch_size=1, # run on the slice of a single object
290
+ is_init_cond_frame=is_init_cond_frame,
291
+ point_inputs=point_inputs,
292
+ mask_inputs=None,
293
+ reverse=reverse,
294
+ # Skip the memory encoder when adding clicks or mask. We execute the memory encoder
295
+ # at the beginning of `propagate_in_video` (after the user finalizes their clicks). This
296
+ # allows us to enforce non-overlapping constraints on all objects before encoding
297
+ # them into memory.
298
+ run_mem_encoder=False,
299
+ prev_sam_mask_logits=prev_sam_mask_logits,
300
+ )
301
+ else:
302
+ current_out, _ = self._run_single_frame_inference(
303
+ inference_state=inference_state,
304
+ output_dict=obj_output_dict, # run on the slice of a single object
305
+ frame_idx=frame_idx,
306
+ batch_size=1, # run on the slice of a single object
307
+ is_init_cond_frame=is_init_cond_frame,
308
+ point_inputs=None,
309
+ mask_inputs=mask,
310
+ reverse=reverse,
311
+ # Skip the memory encoder when adding clicks or mask. We execute the memory encoder
312
+ # at the beginning of `propagate_in_video` (after the user finalizes their clicks). This
313
+ # allows us to enforce non-overlapping constraints on all objects before encoding
314
+ # them into memory.
315
+ run_mem_encoder=False,
316
+ prev_sam_mask_logits=prev_sam_mask_logits,
317
+ )
318
+ # Add the output to the output dict (to be used as future memory)
319
+ obj_temp_output_dict[storage_key][frame_idx] = current_out
320
+
321
+ # Resize the output mask to the original video resolution
322
+ obj_ids = inference_state["obj_ids"]
323
+ consolidated_out = self._consolidate_temp_output_across_obj(
324
+ inference_state,
325
+ frame_idx,
326
+ is_cond=is_cond,
327
+ run_mem_encoder=False,
328
+ consolidate_at_video_res=True,
329
+ )
330
+ _, video_res_masks = self._get_orig_video_res_output(
331
+ inference_state, consolidated_out["pred_masks_video_res"]
332
+ )
333
+ return frame_idx, obj_ids, video_res_masks
334
+
335
+ def add_new_points(self, *args, **kwargs):
336
+ """Deprecated method. Please use `add_new_points_or_box` instead."""
337
+ return self.add_new_points_or_box(*args, **kwargs)
338
+
339
+ @torch.inference_mode()
340
+ def add_new_mask(
341
+ self,
342
+ inference_state,
343
+ frame_idx,
344
+ obj_id,
345
+ mask,
346
+ ):
347
+ """Add new mask to a frame."""
348
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
349
+ point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx]
350
+ mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx]
351
+
352
+ if not isinstance(mask, torch.Tensor):
353
+ mask = torch.tensor(mask, dtype=torch.bool)
354
+ assert mask.dim() == 2
355
+ mask_H, mask_W = mask.shape
356
+ mask_inputs_orig = mask[None, None] # add batch and channel dimension
357
+ mask_inputs_orig = mask_inputs_orig.float().to(inference_state["device"])
358
+
359
+ # resize the mask if it doesn't match the model's image size
360
+ if mask_H != self.image_size or mask_W != self.image_size:
361
+ mask_inputs = torch.nn.functional.interpolate(
362
+ mask_inputs_orig,
363
+ size=(self.image_size, self.image_size),
364
+ align_corners=False,
365
+ mode="bilinear",
366
+ antialias=True, # use antialias for downsampling
367
+ )
368
+ mask_inputs = (mask_inputs >= 0.5).float()
369
+ else:
370
+ mask_inputs = mask_inputs_orig
371
+
372
+ mask_inputs_per_frame[frame_idx] = mask_inputs
373
+ point_inputs_per_frame.pop(frame_idx, None)
374
+ # If this frame hasn't been tracked before, we treat it as an initial conditioning
375
+ # frame, meaning that the input points are used to generate segments on this frame without
376
+ # using any memory from other frames, like in SAM. Otherwise (if it has been tracked),
377
+ # the input points will be used to correct the already tracked masks.
378
+ is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"]
379
+ # whether to track in reverse time order
380
+ if is_init_cond_frame:
381
+ reverse = False
382
+ else:
383
+ reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"]
384
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
385
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
386
+ # Add a frame to conditioning output if it's an initial conditioning frame or
387
+ # if the model sees all frames receiving clicks/mask as conditioning frames.
388
+ is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
389
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
390
+
391
+ current_out, _ = self._run_single_frame_inference(
392
+ inference_state=inference_state,
393
+ output_dict=obj_output_dict, # run on the slice of a single object
394
+ frame_idx=frame_idx,
395
+ batch_size=1, # run on the slice of a single object
396
+ is_init_cond_frame=is_init_cond_frame,
397
+ point_inputs=None,
398
+ mask_inputs=mask_inputs,
399
+ reverse=reverse,
400
+ # Skip the memory encoder when adding clicks or mask. We execute the memory encoder
401
+ # at the beginning of `propagate_in_video` (after the user finalizes their clicks). This
402
+ # allows us to enforce non-overlapping constraints on all objects before encoding
403
+ # them into memory.
404
+ run_mem_encoder=False,
405
+ )
406
+ # Add the output to the output dict (to be used as future memory)
407
+ obj_temp_output_dict[storage_key][frame_idx] = current_out
408
+
409
+ # Resize the output mask to the original video resolution
410
+ obj_ids = inference_state["obj_ids"]
411
+ consolidated_out = self._consolidate_temp_output_across_obj(
412
+ inference_state,
413
+ frame_idx,
414
+ is_cond=is_cond,
415
+ run_mem_encoder=False,
416
+ consolidate_at_video_res=True,
417
+ )
418
+ _, video_res_masks = self._get_orig_video_res_output(
419
+ inference_state, consolidated_out["pred_masks_video_res"]
420
+ )
421
+ return frame_idx, obj_ids, video_res_masks
422
+
423
+ def _get_orig_video_res_output(self, inference_state, any_res_masks):
424
+ """
425
+ Resize the object scores to the original video resolution (video_res_masks)
426
+ and apply non-overlapping constraints for final output.
427
+ """
428
+ device = inference_state["device"]
429
+ video_H = inference_state["video_height"]
430
+ video_W = inference_state["video_width"]
431
+ any_res_masks = any_res_masks.to(device, non_blocking=True)
432
+ if any_res_masks.shape[-2:] == (video_H, video_W):
433
+ video_res_masks = any_res_masks
434
+ else:
435
+ video_res_masks = torch.nn.functional.interpolate(
436
+ any_res_masks,
437
+ size=(video_H, video_W),
438
+ mode="bilinear",
439
+ align_corners=False,
440
+ )
441
+ if self.non_overlap_masks:
442
+ video_res_masks = self._apply_non_overlapping_constraints(video_res_masks)
443
+ return any_res_masks, video_res_masks
444
+
445
+ def _consolidate_temp_output_across_obj(
446
+ self,
447
+ inference_state,
448
+ frame_idx,
449
+ is_cond,
450
+ run_mem_encoder,
451
+ consolidate_at_video_res=False,
452
+ ):
453
+ """
454
+ Consolidate the per-object temporary outputs in `temp_output_dict_per_obj` on
455
+ a frame into a single output for all objects, including
456
+ 1) fill any missing objects either from `output_dict_per_obj` (if they exist in
457
+ `output_dict_per_obj` for this frame) or leave them as placeholder values
458
+ (if they don't exist in `output_dict_per_obj` for this frame);
459
+ 2) if specified, rerun the memory encoder after applying non-overlapping constraints
460
+ on the object scores.
461
+ """
462
+ batch_size = self._get_obj_num(inference_state)
463
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
464
+ # Optionally, we allow consolidating the temporary outputs at the original
465
+ # video resolution (to provide a better editing experience for mask prompts).
466
+ if consolidate_at_video_res:
467
+ assert not run_mem_encoder, "memory encoder cannot run at video resolution"
468
+ consolidated_H = inference_state["video_height"]
469
+ consolidated_W = inference_state["video_width"]
470
+ consolidated_mask_key = "pred_masks_video_res"
471
+ else:
472
+ consolidated_H = consolidated_W = self.image_size // 4
473
+ consolidated_mask_key = "pred_masks"
474
+
475
+ # Initialize `consolidated_out`. Its "maskmem_features" and "maskmem_pos_enc"
476
+ # will be added when rerunning the memory encoder after applying non-overlapping
477
+ # constraints to object scores. Its "pred_masks" are prefilled with a large
478
+ # negative value (NO_OBJ_SCORE) to represent missing objects.
479
+ consolidated_out = {
480
+ "maskmem_features": None,
481
+ "maskmem_pos_enc": None,
482
+ consolidated_mask_key: torch.full(
483
+ size=(batch_size, 1, consolidated_H, consolidated_W),
484
+ fill_value=NO_OBJ_SCORE,
485
+ dtype=torch.float32,
486
+ device=inference_state["storage_device"],
487
+ ),
488
+ "obj_ptr": torch.full(
489
+ size=(batch_size, self.hidden_dim),
490
+ fill_value=NO_OBJ_SCORE,
491
+ dtype=torch.float32,
492
+ device=inference_state["device"],
493
+ ),
494
+ }
495
+ empty_mask_ptr = None
496
+ for obj_idx in range(batch_size):
497
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
498
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
499
+ out = obj_temp_output_dict[storage_key].get(frame_idx, None)
500
+ # If the object doesn't appear in "temp_output_dict_per_obj" on this frame,
501
+ # we fall back and look up its previous output in "output_dict_per_obj".
502
+ # We look up both "cond_frame_outputs" and "non_cond_frame_outputs" in
503
+ # "output_dict_per_obj" to find a previous output for this object.
504
+ if out is None:
505
+ out = obj_output_dict["cond_frame_outputs"].get(frame_idx, None)
506
+ if out is None:
507
+ out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx, None)
508
+ # If the object doesn't appear in "output_dict_per_obj" either, we skip it
509
+ # and leave its mask scores to the default scores (i.e. the NO_OBJ_SCORE
510
+ # placeholder above) and set its object pointer to be a dummy pointer.
511
+ if out is None:
512
+ # Fill in dummy object pointers for those objects without any inputs or
513
+ # tracking outcomes on this frame (only do it under `run_mem_encoder=True`,
514
+ # i.e. when we need to build the memory for tracking).
515
+ if run_mem_encoder:
516
+ if empty_mask_ptr is None:
517
+ empty_mask_ptr = self._get_empty_mask_ptr(
518
+ inference_state, frame_idx
519
+ )
520
+ # fill object pointer with a dummy pointer (based on an empty mask)
521
+ consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = empty_mask_ptr
522
+ continue
523
+ # Add the temporary object output mask to consolidated output mask
524
+ obj_mask = out["pred_masks"]
525
+ consolidated_pred_masks = consolidated_out[consolidated_mask_key]
526
+ if obj_mask.shape[-2:] == consolidated_pred_masks.shape[-2:]:
527
+ consolidated_pred_masks[obj_idx : obj_idx + 1] = obj_mask
528
+ else:
529
+ # Resize first if temporary object mask has a different resolution
530
+ resized_obj_mask = torch.nn.functional.interpolate(
531
+ obj_mask,
532
+ size=consolidated_pred_masks.shape[-2:],
533
+ mode="bilinear",
534
+ align_corners=False,
535
+ )
536
+ consolidated_pred_masks[obj_idx : obj_idx + 1] = resized_obj_mask
537
+ consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = out["obj_ptr"]
538
+
539
+ # Optionally, apply non-overlapping constraints on the consolidated scores
540
+ # and rerun the memory encoder
541
+ if run_mem_encoder:
542
+ device = inference_state["device"]
543
+ high_res_masks = torch.nn.functional.interpolate(
544
+ consolidated_out["pred_masks"].to(device, non_blocking=True),
545
+ size=(self.image_size, self.image_size),
546
+ mode="bilinear",
547
+ align_corners=False,
548
+ )
549
+ if self.non_overlap_masks_for_mem_enc:
550
+ high_res_masks = self._apply_non_overlapping_constraints(high_res_masks)
551
+ maskmem_features, maskmem_pos_enc = self._run_memory_encoder(
552
+ inference_state=inference_state,
553
+ frame_idx=frame_idx,
554
+ batch_size=batch_size,
555
+ high_res_masks=high_res_masks,
556
+ is_mask_from_pts=True, # these frames are what the user interacted with
557
+ )
558
+ consolidated_out["maskmem_features"] = maskmem_features
559
+ consolidated_out["maskmem_pos_enc"] = maskmem_pos_enc
560
+
561
+ return consolidated_out
562
+
563
+ def _get_empty_mask_ptr(self, inference_state, frame_idx):
564
+ """Get a dummy object pointer based on an empty mask on the current frame."""
565
+ # A dummy (empty) mask with a single object
566
+ batch_size = 1
567
+ mask_inputs = torch.zeros(
568
+ (batch_size, 1, self.image_size, self.image_size),
569
+ dtype=torch.float32,
570
+ device=inference_state["device"],
571
+ )
572
+
573
+ # Retrieve correct image features
574
+ (
575
+ _,
576
+ _,
577
+ current_vision_feats,
578
+ current_vision_pos_embeds,
579
+ feat_sizes,
580
+ ) = self._get_image_feature(inference_state, frame_idx, batch_size)
581
+
582
+ # Feed the empty mask and image feature above to get a dummy object pointer
583
+ current_out = self.track_step(
584
+ frame_idx=frame_idx,
585
+ is_init_cond_frame=True,
586
+ current_vision_feats=current_vision_feats,
587
+ current_vision_pos_embeds=current_vision_pos_embeds,
588
+ feat_sizes=feat_sizes,
589
+ point_inputs=None,
590
+ mask_inputs=mask_inputs,
591
+ output_dict={},
592
+ num_frames=inference_state["num_frames"],
593
+ track_in_reverse=False,
594
+ run_mem_encoder=False,
595
+ prev_sam_mask_logits=None,
596
+ )
597
+ return current_out["obj_ptr"]
598
+
599
+ def propagate_in_video_preflight(self, inference_state):
600
+ """Prepare inference_state and consolidate temporary outputs before tracking."""
601
+ # Tracking has started and we don't allow adding new objects until session is reset.
602
+ inference_state["tracking_has_started"] = True
603
+ batch_size = self._get_obj_num(inference_state)
604
+
605
+ # Consolidate per-object temporary outputs in "temp_output_dict_per_obj" and
606
+ # add them into "output_dict".
607
+ temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
608
+ output_dict = inference_state["output_dict"]
609
+ # "consolidated_frame_inds" contains indices of those frames where consolidated
610
+ # temporary outputs have been added (either in this call or any previous calls
611
+ # to `propagate_in_video_preflight`).
612
+ consolidated_frame_inds = inference_state["consolidated_frame_inds"]
613
+ for is_cond in [False, True]:
614
+ # Separately consolidate conditioning and non-conditioning temp outputs
615
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
616
+ # Find all the frames that contain temporary outputs for any objects
617
+ # (these should be the frames that have just received clicks or mask inputs
618
+ # via `add_new_points_or_box` or `add_new_mask`)
619
+ temp_frame_inds = set()
620
+ for obj_temp_output_dict in temp_output_dict_per_obj.values():
621
+ temp_frame_inds.update(obj_temp_output_dict[storage_key].keys())
622
+ consolidated_frame_inds[storage_key].update(temp_frame_inds)
623
+ # consolidate the temporary output across all objects on this frame
624
+ for frame_idx in temp_frame_inds:
625
+ consolidated_out = self._consolidate_temp_output_across_obj(
626
+ inference_state, frame_idx, is_cond=is_cond, run_mem_encoder=True
627
+ )
628
+ # merge them into "output_dict" and also create per-object slices
629
+ output_dict[storage_key][frame_idx] = consolidated_out
630
+ self._add_output_per_object(
631
+ inference_state, frame_idx, consolidated_out, storage_key
632
+ )
633
+ clear_non_cond_mem = self.clear_non_cond_mem_around_input and (
634
+ self.clear_non_cond_mem_for_multi_obj or batch_size <= 1
635
+ )
636
+ if clear_non_cond_mem:
637
+ # clear non-conditioning memory of the surrounding frames
638
+ self._clear_non_cond_mem_around_input(inference_state, frame_idx)
639
+
640
+ # clear temporary outputs in `temp_output_dict_per_obj`
641
+ for obj_temp_output_dict in temp_output_dict_per_obj.values():
642
+ obj_temp_output_dict[storage_key].clear()
643
+
644
+ # edge case: if an output is added to "cond_frame_outputs", we remove any prior
645
+ # output on the same frame in "non_cond_frame_outputs"
646
+ for frame_idx in output_dict["cond_frame_outputs"]:
647
+ output_dict["non_cond_frame_outputs"].pop(frame_idx, None)
648
+ for obj_output_dict in inference_state["output_dict_per_obj"].values():
649
+ for frame_idx in obj_output_dict["cond_frame_outputs"]:
650
+ obj_output_dict["non_cond_frame_outputs"].pop(frame_idx, None)
651
+ for frame_idx in consolidated_frame_inds["cond_frame_outputs"]:
652
+ assert frame_idx in output_dict["cond_frame_outputs"]
653
+ consolidated_frame_inds["non_cond_frame_outputs"].discard(frame_idx)
654
+
655
+ # Make sure that the frame indices in "consolidated_frame_inds" are exactly those frames
656
+ # with either points or mask inputs (which should be true under a correct workflow).
657
+ all_consolidated_frame_inds = (
658
+ consolidated_frame_inds["cond_frame_outputs"]
659
+ | consolidated_frame_inds["non_cond_frame_outputs"]
660
+ )
661
+ input_frames_inds = set()
662
+ for point_inputs_per_frame in inference_state["point_inputs_per_obj"].values():
663
+ input_frames_inds.update(point_inputs_per_frame.keys())
664
+ for mask_inputs_per_frame in inference_state["mask_inputs_per_obj"].values():
665
+ input_frames_inds.update(mask_inputs_per_frame.keys())
666
+ assert all_consolidated_frame_inds == input_frames_inds
667
+
668
+ def propagate_in_video(
669
+ self,
670
+ inference_state,
671
+ start_frame_idx=None,
672
+ max_frame_num_to_track=None,
673
+ reverse=False,
674
+ verbose=True,
675
+ ):
676
+ """Propagate the input points across frames to track in the entire video."""
677
+ self.propagate_in_video_preflight(inference_state)
678
+
679
+ output_dict = inference_state["output_dict"]
680
+ consolidated_frame_inds = inference_state["consolidated_frame_inds"]
681
+ obj_ids = inference_state["obj_ids"]
682
+ num_frames = inference_state["num_frames"]
683
+ batch_size = self._get_obj_num(inference_state)
684
+ if len(output_dict["cond_frame_outputs"]) == 0:
685
+ raise RuntimeError("No points are provided; please add points first")
686
+ clear_non_cond_mem = self.clear_non_cond_mem_around_input and (
687
+ self.clear_non_cond_mem_for_multi_obj or batch_size <= 1
688
+ )
689
+
690
+ # set start index, end index, and processing order
691
+ if start_frame_idx is None:
692
+ # default: start from the earliest frame with input points
693
+ start_frame_idx = min(output_dict["cond_frame_outputs"])
694
+ if max_frame_num_to_track is None:
695
+ # default: track all the frames in the video
696
+ max_frame_num_to_track = num_frames
697
+ if reverse:
698
+ end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0)
699
+ if start_frame_idx > 0:
700
+ processing_order = range(start_frame_idx, end_frame_idx - 1, -1)
701
+ else:
702
+ processing_order = [] # skip reverse tracking if starting from frame 0
703
+ else:
704
+ end_frame_idx = min(
705
+ start_frame_idx + max_frame_num_to_track, num_frames - 1
706
+ )
707
+ processing_order = range(start_frame_idx, end_frame_idx + 1)
708
+
709
+ for frame_idx in tqdm(processing_order, desc="propagate in video", disable=(not verbose)):
710
+ # We skip those frames already in consolidated outputs (these are frames
711
+ # that received input clicks or mask). Note that we cannot directly run
712
+ # batched forward on them via `_run_single_frame_inference` because the
713
+ # number of clicks on each object might be different.
714
+ if frame_idx in consolidated_frame_inds["cond_frame_outputs"]:
715
+ storage_key = "cond_frame_outputs"
716
+ current_out = output_dict[storage_key][frame_idx]
717
+ pred_masks = current_out["pred_masks"]
718
+ if clear_non_cond_mem:
719
+ # clear non-conditioning memory of the surrounding frames
720
+ self._clear_non_cond_mem_around_input(inference_state, frame_idx)
721
+ elif frame_idx in consolidated_frame_inds["non_cond_frame_outputs"]:
722
+ storage_key = "non_cond_frame_outputs"
723
+ current_out = output_dict[storage_key][frame_idx]
724
+ pred_masks = current_out["pred_masks"]
725
+ else:
726
+ storage_key = "non_cond_frame_outputs"
727
+ current_out, pred_masks = self._run_single_frame_inference(
728
+ inference_state=inference_state,
729
+ output_dict=output_dict,
730
+ frame_idx=frame_idx,
731
+ batch_size=batch_size,
732
+ is_init_cond_frame=False,
733
+ point_inputs=None,
734
+ mask_inputs=None,
735
+ reverse=reverse,
736
+ run_mem_encoder=True,
737
+ )
738
+ output_dict[storage_key][frame_idx] = current_out
739
+ # Create slices of per-object outputs for subsequent interaction with each
740
+ # individual object after tracking.
741
+ self._add_output_per_object(
742
+ inference_state, frame_idx, current_out, storage_key
743
+ )
744
+ inference_state["frames_already_tracked"][frame_idx] = {"reverse": reverse}
745
+
746
+ # Resize the output mask to the original video resolution (we directly use
747
+ # the mask scores on GPU for output to avoid any CPU conversion in between)
748
+ _, video_res_masks = self._get_orig_video_res_output(
749
+ inference_state, pred_masks
750
+ )
751
+ yield frame_idx, obj_ids, video_res_masks
752
+
753
+ def _add_output_per_object(
754
+ self, inference_state, frame_idx, current_out, storage_key
755
+ ):
756
+ """
757
+ Split a multi-object output into per-object output slices and add them into
758
+ `output_dict_per_obj`. The resulting slices share the same tensor storage.
759
+ """
760
+ maskmem_features = current_out["maskmem_features"]
761
+ assert maskmem_features is None or isinstance(maskmem_features, torch.Tensor)
762
+
763
+ maskmem_pos_enc = current_out["maskmem_pos_enc"]
764
+ assert maskmem_pos_enc is None or isinstance(maskmem_pos_enc, list)
765
+
766
+ output_dict_per_obj = inference_state["output_dict_per_obj"]
767
+ for obj_idx, obj_output_dict in output_dict_per_obj.items():
768
+ obj_slice = slice(obj_idx, obj_idx + 1)
769
+ obj_out = {
770
+ "maskmem_features": None,
771
+ "maskmem_pos_enc": None,
772
+ "pred_masks": current_out["pred_masks"][obj_slice],
773
+ "obj_ptr": current_out["obj_ptr"][obj_slice],
774
+ }
775
+ if maskmem_features is not None:
776
+ obj_out["maskmem_features"] = maskmem_features[obj_slice]
777
+ if maskmem_pos_enc is not None:
778
+ obj_out["maskmem_pos_enc"] = [x[obj_slice] for x in maskmem_pos_enc]
779
+ obj_output_dict[storage_key][frame_idx] = obj_out
780
+
781
+ @torch.inference_mode()
782
+ def reset_state(self, inference_state):
783
+ """Remove all input points or mask in all frames throughout the video."""
784
+ self._reset_tracking_results(inference_state)
785
+ # Remove all object ids
786
+ inference_state["obj_id_to_idx"].clear()
787
+ inference_state["obj_idx_to_id"].clear()
788
+ inference_state["obj_ids"].clear()
789
+ inference_state["point_inputs_per_obj"].clear()
790
+ inference_state["mask_inputs_per_obj"].clear()
791
+ inference_state["output_dict_per_obj"].clear()
792
+ inference_state["temp_output_dict_per_obj"].clear()
793
+
794
+ def _reset_tracking_results(self, inference_state):
795
+ """Reset all tracking inputs and results across the videos."""
796
+ for v in inference_state["point_inputs_per_obj"].values():
797
+ v.clear()
798
+ for v in inference_state["mask_inputs_per_obj"].values():
799
+ v.clear()
800
+ for v in inference_state["output_dict_per_obj"].values():
801
+ v["cond_frame_outputs"].clear()
802
+ v["non_cond_frame_outputs"].clear()
803
+ for v in inference_state["temp_output_dict_per_obj"].values():
804
+ v["cond_frame_outputs"].clear()
805
+ v["non_cond_frame_outputs"].clear()
806
+ inference_state["output_dict"]["cond_frame_outputs"].clear()
807
+ inference_state["output_dict"]["non_cond_frame_outputs"].clear()
808
+ inference_state["consolidated_frame_inds"]["cond_frame_outputs"].clear()
809
+ inference_state["consolidated_frame_inds"]["non_cond_frame_outputs"].clear()
810
+ inference_state["tracking_has_started"] = False
811
+ inference_state["frames_already_tracked"].clear()
812
+
813
+ def _get_image_feature(self, inference_state, frame_idx, batch_size):
814
+ """Compute the image features on a given frame."""
815
+ # Look up in the cache first
816
+ image, backbone_out = inference_state["cached_features"].get(
817
+ frame_idx, (None, None)
818
+ )
819
+ if backbone_out is None:
820
+ # Cache miss -- we will run inference on a single image
821
+ device = inference_state["device"]
822
+ image = inference_state["images"][frame_idx].to(device).float().unsqueeze(0)
823
+ backbone_out = self.forward_image(image)
824
+ # Cache the most recent frame's feature (for repeated interactions with
825
+ # a frame; we can use an LRU cache for more frames in the future).
826
+ inference_state["cached_features"] = {frame_idx: (image, backbone_out)}
827
+
828
+ # expand the features to have the same dimension as the number of objects
829
+ expanded_image = image.expand(batch_size, -1, -1, -1)
830
+ expanded_backbone_out = {
831
+ "backbone_fpn": backbone_out["backbone_fpn"].copy(),
832
+ "vision_pos_enc": backbone_out["vision_pos_enc"].copy(),
833
+ }
834
+ for i, feat in enumerate(expanded_backbone_out["backbone_fpn"]):
835
+ expanded_backbone_out["backbone_fpn"][i] = feat.expand(
836
+ batch_size, -1, -1, -1
837
+ )
838
+ for i, pos in enumerate(expanded_backbone_out["vision_pos_enc"]):
839
+ pos = pos.expand(batch_size, -1, -1, -1)
840
+ expanded_backbone_out["vision_pos_enc"][i] = pos
841
+
842
+ features = self._prepare_backbone_features(expanded_backbone_out)
843
+ features = (expanded_image,) + features
844
+ return features
845
+
846
+ def _run_single_frame_inference(
847
+ self,
848
+ inference_state,
849
+ output_dict,
850
+ frame_idx,
851
+ batch_size,
852
+ is_init_cond_frame,
853
+ point_inputs,
854
+ mask_inputs,
855
+ reverse,
856
+ run_mem_encoder,
857
+ prev_sam_mask_logits=None,
858
+ ):
859
+ """Run tracking on a single frame based on current inputs and previous memory."""
860
+ # Retrieve correct image features
861
+ (
862
+ _,
863
+ _,
864
+ current_vision_feats,
865
+ current_vision_pos_embeds,
866
+ feat_sizes,
867
+ ) = self._get_image_feature(inference_state, frame_idx, batch_size)
868
+
869
+ # point and mask should not appear as input simultaneously on the same frame
870
+ assert point_inputs is None or mask_inputs is None
871
+ current_out = self.track_step(
872
+ frame_idx=frame_idx,
873
+ is_init_cond_frame=is_init_cond_frame,
874
+ current_vision_feats=current_vision_feats,
875
+ current_vision_pos_embeds=current_vision_pos_embeds,
876
+ feat_sizes=feat_sizes,
877
+ point_inputs=point_inputs,
878
+ mask_inputs=mask_inputs,
879
+ output_dict=output_dict,
880
+ num_frames=inference_state["num_frames"],
881
+ track_in_reverse=reverse,
882
+ run_mem_encoder=run_mem_encoder,
883
+ prev_sam_mask_logits=prev_sam_mask_logits,
884
+ )
885
+
886
+ # optionally offload the output to CPU memory to save GPU space
887
+ storage_device = inference_state["storage_device"]
888
+ maskmem_features = current_out["maskmem_features"]
889
+ if maskmem_features is not None:
890
+ maskmem_features = maskmem_features.to(torch.bfloat16)
891
+ maskmem_features = maskmem_features.to(storage_device, non_blocking=True)
892
+ pred_masks_gpu = current_out["pred_masks"]
893
+ # potentially fill holes in the predicted masks
894
+ if self.fill_hole_area > 0:
895
+ pred_masks_gpu = fill_holes_in_mask_scores(
896
+ pred_masks_gpu, self.fill_hole_area
897
+ )
898
+ pred_masks = pred_masks_gpu.to(storage_device, non_blocking=True)
899
+ # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
900
+ maskmem_pos_enc = self._get_maskmem_pos_enc(inference_state, current_out)
901
+ # object pointer is a small tensor, so we always keep it on GPU memory for fast access
902
+ obj_ptr = current_out["obj_ptr"]
903
+ # make a compact version of this frame's output to reduce the state size
904
+ compact_current_out = {
905
+ "maskmem_features": maskmem_features,
906
+ "maskmem_pos_enc": maskmem_pos_enc,
907
+ "pred_masks": pred_masks,
908
+ "obj_ptr": obj_ptr,
909
+ }
910
+ return compact_current_out, pred_masks_gpu
911
+
912
+ def _run_memory_encoder(
913
+ self, inference_state, frame_idx, batch_size, high_res_masks, is_mask_from_pts
914
+ ):
915
+ """
916
+ Run the memory encoder on `high_res_masks`. This is usually after applying
917
+ non-overlapping constraints to object scores. Since their scores changed, their
918
+ memory also needs to be computed again with the memory encoder.
919
+ """
920
+ # Retrieve correct image features
921
+ _, _, current_vision_feats, _, feat_sizes = self._get_image_feature(
922
+ inference_state, frame_idx, batch_size
923
+ )
924
+ maskmem_features, maskmem_pos_enc = self._encode_new_memory(
925
+ current_vision_feats=current_vision_feats,
926
+ feat_sizes=feat_sizes,
927
+ pred_masks_high_res=high_res_masks,
928
+ is_mask_from_pts=is_mask_from_pts,
929
+ )
930
+
931
+ # optionally offload the output to CPU memory to save GPU space
932
+ storage_device = inference_state["storage_device"]
933
+ maskmem_features = maskmem_features.to(torch.bfloat16)
934
+ maskmem_features = maskmem_features.to(storage_device, non_blocking=True)
935
+ # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
936
+ maskmem_pos_enc = self._get_maskmem_pos_enc(
937
+ inference_state, {"maskmem_pos_enc": maskmem_pos_enc}
938
+ )
939
+ return maskmem_features, maskmem_pos_enc
940
+
941
+ def _get_maskmem_pos_enc(self, inference_state, current_out):
942
+ """
943
+ `maskmem_pos_enc` is the same across frames and objects, so we cache it as
944
+ a constant in the inference session to reduce session storage size.
945
+ """
946
+ model_constants = inference_state["constants"]
947
+ # "out_maskmem_pos_enc" should be either a list of tensors or None
948
+ out_maskmem_pos_enc = current_out["maskmem_pos_enc"]
949
+ if out_maskmem_pos_enc is not None:
950
+ if "maskmem_pos_enc" not in model_constants:
951
+ assert isinstance(out_maskmem_pos_enc, list)
952
+ # only take the slice for one object, since it's same across objects
953
+ maskmem_pos_enc = [x[0:1].clone() for x in out_maskmem_pos_enc]
954
+ model_constants["maskmem_pos_enc"] = maskmem_pos_enc
955
+ else:
956
+ maskmem_pos_enc = model_constants["maskmem_pos_enc"]
957
+ # expand the cached maskmem_pos_enc to the actual batch size
958
+ batch_size = out_maskmem_pos_enc[0].size(0)
959
+ expanded_maskmem_pos_enc = [
960
+ x.expand(batch_size, -1, -1, -1) for x in maskmem_pos_enc
961
+ ]
962
+ else:
963
+ expanded_maskmem_pos_enc = None
964
+ return expanded_maskmem_pos_enc
965
+
966
+ def _clear_non_cond_mem_around_input(self, inference_state, frame_idx):
967
+ """
968
+ Remove the non-conditioning memory around the input frame. When users provide
969
+ correction clicks, the surrounding frames' non-conditioning memories can still
970
+ contain outdated object appearance information and could confuse the model.
971
+
972
+ This method clears those non-conditioning memories surrounding the interacted
973
+ frame to avoid giving the model both old and new information about the object.
974
+ """
975
+ r = self.memory_temporal_stride_for_eval
976
+ frame_idx_begin = frame_idx - r * self.num_maskmem
977
+ frame_idx_end = frame_idx + r * self.num_maskmem
978
+ output_dict = inference_state["output_dict"]
979
+ non_cond_frame_outputs = output_dict["non_cond_frame_outputs"]
980
+ for t in range(frame_idx_begin, frame_idx_end + 1):
981
+ non_cond_frame_outputs.pop(t, None)
982
+ for obj_output_dict in inference_state["output_dict_per_obj"].values():
983
+ obj_output_dict["non_cond_frame_outputs"].pop(t, None)
segment-anything-2/sam2/utils/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
segment-anything-2/sam2/utils/amg.py ADDED
@@ -0,0 +1,348 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import math
8
+ from copy import deepcopy
9
+ from itertools import product
10
+ from typing import Any, Dict, Generator, ItemsView, List, Tuple
11
+
12
+ import numpy as np
13
+ import torch
14
+
15
+ # Very lightly adapted from https://github.com/facebookresearch/segment-anything/blob/main/segment_anything/utils/amg.py
16
+
17
+
18
+ class MaskData:
19
+ """
20
+ A structure for storing masks and their related data in batched format.
21
+ Implements basic filtering and concatenation.
22
+ """
23
+
24
+ def __init__(self, **kwargs) -> None:
25
+ for v in kwargs.values():
26
+ assert isinstance(
27
+ v, (list, np.ndarray, torch.Tensor)
28
+ ), "MaskData only supports list, numpy arrays, and torch tensors."
29
+ self._stats = dict(**kwargs)
30
+
31
+ def __setitem__(self, key: str, item: Any) -> None:
32
+ assert isinstance(
33
+ item, (list, np.ndarray, torch.Tensor)
34
+ ), "MaskData only supports list, numpy arrays, and torch tensors."
35
+ self._stats[key] = item
36
+
37
+ def __delitem__(self, key: str) -> None:
38
+ del self._stats[key]
39
+
40
+ def __getitem__(self, key: str) -> Any:
41
+ return self._stats[key]
42
+
43
+ def items(self) -> ItemsView[str, Any]:
44
+ return self._stats.items()
45
+
46
+ def filter(self, keep: torch.Tensor) -> None:
47
+ for k, v in self._stats.items():
48
+ if v is None:
49
+ self._stats[k] = None
50
+ elif isinstance(v, torch.Tensor):
51
+ self._stats[k] = v[torch.as_tensor(keep, device=v.device)]
52
+ elif isinstance(v, np.ndarray):
53
+ self._stats[k] = v[keep.detach().cpu().numpy()]
54
+ elif isinstance(v, list) and keep.dtype == torch.bool:
55
+ self._stats[k] = [a for i, a in enumerate(v) if keep[i]]
56
+ elif isinstance(v, list):
57
+ self._stats[k] = [v[i] for i in keep]
58
+ else:
59
+ raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
60
+
61
+ def cat(self, new_stats: "MaskData") -> None:
62
+ for k, v in new_stats.items():
63
+ if k not in self._stats or self._stats[k] is None:
64
+ self._stats[k] = deepcopy(v)
65
+ elif isinstance(v, torch.Tensor):
66
+ self._stats[k] = torch.cat([self._stats[k], v], dim=0)
67
+ elif isinstance(v, np.ndarray):
68
+ self._stats[k] = np.concatenate([self._stats[k], v], axis=0)
69
+ elif isinstance(v, list):
70
+ self._stats[k] = self._stats[k] + deepcopy(v)
71
+ else:
72
+ raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
73
+
74
+ def to_numpy(self) -> None:
75
+ for k, v in self._stats.items():
76
+ if isinstance(v, torch.Tensor):
77
+ self._stats[k] = v.float().detach().cpu().numpy()
78
+
79
+
80
+ def is_box_near_crop_edge(
81
+ boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0
82
+ ) -> torch.Tensor:
83
+ """Filter masks at the edge of a crop, but not at the edge of the original image."""
84
+ crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
85
+ orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
86
+ boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
87
+ near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
88
+ near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
89
+ near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
90
+ return torch.any(near_crop_edge, dim=1)
91
+
92
+
93
+ def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:
94
+ box_xywh = deepcopy(box_xyxy)
95
+ box_xywh[2] = box_xywh[2] - box_xywh[0]
96
+ box_xywh[3] = box_xywh[3] - box_xywh[1]
97
+ return box_xywh
98
+
99
+
100
+ def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
101
+ assert len(args) > 0 and all(
102
+ len(a) == len(args[0]) for a in args
103
+ ), "Batched iteration must have inputs of all the same size."
104
+ n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
105
+ for b in range(n_batches):
106
+ yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
107
+
108
+
109
+ def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
110
+ """
111
+ Encodes masks to an uncompressed RLE, in the format expected by
112
+ pycocotools.
113
+ """
114
+ # Put in fortran order and flatten h,w
115
+ b, h, w = tensor.shape
116
+ tensor = tensor.permute(0, 2, 1).flatten(1)
117
+
118
+ # Compute change indices
119
+ diff = tensor[:, 1:] ^ tensor[:, :-1]
120
+ change_indices = diff.nonzero()
121
+
122
+ # Encode run length
123
+ out = []
124
+ for i in range(b):
125
+ cur_idxs = change_indices[change_indices[:, 0] == i, 1]
126
+ cur_idxs = torch.cat(
127
+ [
128
+ torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),
129
+ cur_idxs + 1,
130
+ torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),
131
+ ]
132
+ )
133
+ btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
134
+ counts = [] if tensor[i, 0] == 0 else [0]
135
+ counts.extend(btw_idxs.detach().cpu().tolist())
136
+ out.append({"size": [h, w], "counts": counts})
137
+ return out
138
+
139
+
140
+ def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
141
+ """Compute a binary mask from an uncompressed RLE."""
142
+ h, w = rle["size"]
143
+ mask = np.empty(h * w, dtype=bool)
144
+ idx = 0
145
+ parity = False
146
+ for count in rle["counts"]:
147
+ mask[idx : idx + count] = parity
148
+ idx += count
149
+ parity ^= True
150
+ mask = mask.reshape(w, h)
151
+ return mask.transpose() # Put in C order
152
+
153
+
154
+ def area_from_rle(rle: Dict[str, Any]) -> int:
155
+ return sum(rle["counts"][1::2])
156
+
157
+
158
+ def calculate_stability_score(
159
+ masks: torch.Tensor, mask_threshold: float, threshold_offset: float
160
+ ) -> torch.Tensor:
161
+ """
162
+ Computes the stability score for a batch of masks. The stability
163
+ score is the IoU between the binary masks obtained by thresholding
164
+ the predicted mask logits at high and low values.
165
+ """
166
+ # One mask is always contained inside the other.
167
+ # Save memory by preventing unnecessary cast to torch.int64
168
+ intersections = (
169
+ (masks > (mask_threshold + threshold_offset))
170
+ .sum(-1, dtype=torch.int16)
171
+ .sum(-1, dtype=torch.int32)
172
+ )
173
+ unions = (
174
+ (masks > (mask_threshold - threshold_offset))
175
+ .sum(-1, dtype=torch.int16)
176
+ .sum(-1, dtype=torch.int32)
177
+ )
178
+ return intersections / unions
179
+
180
+
181
+ def build_point_grid(n_per_side: int) -> np.ndarray:
182
+ """Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
183
+ offset = 1 / (2 * n_per_side)
184
+ points_one_side = np.linspace(offset, 1 - offset, n_per_side)
185
+ points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
186
+ points_y = np.tile(points_one_side[:, None], (1, n_per_side))
187
+ points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
188
+ return points
189
+
190
+
191
+ def build_all_layer_point_grids(
192
+ n_per_side: int, n_layers: int, scale_per_layer: int
193
+ ) -> List[np.ndarray]:
194
+ """Generates point grids for all crop layers."""
195
+ points_by_layer = []
196
+ for i in range(n_layers + 1):
197
+ n_points = int(n_per_side / (scale_per_layer**i))
198
+ points_by_layer.append(build_point_grid(n_points))
199
+ return points_by_layer
200
+
201
+
202
+ def generate_crop_boxes(
203
+ im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
204
+ ) -> Tuple[List[List[int]], List[int]]:
205
+ """
206
+ Generates a list of crop boxes of different sizes. Each layer
207
+ has (2**i)**2 boxes for the ith layer.
208
+ """
209
+ crop_boxes, layer_idxs = [], []
210
+ im_h, im_w = im_size
211
+ short_side = min(im_h, im_w)
212
+
213
+ # Original image
214
+ crop_boxes.append([0, 0, im_w, im_h])
215
+ layer_idxs.append(0)
216
+
217
+ def crop_len(orig_len, n_crops, overlap):
218
+ return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))
219
+
220
+ for i_layer in range(n_layers):
221
+ n_crops_per_side = 2 ** (i_layer + 1)
222
+ overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
223
+
224
+ crop_w = crop_len(im_w, n_crops_per_side, overlap)
225
+ crop_h = crop_len(im_h, n_crops_per_side, overlap)
226
+
227
+ crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
228
+ crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]
229
+
230
+ # Crops in XYWH format
231
+ for x0, y0 in product(crop_box_x0, crop_box_y0):
232
+ box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
233
+ crop_boxes.append(box)
234
+ layer_idxs.append(i_layer + 1)
235
+
236
+ return crop_boxes, layer_idxs
237
+
238
+
239
+ def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
240
+ x0, y0, _, _ = crop_box
241
+ offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
242
+ # Check if boxes has a channel dimension
243
+ if len(boxes.shape) == 3:
244
+ offset = offset.unsqueeze(1)
245
+ return boxes + offset
246
+
247
+
248
+ def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
249
+ x0, y0, _, _ = crop_box
250
+ offset = torch.tensor([[x0, y0]], device=points.device)
251
+ # Check if points has a channel dimension
252
+ if len(points.shape) == 3:
253
+ offset = offset.unsqueeze(1)
254
+ return points + offset
255
+
256
+
257
+ def uncrop_masks(
258
+ masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int
259
+ ) -> torch.Tensor:
260
+ x0, y0, x1, y1 = crop_box
261
+ if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
262
+ return masks
263
+ # Coordinate transform masks
264
+ pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
265
+ pad = (x0, pad_x - x0, y0, pad_y - y0)
266
+ return torch.nn.functional.pad(masks, pad, value=0)
267
+
268
+
269
+ def remove_small_regions(
270
+ mask: np.ndarray, area_thresh: float, mode: str
271
+ ) -> Tuple[np.ndarray, bool]:
272
+ """
273
+ Removes small disconnected regions and holes in a mask. Returns the
274
+ mask and an indicator of whether the mask has been modified.
275
+ """
276
+ import cv2 # type: ignore
277
+
278
+ assert mode in ["holes", "islands"]
279
+ correct_holes = mode == "holes"
280
+ working_mask = (correct_holes ^ mask).astype(np.uint8)
281
+ n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
282
+ sizes = stats[:, -1][1:] # Row 0 is background label
283
+ small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
284
+ if len(small_regions) == 0:
285
+ return mask, False
286
+ fill_labels = [0] + small_regions
287
+ if not correct_holes:
288
+ fill_labels = [i for i in range(n_labels) if i not in fill_labels]
289
+ # If every region is below threshold, keep largest
290
+ if len(fill_labels) == 0:
291
+ fill_labels = [int(np.argmax(sizes)) + 1]
292
+ mask = np.isin(regions, fill_labels)
293
+ return mask, True
294
+
295
+
296
+ def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:
297
+ from pycocotools import mask as mask_utils # type: ignore
298
+
299
+ h, w = uncompressed_rle["size"]
300
+ rle = mask_utils.frPyObjects(uncompressed_rle, h, w)
301
+ rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json
302
+ return rle
303
+
304
+
305
+ def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
306
+ """
307
+ Calculates boxes in XYXY format around masks. Return [0,0,0,0] for
308
+ an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
309
+ """
310
+ # torch.max below raises an error on empty inputs, just skip in this case
311
+ if torch.numel(masks) == 0:
312
+ return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
313
+
314
+ # Normalize shape to CxHxW
315
+ shape = masks.shape
316
+ h, w = shape[-2:]
317
+ if len(shape) > 2:
318
+ masks = masks.flatten(0, -3)
319
+ else:
320
+ masks = masks.unsqueeze(0)
321
+
322
+ # Get top and bottom edges
323
+ in_height, _ = torch.max(masks, dim=-1)
324
+ in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
325
+ bottom_edges, _ = torch.max(in_height_coords, dim=-1)
326
+ in_height_coords = in_height_coords + h * (~in_height)
327
+ top_edges, _ = torch.min(in_height_coords, dim=-1)
328
+
329
+ # Get left and right edges
330
+ in_width, _ = torch.max(masks, dim=-2)
331
+ in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
332
+ right_edges, _ = torch.max(in_width_coords, dim=-1)
333
+ in_width_coords = in_width_coords + w * (~in_width)
334
+ left_edges, _ = torch.min(in_width_coords, dim=-1)
335
+
336
+ # If the mask is empty the right edge will be to the left of the left edge.
337
+ # Replace these boxes with [0, 0, 0, 0]
338
+ empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
339
+ out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
340
+ out = out * (~empty_filter).unsqueeze(-1)
341
+
342
+ # Return to original shape
343
+ if len(shape) > 2:
344
+ out = out.reshape(*shape[:-2], 4)
345
+ else:
346
+ out = out[0]
347
+
348
+ return out
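A small self-contained sketch of the RLE helpers above (the 4x4 toy mask is illustrative):

import torch
from sam2.utils.amg import area_from_rle, mask_to_rle_pytorch, rle_to_mask

masks = torch.zeros(1, 4, 4, dtype=torch.bool)
masks[0, 1:3, 1:3] = True                 # a 2x2 foreground square
rles = mask_to_rle_pytorch(masks)         # one uncompressed RLE per mask
assert area_from_rle(rles[0]) == 4        # foreground area from the run lengths
recovered = rle_to_mask(rles[0])          # back to an (H, W) numpy bool mask
assert recovered.sum() == 4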
segment-anything-2/sam2/utils/misc.py ADDED
@@ -0,0 +1,297 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import os
8
+ import warnings
9
+ from threading import Thread
10
+
11
+ import numpy as np
12
+ import torch
13
+ from PIL import Image
14
+ from tqdm import tqdm
15
+
16
+
17
+ def get_sdpa_settings():
18
+ if torch.cuda.is_available():
19
+ old_gpu = torch.cuda.get_device_properties(0).major < 7
20
+ # only use Flash Attention on Ampere (8.0) or newer GPUs
21
+ use_flash_attn = torch.cuda.get_device_properties(0).major >= 8
22
+ if not use_flash_attn:
23
+ warnings.warn(
24
+ "Flash Attention is disabled as it requires a GPU with Ampere (8.0) CUDA capability.",
25
+ category=UserWarning,
26
+ stacklevel=2,
27
+ )
28
+ # keep math kernel for PyTorch versions before 2.2 (Flash Attention v2 is only
29
+ # available on PyTorch 2.2+, while Flash Attention v1 cannot handle all cases)
30
+ pytorch_version = tuple(int(v) for v in torch.__version__.split(".")[:2])
31
+ if pytorch_version < (2, 2):
32
+ warnings.warn(
33
+ f"You are using PyTorch {torch.__version__} without Flash Attention v2 support. "
34
+ "Consider upgrading to PyTorch 2.2+ for Flash Attention v2 (which could be faster).",
35
+ category=UserWarning,
36
+ stacklevel=2,
37
+ )
38
+ math_kernel_on = pytorch_version < (2, 2) or not use_flash_attn
39
+ else:
40
+ old_gpu = True
41
+ use_flash_attn = False
42
+ math_kernel_on = True
43
+
44
+ return old_gpu, use_flash_attn, math_kernel_on
45
+
46
+
47
+ def get_connected_components(mask):
48
+ """
49
+ Get the connected components (8-connectivity) of binary masks of shape (N, 1, H, W).
50
+
51
+ Inputs:
52
+ - mask: A binary mask tensor of shape (N, 1, H, W), where 1 is foreground and 0 is
53
+ background.
54
+
55
+ Outputs:
56
+ - labels: A tensor of shape (N, 1, H, W) containing the connected component labels
57
+ for foreground pixels and 0 for background pixels.
58
+ - counts: A tensor of shape (N, 1, H, W) containing the area of the connected
59
+ components for foreground pixels and 0 for background pixels.
60
+ """
61
+ from sam2 import _C
62
+
63
+ return _C.get_connected_componnets(mask.to(torch.uint8).contiguous())
64
+
65
+
66
+ def mask_to_box(masks: torch.Tensor):
67
+ """
68
+ compute bounding box given an input mask
69
+
70
+ Inputs:
71
+ - masks: [B, 1, H, W] masks, dtype=torch.Tensor
72
+
73
+ Returns:
74
+ - box_coords: [B, 1, 4], contains (x, y) coordinates of top left and bottom right box corners, dtype=torch.Tensor
75
+ """
76
+ B, _, h, w = masks.shape
77
+ device = masks.device
78
+ xs = torch.arange(w, device=device, dtype=torch.int32)
79
+ ys = torch.arange(h, device=device, dtype=torch.int32)
80
+ grid_xs, grid_ys = torch.meshgrid(xs, ys, indexing="xy")
81
+ grid_xs = grid_xs[None, None, ...].expand(B, 1, h, w)
82
+ grid_ys = grid_ys[None, None, ...].expand(B, 1, h, w)
83
+ min_xs, _ = torch.min(torch.where(masks, grid_xs, w).flatten(-2), dim=-1)
84
+ max_xs, _ = torch.max(torch.where(masks, grid_xs, -1).flatten(-2), dim=-1)
85
+ min_ys, _ = torch.min(torch.where(masks, grid_ys, h).flatten(-2), dim=-1)
86
+ max_ys, _ = torch.max(torch.where(masks, grid_ys, -1).flatten(-2), dim=-1)
87
+ bbox_coords = torch.stack((min_xs, min_ys, max_xs, max_ys), dim=-1)
88
+
89
+ return bbox_coords
90
+
91
+
92
+ def _load_img_as_tensor(img_np_or_path, image_size, load=True):
93
+ if load:
94
+ img_pil = Image.open(img_np_or_path)
95
+ img_np = np.array(img_pil.convert("RGB").resize((image_size, image_size)))
96
+ else:
97
+ img_np = img_np_or_path
98
+ img_pil = Image.fromarray(img_np)
99
+ img_np = np.array(img_pil.convert("RGB").resize((image_size, image_size)))
100
+ if img_np.dtype == np.uint8: # np.uint8 is expected for JPEG images
101
+ img_np = img_np / 255.0
102
+ img = torch.from_numpy(img_np).permute(2, 0, 1)
103
+ video_width, video_height = img_pil.size # the original video size
104
+ return img, video_height, video_width
105
+
106
+
107
+ class AsyncVideoFrameLoader:
108
+ """
109
+ A list of video frames to be loaded asynchronously without blocking session start.
110
+ """
111
+
112
+ def __init__(
113
+ self,
114
+ img_paths,
115
+ image_size,
116
+ offload_video_to_cpu,
117
+ img_mean,
118
+ img_std,
119
+ compute_device,
120
+ ):
121
+ self.img_paths = img_paths
122
+ self.image_size = image_size
123
+ self.offload_video_to_cpu = offload_video_to_cpu
124
+ self.img_mean = img_mean
125
+ self.img_std = img_std
126
+ # items in `self.images` will be loaded asynchronously
127
+ self.images = [None] * len(img_paths)
128
+ # catch and raise any exceptions in the async loading thread
129
+ self.exception = None
130
+ # video_height and video_width will be filled when loading the first image
131
+ self.video_height = None
132
+ self.video_width = None
133
+ self.compute_device = compute_device
134
+
135
+ # load the first frame to fill video_height and video_width and also
136
+ # to cache it (since it's most likely where the user will click)
137
+ self.__getitem__(0)
138
+
139
+ # load the rest of frames asynchronously without blocking the session start
140
+ def _load_frames():
141
+ try:
142
+ for n in tqdm(range(len(self.images)), desc="frame loading (JPEG)"):
143
+ self.__getitem__(n)
144
+ except Exception as e:
145
+ self.exception = e
146
+
147
+ self.thread = Thread(target=_load_frames, daemon=True)
148
+ self.thread.start()
149
+
150
+ def __getitem__(self, index):
151
+ if self.exception is not None:
152
+ raise RuntimeError("Failure in frame loading thread") from self.exception
153
+
154
+ img = self.images[index]
155
+ if img is not None:
156
+ return img
157
+
158
+ img, video_height, video_width = _load_img_as_tensor(
159
+ self.img_paths[index], self.image_size
160
+ )
161
+ self.video_height = video_height
162
+ self.video_width = video_width
163
+ # normalize by mean and std
164
+ img -= self.img_mean
165
+ img /= self.img_std
166
+ if not self.offload_video_to_cpu:
167
+ img = img.to(self.compute_device, non_blocking=True)
168
+ # self.images[index] = img
169
+ return img
170
+
171
+ def __len__(self):
172
+ return len(self.images)
173
+
174
+
175
+ def load_video_frames(
176
+ video_path,
177
+ image_size,
178
+ offload_video_to_cpu,
179
+ img_mean=(0.485, 0.456, 0.406),
180
+ img_std=(0.229, 0.224, 0.225),
181
+ async_loading_frames=False,
182
+ compute_device=torch.device("cuda"),
183
+ image_inputs=None,
184
+ verbose=True,
185
+ ):
186
+ """
187
+ Load the video frames from a directory of JPEG files ("<frame_index>.jpg" format).
188
+
189
+ The frames are resized to image_size x image_size and are loaded to GPU if
190
+ `offload_video_to_cpu` is `False` and to CPU if `offload_video_to_cpu` is `True`.
191
+
192
+ You can load frames asynchronously by setting `async_loading_frames` to `True`.
193
+ """
194
+ if image_inputs is None:
195
+ frame_names = video_path
196
+ frame_names.sort(key=lambda p: int(os.path.splitext(p)[0]))
197
+ num_frames = len(frame_names)
198
+ img_paths = frame_names
199
+ else:
200
+ num_frames = len(image_inputs)
201
+ img_paths = None
202
+ img_mean = torch.tensor(img_mean, dtype=torch.float32)[:, None, None]
203
+ img_std = torch.tensor(img_std, dtype=torch.float32)[:, None, None]
204
+
205
+ if async_loading_frames:
206
+ lazy_images = AsyncVideoFrameLoader(
207
+ img_paths,
208
+ image_size,
209
+ offload_video_to_cpu,
210
+ img_mean,
211
+ img_std,
212
+ compute_device,
213
+ )
214
+ return lazy_images, lazy_images.video_height, lazy_images.video_width
215
+ images = torch.zeros(num_frames, 3, image_size, image_size, dtype=torch.float32)
216
+ if image_inputs is None:
217
+ if verbose:
218
+ for n, img_path in enumerate(tqdm(img_paths, desc="frame loading (JPEG)")):
219
+ images[n], video_height, video_width = _load_img_as_tensor(img_path, image_size, load=True)
220
+ else:
221
+ for n, img_path in enumerate(img_paths):
222
+ images[n], video_height, video_width = _load_img_as_tensor(img_path, image_size, load=True)
223
+ else:
224
+ if verbose:
225
+ for n, img_in in enumerate(tqdm(image_inputs, desc="frame loading (JPEG)")):
226
+ # skip if image_in is already a tensor
227
+ if isinstance(img_in, torch.Tensor):
228
+ # resize the image if needed
229
+ if img_in.shape[-2:] != (image_size, image_size):
230
+ img_in = torch.nn.functional.interpolate(
231
+ img_in.unsqueeze(0), size=(image_size, image_size), mode="bilinear", align_corners=False
232
+ ).squeeze(0)
233
+ images[n] = img_in
234
+ video_height, video_width = img_in.shape[-2:]
235
+ continue
236
+ images[n], video_height, video_width = _load_img_as_tensor(img_in, image_size, load=False)
237
+ else:
238
+ for n, img_in in enumerate(image_inputs):
239
+ # skip if image_in is already a tensor
240
+ if isinstance(img_in, torch.Tensor):
241
+ # resize the image if needed
242
+ if img_in.shape[-2:] != (image_size, image_size):
243
+ img_in = torch.nn.functional.interpolate(
244
+ img_in.unsqueeze(0), size=(image_size, image_size), mode="bilinear", align_corners=False
245
+ ).squeeze(0)
246
+ images[n] = img_in
247
+ video_height, video_width = img_in.shape[-2:]
248
+ continue
249
+ images[n], video_height, video_width = _load_img_as_tensor(img_in, image_size, load=False)
250
+ if not offload_video_to_cpu:
251
+ images = images.to(compute_device)
252
+ img_mean = img_mean.to(compute_device)
253
+ img_std = img_std.to(compute_device)
254
+ # normalize by mean and std
255
+ images -= img_mean
256
+ images /= img_std
257
+ return images, video_height, video_width
258
+
259
+
260
+ def fill_holes_in_mask_scores(mask, max_area):
261
+ """
262
+ A post processor to fill small holes in mask scores with area under `max_area`.
263
+ """
264
+ # Holes are those connected components in background with area <= max_area
265
+ # (background regions are those with mask scores <= 0)
266
+ assert max_area > 0, "max_area must be positive"
267
+
268
+ input_mask = mask
269
+ try:
270
+ labels, areas = get_connected_components(mask <= 0)
271
+ is_hole = (labels > 0) & (areas <= max_area)
272
+ # We fill holes with a small positive mask score (0.1) to change them to foreground.
273
+ mask = torch.where(is_hole, 0.1, mask)
274
+ except Exception as e:
275
+ # Skip the post-processing step on removing small holes if the CUDA kernel fails
276
+ warnings.warn(
277
+ f"{e}\n\nSkipping the post-processing step due to the error above. You can "
278
+ "still use SAM 2 and it's OK to ignore the error above, although some post-processing "
279
+ "functionality may be limited (which doesn't affect the results in most cases; see "
280
+ "https://github.com/facebookresearch/segment-anything-2/blob/main/INSTALL.md).",
281
+ category=UserWarning,
282
+ stacklevel=2,
283
+ )
284
+ mask = input_mask
285
+
286
+ return mask
287
+
288
+
289
+ def concat_points(old_point_inputs, new_points, new_labels):
290
+ """Add new points and labels to previous point inputs (add at the end)."""
291
+ if old_point_inputs is None:
292
+ points, labels = new_points, new_labels
293
+ else:
294
+ points = torch.cat([old_point_inputs["point_coords"], new_points], dim=1)
295
+ labels = torch.cat([old_point_inputs["point_labels"], new_labels], dim=1)
296
+
297
+ return {"point_coords": points, "point_labels": labels}
segment-anything-2/sam2/utils/transforms.py ADDED
@@ -0,0 +1,118 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import warnings
8
+
9
+ import torch
10
+ import torch.nn as nn
11
+ import torch.nn.functional as F
12
+ from torchvision.transforms import Normalize, Resize, ToTensor
13
+
14
+
15
+ class SAM2Transforms(nn.Module):
16
+ def __init__(
17
+ self, resolution, mask_threshold, max_hole_area=0.0, max_sprinkle_area=0.0
18
+ ):
19
+ """
20
+ Transforms for SAM2.
21
+ """
22
+ super().__init__()
23
+ self.resolution = resolution
24
+ self.mask_threshold = mask_threshold
25
+ self.max_hole_area = max_hole_area
26
+ self.max_sprinkle_area = max_sprinkle_area
27
+ self.mean = [0.485, 0.456, 0.406]
28
+ self.std = [0.229, 0.224, 0.225]
29
+ self.to_tensor = ToTensor()
30
+ self.transforms = torch.jit.script(
31
+ nn.Sequential(
32
+ Resize((self.resolution, self.resolution)),
33
+ Normalize(self.mean, self.std),
34
+ )
35
+ )
36
+
37
+ def __call__(self, x):
38
+ x = self.to_tensor(x)
39
+ return self.transforms(x)
40
+
41
+ def forward_batch(self, img_list):
42
+ img_batch = [self.transforms(self.to_tensor(img)) for img in img_list]
43
+ img_batch = torch.stack(img_batch, dim=0)
44
+ return img_batch
45
+
46
+ def transform_coords(
47
+ self, coords: torch.Tensor, normalize=False, orig_hw=None
48
+ ) -> torch.Tensor:
49
+ """
50
+ Expects a torch tensor with length 2 in the last dimension. The coordinates can be in absolute image or normalized coordinates.
51
+ If the coords are in absolute image coordinates, normalize should be set to True and the original image size is required.
52
+
53
+ Returns
54
+ Coordinates scaled to the model's input resolution (i.e. in the range [0, self.resolution]), which is what the SAM2 model expects.
55
+ """
56
+ if normalize:
57
+ assert orig_hw is not None
58
+ h, w = orig_hw
59
+ coords = coords.clone()
60
+ coords[..., 0] = coords[..., 0] / w
61
+ coords[..., 1] = coords[..., 1] / h
62
+
63
+ coords = coords * self.resolution # unnormalize coords
64
+ return coords
65
+
66
+ def transform_boxes(
67
+ self, boxes: torch.Tensor, normalize=False, orig_hw=None
68
+ ) -> torch.Tensor:
69
+ """
70
+ Expects a tensor of shape Bx4. The coordinates can be in absolute image or normalized coordinates.
71
+ If the coords are in absolute image coordinates, normalize should be set to True and the original image size is required.
72
+ """
73
+ boxes = self.transform_coords(boxes.reshape(-1, 2, 2), normalize, orig_hw)
74
+ return boxes
75
+
76
+ def postprocess_masks(self, masks: torch.Tensor, orig_hw) -> torch.Tensor:
77
+ """
78
+ Perform PostProcessing on output masks.
79
+ """
80
+ from sam2.utils.misc import get_connected_components
81
+
82
+ masks = masks.float()
83
+ input_masks = masks
84
+ mask_flat = masks.flatten(0, 1).unsqueeze(1) # flatten as 1-channel image
85
+ try:
86
+ if self.max_hole_area > 0:
87
+ # Holes are those connected components in background with area <= self.max_hole_area
88
+ # (background regions are those with mask scores <= self.mask_threshold)
89
+ labels, areas = get_connected_components(
90
+ mask_flat <= self.mask_threshold
91
+ )
92
+ is_hole = (labels > 0) & (areas <= self.max_hole_area)
93
+ is_hole = is_hole.reshape_as(masks)
94
+ # We fill holes with a small positive mask score (10.0) to change them to foreground.
95
+ masks = torch.where(is_hole, self.mask_threshold + 10.0, masks)
96
+
97
+ if self.max_sprinkle_area > 0:
98
+ labels, areas = get_connected_components(
99
+ mask_flat > self.mask_threshold
100
+ )
101
+ is_hole = (labels > 0) & (areas <= self.max_sprinkle_area)
102
+ is_hole = is_hole.reshape_as(masks)
103
+ # We fill holes with negative mask score (-10.0) to change them to background.
104
+ masks = torch.where(is_hole, self.mask_threshold - 10.0, masks)
105
+ except Exception as e:
106
+ # Skip the post-processing step if the CUDA kernel fails
107
+ warnings.warn(
108
+ f"{e}\n\nSkipping the post-processing step due to the error above. You can "
109
+ "still use SAM 2 and it's OK to ignore the error above, although some post-processing "
110
+ "functionality may be limited (which doesn't affect the results in most cases; see "
111
+ "https://github.com/facebookresearch/segment-anything-2/blob/main/INSTALL.md).",
112
+ category=UserWarning,
113
+ stacklevel=2,
114
+ )
115
+ masks = input_masks
116
+
117
+ masks = F.interpolate(masks, orig_hw, mode="bilinear", align_corners=False)
118
+ return masks
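A short sketch of the coordinate transform above; the image size and click location are illustrative. A click at pixel (x=250, y=125) on a 500x1000 (H, W) image is normalized to [0, 1] and then scaled onto the model's 1024x1024 input grid.

import torch
from sam2.utils.transforms import SAM2Transforms

transforms = SAM2Transforms(resolution=1024, mask_threshold=0.0)
coords = torch.tensor([[[250.0, 125.0]]])  # (x, y) in absolute image pixels
model_coords = transforms.transform_coords(coords, normalize=True, orig_hw=(500, 1000))
assert model_coords.tolist() == [[[256.0, 256.0]]]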
segment-anything-2/sam2_configs/__init__.py ADDED
@@ -0,0 +1,5 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
segment-anything-2/sam2_configs/sam2_hiera_b+.yaml ADDED
@@ -0,0 +1,113 @@
1
+ # @package _global_
2
+
3
+ # Model
4
+ model:
5
+ _target_: sam2.modeling.sam2_base.SAM2Base
6
+ image_encoder:
7
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
8
+ scalp: 1
9
+ trunk:
10
+ _target_: sam2.modeling.backbones.hieradet.Hiera
11
+ embed_dim: 112
12
+ num_heads: 2
13
+ neck:
14
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
15
+ position_encoding:
16
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
17
+ num_pos_feats: 256
18
+ normalize: true
19
+ scale: null
20
+ temperature: 10000
21
+ d_model: 256
22
+ backbone_channel_list: [896, 448, 224, 112]
23
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
24
+ fpn_interp_model: nearest
25
+
26
+ memory_attention:
27
+ _target_: sam2.modeling.memory_attention.MemoryAttention
28
+ d_model: 256
29
+ pos_enc_at_input: true
30
+ layer:
31
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
32
+ activation: relu
33
+ dim_feedforward: 2048
34
+ dropout: 0.1
35
+ pos_enc_at_attn: false
36
+ self_attention:
37
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
38
+ rope_theta: 10000.0
39
+ feat_sizes: [32, 32]
40
+ embedding_dim: 256
41
+ num_heads: 1
42
+ downsample_rate: 1
43
+ dropout: 0.1
44
+ d_model: 256
45
+ pos_enc_at_cross_attn_keys: true
46
+ pos_enc_at_cross_attn_queries: false
47
+ cross_attention:
48
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
49
+ rope_theta: 10000.0
50
+ feat_sizes: [32, 32]
51
+ rope_k_repeat: True
52
+ embedding_dim: 256
53
+ num_heads: 1
54
+ downsample_rate: 1
55
+ dropout: 0.1
56
+ kv_in_dim: 64
57
+ num_layers: 4
58
+
59
+ memory_encoder:
60
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
61
+ out_dim: 64
62
+ position_encoding:
63
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
64
+ num_pos_feats: 64
65
+ normalize: true
66
+ scale: null
67
+ temperature: 10000
68
+ mask_downsampler:
69
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
70
+ kernel_size: 3
71
+ stride: 2
72
+ padding: 1
73
+ fuser:
74
+ _target_: sam2.modeling.memory_encoder.Fuser
75
+ layer:
76
+ _target_: sam2.modeling.memory_encoder.CXBlock
77
+ dim: 256
78
+ kernel_size: 7
79
+ padding: 3
80
+ layer_scale_init_value: 1e-6
81
+ use_dwconv: True # depth-wise convs
82
+ num_layers: 2
83
+
84
+ num_maskmem: 7
85
+ image_size: 1024
86
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
87
+ sigmoid_scale_for_mem_enc: 20.0
88
+ sigmoid_bias_for_mem_enc: -10.0
89
+ use_mask_input_as_output_without_sam: true
90
+ # Memory
91
+ directly_add_no_mem_embed: true
92
+ # use high-resolution feature map in the SAM mask decoder
93
+ use_high_res_features_in_sam: true
94
+ # output 3 masks on the first click on initial conditioning frames
95
+ multimask_output_in_sam: true
96
+ # SAM heads
97
+ iou_prediction_use_sigmoid: True
98
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
99
+ use_obj_ptrs_in_encoder: true
100
+ add_tpos_enc_to_obj_ptrs: false
101
+ only_obj_ptrs_in_the_past_for_eval: true
102
+ # object occlusion prediction
103
+ pred_obj_scores: true
104
+ pred_obj_scores_mlp: true
105
+ fixed_no_obj_ptr: true
106
+ # multimask tracking settings
107
+ multimask_output_for_tracking: true
108
+ use_multimask_token_for_obj_ptr: true
109
+ multimask_min_pt_num: 0
110
+ multimask_max_pt_num: 1
111
+ use_mlp_for_obj_ptr_proj: true
112
+ # Compilation flag
113
+ compile_image_encoder: False
segment-anything-2/sam2_configs/sam2_hiera_l.yaml ADDED
@@ -0,0 +1,117 @@
1
+ # @package _global_
2
+
3
+ # Model
4
+ model:
5
+ _target_: sam2.modeling.sam2_base.SAM2Base
6
+ image_encoder:
7
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
8
+ scalp: 1
9
+ trunk:
10
+ _target_: sam2.modeling.backbones.hieradet.Hiera
11
+ embed_dim: 144
12
+ num_heads: 2
13
+ stages: [2, 6, 36, 4]
14
+ global_att_blocks: [23, 33, 43]
15
+ window_pos_embed_bkg_spatial_size: [7, 7]
16
+ window_spec: [8, 4, 16, 8]
17
+ neck:
18
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
19
+ position_encoding:
20
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
21
+ num_pos_feats: 256
22
+ normalize: true
23
+ scale: null
24
+ temperature: 10000
25
+ d_model: 256
26
+ backbone_channel_list: [1152, 576, 288, 144]
27
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
28
+ fpn_interp_model: nearest
29
+
30
+ memory_attention:
31
+ _target_: sam2.modeling.memory_attention.MemoryAttention
32
+ d_model: 256
33
+ pos_enc_at_input: true
34
+ layer:
35
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
36
+ activation: relu
37
+ dim_feedforward: 2048
38
+ dropout: 0.1
39
+ pos_enc_at_attn: false
40
+ self_attention:
41
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
42
+ rope_theta: 10000.0
43
+ feat_sizes: [32, 32]
44
+ embedding_dim: 256
45
+ num_heads: 1
46
+ downsample_rate: 1
47
+ dropout: 0.1
48
+ d_model: 256
49
+ pos_enc_at_cross_attn_keys: true
50
+ pos_enc_at_cross_attn_queries: false
51
+ cross_attention:
52
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
53
+ rope_theta: 10000.0
54
+ feat_sizes: [32, 32]
55
+ rope_k_repeat: True
56
+ embedding_dim: 256
57
+ num_heads: 1
58
+ downsample_rate: 1
59
+ dropout: 0.1
60
+ kv_in_dim: 64
61
+ num_layers: 4
62
+
63
+ memory_encoder:
64
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
65
+ out_dim: 64
66
+ position_encoding:
67
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
68
+ num_pos_feats: 64
69
+ normalize: true
70
+ scale: null
71
+ temperature: 10000
72
+ mask_downsampler:
73
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
74
+ kernel_size: 3
75
+ stride: 2
76
+ padding: 1
77
+ fuser:
78
+ _target_: sam2.modeling.memory_encoder.Fuser
79
+ layer:
80
+ _target_: sam2.modeling.memory_encoder.CXBlock
81
+ dim: 256
82
+ kernel_size: 7
83
+ padding: 3
84
+ layer_scale_init_value: 1e-6
85
+ use_dwconv: True # depth-wise convs
86
+ num_layers: 2
87
+
88
+ num_maskmem: 7
89
+ image_size: 1024
90
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
91
+ sigmoid_scale_for_mem_enc: 20.0
92
+ sigmoid_bias_for_mem_enc: -10.0
93
+ use_mask_input_as_output_without_sam: true
94
+ # Memory
95
+ directly_add_no_mem_embed: true
96
+ # use high-resolution feature map in the SAM mask decoder
97
+ use_high_res_features_in_sam: true
98
+ # output 3 masks on the first click on initial conditioning frames
99
+ multimask_output_in_sam: true
100
+ # SAM heads
101
+ iou_prediction_use_sigmoid: True
102
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
103
+ use_obj_ptrs_in_encoder: true
104
+ add_tpos_enc_to_obj_ptrs: false
105
+ only_obj_ptrs_in_the_past_for_eval: true
106
+ # object occlusion prediction
107
+ pred_obj_scores: true
108
+ pred_obj_scores_mlp: true
109
+ fixed_no_obj_ptr: true
110
+ # multimask tracking settings
111
+ multimask_output_for_tracking: true
112
+ use_multimask_token_for_obj_ptr: true
113
+ multimask_min_pt_num: 0
114
+ multimask_max_pt_num: 1
115
+ use_mlp_for_obj_ptr_proj: true
116
+ # Compilation flag
117
+ compile_image_encoder: False
segment-anything-2/sam2_configs/sam2_hiera_s.yaml ADDED
@@ -0,0 +1,116 @@
1
+ # @package _global_
2
+
3
+ # Model
4
+ model:
5
+ _target_: sam2.modeling.sam2_base.SAM2Base
6
+ image_encoder:
7
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
8
+ scalp: 1
9
+ trunk:
10
+ _target_: sam2.modeling.backbones.hieradet.Hiera
11
+ embed_dim: 96
12
+ num_heads: 1
13
+ stages: [1, 2, 11, 2]
14
+ global_att_blocks: [7, 10, 13]
15
+ window_pos_embed_bkg_spatial_size: [7, 7]
16
+ neck:
17
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
18
+ position_encoding:
19
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
20
+ num_pos_feats: 256
21
+ normalize: true
22
+ scale: null
23
+ temperature: 10000
24
+ d_model: 256
25
+ backbone_channel_list: [768, 384, 192, 96]
26
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
27
+ fpn_interp_model: nearest
28
+
29
+ memory_attention:
30
+ _target_: sam2.modeling.memory_attention.MemoryAttention
31
+ d_model: 256
32
+ pos_enc_at_input: true
33
+ layer:
34
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
35
+ activation: relu
36
+ dim_feedforward: 2048
37
+ dropout: 0.1
38
+ pos_enc_at_attn: false
39
+ self_attention:
40
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
41
+ rope_theta: 10000.0
42
+ feat_sizes: [32, 32]
43
+ embedding_dim: 256
44
+ num_heads: 1
45
+ downsample_rate: 1
46
+ dropout: 0.1
47
+ d_model: 256
48
+ pos_enc_at_cross_attn_keys: true
49
+ pos_enc_at_cross_attn_queries: false
50
+ cross_attention:
51
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
52
+ rope_theta: 10000.0
53
+ feat_sizes: [32, 32]
54
+ rope_k_repeat: True
55
+ embedding_dim: 256
56
+ num_heads: 1
57
+ downsample_rate: 1
58
+ dropout: 0.1
59
+ kv_in_dim: 64
60
+ num_layers: 4
61
+
62
+ memory_encoder:
63
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
64
+ out_dim: 64
65
+ position_encoding:
66
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
67
+ num_pos_feats: 64
68
+ normalize: true
69
+ scale: null
70
+ temperature: 10000
71
+ mask_downsampler:
72
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
73
+ kernel_size: 3
74
+ stride: 2
75
+ padding: 1
76
+ fuser:
77
+ _target_: sam2.modeling.memory_encoder.Fuser
78
+ layer:
79
+ _target_: sam2.modeling.memory_encoder.CXBlock
80
+ dim: 256
81
+ kernel_size: 7
82
+ padding: 3
83
+ layer_scale_init_value: 1e-6
84
+ use_dwconv: True # depth-wise convs
85
+ num_layers: 2
86
+
87
+ num_maskmem: 7
88
+ image_size: 1024
89
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
90
+ sigmoid_scale_for_mem_enc: 20.0
91
+ sigmoid_bias_for_mem_enc: -10.0
92
+ use_mask_input_as_output_without_sam: true
93
+ # Memory
94
+ directly_add_no_mem_embed: true
95
+ # use high-resolution feature map in the SAM mask decoder
96
+ use_high_res_features_in_sam: true
97
+ # output 3 masks on the first click on initial conditioning frames
98
+ multimask_output_in_sam: true
99
+ # SAM heads
100
+ iou_prediction_use_sigmoid: True
101
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
102
+ use_obj_ptrs_in_encoder: true
103
+ add_tpos_enc_to_obj_ptrs: false
104
+ only_obj_ptrs_in_the_past_for_eval: true
105
+ # object occlusion prediction
106
+ pred_obj_scores: true
107
+ pred_obj_scores_mlp: true
108
+ fixed_no_obj_ptr: true
109
+ # multimask tracking settings
110
+ multimask_output_for_tracking: true
111
+ use_multimask_token_for_obj_ptr: true
112
+ multimask_min_pt_num: 0
113
+ multimask_max_pt_num: 1
114
+ use_mlp_for_obj_ptr_proj: true
115
+ # Compilation flag
116
+ compile_image_encoder: False
segment-anything-2/sam2_configs/sam2_hiera_t.yaml ADDED
@@ -0,0 +1,118 @@
1
+ # @package _global_
2
+
3
+ # Model
4
+ model:
5
+ _target_: sam2.modeling.sam2_base.SAM2Base
6
+ image_encoder:
7
+ _target_: sam2.modeling.backbones.image_encoder.ImageEncoder
8
+ scalp: 1
9
+ trunk:
10
+ _target_: sam2.modeling.backbones.hieradet.Hiera
11
+ embed_dim: 96
12
+ num_heads: 1
13
+ stages: [1, 2, 7, 2]
14
+ global_att_blocks: [5, 7, 9]
15
+ window_pos_embed_bkg_spatial_size: [7, 7]
16
+ neck:
17
+ _target_: sam2.modeling.backbones.image_encoder.FpnNeck
18
+ position_encoding:
19
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
20
+ num_pos_feats: 256
21
+ normalize: true
22
+ scale: null
23
+ temperature: 10000
24
+ d_model: 256
25
+ backbone_channel_list: [768, 384, 192, 96]
26
+ fpn_top_down_levels: [2, 3] # output level 0 and 1 directly use the backbone features
27
+ fpn_interp_model: nearest
28
+
29
+ memory_attention:
30
+ _target_: sam2.modeling.memory_attention.MemoryAttention
31
+ d_model: 256
32
+ pos_enc_at_input: true
33
+ layer:
34
+ _target_: sam2.modeling.memory_attention.MemoryAttentionLayer
35
+ activation: relu
36
+ dim_feedforward: 2048
37
+ dropout: 0.1
38
+ pos_enc_at_attn: false
39
+ self_attention:
40
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
41
+ rope_theta: 10000.0
42
+ feat_sizes: [32, 32]
43
+ embedding_dim: 256
44
+ num_heads: 1
45
+ downsample_rate: 1
46
+ dropout: 0.1
47
+ d_model: 256
48
+ pos_enc_at_cross_attn_keys: true
49
+ pos_enc_at_cross_attn_queries: false
50
+ cross_attention:
51
+ _target_: sam2.modeling.sam.transformer.RoPEAttention
52
+ rope_theta: 10000.0
53
+ feat_sizes: [32, 32]
54
+ rope_k_repeat: True
55
+ embedding_dim: 256
56
+ num_heads: 1
57
+ downsample_rate: 1
58
+ dropout: 0.1
59
+ kv_in_dim: 64
60
+ num_layers: 4
61
+
62
+ memory_encoder:
63
+ _target_: sam2.modeling.memory_encoder.MemoryEncoder
64
+ out_dim: 64
65
+ position_encoding:
66
+ _target_: sam2.modeling.position_encoding.PositionEmbeddingSine
67
+ num_pos_feats: 64
68
+ normalize: true
69
+ scale: null
70
+ temperature: 10000
71
+ mask_downsampler:
72
+ _target_: sam2.modeling.memory_encoder.MaskDownSampler
73
+ kernel_size: 3
74
+ stride: 2
75
+ padding: 1
76
+ fuser:
77
+ _target_: sam2.modeling.memory_encoder.Fuser
78
+ layer:
79
+ _target_: sam2.modeling.memory_encoder.CXBlock
80
+ dim: 256
81
+ kernel_size: 7
82
+ padding: 3
83
+ layer_scale_init_value: 1e-6
84
+ use_dwconv: True # depth-wise convs
85
+ num_layers: 2
86
+
87
+ num_maskmem: 7
88
+ image_size: 1024
89
+ # apply scaled sigmoid on mask logits for memory encoder, and directly feed input mask as output mask
90
+ # SAM decoder
91
+ sigmoid_scale_for_mem_enc: 20.0
92
+ sigmoid_bias_for_mem_enc: -10.0
93
+ use_mask_input_as_output_without_sam: true
94
+ # Memory
95
+ directly_add_no_mem_embed: true
96
+ # use high-resolution feature map in the SAM mask decoder
97
+ use_high_res_features_in_sam: true
98
+ # output 3 masks on the first click on initial conditioning frames
99
+ multimask_output_in_sam: true
100
+ # SAM heads
101
+ iou_prediction_use_sigmoid: True
102
+ # cross-attend to object pointers from other frames (based on SAM output tokens) in the encoder
103
+ use_obj_ptrs_in_encoder: true
104
+ add_tpos_enc_to_obj_ptrs: false
105
+ only_obj_ptrs_in_the_past_for_eval: true
106
+ # object occlusion prediction
107
+ pred_obj_scores: true
108
+ pred_obj_scores_mlp: true
109
+ fixed_no_obj_ptr: true
110
+ # multimask tracking settings
111
+ multimask_output_for_tracking: true
112
+ use_multimask_token_for_obj_ptr: true
113
+ multimask_min_pt_num: 0
114
+ multimask_max_pt_num: 1
115
+ use_mlp_for_obj_ptr_proj: true
116
+ # Compilation flag
117
+ # HieraT does not currently support compilation, should always be set to False
118
+ compile_image_encoder: False
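The four configs above differ almost entirely in the Hiera trunk (embed_dim, num_heads, stages, global attention blocks and the matching backbone_channel_list); the memory attention, memory encoder and tracking flags are shared. A minimal usage sketch, assuming the config and checkpoint names shipped in this commit (adjust paths to your local layout):

    import torch
    from sam2.build_sam import build_sam2
    from sam2.sam2_image_predictor import SAM2ImagePredictor

    # Hydra resolves the config name via the sam2_configs package registered in sam2/__init__.py.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = build_sam2("sam2_hiera_s.yaml", "checkpoints/sam2_hiera_small.pt", device=device)
    predictor = SAM2ImagePredictor(model)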
segment-anything-2/setup.py ADDED
@@ -0,0 +1,146 @@
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+ import os
7
+
8
+ from setuptools import find_packages, setup
9
+
10
+ # Package metadata
11
+ NAME = "SAM 2"
12
+ VERSION = "1.0"
13
+ DESCRIPTION = "SAM 2: Segment Anything in Images and Videos"
14
+ URL = "https://github.com/facebookresearch/segment-anything-2"
15
+ AUTHOR = "Meta AI"
16
+ AUTHOR_EMAIL = "[email protected]"
17
+ LICENSE = "Apache 2.0"
18
+
19
+ # Read the contents of README file
20
+ with open("README.md", "r", encoding="utf-8") as f:
21
+ LONG_DESCRIPTION = f.read()
22
+
23
+ # Required dependencies
24
+ REQUIRED_PACKAGES = [
25
+ "torch>=2.3.1",
26
+ "torchvision>=0.18.1",
27
+ "tqdm>=4.66.1",
28
+ "hydra-core>=1.3.2",
29
+ "iopath>=0.1.10",
30
+ "pillow>=9.4.0",
31
+ ]
32
+
33
+ EXTRA_PACKAGES = {
34
+ "demo": ["matplotlib>=3.9.1", "jupyter>=1.0.0", "opencv-python>=4.7.0"],
35
+ "dev": ["black==24.2.0", "usort==1.0.2", "ufmt==2.0.0b2"],
36
+ }
37
+
38
+ # By default, we also build the SAM 2 CUDA extension.
39
+ # You may turn off CUDA build with `export SAM2_BUILD_CUDA=0`.
40
+ BUILD_CUDA = os.getenv("SAM2_BUILD_CUDA", "1") == "1"
41
+ # By default, we allow SAM 2 installation to proceed even with build errors.
42
+ # You may force stopping on errors with `export SAM2_BUILD_ALLOW_ERRORS=0`.
43
+ BUILD_ALLOW_ERRORS = os.getenv("SAM2_BUILD_ALLOW_ERRORS", "1") == "1"
44
+
45
+ # Catch and skip errors during extension building and print a warning message
46
+ # (note that this message only shows up under verbose build mode
47
+ # "pip install -v -e ." or "python setup.py build_ext -v")
48
+ CUDA_ERROR_MSG = (
49
+ "{}\n\n"
50
+ "Failed to build the SAM 2 CUDA extension due to the error above. "
51
+ "You can still use SAM 2 and it's OK to ignore the error above, although some "
52
+ "post-processing functionality may be limited (which doesn't affect the results in most cases; "
53
+ "(see https://github.com/facebookresearch/segment-anything-2/blob/main/INSTALL.md).\n"
54
+ )
55
+
56
+
57
+ def get_extensions():
58
+ if not BUILD_CUDA:
59
+ return []
60
+
61
+ try:
62
+ from torch.utils.cpp_extension import CUDAExtension
63
+
64
+ srcs = ["sam2/csrc/connected_components.cu"]
65
+ compile_args = {
66
+ "cxx": [],
67
+ "nvcc": [
68
+ "-DCUDA_HAS_FP16=1",
69
+ "-D__CUDA_NO_HALF_OPERATORS__",
70
+ "-D__CUDA_NO_HALF_CONVERSIONS__",
71
+ "-D__CUDA_NO_HALF2_OPERATORS__",
72
+ ],
73
+ }
74
+ ext_modules = [CUDAExtension("sam2._C", srcs, extra_compile_args=compile_args)]
75
+ except Exception as e:
76
+ if BUILD_ALLOW_ERRORS:
77
+ print(CUDA_ERROR_MSG.format(e))
78
+ ext_modules = []
79
+ else:
80
+ raise e
81
+
82
+ return ext_modules
83
+
84
+
85
+ try:
86
+ from torch.utils.cpp_extension import BuildExtension
87
+
88
+ class BuildExtensionIgnoreErrors(BuildExtension):
89
+
90
+ def finalize_options(self):
91
+ try:
92
+ super().finalize_options()
93
+ except Exception as e:
94
+ print(CUDA_ERROR_MSG.format(e))
95
+ self.extensions = []
96
+
97
+ def build_extensions(self):
98
+ try:
99
+ super().build_extensions()
100
+ except Exception as e:
101
+ print(CUDA_ERROR_MSG.format(e))
102
+ self.extensions = []
103
+
104
+ def get_ext_filename(self, ext_name):
105
+ try:
106
+ return super().get_ext_filename(ext_name)
107
+ except Exception as e:
108
+ print(CUDA_ERROR_MSG.format(e))
109
+ self.extensions = []
110
+ return "_C.so"
111
+
112
+ cmdclass = {
113
+ "build_ext": (
114
+ BuildExtensionIgnoreErrors.with_options(no_python_abi_suffix=True)
115
+ if BUILD_ALLOW_ERRORS
116
+ else BuildExtension.with_options(no_python_abi_suffix=True)
117
+ )
118
+ }
119
+ except Exception as e:
120
+ cmdclass = {}
121
+ if BUILD_ALLOW_ERRORS:
122
+ print(CUDA_ERROR_MSG.format(e))
123
+ else:
124
+ raise e
125
+
126
+
127
+ # Setup configuration
128
+ setup(
129
+ name=NAME,
130
+ version=VERSION,
131
+ description=DESCRIPTION,
132
+ long_description=LONG_DESCRIPTION,
133
+ long_description_content_type="text/markdown",
134
+ url=URL,
135
+ author=AUTHOR,
136
+ author_email=AUTHOR_EMAIL,
137
+ license=LICENSE,
138
+ packages=find_packages(exclude=("notebooks",)),  # exclude expects an iterable of patterns, not a bare string
139
+ package_data={"": ["*.yaml"]}, # SAM 2 configuration files
140
+ include_package_data=True,
141
+ install_requires=REQUIRED_PACKAGES,
142
+ extras_require=EXTRA_PACKAGES,
143
+ python_requires=">=3.10.0",
144
+ ext_modules=get_extensions(),
145
+ cmdclass=cmdclass,
146
+ )
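Since the CUDA extension build is optional (SAM2_BUILD_CUDA) and build failures are tolerated by default (SAM2_BUILD_ALLOW_ERRORS), it is worth checking after installation whether the sam2._C kernel was actually built; the mask post-processing above falls back with a warning when it is missing. A small sketch of such a check (it assumes nothing beyond the extension name used in setup.py):

    import importlib.util

    # Probe for the optional connected-components CUDA extension built by setup.py.
    try:
        has_cc_ext = importlib.util.find_spec("sam2._C") is not None
    except ModuleNotFoundError:  # sam2 itself is not installed
        has_cc_ext = False
    print("sam2._C available:", has_cc_ext, "- hole/sprinkle removal is skipped if False")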