python_code | repo_name | file_path |
---|---|---|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from torch import nn
from typing import Any, Optional, Tuple, Type
from .common import LayerNorm2d
class PromptEncoder(nn.Module):
def __init__(
self,
embed_dim: int,
image_embedding_size: Tuple[int, int],
input_image_size: Tuple[int, int],
mask_in_chans: int,
activation: Type[nn.Module] = nn.GELU,
) -> None:
"""
Encodes prompts for input to SAM's mask decoder.
Arguments:
embed_dim (int): The prompts' embedding dimension
image_embedding_size (tuple(int, int)): The spatial size of the
image embedding, as (H, W).
          input_image_size (tuple(int, int)): The padded size of the image as input
to the image encoder, as (H, W).
mask_in_chans (int): The number of hidden channels used for
encoding input masks.
activation (nn.Module): The activation to use when encoding
input masks.
"""
super().__init__()
self.embed_dim = embed_dim
self.input_image_size = input_image_size
self.image_embedding_size = image_embedding_size
self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
self.point_embeddings = nn.ModuleList(point_embeddings)
self.not_a_point_embed = nn.Embedding(1, embed_dim)
self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])
self.mask_downscaling = nn.Sequential(
nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
LayerNorm2d(mask_in_chans // 4),
activation(),
nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
LayerNorm2d(mask_in_chans),
activation(),
nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
)
self.no_mask_embed = nn.Embedding(1, embed_dim)
def get_dense_pe(self) -> torch.Tensor:
"""
Returns the positional encoding used to encode point prompts,
applied to a dense set of points the shape of the image encoding.
Returns:
torch.Tensor: Positional encoding with shape
1x(embed_dim)x(embedding_h)x(embedding_w)
"""
return self.pe_layer(self.image_embedding_size).unsqueeze(0)
def _embed_points(
self,
points: torch.Tensor,
labels: torch.Tensor,
pad: bool,
) -> torch.Tensor:
"""Embeds point prompts."""
points = points + 0.5 # Shift to center of pixel
if pad:
padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
points = torch.cat([points, padding_point], dim=1)
labels = torch.cat([labels, padding_label], dim=1)
point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
point_embedding[labels == -1] = 0.0
point_embedding[labels == -1] += self.not_a_point_embed.weight
point_embedding[labels == 0] += self.point_embeddings[0].weight
point_embedding[labels == 1] += self.point_embeddings[1].weight
return point_embedding
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
"""Embeds box prompts."""
boxes = boxes + 0.5 # Shift to center of pixel
coords = boxes.reshape(-1, 2, 2)
corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
corner_embedding[:, 0, :] += self.point_embeddings[2].weight
corner_embedding[:, 1, :] += self.point_embeddings[3].weight
return corner_embedding
def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
"""Embeds mask inputs."""
mask_embedding = self.mask_downscaling(masks)
return mask_embedding
def _get_batch_size(
self,
points: Optional[Tuple[torch.Tensor, torch.Tensor]],
boxes: Optional[torch.Tensor],
masks: Optional[torch.Tensor],
) -> int:
"""
Gets the batch size of the output given the batch size of the input prompts.
"""
if points is not None:
return points[0].shape[0]
elif boxes is not None:
return boxes.shape[0]
elif masks is not None:
return masks.shape[0]
else:
return 1
def _get_device(self) -> torch.device:
return self.point_embeddings[0].weight.device
def forward(
self,
points: Optional[Tuple[torch.Tensor, torch.Tensor]],
boxes: Optional[torch.Tensor],
masks: Optional[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Embeds different types of prompts, returning both sparse and dense
embeddings.
Arguments:
points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
and labels to embed.
boxes (torch.Tensor or none): boxes to embed
masks (torch.Tensor or none): masks to embed
Returns:
torch.Tensor: sparse embeddings for the points and boxes, with shape
BxNx(embed_dim), where N is determined by the number of input points
and boxes.
torch.Tensor: dense embeddings for the masks, in the shape
Bx(embed_dim)x(embed_H)x(embed_W)
"""
bs = self._get_batch_size(points, boxes, masks)
sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())
if points is not None:
coords, labels = points
point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
if boxes is not None:
box_embeddings = self._embed_boxes(boxes)
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
if masks is not None:
dense_embeddings = self._embed_masks(masks)
else:
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
)
return sparse_embeddings, dense_embeddings
class PositionEmbeddingRandom(nn.Module):
"""
Positional encoding using random spatial frequencies.
"""
def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
super().__init__()
if scale is None or scale <= 0.0:
scale = 1.0
self.register_buffer(
"positional_encoding_gaussian_matrix",
scale * torch.randn((2, num_pos_feats)),
)
def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
"""Positionally encode points that are normalized to [0,1]."""
# assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
coords = 2 * coords - 1
coords = coords @ self.positional_encoding_gaussian_matrix
coords = 2 * np.pi * coords
# outputs d_1 x ... x d_n x C shape
return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
def forward(self, size: Tuple[int, int]) -> torch.Tensor:
"""Generate positional encoding for a grid of the specified size."""
h, w = size
device: Any = self.positional_encoding_gaussian_matrix.device
grid = torch.ones((h, w), device=device, dtype=torch.float32)
y_embed = grid.cumsum(dim=0) - 0.5
x_embed = grid.cumsum(dim=1) - 0.5
y_embed = y_embed / h
x_embed = x_embed / w
pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
return pe.permute(2, 0, 1) # C x H x W
def forward_with_coords(
self, coords_input: torch.Tensor, image_size: Tuple[int, int]
) -> torch.Tensor:
"""Positionally encode points that are not normalized to [0,1]."""
coords = coords_input.clone()
coords[:, :, 0] = coords[:, :, 0] / image_size[1]
coords[:, :, 1] = coords[:, :, 1] / image_size[0]
return self._pe_encoding(coords.to(torch.float)) # B x N x C
| swarms-master | swarms/workers/models/segment_anything/segment_anything/modeling/prompt_encoder.py |
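A minimal usage sketch for the PromptEncoder defined above. The constructor arguments are assumptions matching the default SAM configuration (256-dim embeddings, a 64x64 embedding grid, 1024x1024 padded inputs, mask_in_chans=16); with a single foreground point and no box, the encoder appends a padding token, so the sparse output carries two tokens.

import torch

encoder = PromptEncoder(
    embed_dim=256,
    image_embedding_size=(64, 64),
    input_image_size=(1024, 1024),
    mask_in_chans=16,
)
coords = torch.tensor([[[512.0, 512.0]]])  # (B=1, N=1, 2) point in input-image coordinates
labels = torch.tensor([[1]])               # 1 = foreground, 0 = background
sparse, dense = encoder(points=(coords, labels), boxes=None, masks=None)
print(sparse.shape)  # torch.Size([1, 2, 256]) -- the point plus a padding token
print(dense.shape)   # torch.Size([1, 256, 64, 64]) -- the learned no-mask embedding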
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.nn import functional as F
from typing import Any, Dict, List, Tuple
from .image_encoder import ImageEncoderViT
from .mask_decoder import MaskDecoder
from .prompt_encoder import PromptEncoder
class Sam(nn.Module):
mask_threshold: float = 0.0
image_format: str = "RGB"
def __init__(
self,
image_encoder: ImageEncoderViT,
prompt_encoder: PromptEncoder,
mask_decoder: MaskDecoder,
pixel_mean: List[float] = [123.675, 116.28, 103.53],
pixel_std: List[float] = [58.395, 57.12, 57.375],
) -> None:
"""
SAM predicts object masks from an image and input prompts.
Arguments:
image_encoder (ImageEncoderViT): The backbone used to encode the
image into image embeddings that allow for efficient mask prediction.
prompt_encoder (PromptEncoder): Encodes various types of input prompts.
mask_decoder (MaskDecoder): Predicts masks from the image embeddings
and encoded prompts.
pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
pixel_std (list(float)): Std values for normalizing pixels in the input image.
"""
super().__init__()
self.image_encoder = image_encoder
self.prompt_encoder = prompt_encoder
self.mask_decoder = mask_decoder
self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
@property
def device(self) -> Any:
return self.pixel_mean.device
@torch.no_grad()
def forward(
self,
batched_input: List[Dict[str, Any]],
multimask_output: bool,
) -> List[Dict[str, torch.Tensor]]:
"""
Predicts masks end-to-end from provided images and prompts.
If prompts are not known in advance, using SamPredictor is
recommended over calling the model directly.
Arguments:
batched_input (list(dict)): A list over input images, each a
dictionary with the following keys. A prompt key can be
excluded if it is not present.
'image': The image as a torch tensor in 3xHxW format,
already transformed for input to the model.
'original_size': (tuple(int, int)) The original size of
the image before transformation, as (H, W).
'point_coords': (torch.Tensor) Batched point prompts for
this image, with shape BxNx2. Already transformed to the
input frame of the model.
'point_labels': (torch.Tensor) Batched labels for point prompts,
with shape BxN.
'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.
Already transformed to the input frame of the model.
'mask_inputs': (torch.Tensor) Batched mask inputs to the model,
in the form Bx1xHxW.
multimask_output (bool): Whether the model should predict multiple
disambiguating masks, or return a single mask.
Returns:
(list(dict)): A list over input images, where each element is
            a dictionary with the following keys.
'masks': (torch.Tensor) Batched binary mask predictions,
with shape BxCxHxW, where B is the number of input prompts,
C is determined by multimask_output, and (H, W) is the
original size of the image.
'iou_predictions': (torch.Tensor) The model's predictions
of mask quality, in shape BxC.
'low_res_logits': (torch.Tensor) Low resolution logits with
shape BxCxHxW, where H=W=256. Can be passed as mask input
to subsequent iterations of prediction.
"""
input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0)
image_embeddings = self.image_encoder(input_images)
outputs = []
for image_record, curr_embedding in zip(batched_input, image_embeddings):
if "point_coords" in image_record:
points = (image_record["point_coords"], image_record["point_labels"])
else:
points = None
sparse_embeddings, dense_embeddings = self.prompt_encoder(
points=points,
boxes=image_record.get("boxes", None),
masks=image_record.get("mask_inputs", None),
)
low_res_masks, iou_predictions = self.mask_decoder(
image_embeddings=curr_embedding.unsqueeze(0),
image_pe=self.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
)
masks = self.postprocess_masks(
low_res_masks,
input_size=image_record["image"].shape[-2:],
original_size=image_record["original_size"],
)
masks = masks > self.mask_threshold
outputs.append(
{
"masks": masks,
"iou_predictions": iou_predictions,
"low_res_logits": low_res_masks,
}
)
return outputs
def postprocess_masks(
self,
masks: torch.Tensor,
input_size: Tuple[int, ...],
original_size: Tuple[int, ...],
) -> torch.Tensor:
"""
Remove padding and upscale masks to the original image size.
Arguments:
masks (torch.Tensor): Batched masks from the mask_decoder,
in BxCxHxW format.
input_size (tuple(int, int)): The size of the image input to the
model, in (H, W) format. Used to remove padding.
original_size (tuple(int, int)): The original size of the image
before resizing for input to the model, in (H, W) format.
Returns:
(torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
is given by original_size.
"""
masks = F.interpolate(
masks,
(self.image_encoder.img_size, self.image_encoder.img_size),
mode="bilinear",
align_corners=False,
)
masks = masks[..., : input_size[0], : input_size[1]]
masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False)
return masks
def preprocess(self, x: torch.Tensor) -> torch.Tensor:
"""Normalize pixel values and pad to a square input."""
# Normalize colors
x = (x - self.pixel_mean) / self.pixel_std
# Pad
h, w = x.shape[-2:]
padh = self.image_encoder.img_size - h
padw = self.image_encoder.img_size - w
x = F.pad(x, (0, padw, 0, padh))
return x
| swarms-master | swarms/workers/models/segment_anything/segment_anything/modeling/sam.py |
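A hedged sketch of the batched_input format consumed by Sam.forward above. The sam object is assumed to be built elsewhere (for example through the repository's sam_model_registry), and the image tensor is assumed to be already resized so its longest side matches the encoder's 1024-pixel input frame; only the dictionary layout and shapes are the point here.

import torch

image = torch.randint(0, 256, (3, 1024, 683), dtype=torch.uint8).float()  # 3xHxW, already transformed
batched_input = [
    {
        "image": image,
        "original_size": (1500, 1000),                     # (H, W) before resizing
        "point_coords": torch.tensor([[[500.0, 375.0]]]),  # BxNx2, in the input frame
        "point_labels": torch.tensor([[1]]),                # BxN, 1 = foreground
    }
]
# outputs = sam(batched_input, multimask_output=True)
# outputs[0]["masks"]: boolean masks of shape (1, 3, 1500, 1000) at the original resolution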
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.nn import functional as F
from typing import List, Tuple, Type
from .common import LayerNorm2d
class MaskDecoder(nn.Module):
def __init__(
self,
*,
transformer_dim: int,
transformer: nn.Module,
num_multimask_outputs: int = 3,
activation: Type[nn.Module] = nn.GELU,
iou_head_depth: int = 3,
iou_head_hidden_dim: int = 256,
) -> None:
"""
Predicts masks given an image and prompt embeddings, using a
transformer architecture.
Arguments:
transformer_dim (int): the channel dimension of the transformer
transformer (nn.Module): the transformer used to predict masks
num_multimask_outputs (int): the number of masks to predict
when disambiguating masks
activation (nn.Module): the type of activation to use when
upscaling masks
iou_head_depth (int): the depth of the MLP used to predict
mask quality
iou_head_hidden_dim (int): the hidden dimension of the MLP
used to predict mask quality
"""
super().__init__()
self.transformer_dim = transformer_dim
self.transformer = transformer
self.num_multimask_outputs = num_multimask_outputs
self.iou_token = nn.Embedding(1, transformer_dim)
self.num_mask_tokens = num_multimask_outputs + 1
self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
self.output_upscaling = nn.Sequential(
nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
LayerNorm2d(transformer_dim // 4),
activation(),
nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
activation(),
)
self.output_hypernetworks_mlps = nn.ModuleList(
[
MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
for i in range(self.num_mask_tokens)
]
)
self.iou_prediction_head = MLP(
transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
)
def forward(
self,
image_embeddings: torch.Tensor,
image_pe: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
multimask_output: bool,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Predict masks given image and prompt embeddings.
Arguments:
image_embeddings (torch.Tensor): the embeddings from the image encoder
image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
multimask_output (bool): Whether to return multiple masks or a single
mask.
Returns:
torch.Tensor: batched predicted masks
torch.Tensor: batched predictions of mask quality
"""
masks, iou_pred = self.predict_masks(
image_embeddings=image_embeddings,
image_pe=image_pe,
sparse_prompt_embeddings=sparse_prompt_embeddings,
dense_prompt_embeddings=dense_prompt_embeddings,
)
# Select the correct mask or masks for output
if multimask_output:
mask_slice = slice(1, None)
else:
mask_slice = slice(0, 1)
masks = masks[:, mask_slice, :, :]
iou_pred = iou_pred[:, mask_slice]
# Prepare output
return masks, iou_pred
def predict_masks(
self,
image_embeddings: torch.Tensor,
image_pe: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Predicts masks. See 'forward' for more details."""
# Concatenate output tokens
output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
# Expand per-image data in batch direction to be per-mask
src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
src = src + dense_prompt_embeddings
pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
b, c, h, w = src.shape
# Run the transformer
hs, src = self.transformer(src, pos_src, tokens)
iou_token_out = hs[:, 0, :]
mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
# Upscale mask embeddings and predict masks using the mask tokens
src = src.transpose(1, 2).view(b, c, h, w)
upscaled_embedding = self.output_upscaling(src)
hyper_in_list: List[torch.Tensor] = []
for i in range(self.num_mask_tokens):
hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
hyper_in = torch.stack(hyper_in_list, dim=1)
b, c, h, w = upscaled_embedding.shape
masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
# Generate mask quality predictions
iou_pred = self.iou_prediction_head(iou_token_out)
return masks, iou_pred
# Lightly adapted from
# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
class MLP(nn.Module):
def __init__(
self,
input_dim: int,
hidden_dim: int,
output_dim: int,
num_layers: int,
sigmoid_output: bool = False,
) -> None:
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
)
self.sigmoid_output = sigmoid_output
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
if self.sigmoid_output:
x = F.sigmoid(x)
return x
| swarms-master | swarms/workers/models/segment_anything/segment_anything/modeling/mask_decoder.py |
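A short sketch of the hypernetwork MLP defined above, using the default transformer_dim of 256 as an assumption: each mask token passes through a 3-layer MLP to produce 32 (transformer_dim // 8) weights, and the mask is the dot product of those weights with every spatial location of the upscaled embedding, mirroring the matmul in predict_masks.

import torch

mlp = MLP(input_dim=256, hidden_dim=256, output_dim=32, num_layers=3)
mask_token = torch.randn(1, 256)          # one mask token from the transformer
hyper_in = mlp(mask_token)                # (1, 32) hypernetwork weights
upscaled = torch.randn(1, 32, 256, 256)   # upscaled image embedding (b, c, h, w)
b, c, h, w = upscaled.shape
mask = (hyper_in @ upscaled.view(b, c, h * w)).view(b, 1, h, w)
print(mask.shape)  # torch.Size([1, 1, 256, 256])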
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import cv2 # type: ignore
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry
import argparse
import json
import os
from typing import Any, Dict, List
parser = argparse.ArgumentParser(
description=(
"Runs automatic mask generation on an input image or directory of images, "
"and outputs masks as either PNGs or COCO-style RLEs. Requires open-cv, "
"as well as pycocotools if saving in RLE format."
)
)
parser.add_argument(
"--input",
type=str,
required=True,
help="Path to either a single input image or folder of images.",
)
parser.add_argument(
"--output",
type=str,
required=True,
help=(
"Path to the directory where masks will be output. Output will be either a folder "
"of PNGs per image or a single json with COCO-style masks."
),
)
parser.add_argument(
"--model-type",
type=str,
required=True,
help="The type of model to load, in ['default', 'vit_h', 'vit_l', 'vit_b']",
)
parser.add_argument(
"--checkpoint",
type=str,
required=True,
help="The path to the SAM checkpoint to use for mask generation.",
)
parser.add_argument("--device", type=str, default="cuda", help="The device to run generation on.")
parser.add_argument(
"--convert-to-rle",
action="store_true",
help=(
"Save masks as COCO RLEs in a single json instead of as a folder of PNGs. "
"Requires pycocotools."
),
)
amg_settings = parser.add_argument_group("AMG Settings")
amg_settings.add_argument(
"--points-per-side",
type=int,
default=None,
help="Generate masks by sampling a grid over the image with this many points to a side.",
)
amg_settings.add_argument(
"--points-per-batch",
type=int,
default=None,
help="How many input points to process simultaneously in one batch.",
)
amg_settings.add_argument(
"--pred-iou-thresh",
type=float,
default=None,
help="Exclude masks with a predicted score from the model that is lower than this threshold.",
)
amg_settings.add_argument(
"--stability-score-thresh",
type=float,
default=None,
help="Exclude masks with a stability score lower than this threshold.",
)
amg_settings.add_argument(
"--stability-score-offset",
type=float,
default=None,
help="Larger values perturb the mask more when measuring stability score.",
)
amg_settings.add_argument(
"--box-nms-thresh",
type=float,
default=None,
help="The overlap threshold for excluding a duplicate mask.",
)
amg_settings.add_argument(
"--crop-n-layers",
type=int,
default=None,
help=(
"If >0, mask generation is run on smaller crops of the image to generate more masks. "
"The value sets how many different scales to crop at."
),
)
amg_settings.add_argument(
"--crop-nms-thresh",
type=float,
default=None,
help="The overlap threshold for excluding duplicate masks across different crops.",
)
amg_settings.add_argument(
"--crop-overlap-ratio",
type=int,
default=None,
help="Larger numbers mean image crops will overlap more.",
)
amg_settings.add_argument(
"--crop-n-points-downscale-factor",
type=int,
default=None,
help="The number of points-per-side in each layer of crop is reduced by this factor.",
)
amg_settings.add_argument(
"--min-mask-region-area",
type=int,
default=None,
help=(
"Disconnected mask regions or holes with area smaller than this value "
"in pixels are removed by postprocessing."
),
)
def write_masks_to_folder(masks: List[Dict[str, Any]], path: str) -> None:
header = "id,area,bbox_x0,bbox_y0,bbox_w,bbox_h,point_input_x,point_input_y,predicted_iou,stability_score,crop_box_x0,crop_box_y0,crop_box_w,crop_box_h" # noqa
metadata = [header]
for i, mask_data in enumerate(masks):
mask = mask_data["segmentation"]
filename = f"{i}.png"
cv2.imwrite(os.path.join(path, filename), mask * 255)
mask_metadata = [
str(i),
str(mask_data["area"]),
*[str(x) for x in mask_data["bbox"]],
*[str(x) for x in mask_data["point_coords"][0]],
str(mask_data["predicted_iou"]),
str(mask_data["stability_score"]),
*[str(x) for x in mask_data["crop_box"]],
]
row = ",".join(mask_metadata)
metadata.append(row)
metadata_path = os.path.join(path, "metadata.csv")
with open(metadata_path, "w") as f:
f.write("\n".join(metadata))
return
def get_amg_kwargs(args):
amg_kwargs = {
"points_per_side": args.points_per_side,
"points_per_batch": args.points_per_batch,
"pred_iou_thresh": args.pred_iou_thresh,
"stability_score_thresh": args.stability_score_thresh,
"stability_score_offset": args.stability_score_offset,
"box_nms_thresh": args.box_nms_thresh,
"crop_n_layers": args.crop_n_layers,
"crop_nms_thresh": args.crop_nms_thresh,
"crop_overlap_ratio": args.crop_overlap_ratio,
"crop_n_points_downscale_factor": args.crop_n_points_downscale_factor,
"min_mask_region_area": args.min_mask_region_area,
}
amg_kwargs = {k: v for k, v in amg_kwargs.items() if v is not None}
return amg_kwargs
def main(args: argparse.Namespace) -> None:
print("Loading model...")
sam = sam_model_registry[args.model_type](checkpoint=args.checkpoint)
_ = sam.to(device=args.device)
output_mode = "coco_rle" if args.convert_to_rle else "binary_mask"
amg_kwargs = get_amg_kwargs(args)
generator = SamAutomaticMaskGenerator(sam, output_mode=output_mode, **amg_kwargs)
if not os.path.isdir(args.input):
targets = [args.input]
else:
targets = [
f for f in os.listdir(args.input) if not os.path.isdir(os.path.join(args.input, f))
]
targets = [os.path.join(args.input, f) for f in targets]
os.makedirs(args.output, exist_ok=True)
for t in targets:
print(f"Processing '{t}'...")
image = cv2.imread(t)
if image is None:
print(f"Could not load '{t}' as an image, skipping...")
continue
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
masks = generator.generate(image)
base = os.path.basename(t)
base = os.path.splitext(base)[0]
save_base = os.path.join(args.output, base)
if output_mode == "binary_mask":
os.makedirs(save_base, exist_ok=False)
write_masks_to_folder(masks, save_base)
else:
save_file = save_base + ".json"
with open(save_file, "w") as f:
json.dump(masks, f)
print("Done!")
if __name__ == "__main__":
args = parser.parse_args()
main(args)
| swarms-master | swarms/workers/models/segment_anything/scripts/amg.py |
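A small sketch of how get_amg_kwargs above behaves, assuming the function is in scope: any AMG flag left at its None default is dropped from the dictionary, so SamAutomaticMaskGenerator falls back to its own defaults for that setting. The Namespace below is a hand-built stand-in for parsed CLI arguments, not a real invocation.

import argparse

ns = argparse.Namespace(
    points_per_side=32, points_per_batch=None, pred_iou_thresh=0.9,
    stability_score_thresh=None, stability_score_offset=None,
    box_nms_thresh=None, crop_n_layers=None, crop_nms_thresh=None,
    crop_overlap_ratio=None, crop_n_points_downscale_factor=None,
    min_mask_region_area=None,
)
print(get_amg_kwargs(ns))  # {'points_per_side': 32, 'pred_iou_thresh': 0.9}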
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from segment_anything import sam_model_registry
from segment_anything.utils.onnx import SamOnnxModel
import argparse
import warnings
try:
import onnxruntime # type: ignore
onnxruntime_exists = True
except ImportError:
onnxruntime_exists = False
parser = argparse.ArgumentParser(
description="Export the SAM prompt encoder and mask decoder to an ONNX model."
)
parser.add_argument(
"--checkpoint", type=str, required=True, help="The path to the SAM model checkpoint."
)
parser.add_argument(
"--output", type=str, required=True, help="The filename to save the ONNX model to."
)
parser.add_argument(
"--model-type",
type=str,
required=True,
help="In ['default', 'vit_h', 'vit_l', 'vit_b']. Which type of SAM model to export.",
)
parser.add_argument(
"--return-single-mask",
action="store_true",
help=(
"If true, the exported ONNX model will only return the best mask, "
"instead of returning multiple masks. For high resolution images "
"this can improve runtime when upscaling masks is expensive."
),
)
parser.add_argument(
"--opset",
type=int,
default=17,
help="The ONNX opset version to use. Must be >=11",
)
parser.add_argument(
"--quantize-out",
type=str,
default=None,
help=(
"If set, will quantize the model and save it with this name. "
"Quantization is performed with quantize_dynamic from onnxruntime.quantization.quantize."
),
)
parser.add_argument(
"--gelu-approximate",
action="store_true",
help=(
"Replace GELU operations with approximations using tanh. Useful "
"for some runtimes that have slow or unimplemented erf ops, used in GELU."
),
)
parser.add_argument(
"--use-stability-score",
action="store_true",
help=(
"Replaces the model's predicted mask quality score with the stability "
"score calculated on the low resolution masks using an offset of 1.0. "
),
)
parser.add_argument(
"--return-extra-metrics",
action="store_true",
help=(
"The model will return five results: (masks, scores, stability_scores, "
"areas, low_res_logits) instead of the usual three. This can be "
"significantly slower for high resolution outputs."
),
)
def run_export(
model_type: str,
checkpoint: str,
output: str,
opset: int,
return_single_mask: bool,
gelu_approximate: bool = False,
use_stability_score: bool = False,
return_extra_metrics=False,
):
print("Loading model...")
sam = sam_model_registry[model_type](checkpoint=checkpoint)
onnx_model = SamOnnxModel(
model=sam,
return_single_mask=return_single_mask,
use_stability_score=use_stability_score,
return_extra_metrics=return_extra_metrics,
)
if gelu_approximate:
for n, m in onnx_model.named_modules():
if isinstance(m, torch.nn.GELU):
m.approximate = "tanh"
dynamic_axes = {
"point_coords": {1: "num_points"},
"point_labels": {1: "num_points"},
}
embed_dim = sam.prompt_encoder.embed_dim
embed_size = sam.prompt_encoder.image_embedding_size
mask_input_size = [4 * x for x in embed_size]
dummy_inputs = {
"image_embeddings": torch.randn(1, embed_dim, *embed_size, dtype=torch.float),
"point_coords": torch.randint(low=0, high=1024, size=(1, 5, 2), dtype=torch.float),
"point_labels": torch.randint(low=0, high=4, size=(1, 5), dtype=torch.float),
"mask_input": torch.randn(1, 1, *mask_input_size, dtype=torch.float),
"has_mask_input": torch.tensor([1], dtype=torch.float),
"orig_im_size": torch.tensor([1500, 2250], dtype=torch.float),
}
_ = onnx_model(**dummy_inputs)
output_names = ["masks", "iou_predictions", "low_res_masks"]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=torch.jit.TracerWarning)
warnings.filterwarnings("ignore", category=UserWarning)
with open(output, "wb") as f:
print(f"Exporting onnx model to {output}...")
torch.onnx.export(
onnx_model,
tuple(dummy_inputs.values()),
f,
export_params=True,
verbose=False,
opset_version=opset,
do_constant_folding=True,
input_names=list(dummy_inputs.keys()),
output_names=output_names,
dynamic_axes=dynamic_axes,
)
if onnxruntime_exists:
ort_inputs = {k: to_numpy(v) for k, v in dummy_inputs.items()}
# set cpu provider default
providers = ["CPUExecutionProvider"]
ort_session = onnxruntime.InferenceSession(output, providers=providers)
_ = ort_session.run(None, ort_inputs)
print("Model has successfully been run with ONNXRuntime.")
def to_numpy(tensor):
return tensor.cpu().numpy()
if __name__ == "__main__":
args = parser.parse_args()
run_export(
model_type=args.model_type,
checkpoint=args.checkpoint,
output=args.output,
opset=args.opset,
return_single_mask=args.return_single_mask,
gelu_approximate=args.gelu_approximate,
use_stability_score=args.use_stability_score,
return_extra_metrics=args.return_extra_metrics,
)
if args.quantize_out is not None:
assert onnxruntime_exists, "onnxruntime is required to quantize the model."
from onnxruntime.quantization import QuantType # type: ignore
from onnxruntime.quantization.quantize import quantize_dynamic # type: ignore
print(f"Quantizing model and writing to {args.quantize_out}...")
quantize_dynamic(
model_input=args.output,
model_output=args.quantize_out,
optimize_model=True,
per_channel=False,
reduce_range=False,
weight_type=QuantType.QUInt8,
)
print("Done!")
| swarms-master | swarms/workers/models/segment_anything/scripts/export_onnx_model.py |
| swarms-master | swarms/workers/models/GroundingDINO/__init__.py |
# coding=utf-8
# Copyright 2022 The IDEA Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------------------------
# Modified from
# https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/setup.py
# https://github.com/facebookresearch/detectron2/blob/main/setup.py
# https://github.com/open-mmlab/mmdetection/blob/master/setup.py
# https://github.com/Oneflow-Inc/libai/blob/main/setup.py
# ------------------------------------------------------------------------------------------------
import glob
import os
import subprocess
import torch
from setuptools import find_packages, setup
from torch.utils.cpp_extension import CUDA_HOME, CppExtension, CUDAExtension
# groundingdino version info
version = "0.1.0"
package_name = "groundingdino"
cwd = os.path.dirname(os.path.abspath(__file__))
sha = "Unknown"
try:
sha = subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd).decode("ascii").strip()
except Exception:
pass
def write_version_file():
version_path = os.path.join(cwd, "groundingdino", "version.py")
with open(version_path, "w") as f:
f.write(f"__version__ = '{version}'\n")
# f.write(f"git_version = {repr(sha)}\n")
requirements = ["torch", "torchvision"]
torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
def get_extensions():
this_dir = os.path.dirname(os.path.abspath(__file__))
extensions_dir = os.path.join(this_dir, "groundingdino", "models", "GroundingDINO", "csrc")
main_source = os.path.join(extensions_dir, "vision.cpp")
sources = glob.glob(os.path.join(extensions_dir, "**", "*.cpp"))
source_cuda = glob.glob(os.path.join(extensions_dir, "**", "*.cu")) + glob.glob(
os.path.join(extensions_dir, "*.cu")
)
sources = [main_source] + sources
extension = CppExtension
extra_compile_args = {"cxx": []}
define_macros = []
if CUDA_HOME is not None and (torch.cuda.is_available() or "TORCH_CUDA_ARCH_LIST" in os.environ):
print("Compiling with CUDA")
extension = CUDAExtension
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
extra_compile_args["nvcc"] = [
"-DCUDA_HAS_FP16=1",
"-D__CUDA_NO_HALF_OPERATORS__",
"-D__CUDA_NO_HALF_CONVERSIONS__",
"-D__CUDA_NO_HALF2_OPERATORS__",
]
else:
print("Compiling without CUDA")
define_macros += [("WITH_HIP", None)]
extra_compile_args["nvcc"] = []
return None
sources = [os.path.join(extensions_dir, s) for s in sources]
include_dirs = [extensions_dir]
ext_modules = [
extension(
"groundingdino._C",
sources,
include_dirs=include_dirs,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
def parse_requirements(fname="requirements.txt", with_version=True):
"""Parse the package dependencies listed in a requirements file but strips
specific versioning information.
Args:
fname (str): path to requirements file
        with_version (bool, default=True): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import re
import sys
from os.path import exists
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith("-r "):
# Allow specifying requirements in other files
target = line.split(" ")[1]
for info in parse_require_file(target):
yield info
else:
info = {"line": line}
if line.startswith("-e "):
info["package"] = line.split("#egg=")[1]
elif "@git+" in line:
info["package"] = line
else:
# Remove versioning from the package
pat = "(" + "|".join([">=", "==", ">"]) + ")"
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info["package"] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ";" in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip, rest.split(";"))
info["platform_deps"] = platform_deps
else:
version = rest # NOQA
info["version"] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, "r") as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith("#"):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info["package"]]
if with_version and "version" in info:
parts.extend(info["version"])
if not sys.version.startswith("3.4"):
# apparently package_deps are broken in 3.4
platform_deps = info.get("platform_deps")
if platform_deps is not None:
parts.append(";" + platform_deps)
item = "".join(parts)
yield item
packages = list(gen_packages_items())
return packages
if __name__ == "__main__":
print(f"Building wheel {package_name}-{version}")
with open("LICENSE", "r", encoding="utf-8") as f:
license = f.read()
write_version_file()
setup(
name="groundingdino",
version="0.1.0",
author="International Digital Economy Academy, Shilong Liu",
url="https://github.com/IDEA-Research/GroundingDINO",
description="open-set object detector",
license=license,
install_requires=parse_requirements("requirements.txt"),
packages=find_packages(
exclude=(
"configs",
"tests",
)
),
ext_modules=get_extensions(),
cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
)
| swarms-master | swarms/workers/models/GroundingDINO/setup.py |
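A hedged sketch of the version-stripping step inside parse_requirements above: a requirement line is split on the first comparison operator, the bare package name is kept, and anything after a semicolon is treated as a platform marker. The example line is illustrative, not taken from the repository's requirements.txt.

import re

pat = "(" + "|".join([">=", "==", ">"]) + ")"
line = "transformers>=4.25.0; python_version >= '3.8'"
parts = [p.strip() for p in re.split(pat, line, maxsplit=1)]
package = parts[0]
op, rest = parts[1:]
version, platform_deps = map(str.strip, rest.split(";"))
print(package, (op, version), platform_deps)
# transformers ('>=', '4.25.0') python_version >= '3.8'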
import argparse
import time
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from groundingdino.models import build_model
import groundingdino.datasets.transforms as T
from groundingdino.util import box_ops, get_tokenlizer
from groundingdino.util.misc import clean_state_dict, collate_fn
from groundingdino.util.slconfig import SLConfig
# from torchvision.datasets import CocoDetection
import torchvision
from groundingdino.util.vl_utils import build_captions_and_token_span, create_positive_map_from_span
from groundingdino.datasets.cocogrounding_eval import CocoGroundingEvaluator
def load_model(model_config_path: str, model_checkpoint_path: str, device: str = "cuda"):
args = SLConfig.fromfile(model_config_path)
args.device = device
model = build_model(args)
checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
model.load_state_dict(clean_state_dict(checkpoint["ema_model"]), strict=False)
model.eval()
return model
class CocoDetection(torchvision.datasets.CocoDetection):
def __init__(self, img_folder, ann_file, transforms):
super().__init__(img_folder, ann_file)
self._transforms = transforms
def __getitem__(self, idx):
img, target = super().__getitem__(idx) # target: list
# import ipdb; ipdb.set_trace()
w, h = img.size
boxes = [obj["bbox"] for obj in target]
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2] # xywh -> xyxy
boxes[:, 0::2].clamp_(min=0, max=w)
boxes[:, 1::2].clamp_(min=0, max=h)
        # filter out invalid boxes/masks/keypoints
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
target_new = {}
image_id = self.ids[idx]
target_new["image_id"] = image_id
target_new["boxes"] = boxes
target_new["orig_size"] = torch.as_tensor([int(h), int(w)])
if self._transforms is not None:
img, target = self._transforms(img, target_new)
return img, target
class PostProcessCocoGrounding(nn.Module):
""" This module converts the model's output into the format expected by the coco api"""
def __init__(self, num_select=300, coco_api=None, tokenlizer=None) -> None:
super().__init__()
self.num_select = num_select
assert coco_api is not None
category_dict = coco_api.dataset['categories']
cat_list = [item['name'] for item in category_dict]
captions, cat2tokenspan = build_captions_and_token_span(cat_list, True)
tokenspanlist = [cat2tokenspan[cat] for cat in cat_list]
positive_map = create_positive_map_from_span(
tokenlizer(captions), tokenspanlist) # 80, 256. normed
id_map = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 7, 7: 8, 8: 9, 9: 10, 10: 11, 11: 13, 12: 14, 13: 15, 14: 16, 15: 17, 16: 18, 17: 19, 18: 20, 19: 21, 20: 22, 21: 23, 22: 24, 23: 25, 24: 27, 25: 28, 26: 31, 27: 32, 28: 33, 29: 34, 30: 35, 31: 36, 32: 37, 33: 38, 34: 39, 35: 40, 36: 41, 37: 42, 38: 43, 39: 44, 40: 46,
41: 47, 42: 48, 43: 49, 44: 50, 45: 51, 46: 52, 47: 53, 48: 54, 49: 55, 50: 56, 51: 57, 52: 58, 53: 59, 54: 60, 55: 61, 56: 62, 57: 63, 58: 64, 59: 65, 60: 67, 61: 70, 62: 72, 63: 73, 64: 74, 65: 75, 66: 76, 67: 77, 68: 78, 69: 79, 70: 80, 71: 81, 72: 82, 73: 84, 74: 85, 75: 86, 76: 87, 77: 88, 78: 89, 79: 90}
# build a mapping from label_id to pos_map
new_pos_map = torch.zeros((91, 256))
for k, v in id_map.items():
new_pos_map[v] = positive_map[k]
self.positive_map = new_pos_map
@torch.no_grad()
def forward(self, outputs, target_sizes, not_to_xyxy=False):
""" Perform the computation
Parameters:
outputs: raw outputs of the model
            target_sizes: tensor of dimension [batch_size x 2] containing the size of each image in the batch
For evaluation, this must be the original image size (before any data augmentation)
For visualization, this should be the image size after data augment, but before padding
"""
num_select = self.num_select
out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']
# pos map to logit
prob_to_token = out_logits.sigmoid() # bs, 100, 256
pos_maps = self.positive_map.to(prob_to_token.device)
# (bs, 100, 256) @ (91, 256).T -> (bs, 100, 91)
prob_to_label = prob_to_token @ pos_maps.T
# if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':
# import ipdb; ipdb.set_trace()
assert len(out_logits) == len(target_sizes)
assert target_sizes.shape[1] == 2
prob = prob_to_label
topk_values, topk_indexes = torch.topk(
prob.view(out_logits.shape[0], -1), num_select, dim=1)
scores = topk_values
topk_boxes = topk_indexes // prob.shape[2]
labels = topk_indexes % prob.shape[2]
if not_to_xyxy:
boxes = out_bbox
else:
boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)
boxes = torch.gather(
boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))
# and from relative [0, 1] to absolute [0, height] coordinates
img_h, img_w = target_sizes.unbind(1)
scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
boxes = boxes * scale_fct[:, None, :]
results = [{'scores': s, 'labels': l, 'boxes': b}
for s, l, b in zip(scores, labels, boxes)]
return results
def main(args):
# config
cfg = SLConfig.fromfile(args.config_file)
# build model
model = load_model(args.config_file, args.checkpoint_path)
model = model.to(args.device)
model = model.eval()
# build dataloader
transform = T.Compose(
[
T.RandomResize([800], max_size=1333),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
dataset = CocoDetection(
args.image_dir, args.anno_path, transforms=transform)
data_loader = DataLoader(
dataset, batch_size=1, shuffle=False, num_workers=args.num_workers, collate_fn=collate_fn)
# build post processor
tokenlizer = get_tokenlizer.get_tokenlizer(cfg.text_encoder_type)
postprocessor = PostProcessCocoGrounding(
coco_api=dataset.coco, tokenlizer=tokenlizer)
# build evaluator
evaluator = CocoGroundingEvaluator(
dataset.coco, iou_types=("bbox",), useCats=True)
# build captions
category_dict = dataset.coco.dataset['categories']
cat_list = [item['name'] for item in category_dict]
caption = " . ".join(cat_list) + ' .'
print("Input text prompt:", caption)
# run inference
start = time.time()
for i, (images, targets) in enumerate(data_loader):
# get images and captions
images = images.tensors.to(args.device)
bs = images.shape[0]
input_captions = [caption] * bs
# feed to the model
outputs = model(images, captions=input_captions)
orig_target_sizes = torch.stack(
[t["orig_size"] for t in targets], dim=0).to(images.device)
results = postprocessor(outputs, orig_target_sizes)
cocogrounding_res = {
target["image_id"]: output for target, output in zip(targets, results)}
evaluator.update(cocogrounding_res)
if (i+1) % 30 == 0:
used_time = time.time() - start
eta = len(data_loader) / (i+1e-5) * used_time - used_time
print(
f"processed {i}/{len(data_loader)} images. time: {used_time:.2f}s, ETA: {eta:.2f}s")
evaluator.synchronize_between_processes()
evaluator.accumulate()
evaluator.summarize()
print("Final results:", evaluator.coco_eval["bbox"].stats.tolist())
if __name__ == "__main__":
parser = argparse.ArgumentParser(
"Grounding DINO eval on COCO", add_help=True)
# load model
parser.add_argument("--config_file", "-c", type=str,
required=True, help="path to config file")
parser.add_argument(
"--checkpoint_path", "-p", type=str, required=True, help="path to checkpoint file"
)
parser.add_argument("--device", type=str, default="cuda",
help="running device (default: cuda)")
# post processing
parser.add_argument("--num_select", type=int, default=300,
help="number of topk to select")
# coco info
parser.add_argument("--anno_path", type=str,
required=True, help="coco root")
parser.add_argument("--image_dir", type=str,
required=True, help="coco image dir")
parser.add_argument("--num_workers", type=int, default=4,
help="number of workers for dataloader")
args = parser.parse_args()
main(args)
| swarms-master | swarms/workers/models/GroundingDINO/demo/test_ap_on_coco.py |
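A short sketch of the final box rescaling performed in PostProcessCocoGrounding above. The helper reproduces the cxcywh-to-xyxy convention assumed for groundingdino.util.box_ops; normalized boxes are then scaled by (img_w, img_h, img_w, img_h) to absolute pixel coordinates.

import torch

def cxcywh_to_xyxy(b):
    # assumed to match box_ops.box_cxcywh_to_xyxy
    cx, cy, w, h = b.unbind(-1)
    return torch.stack([cx - 0.5 * w, cy - 0.5 * h, cx + 0.5 * w, cy + 0.5 * h], dim=-1)

boxes = torch.tensor([[0.5, 0.5, 0.2, 0.4]])             # normalized cxcywh from the model
img_h, img_w = 480, 640
scale_fct = torch.tensor([[img_w, img_h, img_w, img_h]], dtype=torch.float)
print(cxcywh_to_xyxy(boxes) * scale_fct)                 # tensor([[256., 144., 384., 336.]])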
import argparse
import cv2
import os
from PIL import Image
import numpy as np
import warnings
import torch
# prepare the environment
os.system("python setup.py build develop --user")
os.system("pip install packaging==21.3")
os.system("pip install gradio")
warnings.filterwarnings("ignore")
import gradio as gr
from groundingdino.models import build_model
from groundingdino.util.slconfig import SLConfig
from groundingdino.util.utils import clean_state_dict
from groundingdino.util.inference import annotate, predict
import groundingdino.datasets.transforms as T
from huggingface_hub import hf_hub_download
# Use this command for evaluate the Grounding DINO model
config_file = "groundingdino/config/GroundingDINO_SwinT_OGC.py"
ckpt_repo_id = "ShilongLiu/GroundingDINO"
ckpt_filename = "groundingdino_swint_ogc.pth"
def load_model_hf(model_config_path, repo_id, filename, device='cpu'):
args = SLConfig.fromfile(model_config_path)
model = build_model(args)
args.device = device
cache_file = hf_hub_download(repo_id=repo_id, filename=filename)
checkpoint = torch.load(cache_file, map_location='cpu')
log = model.load_state_dict(clean_state_dict(checkpoint['model']), strict=False)
print("Model loaded from {} \n => {}".format(cache_file, log))
_ = model.eval()
return model
def image_transform_grounding(init_image):
transform = T.Compose([
T.RandomResize([800], max_size=1333),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
image, _ = transform(init_image, None) # 3, h, w
return init_image, image
def image_transform_grounding_for_vis(init_image):
transform = T.Compose([
T.RandomResize([800], max_size=1333),
])
image, _ = transform(init_image, None) # 3, h, w
return image
model = load_model_hf(config_file, ckpt_repo_id, ckpt_filename)
def run_grounding(input_image, grounding_caption, box_threshold, text_threshold):
init_image = input_image.convert("RGB")
_, image_tensor = image_transform_grounding(init_image)
image_pil: Image = image_transform_grounding_for_vis(init_image)
    # run grounding
boxes, logits, phrases = predict(model, image_tensor, grounding_caption, box_threshold, text_threshold, device='cpu')
annotated_frame = annotate(image_source=np.asarray(image_pil), boxes=boxes, logits=logits, phrases=phrases)
image_with_box = Image.fromarray(cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB))
return image_with_box
if __name__ == "__main__":
parser = argparse.ArgumentParser("Grounding DINO demo", add_help=True)
parser.add_argument("--debug", action="store_true", help="using debug mode")
parser.add_argument("--share", action="store_true", help="share the app")
args = parser.parse_args()
block = gr.Blocks().queue()
with block:
gr.Markdown("# [Grounding DINO](https://github.com/IDEA-Research/GroundingDINO)")
gr.Markdown("### Open-World Detection with Grounding DINO")
with gr.Row():
with gr.Column():
input_image = gr.Image(source='upload', type="pil")
grounding_caption = gr.Textbox(label="Detection Prompt")
run_button = gr.Button(label="Run")
with gr.Accordion("Advanced options", open=False):
box_threshold = gr.Slider(
label="Box Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.001
)
text_threshold = gr.Slider(
label="Text Threshold", minimum=0.0, maximum=1.0, value=0.25, step=0.001
)
with gr.Column():
gallery = gr.outputs.Image(
type="pil",
# label="grounding results"
).style(full_width=True, full_height=True)
# gallery = gr.Gallery(label="Generated images", show_label=False).style(
# grid=[1], height="auto", container=True, full_width=True, full_height=True)
run_button.click(fn=run_grounding, inputs=[
input_image, grounding_caption, box_threshold, text_threshold], outputs=[gallery])
block.launch(server_name='0.0.0.0', server_port=7579, debug=args.debug, share=args.share)
| swarms-master | swarms/workers/models/GroundingDINO/demo/gradio_app.py |
import argparse
import os
import numpy as np
import torch
from PIL import Image, ImageDraw, ImageFont
import groundingdino.datasets.transforms as T
from groundingdino.models import build_model
from groundingdino.util.slconfig import SLConfig
from groundingdino.util.utils import clean_state_dict, get_phrases_from_posmap
from groundingdino.util.vl_utils import create_positive_map_from_span
def plot_boxes_to_image(image_pil, tgt):
H, W = tgt["size"]
boxes = tgt["boxes"]
labels = tgt["labels"]
assert len(boxes) == len(labels), "boxes and labels must have same length"
draw = ImageDraw.Draw(image_pil)
mask = Image.new("L", image_pil.size, 0)
mask_draw = ImageDraw.Draw(mask)
# draw boxes and masks
for box, label in zip(boxes, labels):
# from 0..1 to 0..W, 0..H
box = box * torch.Tensor([W, H, W, H])
# from xywh to xyxy
box[:2] -= box[2:] / 2
box[2:] += box[:2]
# random color
color = tuple(np.random.randint(0, 255, size=3).tolist())
# draw
x0, y0, x1, y1 = box
x0, y0, x1, y1 = int(x0), int(y0), int(x1), int(y1)
draw.rectangle([x0, y0, x1, y1], outline=color, width=6)
# draw.text((x0, y0), str(label), fill=color)
font = ImageFont.load_default()
if hasattr(font, "getbbox"):
bbox = draw.textbbox((x0, y0), str(label), font)
else:
w, h = draw.textsize(str(label), font)
bbox = (x0, y0, w + x0, y0 + h)
# bbox = draw.textbbox((x0, y0), str(label))
draw.rectangle(bbox, fill=color)
draw.text((x0, y0), str(label), fill="white")
mask_draw.rectangle([x0, y0, x1, y1], fill=255, width=6)
return image_pil, mask
def load_image(image_path):
# load image
image_pil = Image.open(image_path).convert("RGB") # load image
transform = T.Compose(
[
T.RandomResize([800], max_size=1333),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
image, _ = transform(image_pil, None) # 3, h, w
return image_pil, image
def load_model(model_config_path, model_checkpoint_path, cpu_only=False):
args = SLConfig.fromfile(model_config_path)
args.device = "cuda" if not cpu_only else "cpu"
model = build_model(args)
checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
load_res = model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
print(load_res)
_ = model.eval()
return model
def get_grounding_output(model, image, caption, box_threshold, text_threshold=None, with_logits=True, cpu_only=False, token_spans=None):
    assert text_threshold is not None or token_spans is not None, "text_threshold and token_spans should not be None at the same time!"
caption = caption.lower()
caption = caption.strip()
if not caption.endswith("."):
caption = caption + "."
device = "cuda" if not cpu_only else "cpu"
model = model.to(device)
image = image.to(device)
with torch.no_grad():
outputs = model(image[None], captions=[caption])
logits = outputs["pred_logits"].sigmoid()[0] # (nq, 256)
boxes = outputs["pred_boxes"][0] # (nq, 4)
# filter output
if token_spans is None:
logits_filt = logits.cpu().clone()
boxes_filt = boxes.cpu().clone()
filt_mask = logits_filt.max(dim=1)[0] > box_threshold
logits_filt = logits_filt[filt_mask] # num_filt, 256
boxes_filt = boxes_filt[filt_mask] # num_filt, 4
# get phrase
tokenlizer = model.tokenizer
tokenized = tokenlizer(caption)
# build pred
pred_phrases = []
for logit, box in zip(logits_filt, boxes_filt):
pred_phrase = get_phrases_from_posmap(logit > text_threshold, tokenized, tokenlizer)
if with_logits:
pred_phrases.append(pred_phrase + f"({str(logit.max().item())[:4]})")
else:
pred_phrases.append(pred_phrase)
else:
# given-phrase mode
positive_maps = create_positive_map_from_span(
            model.tokenizer(caption),
token_span=token_spans
).to(image.device) # n_phrase, 256
logits_for_phrases = positive_maps @ logits.T # n_phrase, nq
all_logits = []
all_phrases = []
all_boxes = []
for (token_span, logit_phr) in zip(token_spans, logits_for_phrases):
# get phrase
phrase = ' '.join([caption[_s:_e] for (_s, _e) in token_span])
# get mask
filt_mask = logit_phr > box_threshold
# filt box
all_boxes.append(boxes[filt_mask])
# filt logits
all_logits.append(logit_phr[filt_mask])
if with_logits:
logit_phr_num = logit_phr[filt_mask]
all_phrases.extend([phrase + f"({str(logit.item())[:4]})" for logit in logit_phr_num])
else:
                all_phrases.extend([phrase for _ in range(int(filt_mask.sum()))])
boxes_filt = torch.cat(all_boxes, dim=0).cpu()
pred_phrases = all_phrases
return boxes_filt, pred_phrases
if __name__ == "__main__":
parser = argparse.ArgumentParser("Grounding DINO example", add_help=True)
parser.add_argument("--config_file", "-c", type=str, required=True, help="path to config file")
parser.add_argument(
"--checkpoint_path", "-p", type=str, required=True, help="path to checkpoint file"
)
parser.add_argument("--image_path", "-i", type=str, required=True, help="path to image file")
parser.add_argument("--text_prompt", "-t", type=str, required=True, help="text prompt")
parser.add_argument(
"--output_dir", "-o", type=str, default="outputs", required=True, help="output directory"
)
parser.add_argument("--box_threshold", type=float, default=0.3, help="box threshold")
parser.add_argument("--text_threshold", type=float, default=0.25, help="text threshold")
parser.add_argument("--token_spans", type=str, default=None, help=
"The positions of start and end positions of phrases of interest. \
For example, a caption is 'a cat and a dog', \
if you would like to detect 'cat', the token_spans should be '[[[2, 5]], ]', since 'a cat and a dog'[2:5] is 'cat'. \
if you would like to detect 'a cat', the token_spans should be '[[[0, 1], [2, 5]], ]', since 'a cat and a dog'[0:1] is 'a', and 'a cat and a dog'[2:5] is 'cat'. \
")
parser.add_argument("--cpu-only", action="store_true", help="running on cpu only!, default=False")
args = parser.parse_args()
# cfg
config_file = args.config_file # change the path of the model config file
checkpoint_path = args.checkpoint_path # change the path of the model
image_path = args.image_path
text_prompt = args.text_prompt
output_dir = args.output_dir
box_threshold = args.box_threshold
text_threshold = args.text_threshold
token_spans = args.token_spans
# make dir
os.makedirs(output_dir, exist_ok=True)
# load image
image_pil, image = load_image(image_path)
# load model
model = load_model(config_file, checkpoint_path, cpu_only=args.cpu_only)
# visualize raw image
image_pil.save(os.path.join(output_dir, "raw_image.jpg"))
# set the text_threshold to None if token_spans is set.
if token_spans is not None:
text_threshold = None
print("Using token_spans. Set the text_threshold to None.")
# run model
boxes_filt, pred_phrases = get_grounding_output(
model, image, text_prompt, box_threshold, text_threshold, cpu_only=args.cpu_only, token_spans=eval(token_spans)
)
# visualize pred
size = image_pil.size
pred_dict = {
"boxes": boxes_filt,
"size": [size[1], size[0]], # H,W
"labels": pred_phrases,
}
# import ipdb; ipdb.set_trace()
image_with_box = plot_boxes_to_image(image_pil, pred_dict)[0]
image_with_box.save(os.path.join(output_dir, "pred.jpg"))
| swarms-master | swarms/workers/models/GroundingDINO/demo/inference_on_a_image.py |
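A small sketch of how the --token_spans argument above maps character spans of the caption to phrases, mirroring the phrase reconstruction in get_grounding_output: each span is a list of (start, end) character offsets into the caption, and multi-span phrases are joined with spaces.

caption = "a cat and a dog"
token_spans = [[(2, 5)], [(0, 1), (2, 5)]]   # "cat" and "a cat"
phrases = [" ".join(caption[s:e] for (s, e) in span) for span in token_spans]
print(phrases)  # ['cat', 'a cat']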
import typer
from groundingdino.util.inference import load_model, load_image, predict
from tqdm import tqdm
import torchvision
import fiftyone as fo
def main(
image_directory: str = 'test_grounding_dino',
text_prompt: str = 'bus, car',
box_threshold: float = 0.15,
text_threshold: float = 0.10,
export_dataset: bool = False,
view_dataset: bool = False,
export_annotated_images: bool = True,
weights_path : str = "groundingdino_swint_ogc.pth",
config_path: str = "../../GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py",
subsample: int = None,
):
model = load_model(config_path, weights_path)
dataset = fo.Dataset.from_images_dir(image_directory)
if subsample is not None:
if subsample < len(dataset):
dataset = dataset.take(subsample).clone()
for sample in tqdm(dataset):
image_source, image = load_image(sample.filepath)
boxes, logits, phrases = predict(
model=model,
image=image,
caption=text_prompt,
box_threshold=box_threshold,
text_threshold=text_threshold,
)
detections = []
for box, logit, phrase in zip(boxes, logits, phrases):
rel_box = torchvision.ops.box_convert(box, 'cxcywh', 'xywh')
detections.append(
fo.Detection(
label=phrase,
bounding_box=rel_box,
confidence=logit,
))
# Store detections in a field name of your choice
sample["detections"] = fo.Detections(detections=detections)
sample.save()
    # launches the Voxel51 FiftyOne UI for viewing the dataset.
if view_dataset:
session = fo.launch_app(dataset)
session.wait()
# exports COCO dataset ready for training
if export_dataset:
dataset.export(
'coco_dataset',
dataset_type=fo.types.COCODetectionDataset,
)
# saves bounding boxes plotted on the input images to disk
if export_annotated_images:
dataset.draw_labels(
'images_with_bounding_boxes',
label_fields=['detections']
)
if __name__ == '__main__':
typer.run(main)
| swarms-master | swarms/workers/models/GroundingDINO/demo/create_coco_dataset.py |
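A brief sketch of the box conversion in the loop above: predict() returns boxes in normalized cxcywh format, and torchvision's box_convert turns them into the normalized top-left xywh layout that FiftyOne's Detection bounding_box expects.

import torch
import torchvision

boxes = torch.tensor([[0.5, 0.5, 0.2, 0.4]])                  # normalized cxcywh
rel_boxes = torchvision.ops.box_convert(boxes, "cxcywh", "xywh")
print(rel_boxes)  # tensor([[0.4000, 0.3000, 0.2000, 0.4000]])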
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import colorsys
import datetime
import functools
import io
import json
import os
import pickle
import subprocess
import time
from collections import OrderedDict, defaultdict, deque
from typing import List, Optional
import numpy as np
import torch
import torch.distributed as dist
# needed due to empty tensor bug in pytorch and torchvision 0.5
import torchvision
from torch import Tensor
__torchvision_need_compat_flag = float(torchvision.__version__.split(".")[1]) < 7
if __torchvision_need_compat_flag:
from torchvision.ops import _new_empty_tensor
from torchvision.ops.misc import _output_size
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
if d.shape[0] == 0:
return 0
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
if os.environ.get("SHILONG_AMP", None) == "1":
eps = 1e-4
else:
eps = 1e-6
return self.total / (self.count + eps)
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value,
)
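# Illustrative usage sketch (added for clarity; not part of the original module):
# track a noisy scalar with SmoothedValue and read back windowed statistics.
def _demo_smoothed_value():
    meter = SmoothedValue(window_size=5, fmt="{median:.2f} ({global_avg:.2f})")
    for loss in [1.0, 0.8, 0.9, 0.7, 0.6, 0.5]:
        meter.update(loss)
    # median/avg use the sliding window, global_avg uses every update
    print(str(meter))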
@functools.lru_cache()
def _get_global_gloo_group():
"""
Return a process group based on gloo backend, containing all the ranks
The result is cached.
"""
if dist.get_backend() == "nccl":
return dist.new_group(backend="gloo")
return dist.group.WORLD
def all_gather_cpu(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
cpu_group = _get_global_gloo_group()
buffer = io.BytesIO()
torch.save(data, buffer)
data_view = buffer.getbuffer()
device = "cuda" if cpu_group is None else "cpu"
tensor = torch.ByteTensor(data_view).to(device)
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device=device, dtype=torch.long)
size_list = [torch.tensor([0], device=device, dtype=torch.long) for _ in range(world_size)]
if cpu_group is None:
dist.all_gather(size_list, local_size)
else:
print("gathering on cpu")
dist.all_gather(size_list, local_size, group=cpu_group)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
assert isinstance(local_size.item(), int)
local_size = int(local_size.item())
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=device))
if local_size != max_size:
padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device=device)
tensor = torch.cat((tensor, padding), dim=0)
if cpu_group is None:
dist.all_gather(tensor_list, tensor)
else:
dist.all_gather(tensor_list, tensor, group=cpu_group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
tensor = torch.split(tensor, [size, max_size - size], dim=0)[0]
buffer = io.BytesIO(tensor.cpu().numpy())
obj = torch.load(buffer)
data_list.append(obj)
return data_list
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
if os.getenv("CPU_REDUCE") == "1":
return all_gather_cpu(data)
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device="cuda")
size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
if local_size != max_size:
padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
# print(name, str(meter))
# import ipdb;ipdb.set_trace()
if meter.count > 0:
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None, logger=None):
if logger is None:
print_func = print
else:
print_func = logger.info
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
if torch.cuda.is_available():
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
"max mem: {memory:.0f}",
]
)
else:
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
]
)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
# import ipdb; ipdb.set_trace()
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print_func(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB,
)
)
else:
print_func(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print_func(
"{} Total time: {} ({:.4f} s / it)".format(
header, total_time_str, total_time / len(iterable)
)
)
def get_sha():
cwd = os.path.dirname(os.path.abspath(__file__))
def _run(command):
return subprocess.check_output(command, cwd=cwd).decode("ascii").strip()
sha = "N/A"
diff = "clean"
branch = "N/A"
try:
sha = _run(["git", "rev-parse", "HEAD"])
subprocess.check_output(["git", "diff"], cwd=cwd)
diff = _run(["git", "diff-index", "HEAD"])
diff = "has uncommited changes" if diff else "clean"
branch = _run(["git", "rev-parse", "--abbrev-ref", "HEAD"])
except Exception:
pass
message = f"sha: {sha}, status: {diff}, branch: {branch}"
return message
def collate_fn(batch):
# import ipdb; ipdb.set_trace()
batch = list(zip(*batch))
batch[0] = nested_tensor_from_tensor_list(batch[0])
return tuple(batch)
def _max_by_axis(the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
class NestedTensor(object):
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
if mask == "auto":
self.mask = torch.zeros_like(tensors).to(tensors.device)
if self.mask.dim() == 3:
self.mask = self.mask.sum(0).to(bool)
elif self.mask.dim() == 4:
self.mask = self.mask.sum(1).to(bool)
else:
raise ValueError(
"tensors dim must be 3 or 4 but {}({})".format(
self.tensors.dim(), self.tensors.shape
)
)
def imgsize(self):
res = []
for i in range(self.tensors.shape[0]):
mask = self.mask[i]
maxH = (~mask).sum(0).max()
maxW = (~mask).sum(1).max()
res.append(torch.Tensor([maxH, maxW]))
return res
def to(self, device):
# type: (Device) -> NestedTensor # noqa
cast_tensor = self.tensors.to(device)
mask = self.mask
if mask is not None:
assert mask is not None
cast_mask = mask.to(device)
else:
cast_mask = None
return NestedTensor(cast_tensor, cast_mask)
def to_img_list_single(self, tensor, mask):
assert tensor.dim() == 3, "dim of tensor should be 3 but {}".format(tensor.dim())
maxH = (~mask).sum(0).max()
maxW = (~mask).sum(1).max()
img = tensor[:, :maxH, :maxW]
return img
def to_img_list(self):
"""remove the padding and convert to img list
Returns:
[type]: [description]
"""
if self.tensors.dim() == 3:
return self.to_img_list_single(self.tensors, self.mask)
else:
res = []
for i in range(self.tensors.shape[0]):
tensor_i = self.tensors[i]
mask_i = self.mask[i]
res.append(self.to_img_list_single(tensor_i, mask_i))
return res
@property
def device(self):
return self.tensors.device
def decompose(self):
return self.tensors, self.mask
def __repr__(self):
return str(self.tensors)
@property
def shape(self):
return {"tensors.shape": self.tensors.shape, "mask.shape": self.mask.shape}
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
# TODO make this more general
if tensor_list[0].ndim == 3:
if torchvision._is_tracing():
# nested_tensor_from_tensor_list() does not export well to ONNX
# call _onnx_nested_tensor_from_tensor_list() instead
return _onnx_nested_tensor_from_tensor_list(tensor_list)
# TODO make it support different-sized images
max_size = _max_by_axis([list(img.shape) for img in tensor_list])
# min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
batch_shape = [len(tensor_list)] + max_size
b, c, h, w = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
m[: img.shape[1], : img.shape[2]] = False
else:
raise ValueError("not supported")
return NestedTensor(tensor, mask)
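# Illustrative sketch (added for clarity; not part of the original module):
# batch two images of different spatial sizes into one padded NestedTensor.
def _demo_nested_tensor():
    imgs = [torch.rand(3, 200, 300), torch.rand(3, 180, 320)]
    nt = nested_tensor_from_tensor_list(imgs)
    padded, mask = nt.decompose()
    # padded is (2, 3, 200, 320); mask is True wherever padding was added
    print(padded.shape, mask.shape, mask[0].sum().item(), mask[1].sum().item())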
# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
max_size = []
for i in range(tensor_list[0].dim()):
max_size_i = torch.max(
torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)
).to(torch.int64)
max_size.append(max_size_i)
max_size = tuple(max_size)
# work around for
# pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
# m[: img.shape[1], :img.shape[2]] = False
# which is not yet supported in onnx
padded_imgs = []
padded_masks = []
for img in tensor_list:
padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
padded_imgs.append(padded_img)
m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
padded_masks.append(padded_mask.to(torch.bool))
tensor = torch.stack(padded_imgs)
mask = torch.stack(padded_masks)
return NestedTensor(tensor, mask=mask)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if "WORLD_SIZE" in os.environ and os.environ["WORLD_SIZE"] != "": # 'RANK' in os.environ and
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ["WORLD_SIZE"])
args.gpu = args.local_rank = int(os.environ["LOCAL_RANK"])
# launch by torch.distributed.launch
# Single node
# python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 1 --rank 0 ...
# Multi nodes
# python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 0 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ...
# python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 1 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ...
# args.rank = int(os.environ.get('OMPI_COMM_WORLD_RANK'))
# local_world_size = int(os.environ['GPU_PER_NODE_COUNT'])
# args.world_size = args.world_size * local_world_size
# args.gpu = args.local_rank = int(os.environ['LOCAL_RANK'])
# args.rank = args.rank * local_world_size + args.local_rank
print(
"world size: {}, rank: {}, local rank: {}".format(
args.world_size, args.rank, args.local_rank
)
)
print(json.dumps(dict(os.environ), indent=2))
elif "SLURM_PROCID" in os.environ:
args.rank = int(os.environ["SLURM_PROCID"])
args.gpu = args.local_rank = int(os.environ["SLURM_LOCALID"])
args.world_size = int(os.environ["SLURM_NPROCS"])
print(
"world size: {}, world rank: {}, local rank: {}, device_count: {}".format(
args.world_size, args.rank, args.local_rank, torch.cuda.device_count()
)
)
else:
print("Not using distributed mode")
args.distributed = False
args.world_size = 1
args.rank = 0
args.local_rank = 0
return
print("world_size:{} rank:{} local_rank:{}".format(args.world_size, args.rank, args.local_rank))
args.distributed = True
torch.cuda.set_device(args.local_rank)
args.dist_backend = "nccl"
print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(
backend=args.dist_backend,
world_size=args.world_size,
rank=args.rank,
init_method=args.dist_url,
)
print("Before torch.distributed.barrier()")
torch.distributed.barrier()
print("End torch.distributed.barrier()")
setup_for_distributed(args.rank == 0)
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
if target.numel() == 0:
return [torch.zeros([], device=output.device)]
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
@torch.no_grad()
def accuracy_onehot(pred, gt):
"""_summary_
Args:
pred (_type_): n, c
gt (_type_): n, c
"""
tp = ((pred - gt).abs().sum(-1) < 1e-4).float().sum()
acc = tp / gt.shape[0] * 100
return acc
def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
# type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
"""
Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
This will eventually be supported natively by PyTorch, and this
class can go away.
"""
    if __torchvision_need_compat_flag:
if input.numel() > 0:
return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)
output_shape = _output_size(2, input, size, scale_factor)
output_shape = list(input.shape[:-2]) + list(output_shape)
return _new_empty_tensor(input, output_shape)
else:
return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
class color_sys:
def __init__(self, num_colors) -> None:
self.num_colors = num_colors
colors = []
for i in np.arange(0.0, 360.0, 360.0 / num_colors):
hue = i / 360.0
lightness = (50 + np.random.rand() * 10) / 100.0
saturation = (90 + np.random.rand() * 10) / 100.0
colors.append(
tuple([int(j * 255) for j in colorsys.hls_to_rgb(hue, lightness, saturation)])
)
self.colors = colors
def __call__(self, idx):
return self.colors[idx]
def inverse_sigmoid(x, eps=1e-3):
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
return torch.log(x1 / x2)
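# Illustrative sketch (added for clarity; not part of the original module):
# inverse_sigmoid undoes torch.sigmoid up to the clamping eps.
def _demo_inverse_sigmoid():
    p = torch.tensor([0.1, 0.5, 0.9])
    logits = inverse_sigmoid(p)
    print(logits, torch.sigmoid(logits))  # the second tensor is ~[0.1, 0.5, 0.9]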
def clean_state_dict(state_dict):
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if k[:7] == "module.":
k = k[7:] # remove `module.`
new_state_dict[k] = v
return new_state_dict
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/util/misc.py |
# ==========================================================
# Modified from mmcv
# ==========================================================
import ast
import os
import os.path as osp
import shutil
import sys
import tempfile
from argparse import Action
from importlib import import_module
from addict import Dict
from yapf.yapflib.yapf_api import FormatCode
BASE_KEY = "_base_"
DELETE_KEY = "_delete_"
RESERVED_KEYS = ["filename", "text", "pretty_text", "get", "dump", "merge_from_dict"]
def check_file_exist(filename, msg_tmpl='file "{}" does not exist'):
if not osp.isfile(filename):
raise FileNotFoundError(msg_tmpl.format(filename))
class ConfigDict(Dict):
def __missing__(self, name):
raise KeyError(name)
def __getattr__(self, name):
try:
value = super(ConfigDict, self).__getattr__(name)
except KeyError:
ex = AttributeError(f"'{self.__class__.__name__}' object has no " f"attribute '{name}'")
except Exception as e:
ex = e
else:
return value
raise ex
class SLConfig(object):
"""
config files.
only support .py file as config now.
ref: mmcv.utils.config
Example:
>>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))
>>> cfg.a
1
>>> cfg.b
{'b1': [0, 1]}
>>> cfg.b.b1
[0, 1]
>>> cfg = Config.fromfile('tests/data/config/a.py')
>>> cfg.filename
"/home/kchen/projects/mmcv/tests/data/config/a.py"
>>> cfg.item4
'test'
>>> cfg
"Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: "
"{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}"
"""
@staticmethod
def _validate_py_syntax(filename):
with open(filename) as f:
content = f.read()
try:
ast.parse(content)
except SyntaxError:
raise SyntaxError("There are syntax errors in config " f"file {filename}")
@staticmethod
def _file2dict(filename):
filename = osp.abspath(osp.expanduser(filename))
check_file_exist(filename)
if filename.lower().endswith(".py"):
with tempfile.TemporaryDirectory() as temp_config_dir:
temp_config_file = tempfile.NamedTemporaryFile(dir=temp_config_dir, suffix=".py")
temp_config_name = osp.basename(temp_config_file.name)
if os.name == 'nt':
temp_config_file.close()
shutil.copyfile(filename, osp.join(temp_config_dir, temp_config_name))
temp_module_name = osp.splitext(temp_config_name)[0]
sys.path.insert(0, temp_config_dir)
SLConfig._validate_py_syntax(filename)
mod = import_module(temp_module_name)
sys.path.pop(0)
cfg_dict = {
name: value for name, value in mod.__dict__.items() if not name.startswith("__")
}
# delete imported module
del sys.modules[temp_module_name]
# close temp file
temp_config_file.close()
elif filename.lower().endswith((".yml", ".yaml", ".json")):
from .slio import slload
cfg_dict = slload(filename)
else:
raise IOError("Only py/yml/yaml/json type are supported now!")
cfg_text = filename + "\n"
with open(filename, "r") as f:
cfg_text += f.read()
# parse the base file
if BASE_KEY in cfg_dict:
cfg_dir = osp.dirname(filename)
base_filename = cfg_dict.pop(BASE_KEY)
base_filename = base_filename if isinstance(base_filename, list) else [base_filename]
cfg_dict_list = list()
cfg_text_list = list()
for f in base_filename:
_cfg_dict, _cfg_text = SLConfig._file2dict(osp.join(cfg_dir, f))
cfg_dict_list.append(_cfg_dict)
cfg_text_list.append(_cfg_text)
base_cfg_dict = dict()
for c in cfg_dict_list:
if len(base_cfg_dict.keys() & c.keys()) > 0:
raise KeyError("Duplicate key is not allowed among bases")
                    # TODO Allow the duplicate key while warning the user
base_cfg_dict.update(c)
base_cfg_dict = SLConfig._merge_a_into_b(cfg_dict, base_cfg_dict)
cfg_dict = base_cfg_dict
# merge cfg_text
cfg_text_list.append(cfg_text)
cfg_text = "\n".join(cfg_text_list)
return cfg_dict, cfg_text
@staticmethod
def _merge_a_into_b(a, b):
"""merge dict `a` into dict `b` (non-inplace).
values in `a` will overwrite `b`.
copy first to avoid inplace modification
Args:
            a (dict): the dict whose values take precedence
            b (dict): the base dict (merged non-inplace)
        Returns:
            dict: the merged result
"""
# import ipdb; ipdb.set_trace()
if not isinstance(a, dict):
return a
b = b.copy()
for k, v in a.items():
if isinstance(v, dict) and k in b and not v.pop(DELETE_KEY, False):
if not isinstance(b[k], dict) and not isinstance(b[k], list):
# if :
# import ipdb; ipdb.set_trace()
raise TypeError(
f"{k}={v} in child config cannot inherit from base "
f"because {k} is a dict in the child config but is of "
f"type {type(b[k])} in base config. You may set "
f"`{DELETE_KEY}=True` to ignore the base config"
)
b[k] = SLConfig._merge_a_into_b(v, b[k])
elif isinstance(b, list):
try:
_ = int(k)
                except (ValueError, TypeError):
raise TypeError(
f"b is a list, " f"index {k} should be an int when input but {type(k)}"
)
b[int(k)] = SLConfig._merge_a_into_b(v, b[int(k)])
else:
b[k] = v
return b
@staticmethod
def fromfile(filename):
cfg_dict, cfg_text = SLConfig._file2dict(filename)
return SLConfig(cfg_dict, cfg_text=cfg_text, filename=filename)
def __init__(self, cfg_dict=None, cfg_text=None, filename=None):
if cfg_dict is None:
cfg_dict = dict()
elif not isinstance(cfg_dict, dict):
raise TypeError("cfg_dict must be a dict, but " f"got {type(cfg_dict)}")
for key in cfg_dict:
if key in RESERVED_KEYS:
raise KeyError(f"{key} is reserved for config file")
super(SLConfig, self).__setattr__("_cfg_dict", ConfigDict(cfg_dict))
super(SLConfig, self).__setattr__("_filename", filename)
if cfg_text:
text = cfg_text
elif filename:
with open(filename, "r") as f:
text = f.read()
else:
text = ""
super(SLConfig, self).__setattr__("_text", text)
@property
def filename(self):
return self._filename
@property
def text(self):
return self._text
@property
def pretty_text(self):
indent = 4
def _indent(s_, num_spaces):
s = s_.split("\n")
if len(s) == 1:
return s_
first = s.pop(0)
s = [(num_spaces * " ") + line for line in s]
s = "\n".join(s)
s = first + "\n" + s
return s
def _format_basic_types(k, v, use_mapping=False):
if isinstance(v, str):
v_str = f"'{v}'"
else:
v_str = str(v)
if use_mapping:
k_str = f"'{k}'" if isinstance(k, str) else str(k)
attr_str = f"{k_str}: {v_str}"
else:
attr_str = f"{str(k)}={v_str}"
attr_str = _indent(attr_str, indent)
return attr_str
def _format_list(k, v, use_mapping=False):
# check if all items in the list are dict
if all(isinstance(_, dict) for _ in v):
v_str = "[\n"
v_str += "\n".join(
f"dict({_indent(_format_dict(v_), indent)})," for v_ in v
).rstrip(",")
if use_mapping:
k_str = f"'{k}'" if isinstance(k, str) else str(k)
attr_str = f"{k_str}: {v_str}"
else:
attr_str = f"{str(k)}={v_str}"
attr_str = _indent(attr_str, indent) + "]"
else:
attr_str = _format_basic_types(k, v, use_mapping)
return attr_str
def _contain_invalid_identifier(dict_str):
contain_invalid_identifier = False
for key_name in dict_str:
contain_invalid_identifier |= not str(key_name).isidentifier()
return contain_invalid_identifier
def _format_dict(input_dict, outest_level=False):
r = ""
s = []
use_mapping = _contain_invalid_identifier(input_dict)
if use_mapping:
r += "{"
for idx, (k, v) in enumerate(input_dict.items()):
is_last = idx >= len(input_dict) - 1
end = "" if outest_level or is_last else ","
if isinstance(v, dict):
v_str = "\n" + _format_dict(v)
if use_mapping:
k_str = f"'{k}'" if isinstance(k, str) else str(k)
attr_str = f"{k_str}: dict({v_str}"
else:
attr_str = f"{str(k)}=dict({v_str}"
attr_str = _indent(attr_str, indent) + ")" + end
elif isinstance(v, list):
attr_str = _format_list(k, v, use_mapping) + end
else:
attr_str = _format_basic_types(k, v, use_mapping) + end
s.append(attr_str)
r += "\n".join(s)
if use_mapping:
r += "}"
return r
cfg_dict = self._cfg_dict.to_dict()
text = _format_dict(cfg_dict, outest_level=True)
# copied from setup.cfg
yapf_style = dict(
based_on_style="pep8",
blank_line_before_nested_class_or_def=True,
split_before_expression_after_opening_paren=True,
)
text, _ = FormatCode(text, style_config=yapf_style, verify=True)
return text
def __repr__(self):
return f"Config (path: {self.filename}): {self._cfg_dict.__repr__()}"
def __len__(self):
return len(self._cfg_dict)
def __getattr__(self, name):
# # debug
# print('+'*15)
# print('name=%s' % name)
# print("addr:", id(self))
# # print('type(self):', type(self))
# print(self.__dict__)
# print('+'*15)
# if self.__dict__ == {}:
# raise ValueError
return getattr(self._cfg_dict, name)
def __getitem__(self, name):
return self._cfg_dict.__getitem__(name)
def __setattr__(self, name, value):
if isinstance(value, dict):
value = ConfigDict(value)
self._cfg_dict.__setattr__(name, value)
def __setitem__(self, name, value):
if isinstance(value, dict):
value = ConfigDict(value)
self._cfg_dict.__setitem__(name, value)
def __iter__(self):
return iter(self._cfg_dict)
def dump(self, file=None):
# import ipdb; ipdb.set_trace()
if file is None:
return self.pretty_text
else:
with open(file, "w") as f:
f.write(self.pretty_text)
def merge_from_dict(self, options):
"""Merge list into cfg_dict
Merge the dict parsed by MultipleKVAction into this cfg.
Examples:
>>> options = {'model.backbone.depth': 50,
... 'model.backbone.with_cp':True}
>>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet'))))
>>> cfg.merge_from_dict(options)
>>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')
>>> assert cfg_dict == dict(
... model=dict(backbone=dict(depth=50, with_cp=True)))
Args:
options (dict): dict of configs to merge from.
"""
option_cfg_dict = {}
for full_key, v in options.items():
d = option_cfg_dict
key_list = full_key.split(".")
for subkey in key_list[:-1]:
d.setdefault(subkey, ConfigDict())
d = d[subkey]
subkey = key_list[-1]
d[subkey] = v
cfg_dict = super(SLConfig, self).__getattribute__("_cfg_dict")
super(SLConfig, self).__setattr__(
"_cfg_dict", SLConfig._merge_a_into_b(option_cfg_dict, cfg_dict)
)
# for multiprocess
def __setstate__(self, state):
self.__init__(state)
def copy(self):
return SLConfig(self._cfg_dict.copy())
def deepcopy(self):
return SLConfig(self._cfg_dict.deepcopy())
class DictAction(Action):
"""
argparse action to split an argument into KEY=VALUE form
on the first = and append to a dictionary. List options should
be passed as comma separated values, i.e KEY=V1,V2,V3
"""
@staticmethod
def _parse_int_float_bool(val):
try:
return int(val)
except ValueError:
pass
try:
return float(val)
except ValueError:
pass
if val.lower() in ["true", "false"]:
return True if val.lower() == "true" else False
if val.lower() in ["none", "null"]:
return None
return val
def __call__(self, parser, namespace, values, option_string=None):
options = {}
for kv in values:
key, val = kv.split("=", maxsplit=1)
val = [self._parse_int_float_bool(v) for v in val.split(",")]
if len(val) == 1:
val = val[0]
options[key] = val
setattr(namespace, self.dest, options)
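# Illustrative sketch (added for clarity; not part of the original module):
# plug DictAction into argparse so "--options lr=0.1 gpus=0,1" becomes a dict.
def _demo_dict_action():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--options", nargs="+", action=DictAction)
    args = parser.parse_args(["--options", "lr=0.1", "gpus=0,1", "use_ema=true"])
    print(args.options)  # {'lr': 0.1, 'gpus': [0, 1], 'use_ema': True}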
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/util/slconfig.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utilities for bounding box manipulation and GIoU.
"""
import torch
from torchvision.ops.boxes import box_area
def box_cxcywh_to_xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
def box_xyxy_to_cxcywh(x):
x0, y0, x1, y1 = x.unbind(-1)
b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)]
return torch.stack(b, dim=-1)
# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
area1 = box_area(boxes1)
area2 = box_area(boxes2)
# import ipdb; ipdb.set_trace()
lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2]
rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2]
wh = (rb - lt).clamp(min=0) # [N,M,2]
inter = wh[:, :, 0] * wh[:, :, 1] # [N,M]
union = area1[:, None] + area2 - inter
iou = inter / (union + 1e-6)
return iou, union
def generalized_box_iou(boxes1, boxes2):
"""
Generalized IoU from https://giou.stanford.edu/
The boxes should be in [x0, y0, x1, y1] format
Returns a [N, M] pairwise matrix, where N = len(boxes1)
and M = len(boxes2)
"""
# degenerate boxes gives inf / nan results
# so do an early check
assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
# except:
# import ipdb; ipdb.set_trace()
iou, union = box_iou(boxes1, boxes2)
lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
wh = (rb - lt).clamp(min=0) # [N,M,2]
area = wh[:, :, 0] * wh[:, :, 1]
return iou - (area - union) / (area + 1e-6)
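# Illustrative sketch (added for clarity; not part of the original module):
# GIoU for xyxy boxes; overlapping boxes score near IoU, disjoint boxes go negative.
def _demo_generalized_box_iou():
    boxes1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
    boxes2 = torch.tensor([[1.0, 1.0, 3.0, 3.0], [4.0, 4.0, 5.0, 5.0]])
    print(generalized_box_iou(boxes1, boxes2))  # shape [1, 2]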
# modified from torchvision to also return the union
def box_iou_pairwise(boxes1, boxes2):
area1 = box_area(boxes1)
area2 = box_area(boxes2)
lt = torch.max(boxes1[:, :2], boxes2[:, :2]) # [N,2]
rb = torch.min(boxes1[:, 2:], boxes2[:, 2:]) # [N,2]
wh = (rb - lt).clamp(min=0) # [N,2]
inter = wh[:, 0] * wh[:, 1] # [N]
union = area1 + area2 - inter
iou = inter / union
return iou, union
def generalized_box_iou_pairwise(boxes1, boxes2):
"""
Generalized IoU from https://giou.stanford.edu/
Input:
- boxes1, boxes2: N,4
Output:
    - giou: N
"""
# degenerate boxes gives inf / nan results
# so do an early check
assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
assert boxes1.shape == boxes2.shape
    iou, union = box_iou_pairwise(boxes1, boxes2)  # [N]
lt = torch.min(boxes1[:, :2], boxes2[:, :2])
rb = torch.max(boxes1[:, 2:], boxes2[:, 2:])
wh = (rb - lt).clamp(min=0) # [N,2]
area = wh[:, 0] * wh[:, 1]
return iou - (area - union) / area
def masks_to_boxes(masks):
"""Compute the bounding boxes around the provided masks
The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.
Returns a [N, 4] tensors, with the boxes in xyxy format
"""
if masks.numel() == 0:
return torch.zeros((0, 4), device=masks.device)
h, w = masks.shape[-2:]
y = torch.arange(0, h, dtype=torch.float)
x = torch.arange(0, w, dtype=torch.float)
y, x = torch.meshgrid(y, x)
x_mask = masks * x.unsqueeze(0)
x_max = x_mask.flatten(1).max(-1)[0]
x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
y_mask = masks * y.unsqueeze(0)
y_max = y_mask.flatten(1).max(-1)[0]
y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]
return torch.stack([x_min, y_min, x_max, y_max], 1)
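# Illustrative sketch (added for clarity; not part of the original module):
# recover the xyxy bounding box of a simple rectangular mask.
def _demo_masks_to_boxes():
    mask = torch.zeros(1, 10, 10)
    mask[0, 2:5, 3:8] = 1.0
    print(masks_to_boxes(mask))  # roughly [[3., 2., 7., 4.]]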
if __name__ == "__main__":
x = torch.rand(5, 4)
y = torch.rand(3, 4)
iou, union = box_iou(x, y)
import ipdb
ipdb.set_trace()
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/util/box_ops.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/util/__init__.py |
import os
import random
from typing import List
import torch
def create_positive_map_from_span(tokenized, token_span, max_text_len=256):
"""construct a map such that positive_map[i,j] = True iff box i is associated to token j
Input:
- tokenized:
- input_ids: Tensor[1, ntokens]
- attention_mask: Tensor[1, ntokens]
- token_span: list with length num_boxes.
- each item: [start_idx, end_idx]
"""
positive_map = torch.zeros((len(token_span), max_text_len), dtype=torch.float)
for j, tok_list in enumerate(token_span):
for (beg, end) in tok_list:
beg_pos = tokenized.char_to_token(beg)
end_pos = tokenized.char_to_token(end - 1)
if beg_pos is None:
try:
beg_pos = tokenized.char_to_token(beg + 1)
if beg_pos is None:
beg_pos = tokenized.char_to_token(beg + 2)
except:
beg_pos = None
if end_pos is None:
try:
end_pos = tokenized.char_to_token(end - 2)
if end_pos is None:
end_pos = tokenized.char_to_token(end - 3)
except:
end_pos = None
if beg_pos is None or end_pos is None:
continue
assert beg_pos is not None and end_pos is not None
if os.environ.get("SHILONG_DEBUG_ONLY_ONE_POS", None) == "TRUE":
positive_map[j, beg_pos] = 1
break
else:
positive_map[j, beg_pos : end_pos + 1].fill_(1)
return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)
def build_captions_and_token_span(cat_list, force_lowercase):
"""
Return:
captions: str
cat2tokenspan: dict
{
'dog': [[0, 2]],
...
}
"""
cat2tokenspan = {}
captions = ""
for catname in cat_list:
class_name = catname
if force_lowercase:
class_name = class_name.lower()
if "/" in class_name:
class_name_list: List = class_name.strip().split("/")
class_name_list.append(class_name)
class_name: str = random.choice(class_name_list)
tokens_positive_i = []
subnamelist = [i.strip() for i in class_name.strip().split(" ")]
for subname in subnamelist:
if len(subname) == 0:
continue
if len(captions) > 0:
captions = captions + " "
            start_idx = len(captions)
            end_idx = start_idx + len(subname)
            tokens_positive_i.append([start_idx, end_idx])
captions = captions + subname
if len(tokens_positive_i) > 0:
captions = captions + " ."
cat2tokenspan[class_name] = tokens_positive_i
return captions, cat2tokenspan
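# Illustrative sketch (added for clarity; not part of the original module):
# build a caption and its character-level spans for a small category list.
def _demo_build_captions_and_token_span():
    captions, cat2tokenspan = build_captions_and_token_span(
        ["dog", "fire hydrant"], force_lowercase=True
    )
    # captions == "dog . fire hydrant ."
    # cat2tokenspan == {"dog": [[0, 3]], "fire hydrant": [[6, 10], [11, 18]]}
    print(captions, cat2tokenspan)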
def build_id2posspan_and_caption(category_dict: dict):
"""Build id2pos_span and caption from category_dict
Args:
category_dict (dict): category_dict
"""
cat_list = [item["name"].lower() for item in category_dict]
id2catname = {item["id"]: item["name"].lower() for item in category_dict}
caption, cat2posspan = build_captions_and_token_span(cat_list, force_lowercase=True)
id2posspan = {catid: cat2posspan[catname] for catid, catname in id2catname.items()}
return id2posspan, caption
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/util/vl_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
import logging
import os
import sys
from termcolor import colored
class _ColorfulFormatter(logging.Formatter):
def __init__(self, *args, **kwargs):
self._root_name = kwargs.pop("root_name") + "."
self._abbrev_name = kwargs.pop("abbrev_name", "")
if len(self._abbrev_name):
self._abbrev_name = self._abbrev_name + "."
super(_ColorfulFormatter, self).__init__(*args, **kwargs)
def formatMessage(self, record):
record.name = record.name.replace(self._root_name, self._abbrev_name)
log = super(_ColorfulFormatter, self).formatMessage(record)
if record.levelno == logging.WARNING:
prefix = colored("WARNING", "red", attrs=["blink"])
elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
prefix = colored("ERROR", "red", attrs=["blink", "underline"])
else:
return log
return prefix + " " + log
# so that calling setup_logger multiple times won't add many handlers
@functools.lru_cache()
def setup_logger(output=None, distributed_rank=0, *, color=True, name="imagenet", abbrev_name=None):
"""
Initialize the detectron2 logger and set its verbosity level to "INFO".
Args:
output (str): a file name or a directory to save log. If None, will not save log file.
If ends with ".txt" or ".log", assumed to be a file name.
Otherwise, logs will be saved to `output/log.txt`.
name (str): the root module name of this logger
Returns:
logging.Logger: a logger
"""
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.propagate = False
if abbrev_name is None:
abbrev_name = name
plain_formatter = logging.Formatter(
"[%(asctime)s.%(msecs)03d]: %(message)s", datefmt="%m/%d %H:%M:%S"
)
# stdout logging: master only
if distributed_rank == 0:
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
if color:
formatter = _ColorfulFormatter(
colored("[%(asctime)s.%(msecs)03d]: ", "green") + "%(message)s",
datefmt="%m/%d %H:%M:%S",
root_name=name,
abbrev_name=str(abbrev_name),
)
else:
formatter = plain_formatter
ch.setFormatter(formatter)
logger.addHandler(ch)
# file logging: all workers
if output is not None:
if output.endswith(".txt") or output.endswith(".log"):
filename = output
else:
filename = os.path.join(output, "log.txt")
if distributed_rank > 0:
filename = filename + f".rank{distributed_rank}"
os.makedirs(os.path.dirname(filename), exist_ok=True)
fh = logging.StreamHandler(_cached_log_stream(filename))
fh.setLevel(logging.DEBUG)
fh.setFormatter(plain_formatter)
logger.addHandler(fh)
return logger
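# Illustrative sketch (added for clarity; not part of the original module):
# create a colored stdout logger that also writes to ./logs/log.txt
# (a hypothetical output directory).
def _demo_setup_logger():
    logger = setup_logger(output="./logs", distributed_rank=0, name="groundingdino")
    logger.info("logger is ready")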
# cache the opened file object, so that different calls to `setup_logger`
# with the same file name can safely write to the same file.
@functools.lru_cache(maxsize=None)
def _cached_log_stream(filename):
return open(filename, "a")
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/util/logger.py |
import json
import time
class TimeCounter:
    def __init__(self) -> None:
        # initialize the record so timeit() can be used right after construction
        self.clear()
def clear(self):
self.timedict = {}
self.basetime = time.perf_counter()
def timeit(self, name):
nowtime = time.perf_counter() - self.basetime
self.timedict[name] = nowtime
self.basetime = time.perf_counter()
class TimeHolder:
def __init__(self) -> None:
self.timedict = {}
def update(self, _timedict: dict):
for k, v in _timedict.items():
if k not in self.timedict:
self.timedict[k] = AverageMeter(name=k, val_only=True)
self.timedict[k].update(val=v)
def final_res(self):
return {k: v.avg for k, v in self.timedict.items()}
def __str__(self):
return json.dumps(self.final_res(), indent=2)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=":f", val_only=False):
self.name = name
self.fmt = fmt
self.val_only = val_only
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
if self.val_only:
fmtstr = "{name} {val" + self.fmt + "}"
else:
fmtstr = "{name} {val" + self.fmt + "} ({avg" + self.fmt + "})"
return fmtstr.format(**self.__dict__)
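# Illustrative sketch (added for clarity; not part of the original module):
# accumulate a running average with AverageMeter and per-stage timings with TimeHolder.
def _demo_meters():
    loss_meter = AverageMeter("loss", fmt=":.4f")
    for v in [0.9, 0.7, 0.5]:
        loss_meter.update(v)
    print(str(loss_meter))  # "loss 0.5000 (0.7000)"
    holder = TimeHolder()
    holder.update({"forward": 0.12, "backward": 0.30})
    print(holder)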
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/util/time_counter.py |
import argparse
import json
import warnings
from collections import OrderedDict
from copy import deepcopy
from typing import Any, Dict, List
import numpy as np
import torch
from transformers import AutoTokenizer
from groundingdino.util.slconfig import SLConfig
def slprint(x, name="x"):
if isinstance(x, (torch.Tensor, np.ndarray)):
print(f"{name}.shape:", x.shape)
elif isinstance(x, (tuple, list)):
print("type x:", type(x))
for i in range(min(10, len(x))):
slprint(x[i], f"{name}[{i}]")
elif isinstance(x, dict):
for k, v in x.items():
slprint(v, f"{name}[{k}]")
else:
print(f"{name}.type:", type(x))
def clean_state_dict(state_dict):
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if k[:7] == "module.":
k = k[7:] # remove `module.`
new_state_dict[k] = v
return new_state_dict
def renorm(
img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
) -> torch.FloatTensor:
# img: tensor(3,H,W) or tensor(B,3,H,W)
# return: same as img
assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim()
if img.dim() == 3:
        assert img.size(0) == 3, 'img.size(0) should be 3 but "%d". (%s)' % (
img.size(0),
str(img.size()),
)
img_perm = img.permute(1, 2, 0)
mean = torch.Tensor(mean)
std = torch.Tensor(std)
img_res = img_perm * std + mean
return img_res.permute(2, 0, 1)
else: # img.dim() == 4
        assert img.size(1) == 3, 'img.size(1) should be 3 but "%d". (%s)' % (
img.size(1),
str(img.size()),
)
img_perm = img.permute(0, 2, 3, 1)
mean = torch.Tensor(mean)
std = torch.Tensor(std)
img_res = img_perm * std + mean
return img_res.permute(0, 3, 1, 2)
class CocoClassMapper:
def __init__(self) -> None:
self.category_map_str = {
"1": 1,
"2": 2,
"3": 3,
"4": 4,
"5": 5,
"6": 6,
"7": 7,
"8": 8,
"9": 9,
"10": 10,
"11": 11,
"13": 12,
"14": 13,
"15": 14,
"16": 15,
"17": 16,
"18": 17,
"19": 18,
"20": 19,
"21": 20,
"22": 21,
"23": 22,
"24": 23,
"25": 24,
"27": 25,
"28": 26,
"31": 27,
"32": 28,
"33": 29,
"34": 30,
"35": 31,
"36": 32,
"37": 33,
"38": 34,
"39": 35,
"40": 36,
"41": 37,
"42": 38,
"43": 39,
"44": 40,
"46": 41,
"47": 42,
"48": 43,
"49": 44,
"50": 45,
"51": 46,
"52": 47,
"53": 48,
"54": 49,
"55": 50,
"56": 51,
"57": 52,
"58": 53,
"59": 54,
"60": 55,
"61": 56,
"62": 57,
"63": 58,
"64": 59,
"65": 60,
"67": 61,
"70": 62,
"72": 63,
"73": 64,
"74": 65,
"75": 66,
"76": 67,
"77": 68,
"78": 69,
"79": 70,
"80": 71,
"81": 72,
"82": 73,
"84": 74,
"85": 75,
"86": 76,
"87": 77,
"88": 78,
"89": 79,
"90": 80,
}
self.origin2compact_mapper = {int(k): v - 1 for k, v in self.category_map_str.items()}
self.compact2origin_mapper = {int(v - 1): int(k) for k, v in self.category_map_str.items()}
def origin2compact(self, idx):
return self.origin2compact_mapper[int(idx)]
def compact2origin(self, idx):
return self.compact2origin_mapper[int(idx)]
def to_device(item, device):
if isinstance(item, torch.Tensor):
return item.to(device)
elif isinstance(item, list):
return [to_device(i, device) for i in item]
elif isinstance(item, dict):
return {k: to_device(v, device) for k, v in item.items()}
else:
raise NotImplementedError(
"Call Shilong if you use other containers! type: {}".format(type(item))
)
#
def get_gaussian_mean(x, axis, other_axis, softmax=True):
"""
Args:
x (float): Input images(BxCxHxW)
axis (int): The index for weighted mean
other_axis (int): The other index
Returns: weighted index for axis, BxC
"""
mat2line = torch.sum(x, axis=other_axis)
# mat2line = mat2line / mat2line.mean() * 10
if softmax:
u = torch.softmax(mat2line, axis=2)
else:
u = mat2line / (mat2line.sum(2, keepdim=True) + 1e-6)
size = x.shape[axis]
ind = torch.linspace(0, 1, size).to(x.device)
batch = x.shape[0]
channel = x.shape[1]
index = ind.repeat([batch, channel, 1])
mean_position = torch.sum(index * u, dim=2)
return mean_position
def get_expected_points_from_map(hm, softmax=True):
"""get_gaussian_map_from_points
B,C,H,W -> B,N,2 float(0, 1) float(0, 1)
softargmax function
Args:
hm (float): Input images(BxCxHxW)
Returns:
weighted index for axis, BxCx2. float between 0 and 1.
"""
# hm = 10*hm
B, C, H, W = hm.shape
y_mean = get_gaussian_mean(hm, 2, 3, softmax=softmax) # B,C
x_mean = get_gaussian_mean(hm, 3, 2, softmax=softmax) # B,C
# return torch.cat((x_mean.unsqueeze(-1), y_mean.unsqueeze(-1)), 2)
return torch.stack([x_mean, y_mean], dim=2)
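# Illustrative sketch (added for clarity; not part of the original module):
# the soft-argmax of a single-peak heatmap lands near the peak, normalized to (0, 1).
def _demo_soft_argmax():
    hm = torch.zeros(1, 1, 16, 16)
    hm[0, 0, 4, 12] = 50.0  # sharp peak at row y=4, column x=12
    pts = get_expected_points_from_map(hm, softmax=True)  # (B, C, 2) as (x, y)
    print(pts)  # roughly [[[0.80, 0.27]]]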
# Positional encoding (section 5.1)
# borrowed from NeRF
class Embedder:
def __init__(self, **kwargs):
self.kwargs = kwargs
self.create_embedding_fn()
def create_embedding_fn(self):
embed_fns = []
d = self.kwargs["input_dims"]
out_dim = 0
if self.kwargs["include_input"]:
embed_fns.append(lambda x: x)
out_dim += d
max_freq = self.kwargs["max_freq_log2"]
N_freqs = self.kwargs["num_freqs"]
if self.kwargs["log_sampling"]:
freq_bands = 2.0 ** torch.linspace(0.0, max_freq, steps=N_freqs)
else:
freq_bands = torch.linspace(2.0**0.0, 2.0**max_freq, steps=N_freqs)
for freq in freq_bands:
for p_fn in self.kwargs["periodic_fns"]:
embed_fns.append(lambda x, p_fn=p_fn, freq=freq: p_fn(x * freq))
out_dim += d
self.embed_fns = embed_fns
self.out_dim = out_dim
def embed(self, inputs):
return torch.cat([fn(inputs) for fn in self.embed_fns], -1)
def get_embedder(multires, i=0):
import torch.nn as nn
if i == -1:
return nn.Identity(), 3
embed_kwargs = {
"include_input": True,
"input_dims": 3,
"max_freq_log2": multires - 1,
"num_freqs": multires,
"log_sampling": True,
"periodic_fns": [torch.sin, torch.cos],
}
embedder_obj = Embedder(**embed_kwargs)
def embed(x, eo=embedder_obj):
return eo.embed(x)
return embed, embedder_obj.out_dim
class APOPMeter:
def __init__(self) -> None:
self.tp = 0
self.fp = 0
self.tn = 0
self.fn = 0
def update(self, pred, gt):
"""
Input:
pred, gt: Tensor()
"""
assert pred.shape == gt.shape
self.tp += torch.logical_and(pred == 1, gt == 1).sum().item()
self.fp += torch.logical_and(pred == 1, gt == 0).sum().item()
self.tn += torch.logical_and(pred == 0, gt == 0).sum().item()
        self.fn += torch.logical_and(pred == 0, gt == 1).sum().item()
def update_cm(self, tp, fp, tn, fn):
self.tp += tp
self.fp += fp
self.tn += tn
        self.fn += fn
def inverse_sigmoid(x, eps=1e-5):
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
return torch.log(x1 / x2)
def get_raw_dict(args):
"""
    Return the dict contained in args.
e.g:
>>> with open(path, 'w') as f:
json.dump(get_raw_dict(args), f, indent=2)
"""
if isinstance(args, argparse.Namespace):
return vars(args)
elif isinstance(args, dict):
return args
elif isinstance(args, SLConfig):
return args._cfg_dict
else:
raise NotImplementedError("Unknown type {}".format(type(args)))
def stat_tensors(tensor):
assert tensor.dim() == 1
tensor_sm = tensor.softmax(0)
entropy = (tensor_sm * torch.log(tensor_sm + 1e-9)).sum()
return {
"max": tensor.max(),
"min": tensor.min(),
"mean": tensor.mean(),
"var": tensor.var(),
"std": tensor.var() ** 0.5,
"entropy": entropy,
}
class NiceRepr:
"""Inherit from this class and define ``__nice__`` to "nicely" print your
objects.
Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function
Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.
If the inheriting class has a ``__len__``, method then the default
``__nice__`` method will return its length.
Example:
>>> class Foo(NiceRepr):
... def __nice__(self):
... return 'info'
>>> foo = Foo()
>>> assert str(foo) == '<Foo(info)>'
>>> assert repr(foo).startswith('<Foo(info) at ')
Example:
>>> class Bar(NiceRepr):
... pass
>>> bar = Bar()
>>> import pytest
>>> with pytest.warns(None) as record:
>>> assert 'object at' in str(bar)
>>> assert 'object at' in repr(bar)
Example:
>>> class Baz(NiceRepr):
... def __len__(self):
... return 5
>>> baz = Baz()
>>> assert str(baz) == '<Baz(5)>'
"""
def __nice__(self):
"""str: a "nice" summary string describing this module"""
if hasattr(self, "__len__"):
# It is a common pattern for objects to use __len__ in __nice__
# As a convenience we define a default __nice__ for these objects
return str(len(self))
else:
# In all other cases force the subclass to overload __nice__
raise NotImplementedError(f"Define the __nice__ method for {self.__class__!r}")
def __repr__(self):
"""str: the string of the module"""
try:
nice = self.__nice__()
classname = self.__class__.__name__
return f"<{classname}({nice}) at {hex(id(self))}>"
except NotImplementedError as ex:
warnings.warn(str(ex), category=RuntimeWarning)
return object.__repr__(self)
def __str__(self):
"""str: the string of the module"""
try:
classname = self.__class__.__name__
nice = self.__nice__()
return f"<{classname}({nice})>"
except NotImplementedError as ex:
warnings.warn(str(ex), category=RuntimeWarning)
return object.__repr__(self)
def ensure_rng(rng=None):
"""Coerces input into a random number generator.
If the input is None, then a global random state is returned.
If the input is a numeric value, then that is used as a seed to construct a
random state. Otherwise the input is returned as-is.
Adapted from [1]_.
Args:
rng (int | numpy.random.RandomState | None):
if None, then defaults to the global rng. Otherwise this can be an
integer or a RandomState class
Returns:
(numpy.random.RandomState) : rng -
a numpy random number generator
References:
.. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501
"""
if rng is None:
rng = np.random.mtrand._rand
elif isinstance(rng, int):
rng = np.random.RandomState(rng)
else:
rng = rng
return rng
def random_boxes(num=1, scale=1, rng=None):
"""Simple version of ``kwimage.Boxes.random``
Returns:
Tensor: shape (n, 4) in x1, y1, x2, y2 format.
References:
https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390
Example:
>>> num = 3
>>> scale = 512
>>> rng = 0
>>> boxes = random_boxes(num, scale, rng)
>>> print(boxes)
tensor([[280.9925, 278.9802, 308.6148, 366.1769],
[216.9113, 330.6978, 224.0446, 456.5878],
[405.3632, 196.3221, 493.3953, 270.7942]])
"""
rng = ensure_rng(rng)
tlbr = rng.rand(num, 4).astype(np.float32)
tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])
tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])
br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])
br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])
tlbr[:, 0] = tl_x * scale
tlbr[:, 1] = tl_y * scale
tlbr[:, 2] = br_x * scale
tlbr[:, 3] = br_y * scale
boxes = torch.from_numpy(tlbr)
return boxes
class ModelEma(torch.nn.Module):
def __init__(self, model, decay=0.9997, device=None):
super(ModelEma, self).__init__()
# make a copy of the model for accumulating moving average of weights
self.module = deepcopy(model)
self.module.eval()
# import ipdb; ipdb.set_trace()
self.decay = decay
self.device = device # perform ema on different device from model if set
if self.device is not None:
self.module.to(device=device)
def _update(self, model, update_fn):
with torch.no_grad():
for ema_v, model_v in zip(
self.module.state_dict().values(), model.state_dict().values()
):
if self.device is not None:
model_v = model_v.to(device=self.device)
ema_v.copy_(update_fn(ema_v, model_v))
def update(self, model):
self._update(model, update_fn=lambda e, m: self.decay * e + (1.0 - self.decay) * m)
def set(self, model):
self._update(model, update_fn=lambda e, m: m)
class BestMetricSingle:
def __init__(self, init_res=0.0, better="large") -> None:
self.init_res = init_res
self.best_res = init_res
self.best_ep = -1
self.better = better
assert better in ["large", "small"]
def isbetter(self, new_res, old_res):
if self.better == "large":
return new_res > old_res
if self.better == "small":
return new_res < old_res
def update(self, new_res, ep):
if self.isbetter(new_res, self.best_res):
self.best_res = new_res
self.best_ep = ep
return True
return False
def __str__(self) -> str:
return "best_res: {}\t best_ep: {}".format(self.best_res, self.best_ep)
def __repr__(self) -> str:
return self.__str__()
def summary(self) -> dict:
return {
"best_res": self.best_res,
"best_ep": self.best_ep,
}
class BestMetricHolder:
def __init__(self, init_res=0.0, better="large", use_ema=False) -> None:
self.best_all = BestMetricSingle(init_res, better)
self.use_ema = use_ema
if use_ema:
self.best_ema = BestMetricSingle(init_res, better)
self.best_regular = BestMetricSingle(init_res, better)
def update(self, new_res, epoch, is_ema=False):
"""
return if the results is the best.
"""
if not self.use_ema:
return self.best_all.update(new_res, epoch)
else:
if is_ema:
self.best_ema.update(new_res, epoch)
return self.best_all.update(new_res, epoch)
else:
self.best_regular.update(new_res, epoch)
return self.best_all.update(new_res, epoch)
def summary(self):
if not self.use_ema:
return self.best_all.summary()
res = {}
res.update({f"all_{k}": v for k, v in self.best_all.summary().items()})
res.update({f"regular_{k}": v for k, v in self.best_regular.summary().items()})
res.update({f"ema_{k}": v for k, v in self.best_ema.summary().items()})
return res
def __repr__(self) -> str:
return json.dumps(self.summary(), indent=2)
def __str__(self) -> str:
return self.__repr__()
def targets_to(targets: List[Dict[str, Any]], device):
"""Moves the target dicts to the given device."""
excluded_keys = [
"questionId",
"tokens_positive",
"strings_positive",
"tokens",
"dataset_name",
"sentence_id",
"original_img_id",
"nb_eval",
"task_id",
"original_id",
"token_span",
"caption",
"dataset_type",
]
return [
{k: v.to(device) if k not in excluded_keys else v for k, v in t.items()} for t in targets
]
def get_phrases_from_posmap(
posmap: torch.BoolTensor, tokenized: Dict, tokenizer: AutoTokenizer, left_idx: int = 0, right_idx: int = 255
):
assert isinstance(posmap, torch.Tensor), "posmap must be torch.Tensor"
if posmap.dim() == 1:
posmap[0: left_idx + 1] = False
posmap[right_idx:] = False
non_zero_idx = posmap.nonzero(as_tuple=True)[0].tolist()
token_ids = [tokenized["input_ids"][i] for i in non_zero_idx]
return tokenizer.decode(token_ids)
else:
raise NotImplementedError("posmap must be 1-dim")
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/util/utils.py |
# ==========================================================
# Modified from mmcv
# ==========================================================
import json
import pickle
from abc import ABCMeta, abstractmethod
from pathlib import Path
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
# ===========================
# Register handler
# ===========================
class BaseFileHandler(metaclass=ABCMeta):
@abstractmethod
def load_from_fileobj(self, file, **kwargs):
pass
@abstractmethod
def dump_to_fileobj(self, obj, file, **kwargs):
pass
@abstractmethod
def dump_to_str(self, obj, **kwargs):
pass
def load_from_path(self, filepath, mode="r", **kwargs):
with open(filepath, mode) as f:
return self.load_from_fileobj(f, **kwargs)
def dump_to_path(self, obj, filepath, mode="w", **kwargs):
with open(filepath, mode) as f:
self.dump_to_fileobj(obj, f, **kwargs)
class JsonHandler(BaseFileHandler):
def load_from_fileobj(self, file):
return json.load(file)
def dump_to_fileobj(self, obj, file, **kwargs):
json.dump(obj, file, **kwargs)
def dump_to_str(self, obj, **kwargs):
return json.dumps(obj, **kwargs)
class PickleHandler(BaseFileHandler):
def load_from_fileobj(self, file, **kwargs):
return pickle.load(file, **kwargs)
def load_from_path(self, filepath, **kwargs):
return super(PickleHandler, self).load_from_path(filepath, mode="rb", **kwargs)
def dump_to_str(self, obj, **kwargs):
kwargs.setdefault("protocol", 2)
return pickle.dumps(obj, **kwargs)
def dump_to_fileobj(self, obj, file, **kwargs):
kwargs.setdefault("protocol", 2)
pickle.dump(obj, file, **kwargs)
def dump_to_path(self, obj, filepath, **kwargs):
super(PickleHandler, self).dump_to_path(obj, filepath, mode="wb", **kwargs)
class YamlHandler(BaseFileHandler):
def load_from_fileobj(self, file, **kwargs):
kwargs.setdefault("Loader", Loader)
return yaml.load(file, **kwargs)
def dump_to_fileobj(self, obj, file, **kwargs):
kwargs.setdefault("Dumper", Dumper)
yaml.dump(obj, file, **kwargs)
def dump_to_str(self, obj, **kwargs):
kwargs.setdefault("Dumper", Dumper)
return yaml.dump(obj, **kwargs)
file_handlers = {
"json": JsonHandler(),
"yaml": YamlHandler(),
"yml": YamlHandler(),
"pickle": PickleHandler(),
"pkl": PickleHandler(),
}
# ===========================
# load and dump
# ===========================
def is_str(x):
"""Whether the input is an string instance.
Note: This method is deprecated since python 2 is no longer supported.
"""
return isinstance(x, str)
def slload(file, file_format=None, **kwargs):
"""Load data from json/yaml/pickle files.
This method provides a unified api for loading data from serialized files.
Args:
file (str or :obj:`Path` or file-like object): Filename or a file-like
object.
file_format (str, optional): If not specified, the file format will be
inferred from the file extension, otherwise use the specified one.
Currently supported formats include "json", "yaml/yml" and
"pickle/pkl".
Returns:
The content from the file.
"""
if isinstance(file, Path):
file = str(file)
if file_format is None and is_str(file):
file_format = file.split(".")[-1]
if file_format not in file_handlers:
raise TypeError(f"Unsupported format: {file_format}")
handler = file_handlers[file_format]
if is_str(file):
obj = handler.load_from_path(file, **kwargs)
elif hasattr(file, "read"):
obj = handler.load_from_fileobj(file, **kwargs)
else:
raise TypeError('"file" must be a filepath str or a file-object')
return obj
def sldump(obj, file=None, file_format=None, **kwargs):
"""Dump data to json/yaml/pickle strings or files.
This method provides a unified api for dumping data as strings or to files,
and also supports custom arguments for each file format.
Args:
obj (any): The python object to be dumped.
file (str or :obj:`Path` or file-like object, optional): If not
            specified, then the object is dumped to a str, otherwise to a file
            specified by the filename or file-like object.
        file_format (str, optional): Same as :func:`slload`.
    Returns:
        The serialized string if ``file`` is None, otherwise None.
"""
if isinstance(file, Path):
file = str(file)
if file_format is None:
if is_str(file):
file_format = file.split(".")[-1]
elif file is None:
raise ValueError("file_format must be specified since file is None")
if file_format not in file_handlers:
raise TypeError(f"Unsupported format: {file_format}")
handler = file_handlers[file_format]
if file is None:
return handler.dump_to_str(obj, **kwargs)
elif is_str(file):
handler.dump_to_path(obj, file, **kwargs)
elif hasattr(file, "write"):
handler.dump_to_fileobj(obj, file, **kwargs)
else:
raise TypeError('"file" must be a filename str or a file-object')
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/util/slio.py |
from typing import Tuple, List
import cv2
import numpy as np
import supervision as sv
import torch
from PIL import Image
from torchvision.ops import box_convert
import bisect
import groundingdino.datasets.transforms as T
from groundingdino.models import build_model
from groundingdino.util.misc import clean_state_dict
from groundingdino.util.slconfig import SLConfig
from groundingdino.util.utils import get_phrases_from_posmap
# ----------------------------------------------------------------------------------------------------------------------
# OLD API
# ----------------------------------------------------------------------------------------------------------------------
def preprocess_caption(caption: str) -> str:
result = caption.lower().strip()
if result.endswith("."):
return result
return result + "."
def load_model(model_config_path: str, model_checkpoint_path: str, device: str = "cuda"):
args = SLConfig.fromfile(model_config_path)
args.device = device
model = build_model(args)
checkpoint = torch.load(model_checkpoint_path, map_location="cpu")
model.load_state_dict(clean_state_dict(checkpoint["model"]), strict=False)
model.eval()
return model
def load_image(image_path: str) -> Tuple[np.ndarray, torch.Tensor]:
transform = T.Compose(
[
T.RandomResize([800], max_size=1333),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
image_source = Image.open(image_path).convert("RGB")
image = np.asarray(image_source)
image_transformed, _ = transform(image_source, None)
return image, image_transformed
def predict(
model,
image: torch.Tensor,
caption: str,
box_threshold: float,
text_threshold: float,
device: str = "cuda",
remove_combined: bool = False
) -> Tuple[torch.Tensor, torch.Tensor, List[str]]:
caption = preprocess_caption(caption=caption)
model = model.to(device)
image = image.to(device)
with torch.no_grad():
outputs = model(image[None], captions=[caption])
prediction_logits = outputs["pred_logits"].cpu().sigmoid()[0] # prediction_logits.shape = (nq, 256)
prediction_boxes = outputs["pred_boxes"].cpu()[0] # prediction_boxes.shape = (nq, 4)
mask = prediction_logits.max(dim=1)[0] > box_threshold
logits = prediction_logits[mask] # logits.shape = (n, 256)
boxes = prediction_boxes[mask] # boxes.shape = (n, 4)
tokenizer = model.tokenizer
tokenized = tokenizer(caption)
if remove_combined:
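        # 101, 102 and 1012 are the bert-base-uncased token ids for [CLS], [SEP]
        # and "."; they mark the boundaries between sub-captions.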
sep_idx = [i for i in range(len(tokenized['input_ids'])) if tokenized['input_ids'][i] in [101, 102, 1012]]
phrases = []
for logit in logits:
max_idx = logit.argmax()
insert_idx = bisect.bisect_left(sep_idx, max_idx)
right_idx = sep_idx[insert_idx]
left_idx = sep_idx[insert_idx - 1]
phrases.append(get_phrases_from_posmap(logit > text_threshold, tokenized, tokenizer, left_idx, right_idx).replace('.', ''))
else:
phrases = [
get_phrases_from_posmap(logit > text_threshold, tokenized, tokenizer).replace('.', '')
for logit
in logits
]
return boxes, logits.max(dim=1)[0], phrases
def annotate(image_source: np.ndarray, boxes: torch.Tensor, logits: torch.Tensor, phrases: List[str]) -> np.ndarray:
h, w, _ = image_source.shape
boxes = boxes * torch.Tensor([w, h, w, h])
xyxy = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xyxy").numpy()
detections = sv.Detections(xyxy=xyxy)
labels = [
f"{phrase} {logit:.2f}"
for phrase, logit
in zip(phrases, logits)
]
box_annotator = sv.BoxAnnotator()
annotated_frame = cv2.cvtColor(image_source, cv2.COLOR_RGB2BGR)
annotated_frame = box_annotator.annotate(scene=annotated_frame, detections=detections, labels=labels)
return annotated_frame
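# Illustrative end-to-end sketch of this functional API; the config, checkpoint and
# image paths as well as the thresholds are placeholders.
#
#   model = load_model("GroundingDINO_SwinT_OGC.py", "groundingdino_swint_ogc.pth")
#   image_source, image = load_image("dog.jpg")
#   boxes, logits, phrases = predict(
#       model, image, caption="dog. ball.", box_threshold=0.35, text_threshold=0.25
#   )
#   annotated = annotate(image_source=image_source, boxes=boxes, logits=logits, phrases=phrases)
#   cv2.imwrite("annotated.jpg", annotated)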
# ----------------------------------------------------------------------------------------------------------------------
# NEW API
# ----------------------------------------------------------------------------------------------------------------------
class Model:
def __init__(
self,
model_config_path: str,
model_checkpoint_path: str,
device: str = "cuda"
):
self.model = load_model(
model_config_path=model_config_path,
model_checkpoint_path=model_checkpoint_path,
device=device
).to(device)
self.device = device
def predict_with_caption(
self,
image: np.ndarray,
caption: str,
box_threshold: float = 0.35,
text_threshold: float = 0.25
) -> Tuple[sv.Detections, List[str]]:
"""
import cv2
image = cv2.imread(IMAGE_PATH)
model = Model(model_config_path=CONFIG_PATH, model_checkpoint_path=WEIGHTS_PATH)
detections, labels = model.predict_with_caption(
image=image,
caption=caption,
box_threshold=BOX_THRESHOLD,
text_threshold=TEXT_THRESHOLD
)
import supervision as sv
box_annotator = sv.BoxAnnotator()
annotated_image = box_annotator.annotate(scene=image, detections=detections, labels=labels)
"""
processed_image = Model.preprocess_image(image_bgr=image).to(self.device)
boxes, logits, phrases = predict(
model=self.model,
image=processed_image,
caption=caption,
box_threshold=box_threshold,
text_threshold=text_threshold,
device=self.device)
source_h, source_w, _ = image.shape
detections = Model.post_process_result(
source_h=source_h,
source_w=source_w,
boxes=boxes,
logits=logits)
return detections, phrases
def predict_with_classes(
self,
image: np.ndarray,
classes: List[str],
box_threshold: float,
text_threshold: float
) -> sv.Detections:
"""
import cv2
image = cv2.imread(IMAGE_PATH)
model = Model(model_config_path=CONFIG_PATH, model_checkpoint_path=WEIGHTS_PATH)
detections = model.predict_with_classes(
image=image,
classes=CLASSES,
box_threshold=BOX_THRESHOLD,
text_threshold=TEXT_THRESHOLD
)
import supervision as sv
box_annotator = sv.BoxAnnotator()
annotated_image = box_annotator.annotate(scene=image, detections=detections)
"""
caption = ". ".join(classes)
processed_image = Model.preprocess_image(image_bgr=image).to(self.device)
boxes, logits, phrases = predict(
model=self.model,
image=processed_image,
caption=caption,
box_threshold=box_threshold,
text_threshold=text_threshold,
device=self.device)
source_h, source_w, _ = image.shape
detections = Model.post_process_result(
source_h=source_h,
source_w=source_w,
boxes=boxes,
logits=logits)
class_id = Model.phrases2classes(phrases=phrases, classes=classes)
detections.class_id = class_id
return detections
@staticmethod
def preprocess_image(image_bgr: np.ndarray) -> torch.Tensor:
transform = T.Compose(
[
T.RandomResize([800], max_size=1333),
T.ToTensor(),
T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
]
)
image_pillow = Image.fromarray(cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB))
image_transformed, _ = transform(image_pillow, None)
return image_transformed
@staticmethod
def post_process_result(
source_h: int,
source_w: int,
boxes: torch.Tensor,
logits: torch.Tensor
) -> sv.Detections:
boxes = boxes * torch.Tensor([source_w, source_h, source_w, source_h])
xyxy = box_convert(boxes=boxes, in_fmt="cxcywh", out_fmt="xyxy").numpy()
confidence = logits.numpy()
return sv.Detections(xyxy=xyxy, confidence=confidence)
@staticmethod
def phrases2classes(phrases: List[str], classes: List[str]) -> np.ndarray:
class_ids = []
for phrase in phrases:
for class_ in classes:
if class_ in phrase:
class_ids.append(classes.index(class_))
break
else:
class_ids.append(None)
return np.array(class_ids)
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/util/inference.py |
from transformers import AutoTokenizer, BertModel, RobertaModel
import os
def get_tokenlizer(text_encoder_type):
if not isinstance(text_encoder_type, str):
# print("text_encoder_type is not a str")
if hasattr(text_encoder_type, "text_encoder_type"):
text_encoder_type = text_encoder_type.text_encoder_type
elif text_encoder_type.get("text_encoder_type", False):
text_encoder_type = text_encoder_type.get("text_encoder_type")
elif os.path.isdir(text_encoder_type) and os.path.exists(text_encoder_type):
pass
else:
raise ValueError(
"Unknown type of text_encoder_type: {}".format(type(text_encoder_type))
)
print("final text_encoder_type: {}".format(text_encoder_type))
tokenizer = AutoTokenizer.from_pretrained(text_encoder_type)
return tokenizer
def get_pretrained_language_model(text_encoder_type):
if text_encoder_type == "bert-base-uncased" or (os.path.isdir(text_encoder_type) and os.path.exists(text_encoder_type)):
return BertModel.from_pretrained(text_encoder_type)
if text_encoder_type == "roberta-base":
return RobertaModel.from_pretrained(text_encoder_type)
raise ValueError("Unknown text_encoder_type {}".format(text_encoder_type))
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/util/get_tokenlizer.py |
# -*- coding: utf-8 -*-
"""
@File : visualizer.py
@Time : 2022/04/05 11:39:33
@Author : Shilong Liu
@Contact : [email protected]
"""
import datetime
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
from matplotlib import transforms
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
from pycocotools import mask as maskUtils
def renorm(
img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
) -> torch.FloatTensor:
# img: tensor(3,H,W) or tensor(B,3,H,W)
# return: same as img
assert img.dim() == 3 or img.dim() == 4, "img.dim() should be 3 or 4 but %d" % img.dim()
if img.dim() == 3:
        assert img.size(0) == 3, 'img.size(0) should be 3 but "%d". (%s)' % (
img.size(0),
str(img.size()),
)
img_perm = img.permute(1, 2, 0)
mean = torch.Tensor(mean)
std = torch.Tensor(std)
img_res = img_perm * std + mean
return img_res.permute(2, 0, 1)
else: # img.dim() == 4
        assert img.size(1) == 3, 'img.size(1) should be 3 but "%d". (%s)' % (
img.size(1),
str(img.size()),
)
img_perm = img.permute(0, 2, 3, 1)
mean = torch.Tensor(mean)
std = torch.Tensor(std)
img_res = img_perm * std + mean
return img_res.permute(0, 3, 1, 2)
class ColorMap:
def __init__(self, basergb=[255, 255, 0]):
self.basergb = np.array(basergb)
def __call__(self, attnmap):
# attnmap: h, w. np.uint8.
# return: h, w, 4. np.uint8.
assert attnmap.dtype == np.uint8
h, w = attnmap.shape
res = self.basergb.copy()
res = res[None][None].repeat(h, 0).repeat(w, 1) # h, w, 3
attn1 = attnmap.copy()[..., None] # h, w, 1
res = np.concatenate((res, attn1), axis=-1).astype(np.uint8)
return res
def rainbow_text(x, y, ls, lc, **kw):
"""
Take a list of strings ``ls`` and colors ``lc`` and place them next to each
other, with text ls[i] being shown in color lc[i].
This example shows how to do both vertical and horizontal text, and will
pass all keyword arguments to plt.text, so you can set the font size,
family, etc.
"""
t = plt.gca().transData
fig = plt.gcf()
plt.show()
# horizontal version
for s, c in zip(ls, lc):
text = plt.text(x, y, " " + s + " ", color=c, transform=t, **kw)
text.draw(fig.canvas.get_renderer())
ex = text.get_window_extent()
t = transforms.offset_copy(text._transform, x=ex.width, units="dots")
# #vertical version
# for s,c in zip(ls,lc):
# text = plt.text(x,y," "+s+" ",color=c, transform=t,
# rotation=90,va='bottom',ha='center',**kw)
# text.draw(fig.canvas.get_renderer())
# ex = text.get_window_extent()
# t = transforms.offset_copy(text._transform, y=ex.height, units='dots')
class COCOVisualizer:
def __init__(self, coco=None, tokenlizer=None) -> None:
self.coco = coco
def visualize(self, img, tgt, caption=None, dpi=180, savedir="vis"):
"""
img: tensor(3, H, W)
tgt: make sure they are all on cpu.
must have items: 'image_id', 'boxes', 'size'
"""
plt.figure(dpi=dpi)
plt.rcParams["font.size"] = "5"
ax = plt.gca()
img = renorm(img).permute(1, 2, 0)
# if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':
# import ipdb; ipdb.set_trace()
ax.imshow(img)
self.addtgt(tgt)
if tgt is None:
image_id = 0
elif "image_id" not in tgt:
image_id = 0
else:
image_id = tgt["image_id"]
if caption is None:
savename = "{}/{}-{}.png".format(
savedir, int(image_id), str(datetime.datetime.now()).replace(" ", "-")
)
else:
savename = "{}/{}-{}-{}.png".format(
savedir, caption, int(image_id), str(datetime.datetime.now()).replace(" ", "-")
)
print("savename: {}".format(savename))
os.makedirs(os.path.dirname(savename), exist_ok=True)
plt.savefig(savename)
plt.close()
    def addtgt(self, tgt):
        """Draw the boxes, labels and optional attention maps described by ``tgt`` on the current axes."""
        if tgt is None or "boxes" not in tgt:
            ax = plt.gca()
            if tgt is not None and "caption" in tgt:
ax.set_title(tgt["caption"], wrap=True)
ax.set_axis_off()
return
ax = plt.gca()
H, W = tgt["size"]
numbox = tgt["boxes"].shape[0]
color = []
polygons = []
boxes = []
for box in tgt["boxes"].cpu():
unnormbbox = box * torch.Tensor([W, H, W, H])
unnormbbox[:2] -= unnormbbox[2:] / 2
[bbox_x, bbox_y, bbox_w, bbox_h] = unnormbbox.tolist()
boxes.append([bbox_x, bbox_y, bbox_w, bbox_h])
poly = [
[bbox_x, bbox_y],
[bbox_x, bbox_y + bbox_h],
[bbox_x + bbox_w, bbox_y + bbox_h],
[bbox_x + bbox_w, bbox_y],
]
np_poly = np.array(poly).reshape((4, 2))
polygons.append(Polygon(np_poly))
c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
color.append(c)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.1)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
ax.add_collection(p)
if "strings_positive" in tgt and len(tgt["strings_positive"]) > 0:
assert (
len(tgt["strings_positive"]) == numbox
), f"{len(tgt['strings_positive'])} = {numbox}, "
for idx, strlist in enumerate(tgt["strings_positive"]):
cate_id = int(tgt["labels"][idx])
_string = str(cate_id) + ":" + " ".join(strlist)
bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]
# ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1})
ax.text(
bbox_x,
bbox_y,
_string,
color="black",
bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1},
)
if "box_label" in tgt:
assert len(tgt["box_label"]) == numbox, f"{len(tgt['box_label'])} = {numbox}, "
for idx, bl in enumerate(tgt["box_label"]):
_string = str(bl)
bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]
# ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1})
ax.text(
bbox_x,
bbox_y,
_string,
color="black",
bbox={"facecolor": color[idx], "alpha": 0.6, "pad": 1},
)
if "caption" in tgt:
ax.set_title(tgt["caption"], wrap=True)
# plt.figure()
# rainbow_text(0.0,0.0,"all unicorns poop rainbows ! ! !".split(),
# ['red', 'orange', 'brown', 'green', 'blue', 'purple', 'black'])
if "attn" in tgt:
# if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':
# import ipdb; ipdb.set_trace()
if isinstance(tgt["attn"], tuple):
tgt["attn"] = [tgt["attn"]]
for item in tgt["attn"]:
attn_map, basergb = item
attn_map = (attn_map - attn_map.min()) / (attn_map.max() - attn_map.min() + 1e-3)
attn_map = (attn_map * 255).astype(np.uint8)
cm = ColorMap(basergb)
heatmap = cm(attn_map)
ax.imshow(heatmap)
ax.set_axis_off()
def showAnns(self, anns, draw_bbox=False):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if "segmentation" in anns[0] or "keypoints" in anns[0]:
datasetType = "instances"
elif "caption" in anns[0]:
datasetType = "captions"
else:
raise Exception("datasetType not supported")
if datasetType == "instances":
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3)) * 0.6 + 0.4).tolist()[0]
if "segmentation" in ann:
if type(ann["segmentation"]) == list:
# polygon
for seg in ann["segmentation"]:
poly = np.array(seg).reshape((int(len(seg) / 2), 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann["image_id"]]
if type(ann["segmentation"]["counts"]) == list:
rle = maskUtils.frPyObjects(
[ann["segmentation"]], t["height"], t["width"]
)
else:
rle = [ann["segmentation"]]
m = maskUtils.decode(rle)
img = np.ones((m.shape[0], m.shape[1], 3))
if ann["iscrowd"] == 1:
color_mask = np.array([2.0, 166.0, 101.0]) / 255
if ann["iscrowd"] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:, :, i] = color_mask[i]
ax.imshow(np.dstack((img, m * 0.5)))
if "keypoints" in ann and type(ann["keypoints"]) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann["category_id"])[0]["skeleton"]) - 1
kp = np.array(ann["keypoints"])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk] > 0):
plt.plot(x[sk], y[sk], linewidth=3, color=c)
plt.plot(
x[v > 0],
y[v > 0],
"o",
markersize=8,
markerfacecolor=c,
markeredgecolor="k",
markeredgewidth=2,
)
plt.plot(
x[v > 1],
y[v > 1],
"o",
markersize=8,
markerfacecolor=c,
markeredgecolor=c,
markeredgewidth=2,
)
if draw_bbox:
[bbox_x, bbox_y, bbox_w, bbox_h] = ann["bbox"]
poly = [
[bbox_x, bbox_y],
[bbox_x, bbox_y + bbox_h],
[bbox_x + bbox_w, bbox_y + bbox_h],
[bbox_x + bbox_w, bbox_y],
]
np_poly = np.array(poly).reshape((4, 2))
polygons.append(Polygon(np_poly))
color.append(c)
# p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
# ax.add_collection(p)
p = PatchCollection(polygons, facecolor="none", edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == "captions":
for ann in anns:
print(ann["caption"])
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/util/visualizer.py |
swarms-master | swarms/workers/models/GroundingDINO/groundingdino/config/__init__.py |
|
batch_size = 1
modelname = "groundingdino"
backbone = "swin_B_384_22k"
position_embedding = "sine"
pe_temperatureH = 20
pe_temperatureW = 20
return_interm_indices = [1, 2, 3]
backbone_freeze_keywords = None
enc_layers = 6
dec_layers = 6
pre_norm = False
dim_feedforward = 2048
hidden_dim = 256
dropout = 0.0
nheads = 8
num_queries = 900
query_dim = 4
num_patterns = 0
num_feature_levels = 4
enc_n_points = 4
dec_n_points = 4
two_stage_type = "standard"
two_stage_bbox_embed_share = False
two_stage_class_embed_share = False
transformer_activation = "relu"
dec_pred_bbox_embed_share = True
dn_box_noise_scale = 1.0
dn_label_noise_ratio = 0.5
dn_label_coef = 1.0
dn_bbox_coef = 1.0
embed_init_tgt = True
dn_labelbook_size = 2000
max_text_len = 256
text_encoder_type = "bert-base-uncased"
use_text_enhancer = True
use_fusion_layer = True
use_checkpoint = True
use_transformer_ckpt = True
use_text_cross_attention = True
text_dropout = 0.0
fusion_dropout = 0.0
fusion_droppath = 0.1
sub_sentence_present = True
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/config/GroundingDINO_SwinB_cfg.py |
batch_size = 1
modelname = "groundingdino"
backbone = "swin_T_224_1k"
position_embedding = "sine"
pe_temperatureH = 20
pe_temperatureW = 20
return_interm_indices = [1, 2, 3]
backbone_freeze_keywords = None
enc_layers = 6
dec_layers = 6
pre_norm = False
dim_feedforward = 2048
hidden_dim = 256
dropout = 0.0
nheads = 8
num_queries = 900
query_dim = 4
num_patterns = 0
num_feature_levels = 4
enc_n_points = 4
dec_n_points = 4
two_stage_type = "standard"
two_stage_bbox_embed_share = False
two_stage_class_embed_share = False
transformer_activation = "relu"
dec_pred_bbox_embed_share = True
dn_box_noise_scale = 1.0
dn_label_noise_ratio = 0.5
dn_label_coef = 1.0
dn_bbox_coef = 1.0
embed_init_tgt = True
dn_labelbook_size = 2000
max_text_len = 256
text_encoder_type = "bert-base-uncased"
use_text_enhancer = True
use_fusion_layer = True
use_checkpoint = True
use_transformer_ckpt = True
use_text_cross_attention = True
text_dropout = 0.0
fusion_dropout = 0.0
fusion_droppath = 0.1
sub_sentence_present = True
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Transforms and data augmentation for both image + bbox.
"""
import os
import random
import PIL
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as F
from groundingdino.util.box_ops import box_xyxy_to_cxcywh
from groundingdino.util.misc import interpolate
def crop(image, target, region):
cropped_image = F.crop(image, *region)
target = target.copy()
i, j, h, w = region
# should we do something wrt the original size?
target["size"] = torch.tensor([h, w])
fields = ["labels", "area", "iscrowd", "positive_map"]
if "boxes" in target:
boxes = target["boxes"]
max_size = torch.as_tensor([w, h], dtype=torch.float32)
cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
cropped_boxes = cropped_boxes.clamp(min=0)
area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1)
target["boxes"] = cropped_boxes.reshape(-1, 4)
target["area"] = area
fields.append("boxes")
if "masks" in target:
# FIXME should we update the area here if there are no boxes?
target["masks"] = target["masks"][:, i : i + h, j : j + w]
fields.append("masks")
# remove elements for which the boxes or masks that have zero area
if "boxes" in target or "masks" in target:
# favor boxes selection when defining which elements to keep
# this is compatible with previous implementation
if "boxes" in target:
cropped_boxes = target["boxes"].reshape(-1, 2, 2)
keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
else:
keep = target["masks"].flatten(1).any(1)
for field in fields:
if field in target:
target[field] = target[field][keep]
if os.environ.get("IPDB_SHILONG_DEBUG", None) == "INFO":
# for debug and visualization only.
if "strings_positive" in target:
target["strings_positive"] = [
_i for _i, _j in zip(target["strings_positive"], keep) if _j
]
return cropped_image, target
def hflip(image, target):
flipped_image = F.hflip(image)
w, h = image.size
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor(
[w, 0, w, 0]
)
target["boxes"] = boxes
if "masks" in target:
target["masks"] = target["masks"].flip(-1)
return flipped_image, target
def resize(image, target, size, max_size=None):
# size can be min_size (scalar) or (w, h) tuple
def get_size_with_aspect_ratio(image_size, size, max_size=None):
w, h = image_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return (h, w)
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return (oh, ow)
def get_size(image_size, size, max_size=None):
if isinstance(size, (list, tuple)):
return size[::-1]
else:
return get_size_with_aspect_ratio(image_size, size, max_size)
size = get_size(image.size, size, max_size)
rescaled_image = F.resize(image, size)
if target is None:
return rescaled_image, None
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
ratio_width, ratio_height = ratios
target = target.copy()
if "boxes" in target:
boxes = target["boxes"]
scaled_boxes = boxes * torch.as_tensor(
[ratio_width, ratio_height, ratio_width, ratio_height]
)
target["boxes"] = scaled_boxes
if "area" in target:
area = target["area"]
scaled_area = area * (ratio_width * ratio_height)
target["area"] = scaled_area
h, w = size
target["size"] = torch.tensor([h, w])
if "masks" in target:
target["masks"] = (
interpolate(target["masks"][:, None].float(), size, mode="nearest")[:, 0] > 0.5
)
return rescaled_image, target
def pad(image, target, padding):
# assumes that we only pad on the bottom right corners
padded_image = F.pad(image, (0, 0, padding[0], padding[1]))
if target is None:
return padded_image, None
target = target.copy()
# should we do something wrt the original size?
target["size"] = torch.tensor(padded_image.size[::-1])
if "masks" in target:
target["masks"] = torch.nn.functional.pad(target["masks"], (0, padding[0], 0, padding[1]))
return padded_image, target
class ResizeDebug(object):
def __init__(self, size):
self.size = size
def __call__(self, img, target):
return resize(img, target, self.size)
class RandomCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, img, target):
region = T.RandomCrop.get_params(img, self.size)
return crop(img, target, region)
class RandomSizeCrop(object):
def __init__(self, min_size: int, max_size: int, respect_boxes: bool = False):
# respect_boxes: True to keep all boxes
        #                False to tolerate boxes being filtered out by the crop
self.min_size = min_size
self.max_size = max_size
self.respect_boxes = respect_boxes
def __call__(self, img: PIL.Image.Image, target: dict):
init_boxes = len(target["boxes"])
max_patience = 10
for i in range(max_patience):
w = random.randint(self.min_size, min(img.width, self.max_size))
h = random.randint(self.min_size, min(img.height, self.max_size))
region = T.RandomCrop.get_params(img, [h, w])
result_img, result_target = crop(img, target, region)
if (
not self.respect_boxes
or len(result_target["boxes"]) == init_boxes
or i == max_patience - 1
):
return result_img, result_target
return result_img, result_target
class CenterCrop(object):
def __init__(self, size):
self.size = size
def __call__(self, img, target):
image_width, image_height = img.size
crop_height, crop_width = self.size
crop_top = int(round((image_height - crop_height) / 2.0))
crop_left = int(round((image_width - crop_width) / 2.0))
return crop(img, target, (crop_top, crop_left, crop_height, crop_width))
class RandomHorizontalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
return hflip(img, target)
return img, target
class RandomResize(object):
def __init__(self, sizes, max_size=None):
assert isinstance(sizes, (list, tuple))
self.sizes = sizes
self.max_size = max_size
def __call__(self, img, target=None):
size = random.choice(self.sizes)
return resize(img, target, size, self.max_size)
class RandomPad(object):
def __init__(self, max_pad):
self.max_pad = max_pad
def __call__(self, img, target):
pad_x = random.randint(0, self.max_pad)
pad_y = random.randint(0, self.max_pad)
return pad(img, target, (pad_x, pad_y))
class RandomSelect(object):
"""
Randomly selects between transforms1 and transforms2,
with probability p for transforms1 and (1 - p) for transforms2
"""
def __init__(self, transforms1, transforms2, p=0.5):
self.transforms1 = transforms1
self.transforms2 = transforms2
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
return self.transforms1(img, target)
return self.transforms2(img, target)
class ToTensor(object):
def __call__(self, img, target):
return F.to_tensor(img), target
class RandomErasing(object):
def __init__(self, *args, **kwargs):
self.eraser = T.RandomErasing(*args, **kwargs)
def __call__(self, img, target):
return self.eraser(img), target
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, target=None):
image = F.normalize(image, mean=self.mean, std=self.std)
if target is None:
return image, None
target = target.copy()
h, w = image.shape[-2:]
if "boxes" in target:
boxes = target["boxes"]
boxes = box_xyxy_to_cxcywh(boxes)
boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
target["boxes"] = boxes
return image, target
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
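# Illustrative usage sketch: the same normalization pipeline used at inference time,
# applied to a synthetic image (the 640x480 size is an arbitrary placeholder).
if __name__ == "__main__":
    from PIL import Image

    demo_transform = Compose(
        [
            RandomResize([800], max_size=1333),
            ToTensor(),
            Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ]
    )
    dummy = Image.new("RGB", (640, 480))
    tensor, _ = demo_transform(dummy, None)
    print(tensor.shape)  # torch.Size([3, 800, 1066]) for the 640x480 input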
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/datasets/transforms.py |
swarms-master | swarms/workers/models/GroundingDINO/groundingdino/datasets/__init__.py |
|
# ------------------------------------------------------------------------
# Grounding DINO. Modified by Shilong Liu.
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
COCO evaluator that works in distributed mode.
Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py
The difference is that there is less copy-pasting from pycocotools
in the end of the file, as python3 can suppress prints with contextlib
"""
import contextlib
import copy
import os
import numpy as np
import pycocotools.mask as mask_util
import torch
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from groundingdino.util.misc import all_gather
class CocoGroundingEvaluator(object):
def __init__(self, coco_gt, iou_types, useCats=True):
assert isinstance(iou_types, (list, tuple))
coco_gt = copy.deepcopy(coco_gt)
self.coco_gt = coco_gt
self.iou_types = iou_types
self.coco_eval = {}
for iou_type in iou_types:
self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
self.coco_eval[iou_type].useCats = useCats
self.img_ids = []
self.eval_imgs = {k: [] for k in iou_types}
self.useCats = useCats
def update(self, predictions):
img_ids = list(np.unique(list(predictions.keys())))
self.img_ids.extend(img_ids)
for iou_type in self.iou_types:
results = self.prepare(predictions, iou_type)
# suppress pycocotools prints
with open(os.devnull, "w") as devnull:
with contextlib.redirect_stdout(devnull):
coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO()
coco_eval = self.coco_eval[iou_type]
coco_eval.cocoDt = coco_dt
coco_eval.params.imgIds = list(img_ids)
coco_eval.params.useCats = self.useCats
img_ids, eval_imgs = evaluate(coco_eval)
self.eval_imgs[iou_type].append(eval_imgs)
def synchronize_between_processes(self):
for iou_type in self.iou_types:
self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
def accumulate(self):
for coco_eval in self.coco_eval.values():
coco_eval.accumulate()
def summarize(self):
for iou_type, coco_eval in self.coco_eval.items():
print("IoU metric: {}".format(iou_type))
coco_eval.summarize()
def prepare(self, predictions, iou_type):
if iou_type == "bbox":
return self.prepare_for_coco_detection(predictions)
elif iou_type == "segm":
return self.prepare_for_coco_segmentation(predictions)
elif iou_type == "keypoints":
return self.prepare_for_coco_keypoint(predictions)
else:
raise ValueError("Unknown iou type {}".format(iou_type))
def prepare_for_coco_detection(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return coco_results
def prepare_for_coco_segmentation(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
scores = prediction["scores"]
labels = prediction["labels"]
masks = prediction["masks"]
masks = masks > 0.5
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"segmentation": rle,
"score": scores[k],
}
for k, rle in enumerate(rles)
]
)
return coco_results
def prepare_for_coco_keypoint(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
keypoints = prediction["keypoints"]
keypoints = keypoints.flatten(start_dim=1).tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"keypoints": keypoint,
"score": scores[k],
}
for k, keypoint in enumerate(keypoints)
]
)
return coco_results
def convert_to_xywh(boxes):
xmin, ymin, xmax, ymax = boxes.unbind(1)
return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
def merge(img_ids, eval_imgs):
all_img_ids = all_gather(img_ids)
all_eval_imgs = all_gather(eval_imgs)
merged_img_ids = []
for p in all_img_ids:
merged_img_ids.extend(p)
merged_eval_imgs = []
for p in all_eval_imgs:
merged_eval_imgs.append(p)
merged_img_ids = np.array(merged_img_ids)
merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)
# keep only unique (and in sorted order) images
merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
merged_eval_imgs = merged_eval_imgs[..., idx]
return merged_img_ids, merged_eval_imgs
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
img_ids, eval_imgs = merge(img_ids, eval_imgs)
img_ids = list(img_ids)
eval_imgs = list(eval_imgs.flatten())
coco_eval.evalImgs = eval_imgs
coco_eval.params.imgIds = img_ids
coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
#################################################################
# From pycocotools, just removed the prints and fixed
# a Python3 bug about unicode not defined
#################################################################
def evaluate(self):
"""
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
"""
# tic = time.time()
# print('Running per image evaluation...')
p = self.params
# add backward compatibility if useSegm is specified in params
if p.useSegm is not None:
p.iouType = "segm" if p.useSegm == 1 else "bbox"
print("useSegm (deprecated) is not None. Running {} evaluation".format(p.iouType))
# print('Evaluate annotation type *{}*'.format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == "segm" or p.iouType == "bbox":
computeIoU = self.computeIoU
elif p.iouType == "keypoints":
computeIoU = self.computeOks
self.ious = {
(imgId, catId): computeIoU(imgId, catId)
for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
evalImgs = [
evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
# this is NOT in the pycocotools code, but could be done outside
evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
self._paramsEval = copy.deepcopy(self.params)
# toc = time.time()
# print('DONE (t={:0.2f}s).'.format(toc-tic))
return p.imgIds, evalImgs
#################################################################
# end of straight copy from pycocotools, just removing the prints
#################################################################
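# Illustrative evaluation-loop sketch; `coco_gt` is a pycocotools COCO object and each
# `predictions` dict maps image_id -> {"boxes", "scores", "labels"}, both placeholders here.
#
#   evaluator = CocoGroundingEvaluator(coco_gt, iou_types=["bbox"])
#   for predictions in prediction_batches:
#       evaluator.update(predictions)
#   evaluator.synchronize_between_processes()
#   evaluator.accumulate()
#   evaluator.summarize()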
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/datasets/cocogrounding_eval.py |
# ------------------------------------------------------------------------
# Grounding DINO
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# -*- coding: utf-8 -*-
# @Author: Yihao Chen
# @Date: 2021-08-16 16:03:17
# @Last Modified by: Shilong Liu
# @Last Modified time: 2022-01-23 15:26
# modified from mmcv
import inspect
from functools import partial
class Registry(object):
def __init__(self, name):
self._name = name
self._module_dict = dict()
def __repr__(self):
format_str = self.__class__.__name__ + "(name={}, items={})".format(
self._name, list(self._module_dict.keys())
)
return format_str
def __len__(self):
return len(self._module_dict)
@property
def name(self):
return self._name
@property
def module_dict(self):
return self._module_dict
def get(self, key):
return self._module_dict.get(key, None)
def registe_with_name(self, module_name=None, force=False):
return partial(self.register, module_name=module_name, force=force)
def register(self, module_build_function, module_name=None, force=False):
"""Register a module build function.
Args:
            module_build_function (callable): The function that builds the module to be registered.
"""
if not inspect.isfunction(module_build_function):
raise TypeError(
"module_build_function must be a function, but got {}".format(
type(module_build_function)
)
)
if module_name is None:
module_name = module_build_function.__name__
if not force and module_name in self._module_dict:
raise KeyError("{} is already registered in {}".format(module_name, self.name))
self._module_dict[module_name] = module_build_function
return module_build_function
MODULE_BUILD_FUNCS = Registry("model build functions")
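# Illustrative sketch of how a build function is attached to the registry; the function
# body is elided and the name mirrors the builder that the model package registers.
#
#   @MODULE_BUILD_FUNCS.registe_with_name(module_name="groundingdino")
#   def build_groundingdino(args):
#       ...
#
#   build_func = MODULE_BUILD_FUNCS.get("groundingdino")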
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/models/registry.py |
# ------------------------------------------------------------------------
# Grounding DINO
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
def build_model(args):
# we use register to maintain models from catdet6 on.
from .registry import MODULE_BUILD_FUNCS
assert args.modelname in MODULE_BUILD_FUNCS._module_dict
build_func = MODULE_BUILD_FUNCS.get(args.modelname)
model = build_func(args)
return model
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/models/__init__.py |
# ------------------------------------------------------------------------
# Grounding DINO
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import DropPath
class FeatureResizer(nn.Module):
"""
    This class takes as input a set of embeddings of dimension C1 and outputs a set of
    embeddings of dimension C2, after a linear transformation, dropout and normalization (LN).
"""
def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True):
super().__init__()
self.do_ln = do_ln
# Object feature encoding
self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True)
self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12)
self.dropout = nn.Dropout(dropout)
def forward(self, encoder_features):
x = self.fc(encoder_features)
if self.do_ln:
x = self.layer_norm(x)
output = self.dropout(x)
return output
def l1norm(X, dim, eps=1e-8):
"""L1-normalize columns of X"""
norm = torch.abs(X).sum(dim=dim, keepdim=True) + eps
X = torch.div(X, norm)
return X
def l2norm(X, dim, eps=1e-8):
"""L2-normalize columns of X"""
norm = torch.pow(X, 2).sum(dim=dim, keepdim=True).sqrt() + eps
X = torch.div(X, norm)
return X
def func_attention(query, context, smooth=1, raw_feature_norm="softmax", eps=1e-8):
"""
query: (n_context, queryL, d)
context: (n_context, sourceL, d)
"""
batch_size_q, queryL = query.size(0), query.size(1)
batch_size, sourceL = context.size(0), context.size(1)
# Get attention
# --> (batch, d, queryL)
queryT = torch.transpose(query, 1, 2)
# (batch, sourceL, d)(batch, d, queryL)
# --> (batch, sourceL, queryL)
attn = torch.bmm(context, queryT)
if raw_feature_norm == "softmax":
# --> (batch*sourceL, queryL)
attn = attn.view(batch_size * sourceL, queryL)
attn = nn.Softmax()(attn)
# --> (batch, sourceL, queryL)
attn = attn.view(batch_size, sourceL, queryL)
elif raw_feature_norm == "l2norm":
attn = l2norm(attn, 2)
elif raw_feature_norm == "clipped_l2norm":
attn = nn.LeakyReLU(0.1)(attn)
attn = l2norm(attn, 2)
else:
raise ValueError("unknown first norm type:", raw_feature_norm)
# --> (batch, queryL, sourceL)
attn = torch.transpose(attn, 1, 2).contiguous()
# --> (batch*queryL, sourceL)
attn = attn.view(batch_size * queryL, sourceL)
attn = nn.Softmax()(attn * smooth)
# --> (batch, queryL, sourceL)
attn = attn.view(batch_size, queryL, sourceL)
# --> (batch, sourceL, queryL)
attnT = torch.transpose(attn, 1, 2).contiguous()
# --> (batch, d, sourceL)
contextT = torch.transpose(context, 1, 2)
# (batch x d x sourceL)(batch x sourceL x queryL)
# --> (batch, d, queryL)
weightedContext = torch.bmm(contextT, attnT)
# --> (batch, queryL, d)
weightedContext = torch.transpose(weightedContext, 1, 2)
return weightedContext, attnT
class BiMultiHeadAttention(nn.Module):
def __init__(self, v_dim, l_dim, embed_dim, num_heads, dropout=0.1, cfg=None):
super(BiMultiHeadAttention, self).__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.v_dim = v_dim
self.l_dim = l_dim
assert (
self.head_dim * self.num_heads == self.embed_dim
), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
self.scale = self.head_dim ** (-0.5)
self.dropout = dropout
self.v_proj = nn.Linear(self.v_dim, self.embed_dim)
self.l_proj = nn.Linear(self.l_dim, self.embed_dim)
self.values_v_proj = nn.Linear(self.v_dim, self.embed_dim)
self.values_l_proj = nn.Linear(self.l_dim, self.embed_dim)
self.out_v_proj = nn.Linear(self.embed_dim, self.v_dim)
self.out_l_proj = nn.Linear(self.embed_dim, self.l_dim)
self.stable_softmax_2d = True
self.clamp_min_for_underflow = True
self.clamp_max_for_overflow = True
self._reset_parameters()
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def _reset_parameters(self):
nn.init.xavier_uniform_(self.v_proj.weight)
self.v_proj.bias.data.fill_(0)
nn.init.xavier_uniform_(self.l_proj.weight)
self.l_proj.bias.data.fill_(0)
nn.init.xavier_uniform_(self.values_v_proj.weight)
self.values_v_proj.bias.data.fill_(0)
nn.init.xavier_uniform_(self.values_l_proj.weight)
self.values_l_proj.bias.data.fill_(0)
nn.init.xavier_uniform_(self.out_v_proj.weight)
self.out_v_proj.bias.data.fill_(0)
nn.init.xavier_uniform_(self.out_l_proj.weight)
self.out_l_proj.bias.data.fill_(0)
def forward(self, v, l, attention_mask_v=None, attention_mask_l=None):
"""_summary_
Args:
v (_type_): bs, n_img, dim
l (_type_): bs, n_text, dim
attention_mask_v (_type_, optional): _description_. bs, n_img
attention_mask_l (_type_, optional): _description_. bs, n_text
Returns:
_type_: _description_
"""
# if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':
# import ipdb; ipdb.set_trace()
bsz, tgt_len, _ = v.size()
query_states = self.v_proj(v) * self.scale
key_states = self._shape(self.l_proj(l), -1, bsz)
value_v_states = self._shape(self.values_v_proj(v), -1, bsz)
value_l_states = self._shape(self.values_l_proj(l), -1, bsz)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_v_states = value_v_states.view(*proj_shape)
value_l_states = value_l_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) # bs*nhead, nimg, ntxt
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
)
if self.stable_softmax_2d:
attn_weights = attn_weights - attn_weights.max()
if self.clamp_min_for_underflow:
attn_weights = torch.clamp(
attn_weights, min=-50000
) # Do not increase -50000, data type half has quite limited range
if self.clamp_max_for_overflow:
attn_weights = torch.clamp(
attn_weights, max=50000
) # Do not increase 50000, data type half has quite limited range
attn_weights_T = attn_weights.transpose(1, 2)
attn_weights_l = attn_weights_T - torch.max(attn_weights_T, dim=-1, keepdim=True)[0]
if self.clamp_min_for_underflow:
attn_weights_l = torch.clamp(
attn_weights_l, min=-50000
) # Do not increase -50000, data type half has quite limited range
if self.clamp_max_for_overflow:
attn_weights_l = torch.clamp(
attn_weights_l, max=50000
) # Do not increase 50000, data type half has quite limited range
# mask vison for language
if attention_mask_v is not None:
attention_mask_v = (
attention_mask_v[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1)
)
attn_weights_l.masked_fill_(attention_mask_v, float("-inf"))
attn_weights_l = attn_weights_l.softmax(dim=-1)
# mask language for vision
if attention_mask_l is not None:
attention_mask_l = (
attention_mask_l[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1)
)
attn_weights.masked_fill_(attention_mask_l, float("-inf"))
attn_weights_v = attn_weights.softmax(dim=-1)
attn_probs_v = F.dropout(attn_weights_v, p=self.dropout, training=self.training)
attn_probs_l = F.dropout(attn_weights_l, p=self.dropout, training=self.training)
attn_output_v = torch.bmm(attn_probs_v, value_l_states)
attn_output_l = torch.bmm(attn_probs_l, value_v_states)
if attn_output_v.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output_v` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output_v.size()}"
)
if attn_output_l.size() != (bsz * self.num_heads, src_len, self.head_dim):
raise ValueError(
f"`attn_output_l` should be of size {(bsz, self.num_heads, src_len, self.head_dim)}, but is {attn_output_l.size()}"
)
attn_output_v = attn_output_v.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output_v = attn_output_v.transpose(1, 2)
attn_output_v = attn_output_v.reshape(bsz, tgt_len, self.embed_dim)
attn_output_l = attn_output_l.view(bsz, self.num_heads, src_len, self.head_dim)
attn_output_l = attn_output_l.transpose(1, 2)
attn_output_l = attn_output_l.reshape(bsz, src_len, self.embed_dim)
attn_output_v = self.out_v_proj(attn_output_v)
attn_output_l = self.out_l_proj(attn_output_l)
return attn_output_v, attn_output_l
# Bi-Direction MHA (text->image, image->text)
class BiAttentionBlock(nn.Module):
def __init__(
self,
v_dim,
l_dim,
embed_dim,
num_heads,
dropout=0.1,
drop_path=0.0,
init_values=1e-4,
cfg=None,
):
"""
Inputs:
embed_dim - Dimensionality of input and attention feature vectors
hidden_dim - Dimensionality of hidden layer in feed-forward network
(usually 2-4x larger than embed_dim)
num_heads - Number of heads to use in the Multi-Head Attention block
dropout - Amount of dropout to apply in the feed-forward network
"""
super(BiAttentionBlock, self).__init__()
# pre layer norm
self.layer_norm_v = nn.LayerNorm(v_dim)
self.layer_norm_l = nn.LayerNorm(l_dim)
self.attn = BiMultiHeadAttention(
v_dim=v_dim, l_dim=l_dim, embed_dim=embed_dim, num_heads=num_heads, dropout=dropout
)
# add layer scale for training stability
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.gamma_v = nn.Parameter(init_values * torch.ones((v_dim)), requires_grad=True)
self.gamma_l = nn.Parameter(init_values * torch.ones((l_dim)), requires_grad=True)
def forward(self, v, l, attention_mask_v=None, attention_mask_l=None):
v = self.layer_norm_v(v)
l = self.layer_norm_l(l)
delta_v, delta_l = self.attn(
v, l, attention_mask_v=attention_mask_v, attention_mask_l=attention_mask_l
)
# v, l = v + delta_v, l + delta_l
v = v + self.drop_path(self.gamma_v * delta_v)
l = l + self.drop_path(self.gamma_l * delta_l)
return v, l
# def forward(self, v:List[torch.Tensor], l, attention_mask_v=None, attention_mask_l=None)
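# Illustrative smoke test with random tensors; the dimensions below are placeholders
# chosen only to exercise the block on CPU.
if __name__ == "__main__":
    block = BiAttentionBlock(v_dim=256, l_dim=768, embed_dim=256, num_heads=8)
    v = torch.randn(2, 100, 256)  # (bs, n_img, v_dim)
    l = torch.randn(2, 20, 768)   # (bs, n_text, l_dim)
    v_out, l_out = block(v, l)
    print(v_out.shape, l_out.shape)  # torch.Size([2, 100, 256]) torch.Size([2, 20, 768])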
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/models/GroundingDINO/fuse_modules.py |
# ------------------------------------------------------------------------
# Grounding DINO
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Deformable DETR
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------------------------------
# Modified from:
# https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/functions/ms_deform_attn_func.py
# https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/modules/ms_deform_attn.py
# https://github.com/open-mmlab/mmcv/blob/master/mmcv/ops/multi_scale_deform_attn.py
# ------------------------------------------------------------------------------------------------
import math
import warnings
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.init import constant_, xavier_uniform_
try:
from groundingdino import _C
except Exception:
    warnings.warn("Failed to load custom C++ ops. Running in CPU-only mode!")
# helpers
def _is_power_of_2(n):
if (not isinstance(n, int)) or (n < 0):
raise ValueError("invalid input for _is_power_of_2: {} (type: {})".format(n, type(n)))
return (n & (n - 1) == 0) and n != 0
class MultiScaleDeformableAttnFunction(Function):
@staticmethod
def forward(
ctx,
value,
value_spatial_shapes,
value_level_start_index,
sampling_locations,
attention_weights,
im2col_step,
):
ctx.im2col_step = im2col_step
output = _C.ms_deform_attn_forward(
value,
value_spatial_shapes,
value_level_start_index,
sampling_locations,
attention_weights,
ctx.im2col_step,
)
ctx.save_for_backward(
value,
value_spatial_shapes,
value_level_start_index,
sampling_locations,
attention_weights,
)
return output
@staticmethod
@once_differentiable
def backward(ctx, grad_output):
(
value,
value_spatial_shapes,
value_level_start_index,
sampling_locations,
attention_weights,
) = ctx.saved_tensors
grad_value, grad_sampling_loc, grad_attn_weight = _C.ms_deform_attn_backward(
value,
value_spatial_shapes,
value_level_start_index,
sampling_locations,
attention_weights,
grad_output,
ctx.im2col_step,
)
return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None
def multi_scale_deformable_attn_pytorch(
value: torch.Tensor,
value_spatial_shapes: torch.Tensor,
sampling_locations: torch.Tensor,
attention_weights: torch.Tensor,
) -> torch.Tensor:
bs, _, num_heads, embed_dims = value.shape
_, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape
value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)
sampling_grids = 2 * sampling_locations - 1
sampling_value_list = []
for level, (H_, W_) in enumerate(value_spatial_shapes):
# bs, H_*W_, num_heads, embed_dims ->
# bs, H_*W_, num_heads*embed_dims ->
# bs, num_heads*embed_dims, H_*W_ ->
# bs*num_heads, embed_dims, H_, W_
value_l_ = (
value_list[level].flatten(2).transpose(1, 2).reshape(bs * num_heads, embed_dims, H_, W_)
)
# bs, num_queries, num_heads, num_points, 2 ->
# bs, num_heads, num_queries, num_points, 2 ->
# bs*num_heads, num_queries, num_points, 2
sampling_grid_l_ = sampling_grids[:, :, :, level].transpose(1, 2).flatten(0, 1)
# bs*num_heads, embed_dims, num_queries, num_points
sampling_value_l_ = F.grid_sample(
value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False
)
sampling_value_list.append(sampling_value_l_)
# (bs, num_queries, num_heads, num_levels, num_points) ->
# (bs, num_heads, num_queries, num_levels, num_points) ->
# (bs, num_heads, 1, num_queries, num_levels*num_points)
attention_weights = attention_weights.transpose(1, 2).reshape(
bs * num_heads, 1, num_queries, num_levels * num_points
)
output = (
(torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights)
.sum(-1)
.view(bs, num_heads * embed_dims, num_queries)
)
return output.transpose(1, 2).contiguous()
class MultiScaleDeformableAttention(nn.Module):
"""Multi-Scale Deformable Attention Module used in Deformable-DETR
`Deformable DETR: Deformable Transformers for End-to-End Object Detection.
<https://arxiv.org/pdf/2010.04159.pdf>`_.
Args:
embed_dim (int): The embedding dimension of Attention. Default: 256.
num_heads (int): The number of attention heads. Default: 8.
num_levels (int): The number of feature map used in Attention. Default: 4.
num_points (int): The number of sampling points for each query
in each head. Default: 4.
        img2col_step (int): The step used in image_to_column. Default: 64.
dropout (float): Dropout layer used in output. Default: 0.1.
        batch_first (bool): If ``True``, the input and output tensors are provided
            as `(bs, n, embed_dim)`; otherwise as `(n, bs, embed_dim)`. Default: False.
"""
def __init__(
self,
embed_dim: int = 256,
num_heads: int = 8,
num_levels: int = 4,
num_points: int = 4,
img2col_step: int = 64,
batch_first: bool = False,
):
super().__init__()
if embed_dim % num_heads != 0:
raise ValueError(
"embed_dim must be divisible by num_heads, but got {} and {}".format(
embed_dim, num_heads
)
)
head_dim = embed_dim // num_heads
self.batch_first = batch_first
if not _is_power_of_2(head_dim):
warnings.warn(
"""
                You'd better set embed_dim in MultiScaleDeformableAttention to make the
                dimension of each attention head a power of 2, which is more efficient.
"""
)
self.im2col_step = img2col_step
self.embed_dim = embed_dim
self.num_heads = num_heads
self.num_levels = num_levels
self.num_points = num_points
self.sampling_offsets = nn.Linear(embed_dim, num_heads * num_levels * num_points * 2)
self.attention_weights = nn.Linear(embed_dim, num_heads * num_levels * num_points)
self.value_proj = nn.Linear(embed_dim, embed_dim)
self.output_proj = nn.Linear(embed_dim, embed_dim)
self.init_weights()
def _reset_parameters(self):
return self.init_weights()
def init_weights(self):
"""
Default initialization for Parameters of Module.
"""
constant_(self.sampling_offsets.weight.data, 0.0)
thetas = torch.arange(self.num_heads, dtype=torch.float32) * (
2.0 * math.pi / self.num_heads
)
grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
grid_init = (
(grid_init / grid_init.abs().max(-1, keepdim=True)[0])
.view(self.num_heads, 1, 1, 2)
.repeat(1, self.num_levels, self.num_points, 1)
)
for i in range(self.num_points):
grid_init[:, :, i, :] *= i + 1
with torch.no_grad():
self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
constant_(self.attention_weights.weight.data, 0.0)
constant_(self.attention_weights.bias.data, 0.0)
xavier_uniform_(self.value_proj.weight.data)
constant_(self.value_proj.bias.data, 0.0)
xavier_uniform_(self.output_proj.weight.data)
constant_(self.output_proj.bias.data, 0.0)
def freeze_sampling_offsets(self):
print("Freeze sampling offsets")
self.sampling_offsets.weight.requires_grad = False
self.sampling_offsets.bias.requires_grad = False
def freeze_attention_weights(self):
print("Freeze attention weights")
self.attention_weights.weight.requires_grad = False
self.attention_weights.bias.requires_grad = False
def forward(
self,
query: torch.Tensor,
key: Optional[torch.Tensor] = None,
value: Optional[torch.Tensor] = None,
query_pos: Optional[torch.Tensor] = None,
key_padding_mask: Optional[torch.Tensor] = None,
reference_points: Optional[torch.Tensor] = None,
spatial_shapes: Optional[torch.Tensor] = None,
level_start_index: Optional[torch.Tensor] = None,
**kwargs
) -> torch.Tensor:
"""Forward Function of MultiScaleDeformableAttention
Args:
query (torch.Tensor): Query embeddings with shape
`(num_query, bs, embed_dim)`
key (torch.Tensor): Key embeddings with shape
`(num_key, bs, embed_dim)`
value (torch.Tensor): Value embeddings with shape
`(num_key, bs, embed_dim)`
query_pos (torch.Tensor): The position embedding for `query`. Default: None.
            key_padding_mask (torch.Tensor): ByteTensor with shape `(bs, num_key)`,
                indicating which elements within `key` should be ignored in attention.
reference_points (torch.Tensor): The normalized reference points
with shape `(bs, num_query, num_levels, 2)`,
                all elements lie in the range [0, 1], top-left (0, 0),
                bottom-right (1, 1), including the padding area;
                or `(bs, num_query, num_levels, 4)`, with two additional
                dimensions `(w, h)` forming reference boxes.
spatial_shapes (torch.Tensor): Spatial shape of features in different levels.
With shape `(num_levels, 2)`, last dimension represents `(h, w)`.
level_start_index (torch.Tensor): The start index of each level. A tensor with
shape `(num_levels, )` which can be represented as
`[0, h_0 * w_0, h_0 * w_0 + h_1 * w_1, ...]`.
Returns:
torch.Tensor: forward results with shape `(num_query, bs, embed_dim)`
"""
if value is None:
value = query
if query_pos is not None:
query = query + query_pos
if not self.batch_first:
# change to (bs, num_query ,embed_dims)
query = query.permute(1, 0, 2)
value = value.permute(1, 0, 2)
bs, num_query, _ = query.shape
bs, num_value, _ = value.shape
assert (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() == num_value
value = self.value_proj(value)
if key_padding_mask is not None:
value = value.masked_fill(key_padding_mask[..., None], float(0))
value = value.view(bs, num_value, self.num_heads, -1)
sampling_offsets = self.sampling_offsets(query).view(
bs, num_query, self.num_heads, self.num_levels, self.num_points, 2
)
attention_weights = self.attention_weights(query).view(
bs, num_query, self.num_heads, self.num_levels * self.num_points
)
attention_weights = attention_weights.softmax(-1)
attention_weights = attention_weights.view(
bs,
num_query,
self.num_heads,
self.num_levels,
self.num_points,
)
# bs, num_query, num_heads, num_levels, num_points, 2
if reference_points.shape[-1] == 2:
offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1)
sampling_locations = (
reference_points[:, :, None, :, None, :]
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
)
elif reference_points.shape[-1] == 4:
sampling_locations = (
reference_points[:, :, None, :, None, :2]
+ sampling_offsets
/ self.num_points
* reference_points[:, :, None, :, None, 2:]
* 0.5
)
else:
raise ValueError(
"Last dim of reference_points must be 2 or 4, but get {} instead.".format(
reference_points.shape[-1]
)
)
if torch.cuda.is_available() and value.is_cuda:
halffloat = False
if value.dtype == torch.float16:
halffloat = True
value = value.float()
sampling_locations = sampling_locations.float()
attention_weights = attention_weights.float()
output = MultiScaleDeformableAttnFunction.apply(
value,
spatial_shapes,
level_start_index,
sampling_locations,
attention_weights,
self.im2col_step,
)
if halffloat:
output = output.half()
else:
output = multi_scale_deformable_attn_pytorch(
value, spatial_shapes, sampling_locations, attention_weights
)
output = self.output_proj(output)
if not self.batch_first:
output = output.permute(1, 0, 2)
return output
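def _msdeform_attn_shape_example():
    """Minimal usage sketch (illustrative only, not part of the original module).
    Shows the tensor shapes MultiScaleDeformableAttention expects for a toy
    two-level feature pyramid, assuming the pure-PyTorch fallback path on CPU.
    """
    import torch
    attn = MultiScaleDeformableAttention(embed_dim=256, num_heads=8, num_levels=2, num_points=4)
    spatial_shapes = torch.as_tensor([[8, 8], [4, 4]], dtype=torch.long)  # (num_levels, 2) as (h, w)
    level_start_index = torch.cat(
        (spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])
    )  # [0, 64]
    num_value = int((spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum())  # 64 + 16 = 80
    bs, num_query = 2, 10
    query = torch.rand(num_query, bs, 256)  # (num_query, bs, embed_dim) since batch_first=False
    value = torch.rand(num_value, bs, 256)  # (num_value, bs, embed_dim)
    reference_points = torch.rand(bs, num_query, 2, 2)  # (bs, num_query, num_levels, 2) in [0, 1]
    out = attn(
        query=query,
        value=value,
        reference_points=reference_points,
        spatial_shapes=spatial_shapes,
        level_start_index=level_start_index,
    )
    assert out.shape == (num_query, bs, 256)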
def create_dummy_class(klass, dependency, message=""):
"""
When a dependency of a class is not available, create a dummy class which throws ImportError
when used.
Args:
klass (str): name of the class.
dependency (str): name of the dependency.
message: extra message to print
Returns:
class: a class object
"""
err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, klass)
if message:
err = err + " " + message
class _DummyMetaClass(type):
# throw error on class attribute access
def __getattr__(_, __): # noqa: B902
raise ImportError(err)
class _Dummy(object, metaclass=_DummyMetaClass):
# throw error on constructor
def __init__(self, *args, **kwargs):
raise ImportError(err)
return _Dummy
def create_dummy_func(func, dependency, message=""):
"""
When a dependency of a function is not available, create a dummy function which throws
ImportError when used.
Args:
func (str): name of the function.
dependency (str or list[str]): name(s) of the dependency.
message: extra message to print
Returns:
function: a function object
"""
err = "Cannot import '{}', therefore '{}' is not available.".format(dependency, func)
if message:
err = err + " " + message
if isinstance(dependency, (list, tuple)):
dependency = ",".join(dependency)
def _dummy(*args, **kwargs):
raise ImportError(err)
return _dummy
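def _dummy_factory_example():
    """Illustrative sketch with hypothetical names: the dummy factories are meant
    to stub out an optional dependency so that importing a module still succeeds,
    while any actual use of the stub raises ImportError.
    """
    FakeKernel = create_dummy_class("FakeKernel", "some_compiled_extension")
    fake_fn = create_dummy_func("fake_fn", "some_compiled_extension")
    try:
        FakeKernel()  # constructing the dummy class raises ImportError
    except ImportError:
        pass
    try:
        fake_fn(1, 2)  # calling the dummy function raises ImportError
    except ImportError:
        pass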
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/models/GroundingDINO/ms_deform_attn.py |
# ------------------------------------------------------------------------
# Grounding DINO
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Conditional DETR model and criterion classes.
# Copyright (c) 2021 Microsoft. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# ------------------------------------------------------------------------
# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)
# Copyright (c) 2020 SenseTime. All Rights Reserved.
# ------------------------------------------------------------------------
import copy
from typing import List
import torch
import torch.nn.functional as F
from torch import nn
from groundingdino.util import get_tokenlizer
from groundingdino.util.misc import (
NestedTensor,
inverse_sigmoid,
nested_tensor_from_tensor_list,
)
from ..registry import MODULE_BUILD_FUNCS
from .backbone import build_backbone
from .bertwarper import (
BertModelWarper,
generate_masks_with_special_tokens_and_transfer_map,
)
from .transformer import build_transformer
from .utils import MLP, ContrastiveEmbed
class GroundingDINO(nn.Module):
"""This is the Cross-Attention Detector module that performs object detection"""
def __init__(
self,
backbone,
transformer,
num_queries,
aux_loss=False,
iter_update=False,
query_dim=2,
num_feature_levels=1,
nheads=8,
# two stage
two_stage_type="no", # ['no', 'standard']
dec_pred_bbox_embed_share=True,
two_stage_class_embed_share=True,
two_stage_bbox_embed_share=True,
num_patterns=0,
dn_number=100,
dn_box_noise_scale=0.4,
dn_label_noise_ratio=0.5,
dn_labelbook_size=100,
text_encoder_type="bert-base-uncased",
sub_sentence_present=True,
max_text_len=256,
):
"""Initializes the model.
Parameters:
backbone: torch module of the backbone to be used. See backbone.py
transformer: torch module of the transformer architecture. See transformer.py
            num_queries: number of object queries, i.e., detection slots. This is the maximal number of objects
                         the model can detect in a single image. For COCO, we recommend 100 queries.
aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
self.hidden_dim = hidden_dim = transformer.d_model
self.num_feature_levels = num_feature_levels
self.nheads = nheads
self.max_text_len = 256
self.sub_sentence_present = sub_sentence_present
# setting query dim
self.query_dim = query_dim
assert query_dim == 4
# for dn training
self.num_patterns = num_patterns
self.dn_number = dn_number
self.dn_box_noise_scale = dn_box_noise_scale
self.dn_label_noise_ratio = dn_label_noise_ratio
self.dn_labelbook_size = dn_labelbook_size
# bert
self.tokenizer = get_tokenlizer.get_tokenlizer(text_encoder_type)
self.bert = get_tokenlizer.get_pretrained_language_model(text_encoder_type)
self.bert.pooler.dense.weight.requires_grad_(False)
self.bert.pooler.dense.bias.requires_grad_(False)
self.bert = BertModelWarper(bert_model=self.bert)
self.feat_map = nn.Linear(self.bert.config.hidden_size, self.hidden_dim, bias=True)
nn.init.constant_(self.feat_map.bias.data, 0)
nn.init.xavier_uniform_(self.feat_map.weight.data)
# freeze
# special tokens
self.specical_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"])
# prepare input projection layers
if num_feature_levels > 1:
num_backbone_outs = len(backbone.num_channels)
input_proj_list = []
for _ in range(num_backbone_outs):
in_channels = backbone.num_channels[_]
input_proj_list.append(
nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
)
)
for _ in range(num_feature_levels - num_backbone_outs):
input_proj_list.append(
nn.Sequential(
nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1),
nn.GroupNorm(32, hidden_dim),
)
)
in_channels = hidden_dim
self.input_proj = nn.ModuleList(input_proj_list)
else:
assert two_stage_type == "no", "two_stage_type should be no if num_feature_levels=1 !!!"
self.input_proj = nn.ModuleList(
[
nn.Sequential(
nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1),
nn.GroupNorm(32, hidden_dim),
)
]
)
self.backbone = backbone
self.aux_loss = aux_loss
self.box_pred_damping = None
self.iter_update = iter_update
assert iter_update, "Why not iter_update?"
# prepare pred layers
self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share
# prepare class & box embed
_class_embed = ContrastiveEmbed()
_bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0)
nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0)
if dec_pred_bbox_embed_share:
box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)]
else:
box_embed_layerlist = [
copy.deepcopy(_bbox_embed) for i in range(transformer.num_decoder_layers)
]
class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)]
self.bbox_embed = nn.ModuleList(box_embed_layerlist)
self.class_embed = nn.ModuleList(class_embed_layerlist)
self.transformer.decoder.bbox_embed = self.bbox_embed
self.transformer.decoder.class_embed = self.class_embed
# two stage
self.two_stage_type = two_stage_type
assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format(
two_stage_type
)
if two_stage_type != "no":
if two_stage_bbox_embed_share:
assert dec_pred_bbox_embed_share
self.transformer.enc_out_bbox_embed = _bbox_embed
else:
self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed)
if two_stage_class_embed_share:
assert dec_pred_bbox_embed_share
self.transformer.enc_out_class_embed = _class_embed
else:
self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed)
self.refpoint_embed = None
self._reset_parameters()
def _reset_parameters(self):
# init input_proj
for proj in self.input_proj:
nn.init.xavier_uniform_(proj[0].weight, gain=1)
nn.init.constant_(proj[0].bias, 0)
def init_ref_points(self, use_num_queries):
self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim)
def forward(self, samples: NestedTensor, targets: List = None, **kw):
"""The forward expects a NestedTensor, which consists of:
- samples.tensor: batched images, of shape [batch_size x 3 x H x W]
- samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels
It returns a dict with the following elements:
- "pred_logits": the classification logits (including no-object) for all queries.
Shape= [batch_size x num_queries x num_classes]
- "pred_boxes": The normalized boxes coordinates for all queries, represented as
(center_x, center_y, width, height). These values are normalized in [0, 1],
relative to the size of each individual image (disregarding possible padding).
See PostProcess for information on how to retrieve the unnormalized bounding box.
                - "aux_outputs": Optional, only returned when auxiliary losses are activated. It is a list of
                                 dictionaries containing the two above keys for each decoder layer.
"""
if targets is None:
captions = kw["captions"]
else:
captions = [t["caption"] for t in targets]
# encoder texts
tokenized = self.tokenizer(captions, padding="longest", return_tensors="pt").to(
samples.device
)
(
text_self_attention_masks,
position_ids,
cate_to_token_mask_list,
) = generate_masks_with_special_tokens_and_transfer_map(
tokenized, self.specical_tokens, self.tokenizer
)
if text_self_attention_masks.shape[1] > self.max_text_len:
text_self_attention_masks = text_self_attention_masks[
:, : self.max_text_len, : self.max_text_len
]
position_ids = position_ids[:, : self.max_text_len]
tokenized["input_ids"] = tokenized["input_ids"][:, : self.max_text_len]
tokenized["attention_mask"] = tokenized["attention_mask"][:, : self.max_text_len]
tokenized["token_type_ids"] = tokenized["token_type_ids"][:, : self.max_text_len]
# extract text embeddings
if self.sub_sentence_present:
tokenized_for_encoder = {k: v for k, v in tokenized.items() if k != "attention_mask"}
tokenized_for_encoder["attention_mask"] = text_self_attention_masks
tokenized_for_encoder["position_ids"] = position_ids
else:
# import ipdb; ipdb.set_trace()
tokenized_for_encoder = tokenized
bert_output = self.bert(**tokenized_for_encoder) # bs, 195, 768
encoded_text = self.feat_map(bert_output["last_hidden_state"]) # bs, 195, d_model
text_token_mask = tokenized.attention_mask.bool() # bs, 195
# text_token_mask: True for nomask, False for mask
# text_self_attention_masks: True for nomask, False for mask
if encoded_text.shape[1] > self.max_text_len:
encoded_text = encoded_text[:, : self.max_text_len, :]
text_token_mask = text_token_mask[:, : self.max_text_len]
position_ids = position_ids[:, : self.max_text_len]
text_self_attention_masks = text_self_attention_masks[
:, : self.max_text_len, : self.max_text_len
]
text_dict = {
"encoded_text": encoded_text, # bs, 195, d_model
"text_token_mask": text_token_mask, # bs, 195
"position_ids": position_ids, # bs, 195
"text_self_attention_masks": text_self_attention_masks, # bs, 195,195
}
# import ipdb; ipdb.set_trace()
if isinstance(samples, (list, torch.Tensor)):
samples = nested_tensor_from_tensor_list(samples)
features, poss = self.backbone(samples)
srcs = []
masks = []
for l, feat in enumerate(features):
src, mask = feat.decompose()
srcs.append(self.input_proj[l](src))
masks.append(mask)
assert mask is not None
if self.num_feature_levels > len(srcs):
_len_srcs = len(srcs)
for l in range(_len_srcs, self.num_feature_levels):
if l == _len_srcs:
src = self.input_proj[l](features[-1].tensors)
else:
src = self.input_proj[l](srcs[-1])
m = samples.mask
mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0]
pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype)
srcs.append(src)
masks.append(mask)
poss.append(pos_l)
input_query_bbox = input_query_label = attn_mask = None
hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer(
srcs, masks, input_query_bbox, poss, input_query_label, attn_mask, text_dict
)
# deformable-detr-like anchor update
outputs_coord_list = []
for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(
zip(reference[:-1], self.bbox_embed, hs)
):
layer_delta_unsig = layer_bbox_embed(layer_hs)
layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig)
layer_outputs_unsig = layer_outputs_unsig.sigmoid()
outputs_coord_list.append(layer_outputs_unsig)
outputs_coord_list = torch.stack(outputs_coord_list)
# output
outputs_class = torch.stack(
[
layer_cls_embed(layer_hs, text_dict)
for layer_cls_embed, layer_hs in zip(self.class_embed, hs)
]
)
out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord_list[-1]}
# # for intermediate outputs
# if self.aux_loss:
# out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list)
# # for encoder output
# if hs_enc is not None:
# # prepare intermediate outputs
# interm_coord = ref_enc[-1]
# interm_class = self.transformer.enc_out_class_embed(hs_enc[-1], text_dict)
# out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord}
# out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal}
return out
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_coord):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
return [
{"pred_logits": a, "pred_boxes": b}
for a, b in zip(outputs_class[:-1], outputs_coord[:-1])
]
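def _postprocess_grounding_outputs(outputs, image_w, image_h, box_threshold=0.35):
    """Illustrative post-processing sketch (not the model's own API; the threshold
    value is an assumption). Expects the dict returned by GroundingDINO.forward for
    a single image: `pred_logits` are per-token similarity logits of shape
    (1, num_queries, max_text_len) and `pred_boxes` are normalized
    (center_x, center_y, width, height) boxes in [0, 1].
    """
    logits = outputs["pred_logits"].sigmoid()[0]  # (num_queries, max_text_len)
    boxes = outputs["pred_boxes"][0]  # (num_queries, 4), cxcywh in [0, 1]
    scores = logits.max(dim=1)[0]  # best matching token score per query
    keep = scores > box_threshold
    boxes = boxes[keep]
    cx, cy, w, h = boxes.unbind(-1)
    # convert normalized cxcywh to absolute xyxy pixel coordinates
    xyxy = torch.stack(
        [(cx - 0.5 * w) * image_w, (cy - 0.5 * h) * image_h,
         (cx + 0.5 * w) * image_w, (cy + 0.5 * h) * image_h],
        dim=-1,
    )
    return xyxy, scores[keep]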
@MODULE_BUILD_FUNCS.registe_with_name(module_name="groundingdino")
def build_groundingdino(args):
backbone = build_backbone(args)
transformer = build_transformer(args)
dn_labelbook_size = args.dn_labelbook_size
dec_pred_bbox_embed_share = args.dec_pred_bbox_embed_share
sub_sentence_present = args.sub_sentence_present
model = GroundingDINO(
backbone,
transformer,
num_queries=args.num_queries,
aux_loss=True,
iter_update=True,
query_dim=4,
num_feature_levels=args.num_feature_levels,
nheads=args.nheads,
dec_pred_bbox_embed_share=dec_pred_bbox_embed_share,
two_stage_type=args.two_stage_type,
two_stage_bbox_embed_share=args.two_stage_bbox_embed_share,
two_stage_class_embed_share=args.two_stage_class_embed_share,
num_patterns=args.num_patterns,
dn_number=0,
dn_box_noise_scale=args.dn_box_noise_scale,
dn_label_noise_ratio=args.dn_label_noise_ratio,
dn_labelbook_size=dn_labelbook_size,
text_encoder_type=args.text_encoder_type,
sub_sentence_present=sub_sentence_present,
max_text_len=args.max_text_len,
)
return model
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/models/GroundingDINO/groundingdino.py |
# ------------------------------------------------------------------------
# Grounding DINO
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
import torch
from torch import nn
from transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions
class BertModelWarper(nn.Module):
def __init__(self, bert_model):
super().__init__()
# self.bert = bert_modelc
self.config = bert_model.config
self.embeddings = bert_model.embeddings
self.encoder = bert_model.encoder
self.pooler = bert_model.pooler
self.get_extended_attention_mask = bert_model.get_extended_attention_mask
self.invert_attention_mask = bert_model.invert_attention_mask
self.get_head_mask = bert_model.get_head_mask
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
"""
output_attentions = (
output_attentions if output_attentions is not None else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
batch_size, seq_length = input_shape
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size, seq_length = input_shape
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = (
past_key_values[0][0].shape[2] if past_key_values is not None else 0
)
if attention_mask is None:
attention_mask = torch.ones(
((batch_size, seq_length + past_key_values_length)), device=device
)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
attention_mask, input_shape, device
)
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':
# import ipdb; ipdb.set_trace()
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
class TextEncoderShell(nn.Module):
def __init__(self, text_encoder):
super().__init__()
self.text_encoder = text_encoder
self.config = self.text_encoder.config
def forward(self, **kw):
# feed into text encoder
return self.text_encoder(**kw)
def generate_masks_with_special_tokens(tokenized, special_tokens_list, tokenizer):
"""Generate attention mask between each pair of special tokens
Args:
        tokenized (dict): tokenizer output containing "input_ids" of shape [bs, num_token].
        special_tokens_list (list): ids of the special tokens used to split the caption.
    Returns:
        (torch.Tensor, torch.Tensor): attention mask between each pair of special tokens,
            and per-token position ids.
"""
input_ids = tokenized["input_ids"]
bs, num_token = input_ids.shape
# special_tokens_mask: bs, num_token. 1 for special tokens. 0 for normal tokens
special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()
for special_token in special_tokens_list:
special_tokens_mask |= input_ids == special_token
# idxs: each row is a list of indices of special tokens
idxs = torch.nonzero(special_tokens_mask)
# generate attention mask and positional ids
attention_mask = (
torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)
)
position_ids = torch.zeros((bs, num_token), device=input_ids.device)
previous_col = 0
for i in range(idxs.shape[0]):
row, col = idxs[i]
if (col == 0) or (col == num_token - 1):
attention_mask[row, col, col] = True
position_ids[row, col] = 0
else:
attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True
position_ids[row, previous_col + 1 : col + 1] = torch.arange(
0, col - previous_col, device=input_ids.device
)
previous_col = col
# # padding mask
# padding_mask = tokenized['attention_mask']
# attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()
return attention_mask, position_ids.to(torch.long)
def generate_masks_with_special_tokens_and_transfer_map(tokenized, special_tokens_list, tokenizer):
"""Generate attention mask between each pair of special tokens
Args:
        tokenized (dict): tokenizer output containing "input_ids" of shape [bs, num_token].
        special_tokens_list (list): ids of the special tokens used to split the caption.
    Returns:
        (torch.Tensor, torch.Tensor, list[torch.Tensor]): attention mask between each pair of
            special tokens, per-token position ids, and a per-sample list of category-to-token masks.
"""
input_ids = tokenized["input_ids"]
bs, num_token = input_ids.shape
# special_tokens_mask: bs, num_token. 1 for special tokens. 0 for normal tokens
special_tokens_mask = torch.zeros((bs, num_token), device=input_ids.device).bool()
for special_token in special_tokens_list:
special_tokens_mask |= input_ids == special_token
# idxs: each row is a list of indices of special tokens
idxs = torch.nonzero(special_tokens_mask)
# generate attention mask and positional ids
attention_mask = (
torch.eye(num_token, device=input_ids.device).bool().unsqueeze(0).repeat(bs, 1, 1)
)
position_ids = torch.zeros((bs, num_token), device=input_ids.device)
cate_to_token_mask_list = [[] for _ in range(bs)]
previous_col = 0
for i in range(idxs.shape[0]):
row, col = idxs[i]
if (col == 0) or (col == num_token - 1):
attention_mask[row, col, col] = True
position_ids[row, col] = 0
else:
attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True
position_ids[row, previous_col + 1 : col + 1] = torch.arange(
0, col - previous_col, device=input_ids.device
)
c2t_maski = torch.zeros((num_token), device=input_ids.device).bool()
c2t_maski[previous_col + 1 : col] = True
cate_to_token_mask_list[row].append(c2t_maski)
previous_col = col
cate_to_token_mask_list = [
torch.stack(cate_to_token_mask_listi, dim=0)
for cate_to_token_mask_listi in cate_to_token_mask_list
]
# # padding mask
# padding_mask = tokenized['attention_mask']
# attention_mask = attention_mask & padding_mask.unsqueeze(1).bool() & padding_mask.unsqueeze(2).bool()
return attention_mask, position_ids.to(torch.long), cate_to_token_mask_list
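def _special_token_mask_example():
    """Illustrative sketch with hypothetical token ids: for a caption such as
    "cat . dog .", tokens between consecutive special tokens ([CLS], ".", [SEP])
    attend only to each other, so the returned mask is block diagonal and every
    category contributes one entry to `cate_to_token_mask_list`.
    """
    input_ids = torch.tensor([[101, 3000, 1012, 4000, 1012, 102]])  # [CLS] cat . dog . [SEP]
    tokenized = {"input_ids": input_ids}
    special_tokens = [101, 102, 1012]  # hypothetical ids for [CLS], [SEP], "."
    attn_mask, pos_ids, cate_masks = generate_masks_with_special_tokens_and_transfer_map(
        tokenized, special_tokens, tokenizer=None  # the tokenizer argument is unused by this helper
    )
    assert attn_mask.shape == (1, 6, 6)  # "cat ." and "dog ." form two independent blocks
    assert len(cate_masks[0]) == 2  # one category-to-token mask per phrase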
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/models/GroundingDINO/bertwarper.py |
# ------------------------------------------------------------------------
# Grounding DINO
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Conditional DETR
# Copyright (c) 2021 Microsoft. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Copied from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# ------------------------------------------------------------------------
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/models/GroundingDINO/__init__.py |
# ------------------------------------------------------------------------
# Grounding DINO
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
from typing import Optional
import torch
from torch import Tensor, nn
from .utils import (
_get_activation_fn,
_get_clones,
)
class TextTransformer(nn.Module):
def __init__(self, num_layers, d_model=256, nheads=8, dim_feedforward=2048, dropout=0.1):
super().__init__()
self.num_layers = num_layers
self.d_model = d_model
self.nheads = nheads
self.dim_feedforward = dim_feedforward
self.norm = None
single_encoder_layer = TransformerEncoderLayer(
d_model=d_model, nhead=nheads, dim_feedforward=dim_feedforward, dropout=dropout
)
self.layers = _get_clones(single_encoder_layer, num_layers)
def forward(self, memory_text: torch.Tensor, text_attention_mask: torch.Tensor):
"""
        Args:
            memory_text: bs, num_token, d_model
            text_attention_mask: bs, num_token
        Returns:
            output: bs, num_token, d_model
"""
output = memory_text.transpose(0, 1)
for layer in self.layers:
output = layer(output, src_key_padding_mask=text_attention_mask)
if self.norm is not None:
output = self.norm(output)
return output.transpose(0, 1)
class TransformerEncoderLayer(nn.Module):
def __init__(
self,
d_model,
nhead,
dim_feedforward=2048,
dropout=0.1,
activation="relu",
normalize_before=False,
):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
self.nhead = nhead
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward(
self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
):
# repeat attn mask
        if src_mask is not None and src_mask.dim() == 3 and src_mask.shape[0] == src.shape[1]:
# bs, num_q, num_k
src_mask = src_mask.repeat(self.nhead, 1, 1)
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(q, k, value=src, attn_mask=src_mask)[0]
# src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
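def _encoder_layer_mask_example():
    """Illustrative sketch with hypothetical sizes: when a per-sample attention
    mask of shape (bs, num_token, num_token) is passed, the layer repeats it
    across the attention heads before calling nn.MultiheadAttention.
    """
    layer = TransformerEncoderLayer(d_model=32, nhead=4, dim_feedforward=64)
    num_token, bs = 6, 2
    src = torch.rand(num_token, bs, 32)  # (seq, batch, d_model)
    src_mask = torch.zeros(bs, num_token, num_token, dtype=torch.bool)  # True would mask a position
    out = layer(src, src_mask=src_mask)
    assert out.shape == (num_token, bs, 32)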
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/models/GroundingDINO/transformer_vanilla.py |
# ------------------------------------------------------------------------
# Grounding DINO
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
import copy
import math
import torch
import torch.nn.functional as F
from torch import Tensor, nn
def _get_clones(module, N, layer_share=False):
# import ipdb; ipdb.set_trace()
if layer_share:
return nn.ModuleList([module for i in range(N)])
else:
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def get_sine_pos_embed(
pos_tensor: torch.Tensor,
num_pos_feats: int = 128,
temperature: int = 10000,
exchange_xy: bool = True,
):
"""generate sine position embedding from a position tensor
Args:
pos_tensor (torch.Tensor): shape: [..., n].
num_pos_feats (int): projected shape for each float in the tensor.
temperature (int): temperature in the sine/cosine function.
        exchange_xy (bool, optional): exchange pos x and pos y. \
            For example, if the input tensor is [x, y], the result will be [pos(y), pos(x)]. Defaults to True.
Returns:
pos_embed (torch.Tensor): shape: [..., n*num_pos_feats].
"""
scale = 2 * math.pi
dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=pos_tensor.device)
dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)
def sine_func(x: torch.Tensor):
sin_x = x * scale / dim_t
sin_x = torch.stack((sin_x[..., 0::2].sin(), sin_x[..., 1::2].cos()), dim=3).flatten(2)
return sin_x
pos_res = [sine_func(x) for x in pos_tensor.split([1] * pos_tensor.shape[-1], dim=-1)]
if exchange_xy:
pos_res[0], pos_res[1] = pos_res[1], pos_res[0]
pos_res = torch.cat(pos_res, dim=-1)
return pos_res
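def _sine_pos_embed_example():
    """Illustrative shape check (hypothetical sizes): every scalar in the last
    dimension of `pos_tensor` is expanded into `num_pos_feats` sine/cosine
    features, so (bs, n_token, 1) position ids become (bs, n_token, num_pos_feats).
    """
    position_ids = torch.arange(5, dtype=torch.float32).view(1, 5, 1)
    pos_embed = get_sine_pos_embed(position_ids, num_pos_feats=256, exchange_xy=False)
    assert pos_embed.shape == (1, 5, 256)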
def gen_encoder_output_proposals(
memory: Tensor, memory_padding_mask: Tensor, spatial_shapes: Tensor, learnedwh=None
):
"""
Input:
- memory: bs, \sum{hw}, d_model
- memory_padding_mask: bs, \sum{hw}
- spatial_shapes: nlevel, 2
- learnedwh: 2
Output:
- output_memory: bs, \sum{hw}, d_model
- output_proposals: bs, \sum{hw}, 4
"""
N_, S_, C_ = memory.shape
proposals = []
_cur = 0
for lvl, (H_, W_) in enumerate(spatial_shapes):
mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(N_, H_, W_, 1)
valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
# import ipdb; ipdb.set_trace()
grid_y, grid_x = torch.meshgrid(
torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),
torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device),
)
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) # H_, W_, 2
scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)
grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
if learnedwh is not None:
# import ipdb; ipdb.set_trace()
wh = torch.ones_like(grid) * learnedwh.sigmoid() * (2.0**lvl)
else:
wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)
# scale = torch.cat([W_[None].unsqueeze(-1), H_[None].unsqueeze(-1)], 1).view(1, 1, 1, 2).repeat(N_, 1, 1, 1)
# grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale
# wh = torch.ones_like(grid) / scale
proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)
proposals.append(proposal)
_cur += H_ * W_
# import ipdb; ipdb.set_trace()
output_proposals = torch.cat(proposals, 1)
output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(
-1, keepdim=True
)
output_proposals = torch.log(output_proposals / (1 - output_proposals)) # unsigmoid
output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float("inf"))
output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf"))
output_memory = memory
output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))
output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))
# output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))
# output_memory = output_memory.masked_fill(~output_proposals_valid, float('inf'))
return output_memory, output_proposals
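def _encoder_proposals_example():
    """Illustrative shape sketch (hypothetical sizes): each encoder token yields a
    proposal whose center is the token's grid location and whose width/height
    default to 0.05 * 2**level, returned in unsigmoided (logit) space together
    with a masked copy of the memory.
    """
    bs, d_model = 2, 32
    spatial_shapes = torch.as_tensor([[4, 4], [2, 2]], dtype=torch.long)  # two feature levels
    num_tokens = int(spatial_shapes.prod(1).sum())  # 16 + 4 = 20
    memory = torch.rand(bs, num_tokens, d_model)
    padding_mask = torch.zeros(bs, num_tokens, dtype=torch.bool)  # no padding
    out_memory, out_proposals = gen_encoder_output_proposals(memory, padding_mask, spatial_shapes)
    assert out_memory.shape == (bs, num_tokens, d_model)
    assert out_proposals.shape == (bs, num_tokens, 4)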
class RandomBoxPerturber:
def __init__(
self, x_noise_scale=0.2, y_noise_scale=0.2, w_noise_scale=0.2, h_noise_scale=0.2
) -> None:
self.noise_scale = torch.Tensor(
[x_noise_scale, y_noise_scale, w_noise_scale, h_noise_scale]
)
def __call__(self, refanchors: Tensor) -> Tensor:
nq, bs, query_dim = refanchors.shape
device = refanchors.device
noise_raw = torch.rand_like(refanchors)
noise_scale = self.noise_scale.to(device)[:query_dim]
new_refanchors = refanchors * (1 + (noise_raw - 0.5) * noise_scale)
return new_refanchors.clamp_(0, 1)
def sigmoid_focal_loss(
inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2, no_reduction=False
):
"""
Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.
Args:
inputs: A float tensor of arbitrary shape.
The predictions for each example.
targets: A float tensor with the same shape as inputs. Stores the binary
classification label for each element in inputs
(0 for the negative class and 1 for the positive class).
        alpha: (optional) Weighting factor in range (0, 1) to balance
               positive vs negative examples. Default: 0.25.
gamma: Exponent of the modulating factor (1 - p_t) to
balance easy vs hard examples.
Returns:
Loss tensor
"""
prob = inputs.sigmoid()
ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
p_t = prob * targets + (1 - prob) * (1 - targets)
loss = ce_loss * ((1 - p_t) ** gamma)
if alpha >= 0:
alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
loss = alpha_t * loss
if no_reduction:
return loss
return loss.mean(1).sum() / num_boxes
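def _focal_loss_example():
    """Illustrative numeric sketch (hypothetical values): with gamma=2 the
    modulating factor (1 - p_t) ** gamma down-weights well-classified examples,
    so a confident correct prediction contributes far less loss than an
    uncertain one.
    """
    logits = torch.tensor([[4.0, 0.0]])  # one confident positive, one uncertain
    targets = torch.tensor([[1.0, 1.0]])
    per_element = sigmoid_focal_loss(logits, targets, num_boxes=1, no_reduction=True)
    assert per_element[0, 0] < per_element[0, 1]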
class MLP(nn.Module):
"""Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
)
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
def _get_activation_fn(activation, d_model=256, batch_dim=0):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
if activation == "prelu":
return nn.PReLU()
if activation == "selu":
return F.selu
raise RuntimeError(f"activation should be relu/gelu, not {activation}.")
def gen_sineembed_for_position(pos_tensor):
# n_query, bs, _ = pos_tensor.size()
# sineembed_tensor = torch.zeros(n_query, bs, 256)
scale = 2 * math.pi
dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device)
dim_t = 10000 ** (2 * (torch.div(dim_t, 2, rounding_mode='floor')) / 128)
x_embed = pos_tensor[:, :, 0] * scale
y_embed = pos_tensor[:, :, 1] * scale
pos_x = x_embed[:, :, None] / dim_t
pos_y = y_embed[:, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)
pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)
if pos_tensor.size(-1) == 2:
pos = torch.cat((pos_y, pos_x), dim=2)
elif pos_tensor.size(-1) == 4:
w_embed = pos_tensor[:, :, 2] * scale
pos_w = w_embed[:, :, None] / dim_t
pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), dim=3).flatten(2)
h_embed = pos_tensor[:, :, 3] * scale
pos_h = h_embed[:, :, None] / dim_t
pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2)
pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)
else:
raise ValueError("Unknown pos_tensor shape(-1):{}".format(pos_tensor.size(-1)))
return pos
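def _sineembed_for_position_example():
    """Illustrative shape sketch (hypothetical sizes): 4-dim reference boxes
    (cx, cy, w, h) of shape (num_query, bs, 4) are mapped to sine embeddings of
    shape (num_query, bs, 512), with 128 features per coordinate.
    """
    boxes = torch.rand(7, 2, 4)
    emb = gen_sineembed_for_position(boxes)
    assert emb.shape == (7, 2, 512)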
class ContrastiveEmbed(nn.Module):
def __init__(self, max_text_len=256):
"""
Args:
max_text_len: max length of text.
"""
super().__init__()
self.max_text_len = max_text_len
    def forward(self, x, text_dict):
        """Compute similarity logits between decoder queries and text tokens.
        Args:
            x (torch.Tensor): query features with shape (bs, num_queries, d_model).
            text_dict (dict): text features, e.g.
            {
            'encoded_text': encoded_text, # bs, 195, d_model
            'text_token_mask': text_token_mask, # bs, 195
            # True for used tokens. False for padding tokens
            }
        Returns:
            torch.Tensor: logits with shape (bs, num_queries, max_text_len), padded with -inf.
        """
assert isinstance(text_dict, dict)
y = text_dict["encoded_text"]
text_token_mask = text_dict["text_token_mask"]
res = x @ y.transpose(-1, -2)
res.masked_fill_(~text_token_mask[:, None, :], float("-inf"))
# padding to max_text_len
new_res = torch.full((*res.shape[:-1], self.max_text_len), float("-inf"), device=res.device)
new_res[..., : res.shape[-1]] = res
return new_res
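def _contrastive_embed_example():
    """Illustrative shape sketch (hypothetical sizes): query features are scored
    against every text token with a dot product, padding tokens are masked to
    -inf, and the result is padded to a fixed max_text_len so the classification
    head has a constant output width.
    """
    embed = ContrastiveEmbed(max_text_len=256)
    bs, num_queries, num_tokens, d_model = 2, 5, 7, 32
    x = torch.rand(bs, num_queries, d_model)
    text_dict = {
        "encoded_text": torch.rand(bs, num_tokens, d_model),
        "text_token_mask": torch.ones(bs, num_tokens, dtype=torch.bool),
    }
    logits = embed(x, text_dict)
    assert logits.shape == (bs, num_queries, 256)
    assert torch.isinf(logits[..., num_tokens:]).all()  # padded positions are -inf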
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/models/GroundingDINO/utils.py |
# ------------------------------------------------------------------------
# Grounding DINO
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# DINO
# Copyright (c) 2022 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Conditional DETR Transformer class.
# Copyright (c) 2021 Microsoft. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# ------------------------------------------------------------------------
from typing import Optional
import warnings
import torch
import torch.utils.checkpoint as checkpoint
from torch import Tensor, nn
from groundingdino.util.misc import inverse_sigmoid
from .fuse_modules import BiAttentionBlock
from .ms_deform_attn import MultiScaleDeformableAttention as MSDeformAttn
from .transformer_vanilla import TransformerEncoderLayer
from .utils import (
MLP,
_get_activation_fn,
_get_clones,
gen_encoder_output_proposals,
gen_sineembed_for_position,
get_sine_pos_embed,
)
class Transformer(nn.Module):
def __init__(
self,
d_model=256,
nhead=8,
num_queries=300,
num_encoder_layers=6,
num_unicoder_layers=0,
num_decoder_layers=6,
dim_feedforward=2048,
dropout=0.0,
activation="relu",
normalize_before=False,
return_intermediate_dec=False,
query_dim=4,
num_patterns=0,
# for deformable encoder
num_feature_levels=1,
enc_n_points=4,
dec_n_points=4,
# init query
learnable_tgt_init=False,
# two stage
two_stage_type="no", # ['no', 'standard', 'early', 'combine', 'enceachlayer', 'enclayer1']
embed_init_tgt=False,
# for text
use_text_enhancer=False,
use_fusion_layer=False,
use_checkpoint=False,
use_transformer_ckpt=False,
use_text_cross_attention=False,
text_dropout=0.1,
fusion_dropout=0.1,
fusion_droppath=0.0,
):
super().__init__()
self.num_feature_levels = num_feature_levels
self.num_encoder_layers = num_encoder_layers
self.num_unicoder_layers = num_unicoder_layers
self.num_decoder_layers = num_decoder_layers
self.num_queries = num_queries
assert query_dim == 4
# choose encoder layer type
encoder_layer = DeformableTransformerEncoderLayer(
d_model, dim_feedforward, dropout, activation, num_feature_levels, nhead, enc_n_points
)
if use_text_enhancer:
text_enhance_layer = TransformerEncoderLayer(
d_model=d_model,
nhead=nhead // 2,
dim_feedforward=dim_feedforward // 2,
dropout=text_dropout,
)
else:
text_enhance_layer = None
if use_fusion_layer:
feature_fusion_layer = BiAttentionBlock(
v_dim=d_model,
l_dim=d_model,
embed_dim=dim_feedforward // 2,
num_heads=nhead // 2,
dropout=fusion_dropout,
drop_path=fusion_droppath,
)
else:
feature_fusion_layer = None
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
assert encoder_norm is None
self.encoder = TransformerEncoder(
encoder_layer,
num_encoder_layers,
d_model=d_model,
num_queries=num_queries,
text_enhance_layer=text_enhance_layer,
feature_fusion_layer=feature_fusion_layer,
use_checkpoint=use_checkpoint,
use_transformer_ckpt=use_transformer_ckpt,
)
# choose decoder layer type
decoder_layer = DeformableTransformerDecoderLayer(
d_model,
dim_feedforward,
dropout,
activation,
num_feature_levels,
nhead,
dec_n_points,
use_text_cross_attention=use_text_cross_attention,
)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(
decoder_layer,
num_decoder_layers,
decoder_norm,
return_intermediate=return_intermediate_dec,
d_model=d_model,
query_dim=query_dim,
num_feature_levels=num_feature_levels,
)
self.d_model = d_model
self.nhead = nhead
self.dec_layers = num_decoder_layers
self.num_queries = num_queries # useful for single stage model only
self.num_patterns = num_patterns
if not isinstance(num_patterns, int):
            warnings.warn("num_patterns should be int but {}".format(type(num_patterns)))
self.num_patterns = 0
if num_feature_levels > 1:
if self.num_encoder_layers > 0:
self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))
else:
self.level_embed = None
self.learnable_tgt_init = learnable_tgt_init
assert learnable_tgt_init, "why not learnable_tgt_init"
self.embed_init_tgt = embed_init_tgt
if (two_stage_type != "no" and embed_init_tgt) or (two_stage_type == "no"):
self.tgt_embed = nn.Embedding(self.num_queries, d_model)
nn.init.normal_(self.tgt_embed.weight.data)
else:
self.tgt_embed = None
# for two stage
self.two_stage_type = two_stage_type
assert two_stage_type in ["no", "standard"], "unknown param {} of two_stage_type".format(
two_stage_type
)
if two_stage_type == "standard":
# anchor selection at the output of encoder
self.enc_output = nn.Linear(d_model, d_model)
self.enc_output_norm = nn.LayerNorm(d_model)
self.two_stage_wh_embedding = None
if two_stage_type == "no":
self.init_ref_points(num_queries) # init self.refpoint_embed
self.enc_out_class_embed = None
self.enc_out_bbox_embed = None
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules():
if isinstance(m, MSDeformAttn):
m._reset_parameters()
if self.num_feature_levels > 1 and self.level_embed is not None:
nn.init.normal_(self.level_embed)
def get_valid_ratio(self, mask):
_, H, W = mask.shape
valid_H = torch.sum(~mask[:, :, 0], 1)
valid_W = torch.sum(~mask[:, 0, :], 1)
valid_ratio_h = valid_H.float() / H
valid_ratio_w = valid_W.float() / W
valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
return valid_ratio
def init_ref_points(self, use_num_queries):
self.refpoint_embed = nn.Embedding(use_num_queries, 4)
def forward(self, srcs, masks, refpoint_embed, pos_embeds, tgt, attn_mask=None, text_dict=None):
"""
Input:
- srcs: List of multi features [bs, ci, hi, wi]
- masks: List of multi masks [bs, hi, wi]
- refpoint_embed: [bs, num_dn, 4]. None in infer
- pos_embeds: List of multi pos embeds [bs, ci, hi, wi]
- tgt: [bs, num_dn, d_model]. None in infer
"""
# prepare input for encoder
src_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes = []
for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
bs, c, h, w = src.shape
spatial_shape = (h, w)
spatial_shapes.append(spatial_shape)
src = src.flatten(2).transpose(1, 2) # bs, hw, c
mask = mask.flatten(1) # bs, hw
pos_embed = pos_embed.flatten(2).transpose(1, 2) # bs, hw, c
if self.num_feature_levels > 1 and self.level_embed is not None:
lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
else:
lvl_pos_embed = pos_embed
lvl_pos_embed_flatten.append(lvl_pos_embed)
src_flatten.append(src)
mask_flatten.append(mask)
src_flatten = torch.cat(src_flatten, 1) # bs, \sum{hxw}, c
mask_flatten = torch.cat(mask_flatten, 1) # bs, \sum{hxw}
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) # bs, \sum{hxw}, c
spatial_shapes = torch.as_tensor(
spatial_shapes, dtype=torch.long, device=src_flatten.device
)
level_start_index = torch.cat(
(spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])
)
valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)
# two stage
#########################################################
# Begin Encoder
#########################################################
memory, memory_text = self.encoder(
src_flatten,
pos=lvl_pos_embed_flatten,
level_start_index=level_start_index,
spatial_shapes=spatial_shapes,
valid_ratios=valid_ratios,
key_padding_mask=mask_flatten,
memory_text=text_dict["encoded_text"],
text_attention_mask=~text_dict["text_token_mask"],
# we ~ the mask . False means use the token; True means pad the token
position_ids=text_dict["position_ids"],
text_self_attention_masks=text_dict["text_self_attention_masks"],
)
#########################################################
# End Encoder
# - memory: bs, \sum{hw}, c
# - mask_flatten: bs, \sum{hw}
# - lvl_pos_embed_flatten: bs, \sum{hw}, c
# - enc_intermediate_output: None or (nenc+1, bs, nq, c) or (nenc, bs, nq, c)
# - enc_intermediate_refpoints: None or (nenc+1, bs, nq, c) or (nenc, bs, nq, c)
#########################################################
text_dict["encoded_text"] = memory_text
# if os.environ.get("SHILONG_AMP_INFNAN_DEBUG") == '1':
# if memory.isnan().any() | memory.isinf().any():
# import ipdb; ipdb.set_trace()
if self.two_stage_type == "standard":
output_memory, output_proposals = gen_encoder_output_proposals(
memory, mask_flatten, spatial_shapes
)
output_memory = self.enc_output_norm(self.enc_output(output_memory))
if text_dict is not None:
enc_outputs_class_unselected = self.enc_out_class_embed(output_memory, text_dict)
else:
enc_outputs_class_unselected = self.enc_out_class_embed(output_memory)
topk_logits = enc_outputs_class_unselected.max(-1)[0]
enc_outputs_coord_unselected = (
self.enc_out_bbox_embed(output_memory) + output_proposals
) # (bs, \sum{hw}, 4) unsigmoid
topk = self.num_queries
topk_proposals = torch.topk(topk_logits, topk, dim=1)[1] # bs, nq
# gather boxes
refpoint_embed_undetach = torch.gather(
enc_outputs_coord_unselected, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)
) # unsigmoid
refpoint_embed_ = refpoint_embed_undetach.detach()
init_box_proposal = torch.gather(
output_proposals, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)
).sigmoid() # sigmoid
# gather tgt
tgt_undetach = torch.gather(
output_memory, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model)
)
if self.embed_init_tgt:
tgt_ = (
self.tgt_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1)
) # nq, bs, d_model
else:
tgt_ = tgt_undetach.detach()
if refpoint_embed is not None:
refpoint_embed = torch.cat([refpoint_embed, refpoint_embed_], dim=1)
tgt = torch.cat([tgt, tgt_], dim=1)
else:
refpoint_embed, tgt = refpoint_embed_, tgt_
elif self.two_stage_type == "no":
tgt_ = (
self.tgt_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1)
) # nq, bs, d_model
refpoint_embed_ = (
self.refpoint_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1)
) # nq, bs, 4
if refpoint_embed is not None:
refpoint_embed = torch.cat([refpoint_embed, refpoint_embed_], dim=1)
tgt = torch.cat([tgt, tgt_], dim=1)
else:
refpoint_embed, tgt = refpoint_embed_, tgt_
if self.num_patterns > 0:
tgt_embed = tgt.repeat(1, self.num_patterns, 1)
refpoint_embed = refpoint_embed.repeat(1, self.num_patterns, 1)
tgt_pat = self.patterns.weight[None, :, :].repeat_interleave(
self.num_queries, 1
) # 1, n_q*n_pat, d_model
tgt = tgt_embed + tgt_pat
init_box_proposal = refpoint_embed_.sigmoid()
else:
raise NotImplementedError("unknown two_stage_type {}".format(self.two_stage_type))
#########################################################
# End preparing tgt
# - tgt: bs, NQ, d_model
# - refpoint_embed(unsigmoid): bs, NQ, d_model
#########################################################
#########################################################
# Begin Decoder
#########################################################
hs, references = self.decoder(
tgt=tgt.transpose(0, 1),
memory=memory.transpose(0, 1),
memory_key_padding_mask=mask_flatten,
pos=lvl_pos_embed_flatten.transpose(0, 1),
refpoints_unsigmoid=refpoint_embed.transpose(0, 1),
level_start_index=level_start_index,
spatial_shapes=spatial_shapes,
valid_ratios=valid_ratios,
tgt_mask=attn_mask,
memory_text=text_dict["encoded_text"],
text_attention_mask=~text_dict["text_token_mask"],
# we ~ the mask . False means use the token; True means pad the token
)
#########################################################
# End Decoder
# hs: n_dec, bs, nq, d_model
# references: n_dec+1, bs, nq, query_dim
#########################################################
#########################################################
# Begin postprocess
#########################################################
if self.two_stage_type == "standard":
hs_enc = tgt_undetach.unsqueeze(0)
ref_enc = refpoint_embed_undetach.sigmoid().unsqueeze(0)
else:
hs_enc = ref_enc = None
#########################################################
# End postprocess
# hs_enc: (n_enc+1, bs, nq, d_model) or (1, bs, nq, d_model) or (n_enc, bs, nq, d_model) or None
# ref_enc: (n_enc+1, bs, nq, query_dim) or (1, bs, nq, query_dim) or (n_enc, bs, nq, d_model) or None
#########################################################
return hs, references, hs_enc, ref_enc, init_box_proposal
# hs: (n_dec, bs, nq, d_model)
# references: sigmoid coordinates. (n_dec+1, bs, bq, 4)
# hs_enc: (n_enc+1, bs, nq, d_model) or (1, bs, nq, d_model) or None
# ref_enc: sigmoid coordinates. \
# (n_enc+1, bs, nq, query_dim) or (1, bs, nq, query_dim) or None
class TransformerEncoder(nn.Module):
def __init__(
self,
encoder_layer,
num_layers,
d_model=256,
num_queries=300,
enc_layer_share=False,
text_enhance_layer=None,
feature_fusion_layer=None,
use_checkpoint=False,
use_transformer_ckpt=False,
    ):
        """Deformable encoder that optionally interleaves image-text fusion and text enhancement layers.
        Args:
            encoder_layer (nn.Module): deformable encoder layer to be cloned.
            num_layers (int): number of encoder layers.
            d_model (int, optional): feature dimension. Defaults to 256.
            num_queries (int, optional): number of queries. Defaults to 300.
            enc_layer_share (bool, optional): whether the cloned layers share parameters. Defaults to False.
            text_enhance_layer (nn.Module, optional): text self-attention layer applied per encoder layer.
            feature_fusion_layer (nn.Module, optional): image-text bi-attention fusion layer.
        """
super().__init__()
# prepare layers
self.layers = []
self.text_layers = []
self.fusion_layers = []
if num_layers > 0:
self.layers = _get_clones(encoder_layer, num_layers, layer_share=enc_layer_share)
if text_enhance_layer is not None:
self.text_layers = _get_clones(
text_enhance_layer, num_layers, layer_share=enc_layer_share
)
if feature_fusion_layer is not None:
self.fusion_layers = _get_clones(
feature_fusion_layer, num_layers, layer_share=enc_layer_share
)
else:
self.layers = []
del encoder_layer
if text_enhance_layer is not None:
self.text_layers = []
del text_enhance_layer
if feature_fusion_layer is not None:
self.fusion_layers = []
del feature_fusion_layer
self.query_scale = None
self.num_queries = num_queries
self.num_layers = num_layers
self.d_model = d_model
self.use_checkpoint = use_checkpoint
self.use_transformer_ckpt = use_transformer_ckpt
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, device):
reference_points_list = []
for lvl, (H_, W_) in enumerate(spatial_shapes):
ref_y, ref_x = torch.meshgrid(
torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),
torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device),
)
ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)
ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
reference_points = torch.cat(reference_points_list, 1)
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
return reference_points
def forward(
self,
# for images
src: Tensor,
pos: Tensor,
spatial_shapes: Tensor,
level_start_index: Tensor,
valid_ratios: Tensor,
key_padding_mask: Tensor,
# for texts
memory_text: Tensor = None,
text_attention_mask: Tensor = None,
pos_text: Tensor = None,
text_self_attention_masks: Tensor = None,
position_ids: Tensor = None,
):
"""
Input:
- src: [bs, sum(hi*wi), 256]
- pos: pos embed for src. [bs, sum(hi*wi), 256]
- spatial_shapes: h,w of each level [num_level, 2]
- level_start_index: [num_level] start point of level in sum(hi*wi).
- valid_ratios: [bs, num_level, 2]
- key_padding_mask: [bs, sum(hi*wi)]
- memory_text: bs, n_text, 256
- text_attention_mask: bs, n_text
False for no padding; True for padding
- pos_text: bs, n_text, 256
- position_ids: bs, n_text
        Intermediate:
            - reference_points: [bs, sum(hi*wi), num_level, 2]
        Outputs:
- output: [bs, sum(hi*wi), 256]
"""
output = src
# preparation and reshape
if self.num_layers > 0:
reference_points = self.get_reference_points(
spatial_shapes, valid_ratios, device=src.device
)
if self.text_layers:
# generate pos_text
bs, n_text, text_dim = memory_text.shape
if pos_text is None and position_ids is None:
pos_text = (
torch.arange(n_text, device=memory_text.device)
.float()
.unsqueeze(0)
.unsqueeze(-1)
.repeat(bs, 1, 1)
)
pos_text = get_sine_pos_embed(pos_text, num_pos_feats=256, exchange_xy=False)
if position_ids is not None:
pos_text = get_sine_pos_embed(
position_ids[..., None], num_pos_feats=256, exchange_xy=False
)
# main process
for layer_id, layer in enumerate(self.layers):
# if output.isnan().any() or memory_text.isnan().any():
# if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':
# import ipdb; ipdb.set_trace()
if self.fusion_layers:
if self.use_checkpoint:
output, memory_text = checkpoint.checkpoint(
self.fusion_layers[layer_id],
output,
memory_text,
key_padding_mask,
text_attention_mask,
)
else:
output, memory_text = self.fusion_layers[layer_id](
v=output,
l=memory_text,
attention_mask_v=key_padding_mask,
attention_mask_l=text_attention_mask,
)
if self.text_layers:
memory_text = self.text_layers[layer_id](
src=memory_text.transpose(0, 1),
src_mask=~text_self_attention_masks, # note we use ~ for mask here
src_key_padding_mask=text_attention_mask,
pos=(pos_text.transpose(0, 1) if pos_text is not None else None),
).transpose(0, 1)
# main process
if self.use_transformer_ckpt:
output = checkpoint.checkpoint(
layer,
output,
pos,
reference_points,
spatial_shapes,
level_start_index,
key_padding_mask,
)
else:
output = layer(
src=output,
pos=pos,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
key_padding_mask=key_padding_mask,
)
return output, memory_text
class TransformerDecoder(nn.Module):
def __init__(
self,
decoder_layer,
num_layers,
norm=None,
return_intermediate=False,
d_model=256,
query_dim=4,
num_feature_levels=1,
):
super().__init__()
if num_layers > 0:
self.layers = _get_clones(decoder_layer, num_layers)
else:
self.layers = []
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
assert return_intermediate, "support return_intermediate only"
self.query_dim = query_dim
assert query_dim in [2, 4], "query_dim should be 2/4 but {}".format(query_dim)
self.num_feature_levels = num_feature_levels
self.ref_point_head = MLP(query_dim // 2 * d_model, d_model, d_model, 2)
self.query_pos_sine_scale = None
self.query_scale = None
self.bbox_embed = None
self.class_embed = None
self.d_model = d_model
self.ref_anchor_head = None
def forward(
self,
tgt,
memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
refpoints_unsigmoid: Optional[Tensor] = None, # num_queries, bs, 2
# for memory
level_start_index: Optional[Tensor] = None, # num_levels
spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2
valid_ratios: Optional[Tensor] = None,
# for text
memory_text: Optional[Tensor] = None,
text_attention_mask: Optional[Tensor] = None,
):
"""
Input:
- tgt: nq, bs, d_model
- memory: hw, bs, d_model
- pos: hw, bs, d_model
- refpoints_unsigmoid: nq, bs, 2/4
- valid_ratios/spatial_shapes: bs, nlevel, 2
"""
output = tgt
intermediate = []
reference_points = refpoints_unsigmoid.sigmoid()
ref_points = [reference_points]
for layer_id, layer in enumerate(self.layers):
if reference_points.shape[-1] == 4:
reference_points_input = (
reference_points[:, :, None]
* torch.cat([valid_ratios, valid_ratios], -1)[None, :]
) # nq, bs, nlevel, 4
else:
assert reference_points.shape[-1] == 2
reference_points_input = reference_points[:, :, None] * valid_ratios[None, :]
query_sine_embed = gen_sineembed_for_position(
reference_points_input[:, :, 0, :]
) # nq, bs, 256*2
# conditional query
raw_query_pos = self.ref_point_head(query_sine_embed) # nq, bs, 256
pos_scale = self.query_scale(output) if self.query_scale is not None else 1
query_pos = pos_scale * raw_query_pos
# if os.environ.get("SHILONG_AMP_INFNAN_DEBUG") == '1':
# if query_pos.isnan().any() | query_pos.isinf().any():
# import ipdb; ipdb.set_trace()
# main process
output = layer(
tgt=output,
tgt_query_pos=query_pos,
tgt_query_sine_embed=query_sine_embed,
tgt_key_padding_mask=tgt_key_padding_mask,
tgt_reference_points=reference_points_input,
memory_text=memory_text,
text_attention_mask=text_attention_mask,
memory=memory,
memory_key_padding_mask=memory_key_padding_mask,
memory_level_start_index=level_start_index,
memory_spatial_shapes=spatial_shapes,
memory_pos=pos,
self_attn_mask=tgt_mask,
cross_attn_mask=memory_mask,
)
if output.isnan().any() | output.isinf().any():
print(f"output layer_id {layer_id} is nan")
try:
num_nan = output.isnan().sum().item()
num_inf = output.isinf().sum().item()
print(f"num_nan {num_nan}, num_inf {num_inf}")
except Exception as e:
print(e)
# if os.environ.get("SHILONG_AMP_INFNAN_DEBUG") == '1':
# import ipdb; ipdb.set_trace()
# iter update
if self.bbox_embed is not None:
# box_holder = self.bbox_embed(output)
# box_holder[..., :self.query_dim] += inverse_sigmoid(reference_points)
# new_reference_points = box_holder[..., :self.query_dim].sigmoid()
reference_before_sigmoid = inverse_sigmoid(reference_points)
delta_unsig = self.bbox_embed[layer_id](output)
outputs_unsig = delta_unsig + reference_before_sigmoid
new_reference_points = outputs_unsig.sigmoid()
reference_points = new_reference_points.detach()
# if layer_id != self.num_layers - 1:
ref_points.append(new_reference_points)
intermediate.append(self.norm(output))
return [
[itm_out.transpose(0, 1) for itm_out in intermediate],
[itm_refpoint.transpose(0, 1) for itm_refpoint in ref_points],
]
class DeformableTransformerEncoderLayer(nn.Module):
def __init__(
self,
d_model=256,
d_ffn=1024,
dropout=0.1,
activation="relu",
n_levels=4,
n_heads=8,
n_points=4,
):
super().__init__()
# self attention
self.self_attn = MSDeformAttn(
embed_dim=d_model,
num_levels=n_levels,
num_heads=n_heads,
num_points=n_points,
batch_first=True,
)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation, d_model=d_ffn)
self.dropout2 = nn.Dropout(dropout)
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout3 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward_ffn(self, src):
src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))
src = src + self.dropout3(src2)
src = self.norm2(src)
return src
def forward(
self, src, pos, reference_points, spatial_shapes, level_start_index, key_padding_mask=None
):
# self attention
# import ipdb; ipdb.set_trace()
src2 = self.self_attn(
query=self.with_pos_embed(src, pos),
reference_points=reference_points,
value=src,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
key_padding_mask=key_padding_mask,
)
src = src + self.dropout1(src2)
src = self.norm1(src)
# ffn
src = self.forward_ffn(src)
return src
class DeformableTransformerDecoderLayer(nn.Module):
def __init__(
self,
d_model=256,
d_ffn=1024,
dropout=0.1,
activation="relu",
n_levels=4,
n_heads=8,
n_points=4,
use_text_feat_guide=False,
use_text_cross_attention=False,
):
super().__init__()
# cross attention
self.cross_attn = MSDeformAttn(
embed_dim=d_model,
num_levels=n_levels,
num_heads=n_heads,
num_points=n_points,
batch_first=True,
)
self.dropout1 = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
self.norm1 = nn.LayerNorm(d_model)
# cross attention text
if use_text_cross_attention:
self.ca_text = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.catext_dropout = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
self.catext_norm = nn.LayerNorm(d_model)
# self attention
self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
self.dropout2 = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
self.norm2 = nn.LayerNorm(d_model)
# ffn
self.linear1 = nn.Linear(d_model, d_ffn)
self.activation = _get_activation_fn(activation, d_model=d_ffn, batch_dim=1)
self.dropout3 = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
self.linear2 = nn.Linear(d_ffn, d_model)
self.dropout4 = nn.Dropout(dropout) if dropout > 0 else nn.Identity()
self.norm3 = nn.LayerNorm(d_model)
self.key_aware_proj = None
self.use_text_feat_guide = use_text_feat_guide
assert not use_text_feat_guide
self.use_text_cross_attention = use_text_cross_attention
def rm_self_attn_modules(self):
self.self_attn = None
self.dropout2 = None
self.norm2 = None
@staticmethod
def with_pos_embed(tensor, pos):
return tensor if pos is None else tensor + pos
def forward_ffn(self, tgt):
with torch.cuda.amp.autocast(enabled=False):
tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout4(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward(
self,
# for tgt
tgt: Optional[Tensor], # nq, bs, d_model
tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))
tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. Sine(pos)
tgt_key_padding_mask: Optional[Tensor] = None,
tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4
memory_text: Optional[Tensor] = None, # bs, num_token, d_model
text_attention_mask: Optional[Tensor] = None, # bs, num_token
# for memory
memory: Optional[Tensor] = None, # hw, bs, d_model
memory_key_padding_mask: Optional[Tensor] = None,
memory_level_start_index: Optional[Tensor] = None, # num_levels
memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2
memory_pos: Optional[Tensor] = None, # pos for memory
# sa
self_attn_mask: Optional[Tensor] = None, # mask used for self-attention
cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention
):
"""
Input:
- tgt/tgt_query_pos: nq, bs, d_model
-
"""
assert cross_attn_mask is None
# self attention
if self.self_attn is not None:
# import ipdb; ipdb.set_trace()
q = k = self.with_pos_embed(tgt, tgt_query_pos)
tgt2 = self.self_attn(q, k, tgt, attn_mask=self_attn_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
if self.use_text_cross_attention:
tgt2 = self.ca_text(
self.with_pos_embed(tgt, tgt_query_pos),
memory_text.transpose(0, 1),
memory_text.transpose(0, 1),
key_padding_mask=text_attention_mask,
)[0]
tgt = tgt + self.catext_dropout(tgt2)
tgt = self.catext_norm(tgt)
tgt2 = self.cross_attn(
query=self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1),
reference_points=tgt_reference_points.transpose(0, 1).contiguous(),
value=memory.transpose(0, 1),
spatial_shapes=memory_spatial_shapes,
level_start_index=memory_level_start_index,
key_padding_mask=memory_key_padding_mask,
).transpose(0, 1)
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
# ffn
tgt = self.forward_ffn(tgt)
return tgt
def build_transformer(args):
return Transformer(
d_model=args.hidden_dim,
dropout=args.dropout,
nhead=args.nheads,
num_queries=args.num_queries,
dim_feedforward=args.dim_feedforward,
num_encoder_layers=args.enc_layers,
num_decoder_layers=args.dec_layers,
normalize_before=args.pre_norm,
return_intermediate_dec=True,
query_dim=args.query_dim,
activation=args.transformer_activation,
num_patterns=args.num_patterns,
num_feature_levels=args.num_feature_levels,
enc_n_points=args.enc_n_points,
dec_n_points=args.dec_n_points,
learnable_tgt_init=True,
# two stage
two_stage_type=args.two_stage_type, # ['no', 'standard', 'early']
embed_init_tgt=args.embed_init_tgt,
use_text_enhancer=args.use_text_enhancer,
use_fusion_layer=args.use_fusion_layer,
use_checkpoint=args.use_checkpoint,
use_transformer_ckpt=args.use_transformer_ckpt,
use_text_cross_attention=args.use_text_cross_attention,
text_dropout=args.text_dropout,
fusion_dropout=args.fusion_dropout,
fusion_droppath=args.fusion_droppath,
)
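

if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: the attribute names
    # below mirror those read by build_transformer above, and the values are
    # illustrative assumptions (roughly the usual GroundingDINO defaults), not
    # an official configuration.
    from types import SimpleNamespace

    args = SimpleNamespace(
        hidden_dim=256,
        dropout=0.0,
        nheads=8,
        num_queries=900,
        dim_feedforward=2048,
        enc_layers=6,
        dec_layers=6,
        pre_norm=False,
        query_dim=4,
        transformer_activation="relu",
        num_patterns=0,
        num_feature_levels=4,
        enc_n_points=4,
        dec_n_points=4,
        two_stage_type="standard",
        embed_init_tgt=True,
        use_text_enhancer=True,
        use_fusion_layer=True,
        use_checkpoint=False,
        use_transformer_ckpt=False,
        use_text_cross_attention=True,
        text_dropout=0.0,
        fusion_dropout=0.0,
        fusion_droppath=0.1,
    )
    transformer = build_transformer(args)
    print(type(transformer).__name__)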
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/models/GroundingDINO/transformer.py |
# ------------------------------------------------------------------------
# Grounding DINO
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Conditional DETR
# Copyright (c) 2021 Microsoft. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Copied from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# ------------------------------------------------------------------------
"""
Backbone modules.
"""
from typing import Dict, List
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from groundingdino.util.misc import NestedTensor, is_main_process
from .position_encoding import build_position_encoding
from .swin_transformer import build_swin_transformer
class FrozenBatchNorm2d(torch.nn.Module):
"""
BatchNorm2d where the batch statistics and the affine parameters are fixed.
    Copy-paste from torchvision.misc.ops with added eps before rsqrt,
    without which any models other than torchvision.models.resnet[18,34,50,101]
    produce nans.
"""
def __init__(self, n):
super(FrozenBatchNorm2d, self).__init__()
self.register_buffer("weight", torch.ones(n))
self.register_buffer("bias", torch.zeros(n))
self.register_buffer("running_mean", torch.zeros(n))
self.register_buffer("running_var", torch.ones(n))
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
num_batches_tracked_key = prefix + "num_batches_tracked"
if num_batches_tracked_key in state_dict:
del state_dict[num_batches_tracked_key]
super(FrozenBatchNorm2d, self)._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def forward(self, x):
# move reshapes to the beginning
# to make it fuser-friendly
w = self.weight.reshape(1, -1, 1, 1)
b = self.bias.reshape(1, -1, 1, 1)
rv = self.running_var.reshape(1, -1, 1, 1)
rm = self.running_mean.reshape(1, -1, 1, 1)
eps = 1e-5
scale = w * (rv + eps).rsqrt()
bias = b - rm * scale
return x * scale + bias
class BackboneBase(nn.Module):
def __init__(
self,
backbone: nn.Module,
train_backbone: bool,
num_channels: int,
return_interm_indices: list,
):
super().__init__()
for name, parameter in backbone.named_parameters():
if (
not train_backbone
or "layer2" not in name
and "layer3" not in name
and "layer4" not in name
):
parameter.requires_grad_(False)
return_layers = {}
for idx, layer_index in enumerate(return_interm_indices):
return_layers.update(
{"layer{}".format(5 - len(return_interm_indices) + idx): "{}".format(layer_index)}
)
# if len:
# if use_stage1_feature:
# return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
# else:
# return_layers = {"layer2": "0", "layer3": "1", "layer4": "2"}
# else:
# return_layers = {'layer4': "0"}
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.num_channels = num_channels
def forward(self, tensor_list: NestedTensor):
xs = self.body(tensor_list.tensors)
out: Dict[str, NestedTensor] = {}
for name, x in xs.items():
m = tensor_list.mask
assert m is not None
mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]
out[name] = NestedTensor(x, mask)
# import ipdb; ipdb.set_trace()
return out
class Backbone(BackboneBase):
"""ResNet backbone with frozen BatchNorm."""
def __init__(
self,
name: str,
train_backbone: bool,
dilation: bool,
return_interm_indices: list,
batch_norm=FrozenBatchNorm2d,
):
if name in ["resnet18", "resnet34", "resnet50", "resnet101"]:
backbone = getattr(torchvision.models, name)(
replace_stride_with_dilation=[False, False, dilation],
pretrained=is_main_process(),
norm_layer=batch_norm,
)
else:
raise NotImplementedError("Why you can get here with name {}".format(name))
# num_channels = 512 if name in ('resnet18', 'resnet34') else 2048
assert name not in ("resnet18", "resnet34"), "Only resnet50 and resnet101 are available."
assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
num_channels_all = [256, 512, 1024, 2048]
num_channels = num_channels_all[4 - len(return_interm_indices) :]
super().__init__(backbone, train_backbone, num_channels, return_interm_indices)
class Joiner(nn.Sequential):
def __init__(self, backbone, position_embedding):
super().__init__(backbone, position_embedding)
def forward(self, tensor_list: NestedTensor):
xs = self[0](tensor_list)
out: List[NestedTensor] = []
pos = []
for name, x in xs.items():
out.append(x)
# position encoding
pos.append(self[1](x).to(x.tensors.dtype))
return out, pos
def build_backbone(args):
"""
Useful args:
- backbone: backbone name
- lr_backbone:
- dilation
- return_interm_indices: available: [0,1,2,3], [1,2,3], [3]
- backbone_freeze_keywords:
- use_checkpoint: for swin only for now
"""
position_embedding = build_position_encoding(args)
train_backbone = True
if not train_backbone:
raise ValueError("Please set lr_backbone > 0")
return_interm_indices = args.return_interm_indices
assert return_interm_indices in [[0, 1, 2, 3], [1, 2, 3], [3]]
args.backbone_freeze_keywords
use_checkpoint = getattr(args, "use_checkpoint", False)
if args.backbone in ["resnet50", "resnet101"]:
backbone = Backbone(
args.backbone,
train_backbone,
args.dilation,
return_interm_indices,
batch_norm=FrozenBatchNorm2d,
)
bb_num_channels = backbone.num_channels
elif args.backbone in [
"swin_T_224_1k",
"swin_B_224_22k",
"swin_B_384_22k",
"swin_L_224_22k",
"swin_L_384_22k",
]:
pretrain_img_size = int(args.backbone.split("_")[-2])
backbone = build_swin_transformer(
args.backbone,
pretrain_img_size=pretrain_img_size,
out_indices=tuple(return_interm_indices),
dilation=False,
use_checkpoint=use_checkpoint,
)
bb_num_channels = backbone.num_features[4 - len(return_interm_indices) :]
else:
raise NotImplementedError("Unknown backbone {}".format(args.backbone))
assert len(bb_num_channels) == len(
return_interm_indices
), f"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}"
model = Joiner(backbone, position_embedding)
model.num_channels = bb_num_channels
assert isinstance(
bb_num_channels, List
), "bb_num_channels is expected to be a List but {}".format(type(bb_num_channels))
# import ipdb; ipdb.set_trace()
return model
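

if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: the attribute names
    # mirror those read by build_backbone/build_position_encoding above; the
    # concrete values are illustrative assumptions rather than an official config.
    from types import SimpleNamespace

    args = SimpleNamespace(
        backbone="swin_T_224_1k",
        position_embedding="sine",
        hidden_dim=256,
        pe_temperatureH=20,
        pe_temperatureW=20,
        return_interm_indices=[1, 2, 3],
        backbone_freeze_keywords=None,
        use_checkpoint=False,
        dilation=False,
    )
    model = build_backbone(args)
    print(model.num_channels)  # expected: [192, 384, 768] for swin_T with three output stages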
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/models/GroundingDINO/backbone/backbone.py |
# ------------------------------------------------------------------------
# Grounding DINO
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# DINO
# Copyright (c) 2022 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Conditional DETR
# Copyright (c) 2021 Microsoft. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# Copied from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# ------------------------------------------------------------------------
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
from groundingdino.util.misc import NestedTensor
class PositionEmbeddingSine(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, tensor_list: NestedTensor):
x = tensor_list.tensors
mask = tensor_list.mask
assert mask is not None
not_mask = ~mask
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
eps = 1e-6
# if os.environ.get("SHILONG_AMP", None) == '1':
# eps = 1e-4
# else:
# eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
pos_x = torch.stack(
(pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos_y = torch.stack(
(pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
class PositionEmbeddingSineHW(nn.Module):
"""
This is a more standard version of the position embedding, very similar to the one
used by the Attention is all you need paper, generalized to work on images.
"""
def __init__(
self, num_pos_feats=64, temperatureH=10000, temperatureW=10000, normalize=False, scale=None
):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperatureH = temperatureH
self.temperatureW = temperatureW
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, tensor_list: NestedTensor):
x = tensor_list.tensors
mask = tensor_list.mask
assert mask is not None
not_mask = ~mask
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
# import ipdb; ipdb.set_trace()
if self.normalize:
eps = 1e-6
y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale
dim_tx = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_tx = self.temperatureW ** (2 * (torch.div(dim_tx, 2, rounding_mode='floor')) / self.num_pos_feats)
pos_x = x_embed[:, :, :, None] / dim_tx
dim_ty = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_ty = self.temperatureH ** (2 * (torch.div(dim_ty, 2, rounding_mode='floor')) / self.num_pos_feats)
pos_y = y_embed[:, :, :, None] / dim_ty
pos_x = torch.stack(
(pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos_y = torch.stack(
(pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
).flatten(3)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
# import ipdb; ipdb.set_trace()
return pos
class PositionEmbeddingLearned(nn.Module):
"""
Absolute pos embedding, learned.
"""
def __init__(self, num_pos_feats=256):
super().__init__()
self.row_embed = nn.Embedding(50, num_pos_feats)
self.col_embed = nn.Embedding(50, num_pos_feats)
self.reset_parameters()
def reset_parameters(self):
nn.init.uniform_(self.row_embed.weight)
nn.init.uniform_(self.col_embed.weight)
def forward(self, tensor_list: NestedTensor):
x = tensor_list.tensors
h, w = x.shape[-2:]
i = torch.arange(w, device=x.device)
j = torch.arange(h, device=x.device)
x_emb = self.col_embed(i)
y_emb = self.row_embed(j)
pos = (
torch.cat(
[
x_emb.unsqueeze(0).repeat(h, 1, 1),
y_emb.unsqueeze(1).repeat(1, w, 1),
],
dim=-1,
)
.permute(2, 0, 1)
.unsqueeze(0)
.repeat(x.shape[0], 1, 1, 1)
)
return pos
def build_position_encoding(args):
N_steps = args.hidden_dim // 2
if args.position_embedding in ("v2", "sine"):
# TODO find a better way of exposing other arguments
position_embedding = PositionEmbeddingSineHW(
N_steps,
temperatureH=args.pe_temperatureH,
temperatureW=args.pe_temperatureW,
normalize=True,
)
elif args.position_embedding in ("v3", "learned"):
position_embedding = PositionEmbeddingLearned(N_steps)
else:
raise ValueError(f"not supported {args.position_embedding}")
return position_embedding
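

if __name__ == "__main__":
    # Hedged smoke test, not part of the original module: builds the sine HW
    # embedding directly and runs it on a toy NestedTensor (using the
    # NestedTensor(tensors, mask) constructor as seen elsewhere in this package).
    pe = PositionEmbeddingSineHW(
        num_pos_feats=128, temperatureH=20, temperatureW=20, normalize=True
    )
    images = torch.rand(2, 3, 64, 80)
    padding_mask = torch.zeros(2, 64, 80, dtype=torch.bool)  # no padded pixels
    pos = pe(NestedTensor(images, padding_mask))
    print(pos.shape)  # expected: torch.Size([2, 256, 64, 80])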
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/models/GroundingDINO/backbone/position_encoding.py |
swarms-master | swarms/workers/models/GroundingDINO/groundingdino/models/GroundingDINO/backbone/__init__.py |
|
# ------------------------------------------------------------------------
# Grounding DINO
# url: https://github.com/IDEA-Research/GroundingDINO
# Copyright (c) 2023 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# ------------------------------------------------------------------------
# DINO
# Copyright (c) 2022 IDEA. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 [see LICENSE for details]
# --------------------------------------------------------
# modified from https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/master/mmdet/models/backbones/swin_transformer.py
# --------------------------------------------------------
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from groundingdino.util.misc import NestedTensor
class Mlp(nn.Module):
"""Multilayer perceptron."""
def __init__(
self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
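# Hedged round-trip example (comment only, illustrative): when H and W are
# multiples of window_size, window_reverse inverts window_partition exactly.
#
#   x = torch.rand(2, 14, 14, 96)
#   windows = window_partition(x, 7)                   # shape (8, 7, 7, 96)
#   assert torch.equal(window_reverse(windows, 7, 14, 14), x)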
class WindowAttention(nn.Module):
"""Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(
self,
dim,
window_size,
num_heads,
qkv_bias=True,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim**-0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=0.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""Forward function.
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B_, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = q @ k.transpose(-2, -1)
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)
].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(
2, 0, 1
).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class SwinTransformerBlock(nn.Module):
"""Swin Transformer Block.
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(
self,
dim,
num_heads,
window_size=7,
shift_size=0,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.dim = dim
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim,
window_size=to_2tuple(self.window_size),
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(
in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop
)
self.H = None
self.W = None
def forward(self, x, mask_matrix):
"""Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
mask_matrix: Attention mask for cyclic shift.
"""
B, L, C = x.shape
H, W = self.H, self.W
assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# pad feature maps to multiples of window size
pad_l = pad_t = 0
pad_r = (self.window_size - W % self.window_size) % self.window_size
pad_b = (self.window_size - H % self.window_size) % self.window_size
x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
_, Hp, Wp, _ = x.shape
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
attn_mask = mask_matrix
else:
shifted_x = x
attn_mask = None
# partition windows
x_windows = window_partition(
shifted_x, self.window_size
) # nW*B, window_size, window_size, C
x_windows = x_windows.view(
-1, self.window_size * self.window_size, C
) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
if pad_r > 0 or pad_b > 0:
x = x[:, :H, :W, :].contiguous()
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x
class PatchMerging(nn.Module):
"""Patch Merging Layer
Args:
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x, H, W):
"""Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
"""
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
x = x.view(B, H, W, C)
# padding
pad_input = (H % 2 == 1) or (W % 2 == 1)
if pad_input:
x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
class BasicLayer(nn.Module):
"""A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of feature channels
depth (int): Depths of this stage.
num_heads (int): Number of attention head.
window_size (int): Local window size. Default: 7.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(
self,
dim,
depth,
num_heads,
window_size=7,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
norm_layer=nn.LayerNorm,
downsample=None,
use_checkpoint=False,
):
super().__init__()
self.window_size = window_size
self.shift_size = window_size // 2
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList(
[
SwinTransformerBlock(
dim=dim,
num_heads=num_heads,
window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer,
)
for i in range(depth)
]
)
# patch merging layer
if downsample is not None:
self.downsample = downsample(dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x, H, W):
"""Forward function.
Args:
x: Input feature, tensor size (B, H*W, C).
H, W: Spatial resolution of the input feature.
"""
# calculate attention mask for SW-MSA
Hp = int(np.ceil(H / self.window_size)) * self.window_size
Wp = int(np.ceil(W / self.window_size)) * self.window_size
img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1
h_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
w_slices = (
slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None),
)
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(
img_mask, self.window_size
) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(
attn_mask == 0, float(0.0)
)
for blk in self.blocks:
blk.H, blk.W = H, W
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x, attn_mask)
else:
x = blk(x, attn_mask)
if self.downsample is not None:
x_down = self.downsample(x, H, W)
Wh, Ww = (H + 1) // 2, (W + 1) // 2
return x, H, W, x_down, Wh, Ww
else:
return x, H, W, x, H, W
class PatchEmbed(nn.Module):
"""Image to Patch Embedding
Args:
patch_size (int): Patch token size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
norm_layer (nn.Module, optional): Normalization layer. Default: None
"""
def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
super().__init__()
patch_size = to_2tuple(patch_size)
self.patch_size = patch_size
self.in_chans = in_chans
self.embed_dim = embed_dim
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
if norm_layer is not None:
self.norm = norm_layer(embed_dim)
else:
self.norm = None
def forward(self, x):
"""Forward function."""
# padding
_, _, H, W = x.size()
if W % self.patch_size[1] != 0:
x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
if H % self.patch_size[0] != 0:
x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))
x = self.proj(x) # B C Wh Ww
if self.norm is not None:
Wh, Ww = x.size(2), x.size(3)
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)
return x
class SwinTransformer(nn.Module):
"""Swin Transformer backbone.
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
pretrain_img_size (int): Input image size for training the pretrained model,
            used in absolute position embedding. Default 224.
patch_size (int | tuple(int)): Patch size. Default: 4.
in_chans (int): Number of input image channels. Default: 3.
embed_dim (int): Number of linear projection output channels. Default: 96.
depths (tuple[int]): Depths of each Swin Transformer stage.
num_heads (tuple[int]): Number of attention head of each stage.
window_size (int): Window size. Default: 7.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
drop_rate (float): Dropout rate.
attn_drop_rate (float): Attention dropout rate. Default: 0.
drop_path_rate (float): Stochastic depth rate. Default: 0.2.
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
patch_norm (bool): If True, add normalization after patch embedding. Default: True.
out_indices (Sequence[int]): Output from which stages.
frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-1 means not freezing any parameters.
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
        dilation (bool): if True, the output size is 16x downsample, otherwise 32x downsample.
"""
def __init__(
self,
pretrain_img_size=224,
patch_size=4,
in_chans=3,
embed_dim=96,
depths=[2, 2, 6, 2],
num_heads=[3, 6, 12, 24],
window_size=7,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.2,
norm_layer=nn.LayerNorm,
ape=False,
patch_norm=True,
out_indices=(0, 1, 2, 3),
frozen_stages=-1,
dilation=False,
use_checkpoint=False,
):
super().__init__()
self.pretrain_img_size = pretrain_img_size
self.num_layers = len(depths)
self.embed_dim = embed_dim
self.ape = ape
self.patch_norm = patch_norm
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.dilation = dilation
# if use_checkpoint:
# print("use_checkpoint!!!!!!!!!!!!!!!!!!!!!!!!")
# split image into non-overlapping patches
self.patch_embed = PatchEmbed(
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
norm_layer=norm_layer if self.patch_norm else None,
)
# absolute position embedding
if self.ape:
pretrain_img_size = to_2tuple(pretrain_img_size)
patch_size = to_2tuple(patch_size)
patches_resolution = [
pretrain_img_size[0] // patch_size[0],
pretrain_img_size[1] // patch_size[1],
]
self.absolute_pos_embed = nn.Parameter(
torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1])
)
trunc_normal_(self.absolute_pos_embed, std=0.02)
self.pos_drop = nn.Dropout(p=drop_rate)
# stochastic depth
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))
] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
# prepare downsample list
downsamplelist = [PatchMerging for i in range(self.num_layers)]
downsamplelist[-1] = None
num_features = [int(embed_dim * 2**i) for i in range(self.num_layers)]
if self.dilation:
downsamplelist[-2] = None
num_features[-1] = int(embed_dim * 2 ** (self.num_layers - 1)) // 2
for i_layer in range(self.num_layers):
layer = BasicLayer(
# dim=int(embed_dim * 2 ** i_layer),
dim=num_features[i_layer],
depth=depths[i_layer],
num_heads=num_heads[i_layer],
window_size=window_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
norm_layer=norm_layer,
# downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
downsample=downsamplelist[i_layer],
use_checkpoint=use_checkpoint,
)
self.layers.append(layer)
# num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
self.num_features = num_features
# add a norm layer for each output
for i_layer in out_indices:
layer = norm_layer(num_features[i_layer])
layer_name = f"norm{i_layer}"
self.add_module(layer_name, layer)
self._freeze_stages()
def _freeze_stages(self):
if self.frozen_stages >= 0:
self.patch_embed.eval()
for param in self.patch_embed.parameters():
param.requires_grad = False
if self.frozen_stages >= 1 and self.ape:
self.absolute_pos_embed.requires_grad = False
if self.frozen_stages >= 2:
self.pos_drop.eval()
for i in range(0, self.frozen_stages - 1):
m = self.layers[i]
m.eval()
for param in m.parameters():
param.requires_grad = False
# def init_weights(self, pretrained=None):
# """Initialize the weights in backbone.
# Args:
# pretrained (str, optional): Path to pre-trained weights.
# Defaults to None.
# """
# def _init_weights(m):
# if isinstance(m, nn.Linear):
# trunc_normal_(m.weight, std=.02)
# if isinstance(m, nn.Linear) and m.bias is not None:
# nn.init.constant_(m.bias, 0)
# elif isinstance(m, nn.LayerNorm):
# nn.init.constant_(m.bias, 0)
# nn.init.constant_(m.weight, 1.0)
# if isinstance(pretrained, str):
# self.apply(_init_weights)
# logger = get_root_logger()
# load_checkpoint(self, pretrained, strict=False, logger=logger)
# elif pretrained is None:
# self.apply(_init_weights)
# else:
# raise TypeError('pretrained must be a str or None')
def forward_raw(self, x):
"""Forward function."""
x = self.patch_embed(x)
Wh, Ww = x.size(2), x.size(3)
if self.ape:
# interpolate the position embedding to the corresponding size
absolute_pos_embed = F.interpolate(
self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic"
)
x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C
else:
x = x.flatten(2).transpose(1, 2)
x = self.pos_drop(x)
outs = []
for i in range(self.num_layers):
layer = self.layers[i]
x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
# import ipdb; ipdb.set_trace()
if i in self.out_indices:
norm_layer = getattr(self, f"norm{i}")
x_out = norm_layer(x_out)
out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
outs.append(out)
# in:
# torch.Size([2, 3, 1024, 1024])
# outs:
# [torch.Size([2, 192, 256, 256]), torch.Size([2, 384, 128, 128]), \
# torch.Size([2, 768, 64, 64]), torch.Size([2, 1536, 32, 32])]
return tuple(outs)
def forward(self, tensor_list: NestedTensor):
x = tensor_list.tensors
"""Forward function."""
x = self.patch_embed(x)
Wh, Ww = x.size(2), x.size(3)
if self.ape:
# interpolate the position embedding to the corresponding size
absolute_pos_embed = F.interpolate(
self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic"
)
x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C
else:
x = x.flatten(2).transpose(1, 2)
x = self.pos_drop(x)
outs = []
for i in range(self.num_layers):
layer = self.layers[i]
x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
if i in self.out_indices:
norm_layer = getattr(self, f"norm{i}")
x_out = norm_layer(x_out)
out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
outs.append(out)
# in:
# torch.Size([2, 3, 1024, 1024])
# out:
# [torch.Size([2, 192, 256, 256]), torch.Size([2, 384, 128, 128]), \
# torch.Size([2, 768, 64, 64]), torch.Size([2, 1536, 32, 32])]
# collect for nesttensors
outs_dict = {}
for idx, out_i in enumerate(outs):
m = tensor_list.mask
assert m is not None
mask = F.interpolate(m[None].float(), size=out_i.shape[-2:]).to(torch.bool)[0]
outs_dict[idx] = NestedTensor(out_i, mask)
return outs_dict
def train(self, mode=True):
"""Convert the model into training mode while keep layers freezed."""
super(SwinTransformer, self).train(mode)
self._freeze_stages()
def build_swin_transformer(modelname, pretrain_img_size, **kw):
assert modelname in [
"swin_T_224_1k",
"swin_B_224_22k",
"swin_B_384_22k",
"swin_L_224_22k",
"swin_L_384_22k",
]
model_para_dict = {
"swin_T_224_1k": dict(
embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7
),
"swin_B_224_22k": dict(
embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=7
),
"swin_B_384_22k": dict(
embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12
),
"swin_L_224_22k": dict(
embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=7
),
"swin_L_384_22k": dict(
embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=12
),
}
kw_cgf = model_para_dict[modelname]
kw_cgf.update(kw)
model = SwinTransformer(pretrain_img_size=pretrain_img_size, **kw_cgf)
return model
if __name__ == "__main__":
model = build_swin_transformer("swin_L_384_22k", 384, dilation=True)
x = torch.rand(2, 3, 1024, 1024)
y = model.forward_raw(x)
import ipdb
ipdb.set_trace()
x = torch.rand(2, 3, 384, 384)
y = model.forward_raw(x)
| swarms-master | swarms/workers/models/GroundingDINO/groundingdino/models/GroundingDINO/backbone/swin_transformer.py |
swarms-master | swarms/swarms/__init__.py |
|
import logging
import queue
import threading
from abc import ABC, abstractmethod
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict, List
import chromadb
from chromadb.utils import embedding_functions
class Orchestrator(ABC):
"""
The Orchestrator takes in an agent, worker, or boss as input
then handles all the logic for
- task creation,
- task assignment,
    - and task completion.
    It also provides the communication layer through which millions of agents
    communicate with each other, via a vector database that every agent can access.
Each LLM agent communicates with the orchestrator through a dedicated
communication layer. The orchestrator assigns tasks to each LLM agent,
which the agents then complete and return.
This setup allows for a high degree of flexibility, scalability, and robustness.
In the context of swarm LLMs, one could consider an **Omni-Vector Embedding Database
for communication. This database could store and manage
the high-dimensional vectors produced by each LLM agent.
Strengths: This approach would allow for similarity-based lookup and matching of
LLM-generated vectors, which can be particularly useful for tasks that involve finding similar outputs or recognizing patterns.
Weaknesses: An Omni-Vector Embedding Database might add complexity to the system in terms of setup and maintenance.
It might also require significant computational resources,
depending on the volume of data being handled and the complexity of the vectors.
The handling and transmission of high-dimensional vectors could also pose challenges
in terms of network load.
# Orchestrator
* Takes in an agent class with vector store,
then handles all the communication and scales
up a swarm with number of agents and handles task assignment and task completion
    ```
    from swarms import OpenAI, Orchestrator, Swarm

    # handles task assignment, allocation, and agent communication through a
    # vectorstore used as a universal communication layer, plus the task completion logic
    orchestrator = Orchestrator(OpenAI, nodes=40)

    objective = "Make a business website for a marketing consultancy"
    swarm = Swarm(orchestrator, auto=True, objective=objective)
    ```
In terms of architecture, the swarm might look something like this:
```
(Orchestrator)
/ \
Tools + Vector DB -- (LLM Agent)---(Communication Layer) (Communication Layer)---(LLM Agent)-- Tools + Vector DB
/ | | \
(Task Assignment) (Task Completion) (Task Assignment) (Task Completion)
"""
def __init__(
self,
agent,
agent_list: List[Any],
task_queue: List[Any],
collection_name: str = "swarm",
api_key: str = None,
model_name: str = None
):
        self.agent = agent
        self.api_key = api_key
        self.model_name = model_name

        self.agents = queue.Queue()
        for _ in range(len(agent_list)):
            self.agents.put(agent())

        self.task_queue = queue.Queue()
self.chroma_client = chromadb.Client()
self.collection = self.chroma_client.create_collection(
name = collection_name
)
self.current_tasks = {}
self.lock = threading.Lock()
self.condition = threading.Condition(self.lock)
self.executor = ThreadPoolExecutor(max_workers=len(agent_list))
@abstractmethod
def assign_task(
self,
agent_id: int,
task: Dict[str, Any]
) -> None:
"""Assign a task to a specific agent"""
while True:
with self.condition:
                while self.task_queue.empty():
self.condition.wait()
agent = self.agents.get()
task = self.task_queue.get()
try:
                result = agent.run(task["content"])
                # use the embed method to get the vector representation of the result
vector_representation = self.embed(
result,
self.api_key,
self.model_name
)
self.collection.add(
embeddings=[vector_representation],
documents=[str(id(task))],
ids=[str(id(task))]
)
logging.info(f"Task {id(str)} has been processed by agent {id(agent)} with")
except Exception as error:
logging.error(f"Failed to process task {id(task)} by agent {id(agent)}. Error: {error}")
finally:
with self.condition:
self.agents.put(agent)
self.condition.notify()
def embed(self, input, api_key, model_name):
openai = embedding_functions.OpenAIEmbeddingFunction(
api_key=api_key,
model_name=model_name
)
embedding = openai(input)
return embedding
@abstractmethod
def retrieve_results(self, agent_id: int) -> Any:
"""Retrieve results from a specific agent"""
try:
#Query the vector database for documents created by the agents
results = self.collection.query(
query_texts=[str(agent_id)],
n_results=10
)
return results
except Exception as e:
logging.error(f"Failed to retrieve results from agent {agent_id}. Error {e}")
raise
@abstractmethod
def update_vector_db(self, data) -> None:
"""Update the vector database"""
try:
self.collection.add(
embeddings=[data["vector"]],
documents=[str(data["task_id"])],
ids=[str(data["task_id"])]
)
except Exception as e:
logging.error(f"Failed to update the vector database. Error: {e}")
raise
@abstractmethod
def get_vector_db(self):
"""Retrieve the vector database"""
return self.collection
def append_to_db(self, result: str):
"""append the result of the swarm to a specifici collection in the database"""
try:
self.collection.add(
documents=[result],
ids=[str(id(result))]
)
except Exception as e:
logging.error(f"Failed to append the agent output to database. Error: {e}")
raise
    def run(self, objective: str):
        """Run the swarm on the given objective"""
if not objective or not isinstance(objective, str):
logging.error("Invalid objective")
raise ValueError("A valid objective is required")
try:
            self.task_queue.put(objective)

            tasks = [self.task_queue.get() for _ in range(self.task_queue.qsize())]
            results = [
                self.assign_task(agent_id, task)
                for agent_id, task in enumerate(tasks)
            ]
for result in results:
self.append_to_db(result)
logging.info(f"Successfully ran swarms with results: {results}")
return results
except Exception as e:
logging.error(f"An error occured in swarm: {e}")
return None
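

if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module. Orchestrator is
    # abstract, so a thin concrete subclass that defers to the default method
    # bodies above is defined first. `MyAgent` and the credentials below are
    # placeholders, not things this module provides.
    class ConcreteOrchestrator(Orchestrator):
        def assign_task(self, agent_id, task) -> None:
            return super().assign_task(agent_id, task)

        def retrieve_results(self, agent_id: int):
            return super().retrieve_results(agent_id)

        def update_vector_db(self, data) -> None:
            return super().update_vector_db(data)

        def get_vector_db(self):
            return super().get_vector_db()

    # orchestrator = ConcreteOrchestrator(
    #     agent=MyAgent,                       # hypothetical agent class exposing .run(str)
    #     agent_list=[None] * 4,               # four agent slots
    #     task_queue=[],
    #     api_key="sk-...",                    # placeholder OpenAI key
    #     model_name="text-embedding-ada-002",
    # )
    # orchestrator.run("Summarize the latest swarm results")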
| swarms-master | swarms/swarms/orchestrate.py |
import queue
import threading
from time import sleep
from swarms.utils.decorators import error_decorator, log_decorator, timing_decorator
from swarms.workers.worker import Worker
class AutoScaler:
"""
    The AutoScaler works like a Kubernetes pod autoscaler: it scales an agent, worker, or boss pool up and down!
# TODO Handle task assignment and task delegation
# TODO: User task => decomposed into very small sub tasks => sub tasks assigned to workers => workers complete and update the swarm, can ask for help from other agents.
# TODO: Missing, Task Assignment, Task delegation, Task completion, Swarm level communication with vector db
Example
```
    # example usage
auto_scaler = AutoScaler(agent=YourCustomAgent)
auto_scaler.start()
    for i in range(100):
        auto_scaler.add_task(f"task {i}")
```
"""
@log_decorator
@error_decorator
@timing_decorator
def __init__(
self,
initial_agents=10,
scale_up_factor=1,
idle_threshold=0.2,
busy_threshold=0.7,
agent=None,
):
self.agent = agent or Worker
self.agents_pool = [self.agent() for _ in range(initial_agents)]
self.task_queue = queue.Queue()
        self.scale_up_factor = scale_up_factor
        self.idle_threshold = idle_threshold
        self.busy_threshold = busy_threshold
        self.lock = threading.Lock()
def add_task(self, task):
        self.task_queue.put(task)
@log_decorator
@error_decorator
@timing_decorator
def scale_up(self):
with self.lock:
            new_agents_count = int(len(self.agents_pool) * self.scale_up_factor)
            for _ in range(new_agents_count):
                self.agents_pool.append(self.agent())
def scale_down(self):
with self.lock:
            if len(self.agents_pool) > 10:  # ensure a minimum of 10 agents
                del self.agents_pool[-1]  # remove the last agent
@log_decorator
@error_decorator
@timing_decorator
def monitor_and_scale(self):
while True:
            sleep(60)  # check once a minute
pending_tasks = self.task_queue.qsize()
active_agents = sum([1 for agent in self.agents_pool if agent.is_busy()])
if pending_tasks / len(self.agents_pool) > self.busy_threshold:
self.scale_up()
elif active_agents / len(self.agents_pool) < self.idle_threshold:
self.scale_down()
@log_decorator
@error_decorator
@timing_decorator
def start(self):
monitor_thread = threading.Thread(target=self.monitor_and_scale)
monitor_thread.start()
while True:
task = self.task_queue.get()
if task:
                available_agent = next(
                    (agent for agent in self.agents_pool if not agent.is_busy()),
                    None,
                )
                if available_agent:
                    available_agent.run(task)
def del_agent(self):
with self.lock:
if self.agents_pool:
                agent_to_remove = self.agents_pool.pop()
del agent_to_remove
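

if __name__ == "__main__":
    # Hedged usage sketch mirroring the class docstring, not part of the original
    # module. It assumes the default Worker agent can be constructed with no
    # arguments and exposes is_busy()/run(); start() blocks, so it is left
    # commented out here.
    auto_scaler = AutoScaler(initial_agents=10, scale_up_factor=1)
    for i in range(100):
        auto_scaler.add_task(f"task {i}")
    # auto_scaler.start()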
| swarms-master | swarms/swarms/autoscaler.py |
import asyncio
import logging
from typing import Optional
from langchain import OpenAI
from swarms.boss.boss_node import BossNode
from swarms.workers.worker_node import WorkerNode
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# ---------- Constants ----------
ROOT_DIR = "./data/"
class HierarchicalSwarm:
def __init__(
self,
openai_api_key: Optional[str] = "",
use_vectorstore: Optional[bool] = True,
use_async: Optional[bool] = True,
worker_name: Optional[str] = "Swarm Worker AI Assistant",
verbose: Optional[bool] = False,
human_in_the_loop: Optional[bool] = True,
        boss_prompt: Optional[str] = "You are a Boss in a swarm who performs one task based on the following objective: {objective}. Take into account these previously completed tasks: {context}.\n",
worker_prompt: Optional[str] = None,
temperature: Optional[float] = 0.5,
max_iterations: Optional[int] = None,
logging_enabled: Optional[bool] = True
):
self.openai_api_key = openai_api_key
self.use_vectorstore = use_vectorstore
self.use_async = use_async
self.worker_name = worker_name
self.human_in_the_loop = human_in_the_loop
self.boss_prompt = boss_prompt
self.temperature = temperature
self.max_iterations = max_iterations
self.logging_enabled = logging_enabled
self.verbose = verbose
self.worker_node = WorkerNode(
openai_api_key=self.openai_api_key,
temperature=self.temperature,
human_in_the_loop=self.human_in_the_loop,
verbose=self.verbose
)
self.boss_node = BossNode(
api_key=self.openai_api_key,
worker_node=self.worker_node,
llm_class=OpenAI,
max_iterations=self.max_iterations,
verbose=self.verbose
)
self.logger = logging.getLogger()
if not logging_enabled:
self.logger.disabled = True
def run(self, objective):
try:
self.boss_node.task = self.boss_node.create_task(objective)
logging.info(f"Running task: {self.boss_node.task}")
if self.use_async:
loop = asyncio.get_event_loop()
result = loop.run_until_complete(self.boss_node.run())
else:
result = self.boss_node.run()
logging.info(f"Completed tasks: {self.boss_node.task}")
return result
except Exception as e:
logging.error(f"An error occurred in run: {e}")
return None
def swarm(
api_key: Optional[str]="",
objective: Optional[str]=""
):
if not api_key or not isinstance(api_key, str):
logging.error("Invalid OpenAI key")
raise ValueError("A valid OpenAI API key is required")
if not objective or not isinstance(objective, str):
logging.error("Invalid objective")
raise ValueError("A valid objective is required")
try:
swarms = HierarchicalSwarm(api_key, use_async=False)
result = swarms.run(objective)
if result is None:
logging.error("Failed to run swarms")
else:
logging.info(f"Successfully ran swarms with results: {result}")
return result
except Exception as e:
        logging.error(f"An error occurred in swarm: {e}")
return None
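# Hedged usage sketch (not in the source file); the key and objective strings are placeholders.
# result = swarm(api_key="sk-...", objective="Plan a product launch")
# For more control, the class can be driven directly:
# swarms = HierarchicalSwarm(openai_api_key="sk-...", use_async=False)
# result = swarms.run("Plan a product launch")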
| swarms-master | swarms/swarms/swarms.py |
from abc import ABC, abstractmethod
class AbstractSwarm(ABC):
# TODO: Pass in abstract LLM class that can utilize Hf or Anthropic models, Move away from OPENAI
    # TODO: ADD Universal Communication Layer, an Ocean vectorstore instance
    # TODO: BE MORE EXPLICIT ON TOOL USE, TASK DECOMPOSITION AND TASK COMPLETION AND ALLOCATION
# TODO: Add RLHF Data collection, ask user how the swarm is performing
    # TODO: Create an onboarding process if settings are not preconfigured, like `from swarms import Swarm, Swarm()` => then initiate onboarding: name your swarm + provide purpose + etc
def __init__(self, agents, vectorstore, tools):
self.agents = agents
self.vectorstore = vectorstore
self.tools = tools
@abstractmethod
def communicate(self):
pass
@abstractmethod
def run(self):
pass | swarms-master | swarms/swarms/base.py |
import boto3
import json
import subprocess
import os
import re
def b64text(txt):
"""Generate Base 64 encoded CF json for a multiline string, subbing in values where appropriate"""
lines = []
for line in txt.splitlines(True):
if "${" in line:
lines.append({"Fn::Sub": line})
else:
lines.append(line)
return {"Fn::Base64": {"Fn::Join": ["", lines]}}
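# Hedged illustration (not part of the original script): a line containing "${...}"
# is wrapped in Fn::Sub so CloudFormation resolves the reference at deploy time, e.g.
# b64text("echo ${AWS::Region}\n") returns
# {"Fn::Base64": {"Fn::Join": ["", [{"Fn::Sub": "echo ${AWS::Region}\n"}]]}}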
path = os.path.dirname(os.path.realpath(__file__))
version = subprocess.check_output(f"{path}/version").decode("ascii").strip()
with open(f"{path}/templates/docker-compose.yml") as f:
docker_compose_file = str(f.read())
with open(f"{path}/../config/backup_disk.xml") as f:
backup_disk_config = str(f.read())
with open(f"{path}/../config/ocean_users.xml") as f:
ocean_users_config = str(f.read())
cloud_config_script = """
#cloud-config
cloud_final_modules:
- [scripts-user, always]
"""
cloud_init_script = f"""
#!/bin/bash
amazon-linux-extras install docker
usermod -a -G docker ec2-user
curl -L https://github.com/docker/compose/releases/latest/download/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose
systemctl enable docker
systemctl start docker
cat << EOF > /home/ec2-user/docker-compose.yml
{docker_compose_file}
EOF
mkdir /home/ec2-user/config
cat << EOF > /home/ec2-user/config/backup_disk.xml
{backup_disk_config}
EOF
cat << EOF > /home/ec2-user/config/ocean_users.xml
{ocean_users_config}
EOF
docker-compose -f /home/ec2-user/docker-compose.yml up -d
"""
userdata = f"""Content-Type: multipart/mixed; boundary="//"
MIME-Version: 1.0
--//
Content-Type: text/cloud-config; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="cloud-config.txt"
{cloud_config_script}
--//
Content-Type: text/x-shellscript; charset="us-ascii"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
Content-Disposition: attachment; filename="userdata.txt"
{cloud_init_script}
--//--
"""
cf = {
"AWSTemplateFormatVersion": "2010-09-09",
"Description": "Create a stack that runs Ocean hosted on a single instance",
"Parameters": {
"KeyName": {
"Description": "Name of an existing EC2 KeyPair to enable SSH access to the instance",
"Type": "String",
"ConstraintDescription": "If present, must be the name of an existing EC2 KeyPair.",
"Default": "",
},
"InstanceType": {
"Description": "EC2 instance type",
"Type": "String",
"Default": "t3.small",
},
"OceanVersion": {
"Description": "Ocean version to install",
"Type": "String",
"Default": version,
},
},
"Conditions": {
"HasKeyName": {"Fn::Not": [{"Fn::Equals": [{"Ref": "KeyName"}, ""]}]},
},
"Resources": {
"OceanInstance": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId": {
"Fn::FindInMap": ["Region2AMI", {"Ref": "AWS::Region"}, "AMI"]
},
"InstanceType": {"Ref": "InstanceType"},
"UserData": b64text(userdata),
"SecurityGroupIds": [{"Ref": "OceanInstanceSecurityGroup"}],
"KeyName": {
"Fn::If": [
"HasKeyName",
{"Ref": "KeyName"},
{"Ref": "AWS::NoValue"},
]
},
"BlockDeviceMappings": [
{
"DeviceName": {
"Fn::FindInMap": [
"Region2AMI",
{"Ref": "AWS::Region"},
"RootDeviceName",
]
},
"Ebs": {"VolumeSize": 24},
}
],
},
},
"OceanInstanceSecurityGroup": {
"Type": "AWS::EC2::SecurityGroup",
"Properties": {
"GroupDescription": "Ocean Instance Security Group",
"SecurityGroupIngress": [
{
"IpProtocol": "tcp",
"FromPort": "22",
"ToPort": "22",
"CidrIp": "0.0.0.0/0",
},
{
"IpProtocol": "tcp",
"FromPort": "8000",
"ToPort": "8000",
"CidrIp": "0.0.0.0/0",
},
],
},
},
},
"Outputs": {
"ServerIp": {
"Description": "IP address of the Ocean server",
"Value": {"Fn::GetAtt": ["OceanInstance", "PublicIp"]},
}
},
"Mappings": {"Region2AMI": {}},
}
# Populate the Region2AMI mappings
regions = boto3.client("ec2", region_name="us-east-1").describe_regions()["Regions"]
for region in regions:
region_name = region["RegionName"]
ami_result = boto3.client("ec2", region_name=region_name).describe_images(
Owners=["137112412989"],
Filters=[
{"Name": "name", "Values": ["amzn2-ami-kernel-5.10-hvm-*-x86_64-gp2"]},
{"Name": "root-device-type", "Values": ["ebs"]},
{"Name": "virtualization-type", "Values": ["hvm"]},
],
)
img = ami_result["Images"][0]
ami_id = img["ImageId"]
root_device_name = img["BlockDeviceMappings"][0]["DeviceName"]
cf["Mappings"]["Region2AMI"][region_name] = {
"AMI": ami_id,
"RootDeviceName": root_device_name,
}
# Write the CF json to a file
json.dump(cf, open("/tmp/ocean.cf.json", "w"), indent=4)
# upload to S3
s3 = boto3.client("s3", region_name="us-east-1")
s3.upload_file(
"/tmp/ocean.cf.json",
"public.tryocean.com",
f"cloudformation/{version}/ocean.cf.json",
)
# Upload to s3 under /latest version only if this is a release
pattern = re.compile(r"^\d+\.\d+\.\d+$")
if pattern.match(version):
s3.upload_file(
"/tmp/ocean.cf.json",
"public.tryocean.com",
"cloudformation/latest/ocean.cf.json",
)
else:
print(f"Version {version} is not a 3-part semver, not uploading to /latest")
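# Hedged usage note (not part of the source): once uploaded, the template could be
# launched with the AWS CLI; the template URL form below is an assumption.
#   aws cloudformation create-stack --stack-name ocean \
#     --template-url https://public.tryocean.com.s3.amazonaws.com/cloudformation/latest/ocean.cf.json \
#     --parameters ParameterKey=KeyName,ParameterValue=my-keypair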
| Ocean-master | bin/generate_cloudformation.py |
# Sanity check script to ensure that the Ocean client can connect
# and is capable of receiving data.
import oceandb
# run in in-memory mode
ocean_api = oceandb.Client()
print(ocean_api.heartbeat())
| Ocean-master | bin/test.py |
from pydantic import BaseSettings
from typing import List
TELEMETRY_WHITELISTED_SETTINGS = [
"ocean_db_impl",
"ocean_api_impl",
"ocean_server_ssl_enabled",
]
class Settings(BaseSettings):
environment: str = ""
ocean_db_impl: str = "duckdb"
ocean_api_impl: str = "local"
clickhouse_host: str = None # type: ignore
clickhouse_port: str = None
persist_directory: str = ".ocean"
ocean_server_host: str = None
ocean_server_http_port: str = None
ocean_server_ssl_enabled: bool = False
ocean_server_grpc_port: str = None
ocean_server_cors_allow_origins: List[str] = [] # eg ["http://localhost:3000"]
anonymized_telemetry: bool = True
def __getitem__(self, item):
return getattr(self, item)
class Config:
env_file = ".env"
env_file_encoding = "utf-8"
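# Hedged usage sketch (not in the source file): values can come from the constructor,
# environment variables, or the .env file named above; __getitem__ allows dict-style reads.
# settings = Settings(ocean_db_impl="clickhouse", clickhouse_host="localhost", clickhouse_port="8123")
# settings["ocean_db_impl"]  # -> "clickhouse"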
| Ocean-master | oceandb/config.py |
import oceandb.config
import logging
from oceandb.telemetry.events import ClientStartEvent
from oceandb.telemetry.posthog import Posthog
logger = logging.getLogger(__name__)
__settings = oceandb.config.Settings()
__version__ = "0.3.22"
def configure(**kwargs):
"""Override Ocean's default settings, environment variables or .env files"""
global __settings
__settings = oceandb.config.Settings(**kwargs)
def get_settings():
return __settings
def get_db(settings=__settings):
    """Return an ocean.DB instance based on the provided or environmental settings."""
setting = settings.ocean_db_impl.lower()
def require(key):
assert settings[
key
], f"Setting '{key}' is required when ocean_db_impl={setting}"
if setting == "clickhouse":
require("clickhouse_host")
require("clickhouse_port")
require("persist_directory")
logger.info("Using Clickhouse for database")
import oceandb.db.clickhouse
return oceandb.db.clickhouse.Clickhouse(settings)
elif setting == "duckdb+parquet":
require("persist_directory")
logger.warning(
f"Using embedded DuckDB with persistence: data will be stored in: {settings.persist_directory}"
)
import oceandb.db.duckdb
return oceandb.db.duckdb.PersistentDuckDB(settings)
elif setting == "duckdb":
require("persist_directory")
logger.warning(
"Using embedded DuckDB without persistence: data will be transient"
)
import oceandb.db.duckdb
return oceandb.db.duckdb.DuckDB(settings)
else:
raise ValueError(
f"Expected ocean_db_impl to be one of clickhouse, duckdb, duckdb+parquet, got {setting}"
)
def Client(settings=__settings):
    """Return an ocean.API instance based on the provided or environmental
    settings."""
setting = settings.ocean_api_impl.lower()
telemetry_client = Posthog(settings)
# Submit event for client start
telemetry_client.capture(ClientStartEvent())
def require(key):
assert settings[
key
], f"Setting '{key}' is required when ocean_api_impl={setting}"
if setting == "rest":
require("ocean_server_host")
require("ocean_server_http_port")
logger.info(
"Running Ocean in client mode using REST to connect to remote server"
)
import oceandb.api.fastapi
return oceandb.api.fastapi.FastAPI(settings, telemetry_client)
elif setting == "local":
logger.info("Running Ocean using direct local API.")
import oceandb.api.local
return oceandb.api.local.LocalAPI(settings, get_db(settings), telemetry_client)
else:
raise ValueError(
f"Expected ocean_api_impl to be one of rest, local, got {setting}"
)
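# Hedged usage sketch (not in the source): connecting to a remote Ocean server over REST;
# the host and port values are placeholders.
# client = Client(oceandb.config.Settings(
#     ocean_api_impl="rest",
#     ocean_server_host="localhost",
#     ocean_server_http_port="8000",
# ))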
| Ocean-master | oceandb/__init__.py |
import oceandb
import oceandb.config
from oceandb.server.fastapi import FastAPI
settings = oceandb.config.Settings()
server = FastAPI(settings)
app = server.app()
| Ocean-master | oceandb/app.py |
from abc import abstractmethod
class OceanError(Exception):
def code(self):
"""Return an appropriate HTTP response code for this error"""
return 400 # Bad Request
def message(self):
return ", ".join(self.args)
    @classmethod
    @abstractmethod
    def name(cls):
        """Return the error name"""
        pass
class NoDatapointsException(OceanError):
@classmethod
def name(cls):
return "NoDatapoints"
class NoIndexException(OceanError):
@classmethod
def name(cls):
return "NoIndex"
class InvalidDimensionException(OceanError):
@classmethod
def name(cls):
return "InvalidDimension"
class NotEnoughElementsException(OceanError):
@classmethod
def name(cls):
return "NotEnoughElements"
class IDAlreadyExistsError(OceanError):
def code(self):
return 409 # Conflict
@classmethod
def name(cls):
return "IDAlreadyExists"
class DuplicateIDError(OceanError):
@classmethod
def name(cls):
return "DuplicateID"
class InvalidUUIDError(OceanError):
@classmethod
def name(cls):
return "InvalidUUID"
error_types = {
"NoDatapoints": NoDatapointsException,
"NoIndex": NoIndexException,
"InvalidDimension": InvalidDimensionException,
"NotEnoughElements": NotEnoughElementsException,
"IDAlreadyExists": IDAlreadyExistsError,
"DuplicateID": DuplicateIDError,
"InvalidUUID": InvalidUUIDError,
}
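# Hedged sketch (not in the source): a caller could rehydrate a typed error from the
# name string a server reports, falling back to the base class.
# def raise_ocean_error(name: str, message: str):
#     raise error_types.get(name, OceanError)(message)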
| Ocean-master | oceandb/errors.py |
from oceandb.config import Settings
from oceandb import Client
from oceandb.api import API
import oceandb.server.fastapi
from requests.exceptions import ConnectionError
import hypothesis
import tempfile
import os
import uvicorn
import time
from multiprocessing import Process
import pytest
from typing import Generator, List, Callable
import shutil
hypothesis.settings.register_profile(
"dev",
deadline=15000,
suppress_health_check=[
hypothesis.HealthCheck.data_too_large,
hypothesis.HealthCheck.large_base_example,
],
)
hypothesis.settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "dev"))
def _run_server():
    """Run an Ocean server locally"""
settings = Settings(
ocean_api_impl="local",
ocean_db_impl="duckdb",
persist_directory=tempfile.gettempdir() + "/test_server",
)
server = oceandb.server.fastapi.FastAPI(settings)
uvicorn.run(server.app(), host="0.0.0.0", port=6666, log_level="error")
def _await_server(api: API, attempts: int = 0):
try:
api.heartbeat()
except ConnectionError as e:
if attempts > 10:
raise e
else:
time.sleep(2)
_await_server(api, attempts + 1)
def fastapi() -> Generator[API, None, None]:
    """Fixture generator that launches a server in a separate process, and yields a
    FastAPI client connected to it"""
proc = Process(target=_run_server, args=(), daemon=True)
proc.start()
api = oceandb.Client(
Settings(
ocean_api_impl="rest",
ocean_server_host="localhost",
ocean_server_http_port="6666",
)
)
_await_server(api)
yield api
proc.kill()
def duckdb() -> Generator[API, None, None]:
"""Fixture generator for duckdb"""
yield Client(
Settings(
ocean_api_impl="local",
ocean_db_impl="duckdb",
persist_directory=tempfile.gettempdir(),
)
)
def duckdb_parquet() -> Generator[API, None, None]:
"""Fixture generator for duckdb+parquet"""
save_path = tempfile.gettempdir() + "/tests"
yield Client(
Settings(
ocean_api_impl="local",
ocean_db_impl="duckdb+parquet",
persist_directory=save_path,
)
)
if os.path.exists(save_path):
shutil.rmtree(save_path)
def integration_api() -> Generator[API, None, None]:
    """Fixture generator for returning a client configured via environment
variables, intended for externally configured integration tests
"""
yield oceandb.Client()
def fixtures() -> List[Callable[[], Generator[API, None, None]]]:
api_fixtures = [duckdb, duckdb_parquet, fastapi]
if "OCEAN_INTEGRATION_TEST" in os.environ:
api_fixtures.append(integration_api)
if "OCEAN_INTEGRATION_TEST_ONLY" in os.environ:
api_fixtures = [integration_api]
return api_fixtures
@pytest.fixture(scope="module", params=fixtures())
def api(request) -> Generator[API, None, None]:
yield next(request.param())
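# Hedged sketch (not in the source): any test that accepts an `api` argument runs once per
# fixture returned by fixtures() above, e.g.
# def test_heartbeat_smoke(api):
#     assert isinstance(api.heartbeat(), int)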
| Ocean-master | oceandb/test/conftest.py |
import unittest
import os
from unittest.mock import patch
import oceandb
import oceandb.config
class GetDBTest(unittest.TestCase):
@patch("oceandb.db.duckdb.DuckDB", autospec=True)
def test_default_db(self, mock):
oceandb.get_db(oceandb.config.Settings(persist_directory="./foo"))
assert mock.called
@patch("oceandb.db.duckdb.PersistentDuckDB", autospec=True)
def test_persistent_duckdb(self, mock):
oceandb.get_db(
oceandb.config.Settings(
ocean_db_impl="duckdb+parquet", persist_directory="./foo"
)
)
assert mock.called
@patch("oceandb.db.clickhouse.Clickhouse", autospec=True)
def test_clickhouse(self, mock):
oceandb.get_db(
oceandb.config.Settings(
ocean_db_impl="clickhouse",
persist_directory="./foo",
clickhouse_host="foo",
clickhouse_port=666,
)
)
assert mock.called
class GetAPITest(unittest.TestCase):
@patch("oceandb.db.duckdb.DuckDB", autospec=True)
@patch("oceandb.api.local.LocalAPI", autospec=True)
@patch.dict(os.environ, {}, clear=True)
def test_local(self, mock_api, mock_db):
oceandb.Client(oceandb.config.Settings(persist_directory="./foo"))
assert mock_api.called
assert mock_db.called
@patch("oceandb.api.fastapi.FastAPI", autospec=True)
@patch.dict(os.environ, {}, clear=True)
def test_fastapi(self, mock):
oceandb.Client(
oceandb.config.Settings(
ocean_api_impl="rest",
persist_directory="./foo",
ocean_server_host="foo",
ocean_server_http_port="80",
)
)
assert mock.called
| Ocean-master | oceandb/test/test_chroma.py |
import oceandb
from oceandb.api.types import QueryResult
from oceandb.config import Settings
import oceandb.server.fastapi
import pytest
import tempfile
import numpy as np
@pytest.fixture
def local_persist_api():
return oceandb.Client(
Settings(
ocean_api_impl="local",
ocean_db_impl="duckdb+parquet",
persist_directory=tempfile.gettempdir() + "/test_server",
)
)
# https://docs.pytest.org/en/6.2.x/fixture.html#fixtures-can-be-requested-more-than-once-per-test-return-values-are-cached
@pytest.fixture
def local_persist_api_cache_bust():
return oceandb.Client(
Settings(
ocean_api_impl="local",
ocean_db_impl="duckdb+parquet",
persist_directory=tempfile.gettempdir() + "/test_server",
)
)
@pytest.mark.parametrize("api_fixture", [local_persist_api])
def test_persist_index_loading(api_fixture, request):
api = request.getfixturevalue("local_persist_api")
api.reset()
collection = api.create_collection("test")
collection.add(ids="id1", documents="hello")
api.persist()
del api
api2 = request.getfixturevalue("local_persist_api_cache_bust")
collection = api2.get_collection("test")
nn = collection.query(
query_texts="hello",
n_results=1,
include=["embeddings", "documents", "metadatas", "distances"],
)
for key in nn.keys():
assert len(nn[key]) == 1
@pytest.mark.parametrize("api_fixture", [local_persist_api])
def test_persist_index_loading_embedding_function(api_fixture, request):
embedding_function = lambda x: [[1, 2, 3] for _ in range(len(x))] # noqa E731
api = request.getfixturevalue("local_persist_api")
api.reset()
collection = api.create_collection("test", embedding_function=embedding_function)
collection.add(ids="id1", documents="hello")
api.persist()
del api
api2 = request.getfixturevalue("local_persist_api_cache_bust")
collection = api2.get_collection("test", embedding_function=embedding_function)
nn = collection.query(
query_texts="hello",
n_results=1,
include=["embeddings", "documents", "metadatas", "distances"],
)
for key in nn.keys():
assert len(nn[key]) == 1
@pytest.mark.parametrize("api_fixture", [local_persist_api])
def test_persist_index_get_or_create_embedding_function(api_fixture, request):
embedding_function = lambda x: [[1, 2, 3] for _ in range(len(x))] # noqa E731
api = request.getfixturevalue("local_persist_api")
api.reset()
collection = api.get_or_create_collection(
"test", embedding_function=embedding_function
)
collection.add(ids="id1", documents="hello")
api.persist()
del api
api2 = request.getfixturevalue("local_persist_api_cache_bust")
collection = api2.get_or_create_collection(
"test", embedding_function=embedding_function
)
nn = collection.query(
query_texts="hello",
n_results=1,
include=["embeddings", "documents", "metadatas", "distances"],
)
for key in nn.keys():
assert len(nn[key]) == 1
@pytest.mark.parametrize("api_fixture", [local_persist_api])
def test_persist(api_fixture, request):
api = request.getfixturevalue(api_fixture.__name__)
api.reset()
collection = api.create_collection("testspace")
collection.add(**batch_records)
assert collection.count() == 2
api.persist()
del api
api = request.getfixturevalue(api_fixture.__name__)
collection = api.get_collection("testspace")
assert collection.count() == 2
api.delete_collection("testspace")
api.persist()
del api
api = request.getfixturevalue(api_fixture.__name__)
assert api.list_collections() == []
def test_heartbeat(api):
assert isinstance(api.heartbeat(), int)
batch_records = {
"embeddings": [[1.1, 2.3, 3.2], [1.2, 2.24, 3.2]],
"ids": ["https://example.com/1", "https://example.com/2"],
}
def test_add(api):
api.reset()
collection = api.create_collection("testspace")
collection.add(**batch_records)
assert collection.count() == 2
def test_get_or_create(api):
api.reset()
collection = api.create_collection("testspace")
collection.add(**batch_records)
assert collection.count() == 2
with pytest.raises(Exception):
collection = api.create_collection("testspace")
collection = api.get_or_create_collection("testspace")
assert collection.count() == 2
minimal_records = {
"embeddings": [[1.1, 2.3, 3.2], [1.2, 2.24, 3.2]],
"ids": ["https://example.com/1", "https://example.com/2"],
}
def test_add_minimal(api):
api.reset()
collection = api.create_collection("testspace")
collection.add(**minimal_records)
assert collection.count() == 2
def test_get_from_db(api):
api.reset()
collection = api.create_collection("testspace")
collection.add(**batch_records)
records = collection.get(include=["embeddings", "documents", "metadatas"])
for key in records.keys():
assert len(records[key]) == 2
def test_reset_db(api):
api.reset()
collection = api.create_collection("testspace")
collection.add(**batch_records)
assert collection.count() == 2
assert api.reset()
assert len(api.list_collections()) == 0
def test_get_nearest_neighbors(api):
api.reset()
collection = api.create_collection("testspace")
collection.add(**batch_records)
# assert api.create_index(collection_name="testspace") # default is auto now
nn = collection.query(
query_embeddings=[1.1, 2.3, 3.2],
n_results=1,
where={},
include=["embeddings", "documents", "metadatas", "distances"],
)
for key in nn.keys():
assert len(nn[key]) == 1
nn = collection.query(
query_embeddings=[[1.1, 2.3, 3.2]],
n_results=1,
where={},
include=["embeddings", "documents", "metadatas", "distances"],
)
for key in nn.keys():
assert len(nn[key]) == 1
nn = collection.query(
query_embeddings=[[1.1, 2.3, 3.2], [0.1, 2.3, 4.5]],
n_results=1,
where={},
include=["embeddings", "documents", "metadatas", "distances"],
)
for key in nn.keys():
assert len(nn[key]) == 2
def test_get_nearest_neighbors_filter(api, request):
api.reset()
collection = api.create_collection("testspace")
collection.add(**batch_records)
# assert api.create_index(collection_name="testspace") # default is auto now
with pytest.raises(Exception) as e:
collection.query(
query_embeddings=[[1.1, 2.3, 3.2]], n_results=1, where={"distance": "false"}
)
assert str(e.value).__contains__("found")
def test_delete(api):
api.reset()
collection = api.create_collection("testspace")
collection.add(**batch_records)
assert collection.count() == 2
collection.delete()
assert collection.count() == 0
def test_delete_with_index(api):
api.reset()
collection = api.create_collection("testspace")
collection.add(**batch_records)
assert collection.count() == 2
collection.query(query_embeddings=[[1.1, 2.3, 3.2]], n_results=1)
def test_count(api):
api.reset()
collection = api.create_collection("testspace")
assert collection.count() == 0
collection.add(**batch_records)
assert collection.count() == 2
def test_modify(api):
api.reset()
collection = api.create_collection("testspace")
collection.modify(name="testspace2")
    # collection name is modified
assert collection.name == "testspace2"
def test_metadata_cru(api):
api.reset()
metadata_a = {"a": 1, "b": 2}
    # Test create metadata
collection = api.create_collection("testspace", metadata=metadata_a)
assert collection.metadata is not None
assert collection.metadata["a"] == 1
assert collection.metadata["b"] == 2
    # Test get metadata
collection = api.get_collection("testspace")
assert collection.metadata is not None
assert collection.metadata["a"] == 1
assert collection.metadata["b"] == 2
    # Test modify metadata
collection.modify(metadata={"a": 2, "c": 3})
assert collection.metadata["a"] == 2
assert collection.metadata["c"] == 3
assert "b" not in collection.metadata
    # Test get after modify metadata
collection = api.get_collection("testspace")
assert collection.metadata is not None
assert collection.metadata["a"] == 2
assert collection.metadata["c"] == 3
assert "b" not in collection.metadata
# Test name exists get_or_create_metadata
collection = api.get_or_create_collection("testspace")
assert collection.metadata is not None
assert collection.metadata["a"] == 2
assert collection.metadata["c"] == 3
# Test name exists create metadata
collection = api.get_or_create_collection("testspace2")
assert collection.metadata is None
# Test list collections
collections = api.list_collections()
for collection in collections:
if collection.name == "testspace":
assert collection.metadata is not None
assert collection.metadata["a"] == 2
assert collection.metadata["c"] == 3
elif collection.name == "testspace2":
assert collection.metadata is None
def test_increment_index_on(api):
api.reset()
collection = api.create_collection("testspace")
collection.add(**batch_records)
assert collection.count() == 2
# increment index
# collection.create_index(index_type="hnsw", index_params={"M": 16, "efConstruction": 200})
nn = collection.query(
query_embeddings=[[1.1, 2.3, 3.2]],
n_results=1,
include=["embeddings", "documents", "metadatas", "distances"],
)
for key in nn.keys():
assert len(nn[key]) == 1
def test_increment_index_off(api):
api.reset()
collection = api.create_collection("testspace")
collection.add(**batch_records, increment_index=False)
assert collection.count() == 2
# incremental index
collection.create_index()
nn = collection.query(
query_embeddings=[[1.1, 2.3, 3.2]],
n_results=1,
include=["embeddings", "documents", "metadatas", "distances"],
)
for key in nn.keys():
assert len(nn[key]) == 1
def skipping_indexing_will_fail(api):
api.reset()
collection = api.create_collection("testspace")
collection.add(**batch_records, increment_index=False)
assert collection.count() == 2
# incremental index
with pytest.raises(Exception) as e:
collection.query(query_embeddings=[[1.1, 2.3, 3.2]], n_results=1)
assert str(e.value).__contains__("index not found")
def test_add_a_collection(api):
api.reset()
api.create_collection("testspace")
# get collection does not throw an error
collection = api.get_collection("testspace")
assert collection.name == "testspace"
# get collection should throw an error if collection does not exist
with pytest.raises(Exception):
collection = api.get_collection("testspace2")
def test_list_collections(api):
api.reset()
api.create_collection("testspace")
api.create_collection("testspace2")
# get collection does not throw an error
collections = api.list_collections()
assert len(collections) == 2
def test_reset(api):
api.reset()
api.create_collection("testspace")
api.create_collection("testspace2")
# get collection does not throw an error
collections = api.list_collections()
assert len(collections) == 2
api.reset()
collections = api.list_collections()
assert len(collections) == 0
def test_peek(api):
api.reset()
collection = api.create_collection("testspace")
collection.add(**batch_records)
assert collection.count() == 2
# peek
peek = collection.peek()
for key in peek.keys():
assert len(peek[key]) == 2
# TEST METADATA AND METADATA FILTERING
# region
metadata_records = {
"embeddings": [[1.1, 2.3, 3.2], [1.2, 2.24, 3.2]],
"ids": ["id1", "id2"],
"metadatas": [
{"int_value": 1, "string_value": "one", "float_value": 1.001},
{"int_value": 2},
],
}
def test_metadata_add_get_int_float(api):
api.reset()
collection = api.create_collection("test_int")
collection.add(**metadata_records)
items = collection.get(ids=["id1", "id2"])
assert items["metadatas"][0]["int_value"] == 1
assert items["metadatas"][0]["float_value"] == 1.001
assert items["metadatas"][1]["int_value"] == 2
assert type(items["metadatas"][0]["int_value"]) == int
assert type(items["metadatas"][0]["float_value"]) == float
def test_metadata_add_query_int_float(api):
api.reset()
collection = api.create_collection("test_int")
collection.add(**metadata_records)
items: QueryResult = collection.query(
query_embeddings=[[1.1, 2.3, 3.2]], n_results=1
)
assert items["metadatas"] is not None
assert items["metadatas"][0][0]["int_value"] == 1
assert items["metadatas"][0][0]["float_value"] == 1.001
assert type(items["metadatas"][0][0]["int_value"]) == int
assert type(items["metadatas"][0][0]["float_value"]) == float
def test_metadata_get_where_string(api):
api.reset()
collection = api.create_collection("test_int")
collection.add(**metadata_records)
items = collection.get(where={"string_value": "one"})
assert items["metadatas"][0]["int_value"] == 1
assert items["metadatas"][0]["string_value"] == "one"
def test_metadata_get_where_int(api):
api.reset()
collection = api.create_collection("test_int")
collection.add(**metadata_records)
items = collection.get(where={"int_value": 1})
assert items["metadatas"][0]["int_value"] == 1
assert items["metadatas"][0]["string_value"] == "one"
def test_metadata_get_where_float(api):
api.reset()
collection = api.create_collection("test_int")
collection.add(**metadata_records)
items = collection.get(where={"float_value": 1.001})
assert items["metadatas"][0]["int_value"] == 1
assert items["metadatas"][0]["string_value"] == "one"
assert items["metadatas"][0]["float_value"] == 1.001
def test_metadata_update_get_int_float(api):
api.reset()
collection = api.create_collection("test_int")
collection.add(**metadata_records)
collection.update(
ids=["id1"],
metadatas=[{"int_value": 2, "string_value": "two", "float_value": 2.002}],
)
items = collection.get(ids=["id1"])
assert items["metadatas"][0]["int_value"] == 2
assert items["metadatas"][0]["string_value"] == "two"
assert items["metadatas"][0]["float_value"] == 2.002
bad_metadata_records = {
"embeddings": [[1.1, 2.3, 3.2], [1.2, 2.24, 3.2]],
"ids": ["id1", "id2"],
"metadatas": [{"value": {"nested": "5"}}, {"value": [1, 2, 3]}],
}
def test_metadata_validation_add(api):
api.reset()
collection = api.create_collection("test_metadata_validation")
with pytest.raises(ValueError, match="metadata"):
collection.add(**bad_metadata_records)
def test_metadata_validation_update(api):
api.reset()
collection = api.create_collection("test_metadata_validation")
collection.add(**metadata_records)
with pytest.raises(ValueError, match="metadata"):
collection.update(ids=["id1"], metadatas={"value": {"nested": "5"}})
def test_where_validation_get(api):
api.reset()
collection = api.create_collection("test_where_validation")
with pytest.raises(ValueError, match="where"):
collection.get(where={"value": {"nested": "5"}})
def test_where_validation_query(api):
api.reset()
collection = api.create_collection("test_where_validation")
with pytest.raises(ValueError, match="where"):
collection.query(query_embeddings=[0, 0, 0], where={"value": {"nested": "5"}})
operator_records = {
"embeddings": [[1.1, 2.3, 3.2], [1.2, 2.24, 3.2]],
"ids": ["id1", "id2"],
"metadatas": [
{"int_value": 1, "string_value": "one", "float_value": 1.001},
{"int_value": 2, "float_value": 2.002, "string_value": "two"},
],
}
def test_where_lt(api):
api.reset()
collection = api.create_collection("test_where_lt")
collection.add(**operator_records)
items = collection.get(where={"int_value": {"$lt": 2}})
assert len(items["metadatas"]) == 1
def test_where_lte(api):
api.reset()
collection = api.create_collection("test_where_lte")
collection.add(**operator_records)
items = collection.get(where={"int_value": {"$lte": 2.0}})
assert len(items["metadatas"]) == 2
def test_where_gt(api):
api.reset()
collection = api.create_collection("test_where_lte")
collection.add(**operator_records)
items = collection.get(where={"float_value": {"$gt": -1.4}})
assert len(items["metadatas"]) == 2
def test_where_gte(api):
api.reset()
collection = api.create_collection("test_where_lte")
collection.add(**operator_records)
items = collection.get(where={"float_value": {"$gte": 2.002}})
assert len(items["metadatas"]) == 1
def test_where_ne_string(api):
api.reset()
collection = api.create_collection("test_where_lte")
collection.add(**operator_records)
items = collection.get(where={"string_value": {"$ne": "two"}})
assert len(items["metadatas"]) == 1
def test_where_ne_eq_number(api):
api.reset()
collection = api.create_collection("test_where_lte")
collection.add(**operator_records)
items = collection.get(where={"int_value": {"$ne": 1}})
assert len(items["metadatas"]) == 1
items = collection.get(where={"float_value": {"$eq": 2.002}})
assert len(items["metadatas"]) == 1
def test_where_valid_operators(api):
api.reset()
collection = api.create_collection("test_where_valid_operators")
collection.add(**operator_records)
with pytest.raises(ValueError):
collection.get(where={"int_value": {"$invalid": 2}})
with pytest.raises(ValueError):
collection.get(where={"int_value": {"$lt": "2"}})
with pytest.raises(ValueError):
collection.get(where={"int_value": {"$lt": 2, "$gt": 1}})
# Test invalid $and, $or
with pytest.raises(ValueError):
collection.get(where={"$and": {"int_value": {"$lt": 2}}})
with pytest.raises(ValueError):
collection.get(
where={"int_value": {"$lt": 2}, "$or": {"int_value": {"$gt": 1}}}
)
with pytest.raises(ValueError):
collection.get(
where={"$gt": [{"int_value": {"$lt": 2}}, {"int_value": {"$gt": 1}}]}
)
with pytest.raises(ValueError):
collection.get(where={"$or": [{"int_value": {"$lt": 2}}]})
with pytest.raises(ValueError):
collection.get(where={"$or": []})
with pytest.raises(ValueError):
collection.get(where={"a": {"$contains": "test"}})
with pytest.raises(ValueError):
collection.get(
where={
"$or": [
{"a": {"$contains": "first"}}, # invalid
{"$contains": "second"}, # valid
]
}
)
# TODO: Define the dimensionality of these embeddings in terms of the default record
bad_dimensionality_records = {
"embeddings": [[1.1, 2.3, 3.2, 4.5], [1.2, 2.24, 3.2, 4.5]],
"ids": ["id1", "id2"],
}
bad_dimensionality_query = {
"query_embeddings": [[1.1, 2.3, 3.2, 4.5], [1.2, 2.24, 3.2, 4.5]],
}
bad_number_of_results_query = {
"query_embeddings": [[1.1, 2.3, 3.2], [1.2, 2.24, 3.2]],
"n_results": 100,
}
def test_dimensionality_validation_add(api):
api.reset()
collection = api.create_collection("test_dimensionality_validation")
collection.add(**minimal_records)
with pytest.raises(Exception) as e:
collection.add(**bad_dimensionality_records)
assert "dimensionality" in str(e.value)
def test_dimensionality_validation_query(api):
api.reset()
collection = api.create_collection("test_dimensionality_validation_query")
collection.add(**minimal_records)
with pytest.raises(Exception) as e:
collection.query(**bad_dimensionality_query)
assert "dimensionality" in str(e.value)
def test_number_of_elements_validation_query(api):
api.reset()
collection = api.create_collection("test_number_of_elements_validation")
collection.add(**minimal_records)
with pytest.raises(Exception) as e:
collection.query(**bad_number_of_results_query)
assert "number of elements" in str(e.value)
def test_query_document_valid_operators(api):
api.reset()
collection = api.create_collection("test_where_valid_operators")
collection.add(**operator_records)
with pytest.raises(ValueError, match="where document"):
collection.get(where_document={"$lt": {"$nested": 2}})
with pytest.raises(ValueError, match="where document"):
collection.query(query_embeddings=[0, 0, 0], where_document={"$contains": 2})
with pytest.raises(ValueError, match="where document"):
collection.get(where_document={"$contains": []})
# Test invalid $and, $or
with pytest.raises(ValueError):
collection.get(where_document={"$and": {"$unsupported": "doc"}})
with pytest.raises(ValueError):
collection.get(
where_document={"$or": [{"$unsupported": "doc"}, {"$unsupported": "doc"}]}
)
with pytest.raises(ValueError):
collection.get(where_document={"$or": [{"$contains": "doc"}]})
with pytest.raises(ValueError):
collection.get(where_document={"$or": []})
with pytest.raises(ValueError):
collection.get(
where_document={
"$or": [{"$and": [{"$contains": "doc"}]}, {"$contains": "doc"}]
}
)
contains_records = {
"embeddings": [[1.1, 2.3, 3.2], [1.2, 2.24, 3.2]],
"documents": ["this is doc1 and it's great!", "doc2 is also great!"],
"ids": ["id1", "id2"],
"metadatas": [
{"int_value": 1, "string_value": "one", "float_value": 1.001},
{"int_value": 2, "float_value": 2.002, "string_value": "two"},
],
}
def test_get_where_document(api):
api.reset()
collection = api.create_collection("test_get_where_document")
collection.add(**contains_records)
items = collection.get(where_document={"$contains": "doc1"})
assert len(items["metadatas"]) == 1
items = collection.get(where_document={"$contains": "great"})
assert len(items["metadatas"]) == 2
items = collection.get(where_document={"$contains": "bad"})
assert len(items["metadatas"]) == 0
def test_query_where_document(api):
api.reset()
collection = api.create_collection("test_query_where_document")
collection.add(**contains_records)
items = collection.query(
query_embeddings=[1, 0, 0], where_document={"$contains": "doc1"}, n_results=1
)
assert len(items["metadatas"][0]) == 1
items = collection.query(
query_embeddings=[0, 0, 0], where_document={"$contains": "great"}, n_results=2
)
assert len(items["metadatas"][0]) == 2
with pytest.raises(Exception) as e:
items = collection.query(
query_embeddings=[0, 0, 0], where_document={"$contains": "bad"}, n_results=1
)
assert "datapoints" in str(e.value)
def test_delete_where_document(api):
api.reset()
collection = api.create_collection("test_delete_where_document")
collection.add(**contains_records)
collection.delete(where_document={"$contains": "doc1"})
assert collection.count() == 1
collection.delete(where_document={"$contains": "bad"})
assert collection.count() == 1
collection.delete(where_document={"$contains": "great"})
assert collection.count() == 0
logical_operator_records = {
"embeddings": [
[1.1, 2.3, 3.2],
[1.2, 2.24, 3.2],
[1.3, 2.25, 3.2],
[1.4, 2.26, 3.2],
],
"ids": ["id1", "id2", "id3", "id4"],
"metadatas": [
{"int_value": 1, "string_value": "one", "float_value": 1.001, "is": "doc"},
{"int_value": 2, "float_value": 2.002, "string_value": "two", "is": "doc"},
{"int_value": 3, "float_value": 3.003, "string_value": "three", "is": "doc"},
{"int_value": 4, "float_value": 4.004, "string_value": "four", "is": "doc"},
],
"documents": [
"this document is first and great",
"this document is second and great",
"this document is third and great",
"this document is fourth and great",
],
}
def test_where_logical_operators(api):
api.reset()
collection = api.create_collection("test_logical_operators")
collection.add(**logical_operator_records)
items = collection.get(
where={
"$and": [
{"$or": [{"int_value": {"$gte": 3}}, {"float_value": {"$lt": 1.9}}]},
{"is": "doc"},
]
}
)
assert len(items["metadatas"]) == 3
items = collection.get(
where={
"$or": [
{
"$and": [
{"int_value": {"$eq": 3}},
{"string_value": {"$eq": "three"}},
]
},
{
"$and": [
{"int_value": {"$eq": 4}},
{"string_value": {"$eq": "four"}},
]
},
]
}
)
assert len(items["metadatas"]) == 2
items = collection.get(
where={
"$or": [
{
"$and": [
{"int_value": {"$eq": 3}},
{"string_value": {"$eq": "three"}},
]
},
{
"$and": [
{"int_value": {"$eq": 4}},
{"string_value": {"$eq": "four"}},
]
},
],
"$and": [{"is": "doc"}, {"string_value": "four"}],
}
)
assert len(items["metadatas"]) == 1
def test_where_document_logical_operators(api):
api.reset()
collection = api.create_collection("test_document_logical_operators")
collection.add(**logical_operator_records)
items = collection.get(
where_document={
"$and": [
{"$contains": "first"},
{"$contains": "doc"},
]
}
)
assert len(items["metadatas"]) == 1
items = collection.get(
where_document={
"$or": [
{"$contains": "first"},
{"$contains": "second"},
]
}
)
assert len(items["metadatas"]) == 2
items = collection.get(
where_document={
"$or": [
{"$contains": "first"},
{"$contains": "second"},
]
},
where={
"int_value": {"$ne": 2},
},
)
assert len(items["metadatas"]) == 1
# endregion
records = {
"embeddings": [[0, 0, 0], [1.2, 2.24, 3.2]],
"ids": ["id1", "id2"],
"metadatas": [
{"int_value": 1, "string_value": "one", "float_value": 1.001},
{"int_value": 2},
],
"documents": ["this document is first", "this document is second"],
}
def test_query_include(api):
api.reset()
collection = api.create_collection("test_query_include")
collection.add(**records)
items = collection.query(
query_embeddings=[0, 0, 0],
include=["metadatas", "documents", "distances"],
n_results=1,
)
assert items["embeddings"] is None
assert items["ids"][0][0] == "id1"
assert items["metadatas"][0][0]["int_value"] == 1
items = collection.query(
query_embeddings=[0, 0, 0],
include=["embeddings", "documents", "distances"],
n_results=1,
)
assert items["metadatas"] is None
assert items["ids"][0][0] == "id1"
items = collection.query(
query_embeddings=[[0, 0, 0], [1, 2, 1.2]],
include=[],
n_results=2,
)
assert items["documents"] is None
assert items["metadatas"] is None
assert items["embeddings"] is None
assert items["distances"] is None
assert items["ids"][0][0] == "id1"
assert items["ids"][0][1] == "id2"
def test_get_include(api):
api.reset()
collection = api.create_collection("test_get_include")
collection.add(**records)
items = collection.get(include=["metadatas", "documents"], where={"int_value": 1})
assert items["embeddings"] is None
assert items["ids"][0] == "id1"
assert items["metadatas"][0]["int_value"] == 1
assert items["documents"][0] == "this document is first"
items = collection.get(include=["embeddings", "documents"])
assert items["metadatas"] is None
assert items["ids"][0] == "id1"
assert items["embeddings"][1][0] == 1.2
items = collection.get(include=[])
assert items["documents"] is None
assert items["metadatas"] is None
assert items["embeddings"] is None
assert items["ids"][0] == "id1"
with pytest.raises(ValueError, match="include"):
items = collection.get(include=["metadatas", "undefined"])
with pytest.raises(ValueError, match="include"):
items = collection.get(include=None)
# make sure query results are returned in the right order
def test_query_order(api):
api.reset()
collection = api.create_collection("test_query_order")
collection.add(**records)
items = collection.query(
query_embeddings=[1.2, 2.24, 3.2],
include=["metadatas", "documents", "distances"],
n_results=2,
)
assert items["documents"][0][0] == "this document is second"
assert items["documents"][0][1] == "this document is first"
# test to make sure add, get, delete error on invalid id input
def test_invalid_id(api):
api.reset()
collection = api.create_collection("test_invalid_id")
# Add with non-string id
with pytest.raises(ValueError) as e:
collection.add(embeddings=[0, 0, 0], ids=[1], metadatas=[{}])
assert "ID" in str(e.value)
# Get with non-list id
with pytest.raises(ValueError) as e:
collection.get(ids=1)
assert "ID" in str(e.value)
# Delete with malformed ids
with pytest.raises(ValueError) as e:
collection.delete(ids=["valid", 0])
assert "ID" in str(e.value)
def test_index_params(api):
# first standard add
api.reset()
collection = api.create_collection(name="test_index_params")
collection.add(**records)
items = collection.query(
query_embeddings=[0.6, 1.12, 1.6],
n_results=1,
)
assert items["distances"][0][0] > 4
# cosine
api.reset()
collection = api.create_collection(
name="test_index_params",
metadata={"hnsw:space": "cosine", "hnsw:construction_ef": 20, "hnsw:M": 5},
)
collection.add(**records)
items = collection.query(
query_embeddings=[0.6, 1.12, 1.6],
n_results=1,
)
assert items["distances"][0][0] > 0
assert items["distances"][0][0] < 1
# ip
api.reset()
collection = api.create_collection(
name="test_index_params", metadata={"hnsw:space": "ip"}
)
collection.add(**records)
items = collection.query(
query_embeddings=[0.6, 1.12, 1.6],
n_results=1,
)
assert items["distances"][0][0] < -5
def test_invalid_index_params(api):
api.reset()
with pytest.raises(Exception):
collection = api.create_collection(
name="test_index_params", metadata={"hnsw:foobar": "blarg"}
)
collection.add(**records)
with pytest.raises(Exception):
collection = api.create_collection(
name="test_index_params", metadata={"hnsw:space": "foobar"}
)
collection.add(**records)
def test_persist_index_loading_params(api, request):
api = request.getfixturevalue("local_persist_api")
api.reset()
collection = api.create_collection("test", metadata={"hnsw:space": "ip"})
collection.add(ids="id1", documents="hello")
api.persist()
del api
api2 = request.getfixturevalue("local_persist_api_cache_bust")
collection = api2.get_collection("test")
assert collection.metadata["hnsw:space"] == "ip"
nn = collection.query(
query_texts="hello",
n_results=1,
include=["embeddings", "documents", "metadatas", "distances"],
)
for key in nn.keys():
assert len(nn[key]) == 1
def test_add_large(api):
api.reset()
collection = api.create_collection("testspace")
# Test adding a large number of records
large_records = np.random.rand(2000, 512).astype(np.float32).tolist()
collection.add(
embeddings=large_records,
ids=[f"http://example.com/{i}" for i in range(len(large_records))],
)
assert collection.count() == len(large_records)
# test get_version
def test_get_version(api):
api.reset()
version = api.get_version()
# assert version matches the pattern x.y.z
import re
assert re.match(r"\d+\.\d+\.\d+", version)
# test delete_collection
def test_delete_collection(api):
api.reset()
collection = api.create_collection("test_delete_collection")
collection.add(**records)
assert len(api.list_collections()) == 1
api.delete_collection("test_delete_collection")
assert len(api.list_collections()) == 0
def test_multiple_collections(api):
embeddings1 = np.random.rand(10, 512).astype(np.float32).tolist()
embeddings2 = np.random.rand(10, 512).astype(np.float32).tolist()
ids1 = [f"http://example.com/1/{i}" for i in range(len(embeddings1))]
ids2 = [f"http://example.com/2/{i}" for i in range(len(embeddings2))]
api.reset()
coll1 = api.create_collection("coll1")
coll1.add(embeddings=embeddings1, ids=ids1)
coll2 = api.create_collection("coll2")
coll2.add(embeddings=embeddings2, ids=ids2)
assert len(api.list_collections()) == 2
assert coll1.count() == len(embeddings1)
assert coll2.count() == len(embeddings2)
results1 = coll1.query(query_embeddings=embeddings1[0], n_results=1)
results2 = coll2.query(query_embeddings=embeddings2[0], n_results=1)
assert results1["ids"][0][0] == ids1[0]
assert results2["ids"][0][0] == ids2[0]
def test_update_query(api):
api.reset()
collection = api.create_collection("test_update_query")
collection.add(**records)
updated_records = {
"ids": [records["ids"][0]],
"embeddings": [[0.1, 0.2, 0.3]],
"documents": ["updated document"],
"metadatas": [{"foo": "bar"}],
}
collection.update(**updated_records)
# test query
results = collection.query(
query_embeddings=updated_records["embeddings"],
n_results=1,
include=["embeddings", "documents", "metadatas"],
)
assert len(results["ids"][0]) == 1
assert results["ids"][0][0] == updated_records["ids"][0]
assert results["documents"][0][0] == updated_records["documents"][0]
assert results["metadatas"][0][0]["foo"] == "bar"
assert results["embeddings"][0][0] == updated_records["embeddings"][0]
initial_records = {
"embeddings": [[0, 0, 0], [1.2, 2.24, 3.2], [2.2, 3.24, 4.2]],
"ids": ["id1", "id2", "id3"],
"metadatas": [
{"int_value": 1, "string_value": "one", "float_value": 1.001},
{"int_value": 2},
{"string_value": "three"},
],
"documents": [
"this document is first",
"this document is second",
"this document is third",
],
}
new_records = {
"embeddings": [[3.0, 3.0, 1.1], [3.2, 4.24, 5.2]],
"ids": ["id1", "id4"],
"metadatas": [
{"int_value": 1, "string_value": "one_of_one", "float_value": 1.001},
{"int_value": 4},
],
"documents": [
"this document is even more first",
"this document is new and fourth",
],
}
def test_upsert(api):
api.reset()
collection = api.create_collection("test")
collection.add(**initial_records)
assert collection.count() == 3
collection.upsert(**new_records)
assert collection.count() == 4
get_result = collection.get(
include=["embeddings", "metadatas", "documents"], ids=new_records["ids"][0]
)
assert get_result["embeddings"][0] == new_records["embeddings"][0]
assert get_result["metadatas"][0] == new_records["metadatas"][0]
assert get_result["documents"][0] == new_records["documents"][0]
query_result = collection.query(
query_embeddings=get_result["embeddings"],
n_results=1,
include=["embeddings", "metadatas", "documents"],
)
assert query_result["embeddings"][0][0] == new_records["embeddings"][0]
assert query_result["metadatas"][0][0] == new_records["metadatas"][0]
assert query_result["documents"][0][0] == new_records["documents"][0]
collection.delete(ids=initial_records["ids"][2])
collection.upsert(
ids=initial_records["ids"][2],
embeddings=[[1.1, 0.99, 2.21]],
metadatas=[{"string_value": "a new string value"}],
)
assert collection.count() == 4
get_result = collection.get(
include=["embeddings", "metadatas", "documents"], ids=["id3"]
)
assert get_result["embeddings"][0] == [1.1, 0.99, 2.21]
assert get_result["metadatas"][0] == {"string_value": "a new string value"}
assert get_result["documents"][0] is None
| Ocean-master | oceandb/test/test_api.py |
import hashlib
import hypothesis
import hypothesis.strategies as st
from typing import Optional, List, Dict, Union
from typing_extensions import TypedDict
import numpy as np
import oceandb.api.types as types
import re
from hypothesis.strategies._internal.strategies import SearchStrategy
from hypothesis.errors import InvalidDefinition
from dataclasses import dataclass
# Set the random seed for reproducibility
np.random.seed(0) # unnecessary, hypothesis does this for us
# See Hypothesis documentation for creating strategies at
# https://hypothesis.readthedocs.io/en/latest/data.html
# NOTE: Because these strategies are used in state machines, we need to
# work around an issue with state machines, in which strategies that frequently
# are marked as invalid (i.e. through the use of `assume` or `.filter`) can cause the
# state machine tests to fail with an hypothesis.errors.Unsatisfiable.
# Ultimately this is because the entire state machine is run as a single Hypothesis
# example, which ends up drawing from the same strategies an enormous number of times.
# Whenever a strategy marks itself as invalid, Hypothesis tries to start the entire
# state machine run over. See https://github.com/HypothesisWorks/hypothesis/issues/3618
# Because strategy generation is all interrelated, seemingly small changes (especially
# ones called early in a test) can have an outsized effect. Generating lists with
# unique=True, or dictionaries with a min size seems especially bad.
# Please make changes to these strategies incrementally, testing to make sure they don't
# start generating unsatisfiable examples.
test_hnsw_config = {
"hnsw:construction_ef": 128,
"hnsw:search_ef": 128,
"hnsw:M": 128,
}
class RecordSet(TypedDict):
"""
A generated set of embeddings, ids, metadatas, and documents that
represent what a user would pass to the API.
"""
ids: Union[types.ID, List[types.ID]]
embeddings: Optional[Union[types.Embeddings, types.Embedding]]
metadatas: Optional[Union[List[types.Metadata], types.Metadata]]
documents: Optional[Union[List[types.Document], types.Document]]
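    # Hedged example (not in the source) of a minimal RecordSet as a caller might pass it:
    # {"ids": ["id1"], "embeddings": [[0.1, 0.2, 0.3]], "metadatas": [{"k": "v"}], "documents": ["hello"]}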
# TODO: support arbitrary text everywhere so we don't SQL-inject ourselves.
# TODO: support empty strings everywhere
sql_alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_"
safe_text = st.text(alphabet=sql_alphabet, min_size=1)
# Workaround for FastAPI json encoding peculiarities
# https://github.com/tiangolo/fastapi/blob/8ac8d70d52bb0dd9eb55ba4e22d3e383943da05c/fastapi/encoders.py#L104
safe_text = safe_text.filter(lambda s: not s.startswith("_sa"))
safe_integers = st.integers(
min_value=-(2**31), max_value=2**31 - 1
) # TODO: handle longs
safe_floats = st.floats(
allow_infinity=False, allow_nan=False, allow_subnormal=False
) # TODO: handle infinity and NAN
safe_values = [safe_text, safe_integers, safe_floats]
def one_or_both(strategy_a, strategy_b):
return st.one_of(
st.tuples(strategy_a, strategy_b),
st.tuples(strategy_a, st.none()),
st.tuples(st.none(), strategy_b),
)
# Temporarily generate only these to avoid SQL formatting issues.
legal_id_characters = (
"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_./+"
)
float_types = [np.float16, np.float32, np.float64]
int_types = [np.int16, np.int32, np.int64] # TODO: handle int types
@st.composite
def collection_name(draw) -> str:
_collection_name_re = re.compile(r"^[a-zA-Z][a-zA-Z0-9-]{1,60}[a-zA-Z0-9]$")
_ipv4_address_re = re.compile(r"^([0-9]{1,3}\.){3}[0-9]{1,3}$")
_two_periods_re = re.compile(r"\.\.")
name = draw(st.from_regex(_collection_name_re))
hypothesis.assume(not _ipv4_address_re.match(name))
hypothesis.assume(not _two_periods_re.search(name))
return name
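# Hedged examples (not in the source): a draw such as "my-collection1" satisfies the pattern
# (starts with a letter, ends alphanumeric, 3-62 characters); the assume calls discard draws
# resembling IPv4 addresses or containing consecutive periods.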
collection_metadata = st.one_of(
st.none(), st.dictionaries(safe_text, st.one_of(*safe_values))
)
# TODO: Use a hypothesis strategy while maintaining embedding uniqueness
# Or handle duplicate embeddings within a known epsilon
def create_embeddings(dim: int, count: int, dtype: np.dtype) -> types.Embeddings:
return (
np.random.uniform(
low=-1.0,
high=1.0,
size=(count, dim),
)
.astype(dtype)
.tolist()
)
class hashing_embedding_function(types.EmbeddingFunction):
def __init__(self, dim: int, dtype: np.dtype) -> None:
self.dim = dim
self.dtype = dtype
def __call__(self, texts: types.Documents) -> types.Embeddings:
# Hash the texts and convert to hex strings
hashed_texts = [
list(hashlib.sha256(text.encode("utf-8")).hexdigest()) for text in texts
]
# Pad with repetition, or truncate the hex strings to the desired dimension
padded_texts = [
text * (self.dim // len(text)) + text[: self.dim % len(text)]
for text in hashed_texts
]
# Convert the hex strings to dtype
return np.array(
[[int(char, 16) / 15.0 for char in text] for text in padded_texts],
dtype=self.dtype,
).tolist()
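    # Hedged illustration (not in the source): for dim=4, sha256("abc") gives a 64-character
    # hex digest; only the first 4 hex characters are kept and each maps to int(char, 16) / 15.0,
    # so identical texts always produce identical embeddings of the requested dimension.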
def embedding_function_strategy(
dim: int, dtype: np.dtype
) -> st.SearchStrategy[types.EmbeddingFunction]:
return st.just(hashing_embedding_function(dim, dtype))
@dataclass
class Collection:
name: str
metadata: Optional[types.Metadata]
dimension: int
dtype: np.dtype
known_metadata_keys: Dict[str, st.SearchStrategy]
known_document_keywords: List[str]
has_documents: bool = False
has_embeddings: bool = False
embedding_function: Optional[types.EmbeddingFunction] = None
@st.composite
def collections(
draw,
add_filterable_data=False,
with_hnsw_params=False,
has_embeddings: Optional[bool] = None,
has_documents: Optional[bool] = None,
) -> Collection:
"""Strategy to generate a Collection object. If add_filterable_data is True, then known_metadata_keys and known_document_keywords will be populated with consistent data."""
assert not ((has_embeddings is False) and (has_documents is False))
name = draw(collection_name())
metadata = draw(collection_metadata)
dimension = draw(st.integers(min_value=2, max_value=2048))
dtype = draw(st.sampled_from(float_types))
if with_hnsw_params:
if metadata is None:
metadata = {}
metadata.update(test_hnsw_config)
# Sometimes, select a space at random
if draw(st.booleans()):
# TODO: pull the distance functions from a source of truth that lives not
# in tests once https://github.com/ocean-core/issues/issues/61 lands
metadata["hnsw:space"] = draw(st.sampled_from(["cosine", "l2", "ip"]))
known_metadata_keys = {}
if add_filterable_data:
while len(known_metadata_keys) < 5:
key = draw(safe_text)
known_metadata_keys[key] = draw(st.sampled_from(safe_values))
if has_documents is None:
has_documents = draw(st.booleans())
if has_documents and add_filterable_data:
known_document_keywords = draw(st.lists(safe_text, min_size=5, max_size=5))
else:
known_document_keywords = []
if not has_documents:
has_embeddings = True
else:
if has_embeddings is None:
has_embeddings = draw(st.booleans())
embedding_function = draw(embedding_function_strategy(dimension, dtype))
return Collection(
name=name,
metadata=metadata,
dimension=dimension,
dtype=dtype,
known_metadata_keys=known_metadata_keys,
has_documents=has_documents,
known_document_keywords=known_document_keywords,
has_embeddings=has_embeddings,
embedding_function=embedding_function,
)
@st.composite
def metadata(draw, collection: Collection):
"""Strategy for generating metadata that could be a part of the given collection"""
# First draw a random dictionary.
md = draw(st.dictionaries(safe_text, st.one_of(*safe_values)))
# Then, remove keys that overlap with the known keys for the coll
# to avoid type errors when comparing.
if collection.known_metadata_keys:
for key in collection.known_metadata_keys.keys():
if key in md:
del md[key]
# Finally, add in some of the known keys for the collection
md.update(
draw(st.fixed_dictionaries({}, optional=collection.known_metadata_keys))
)
return md
@st.composite
def document(draw, collection: Collection):
"""Strategy for generating documents that could be a part of the given collection"""
if collection.known_document_keywords:
known_words_st = st.sampled_from(collection.known_document_keywords)
else:
known_words_st = st.text(min_size=1)
random_words_st = st.text(min_size=1)
words = draw(st.lists(st.one_of(known_words_st, random_words_st), min_size=1))
return " ".join(words)
@st.composite
def record(draw, collection: Collection, id_strategy=safe_text):
md = draw(metadata(collection))
if collection.has_embeddings:
embedding = create_embeddings(collection.dimension, 1, collection.dtype)[0]
else:
embedding = None
if collection.has_documents:
doc = draw(document(collection))
else:
doc = None
return {
"id": draw(id_strategy),
"embedding": embedding,
"metadata": md,
"document": doc,
}
@st.composite
def recordsets(
draw,
collection_strategy=collections(),
id_strategy=safe_text,
min_size=1,
max_size=50,
) -> RecordSet:
collection = draw(collection_strategy)
records = draw(
st.lists(record(collection, id_strategy), min_size=min_size, max_size=max_size)
)
records = {r["id"]: r for r in records}.values() # Remove duplicates
ids = [r["id"] for r in records]
embeddings = (
[r["embedding"] for r in records] if collection.has_embeddings else None
)
metadatas = [r["metadata"] for r in records]
documents = [r["document"] for r in records] if collection.has_documents else None
# in the case where we have a single record, sometimes exercise
# the code that handles individual values rather than lists
if len(records) == 1:
if draw(st.booleans()):
ids = ids[0]
if collection.has_embeddings and draw(st.booleans()):
embeddings = embeddings[0]
if draw(st.booleans()):
metadatas = metadatas[0]
if collection.has_documents and draw(st.booleans()):
documents = documents[0]
return {
"ids": ids,
"embeddings": embeddings,
"metadatas": metadatas,
"documents": documents,
}
# This class is mostly cloned from hypothesis.stateful.RuleStrategy,
# but always runs all the rules, instead of using a FeatureStrategy to
# enable/disable rules. Disabled rules cause the entire test to be marked invalid and,
# combined with the complexity of our other strategies, this leads to an
# unacceptably high incidence of hypothesis.errors.Unsatisfiable.
class DeterministicRuleStrategy(SearchStrategy):
def __init__(self, machine):
super().__init__()
self.machine = machine
self.rules = list(machine.rules())
# The order is a bit arbitrary. Primarily we're trying to group rules
# that write to the same location together, and to put rules with no
# target first as they have less effect on the structure. We order from
# fewer to more arguments on grounds that it will plausibly need less
# data. This probably won't work especially well and we could be
# smarter about it, but it's better than just doing it in definition
# order.
self.rules.sort(
key=lambda rule: (
sorted(rule.targets),
len(rule.arguments),
rule.function.__name__,
)
)
def __repr__(self):
return "{}(machine={}({{...}}))".format(
self.__class__.__name__,
self.machine.__class__.__name__,
)
def do_draw(self, data):
if not any(self.is_valid(rule) for rule in self.rules):
msg = f"No progress can be made from state {self.machine!r}"
raise InvalidDefinition(msg) from None
rule = data.draw(st.sampled_from([r for r in self.rules if self.is_valid(r)]))
argdata = data.draw(rule.arguments_strategy)
return (rule, argdata)
def is_valid(self, rule):
if not all(precond(self.machine) for precond in rule.preconditions):
return False
for b in rule.bundles:
bundle = self.machine.bundle(b.name)
if not bundle:
return False
return True
@st.composite
def where_clause(draw, collection):
"""Generate a filter that could be used in a query against the given collection"""
known_keys = sorted(collection.known_metadata_keys.keys())
key = draw(st.sampled_from(known_keys))
value = draw(collection.known_metadata_keys[key])
legal_ops = [None, "$eq", "$ne"]
if not isinstance(value, str):
legal_ops = ["$gt", "$lt", "$lte", "$gte"] + legal_ops
op = draw(st.sampled_from(legal_ops))
if op is None:
return {key: value}
else:
return {key: {op: value}}
@st.composite
def where_doc_clause(draw, collection):
"""Generate a where_document filter that could be used against the given collection"""
if collection.known_document_keywords:
word = draw(st.sampled_from(collection.known_document_keywords))
else:
word = draw(safe_text)
return {"$contains": word}
@st.composite
def binary_operator_clause(draw, base_st):
op = draw(st.sampled_from(["$and", "$or"]))
return {op: [draw(base_st), draw(base_st)]}
@st.composite
def recursive_where_clause(draw, collection):
base_st = where_clause(collection)
return draw(st.recursive(base_st, binary_operator_clause))
@st.composite
def recursive_where_doc_clause(draw, collection):
base_st = where_doc_clause(collection)
return draw(st.recursive(base_st, binary_operator_clause))
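# Illustrative examples of what the recursive strategies above can draw, assuming the
# collection knows a metadata key "k" and a document keyword "word" (hypothetical values):
#   where: {"k": {"$gt": 1}} or {"$and": [{"k": 1}, {"k": {"$ne": 2}}]}
#   where_document: {"$or": [{"$contains": "word"}, {"$contains": "word"}]}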
class Filter(TypedDict):
where: Optional[Dict[str, Union[str, int, float]]]
ids: Optional[Union[str, List[str]]]
where_document: Optional[types.WhereDocument]
@st.composite
def filters(
draw,
collection_st: st.SearchStrategy[Collection],
recordset_st: st.SearchStrategy[RecordSet],
include_all_ids=False,
) -> Filter:
collection = draw(collection_st)
recordset = draw(recordset_st)
where_clause = draw(st.one_of(st.none(), recursive_where_clause(collection)))
where_document_clause = draw(
st.one_of(st.none(), recursive_where_doc_clause(collection))
)
ids = recordset["ids"]
# Record sets can be a value instead of a list of values if there is only one record
if isinstance(ids, str):
ids = [ids]
if not include_all_ids:
ids = draw(st.one_of(st.none(), st.lists(st.sampled_from(ids))))
if ids is not None:
# Remove duplicates since hypothesis samples with replacement
ids = list(set(ids))
# Test both the single value list and the unwrapped single value case
if ids is not None and len(ids) == 1 and draw(st.booleans()):
ids = ids[0]
return {"where": where_clause, "where_document": where_document_clause, "ids": ids}
| Ocean-master | oceandb/test/property/strategies.py |
import pytest
import logging
import hypothesis.strategies as st
from typing import Set
from dataclasses import dataclass
import oceandb.errors as errors
from oceandb.api import API
from oceandb.api.models.Collection import Collection
import oceandb.test.property.strategies as strategies
from hypothesis.stateful import (
Bundle,
RuleBasedStateMachine,
rule,
initialize,
precondition,
consumes,
run_state_machine_as_test,
multiple,
invariant,
)
from collections import defaultdict
import oceandb.test.property.invariants as invariants
traces = defaultdict(lambda: 0)
def trace(key):
global traces
traces[key] += 1
def print_traces():
global traces
for key, value in traces.items():
print(f"{key}: {value}")
dtype_shared_st = st.shared(st.sampled_from(strategies.float_types), key="dtype")
dimension_shared_st = st.shared(
st.integers(min_value=2, max_value=2048), key="dimension"
)
@dataclass
class EmbeddingStateMachineStates:
initialize = "initialize"
add_embeddings = "add_embeddings"
delete_by_ids = "delete_by_ids"
update_embeddings = "update_embeddings"
upsert_embeddings = "upsert_embeddings"
collection_st = st.shared(strategies.collections(with_hnsw_params=True), key="coll")
class EmbeddingStateMachine(RuleBasedStateMachine):
collection: Collection
embedding_ids: Bundle = Bundle("embedding_ids")
def __init__(self, api: API):
super().__init__()
self.api = api
self._rules_strategy = strategies.DeterministicRuleStrategy(self)
@initialize(collection=collection_st)
def initialize(self, collection: strategies.Collection):
self.api.reset()
self.collection = self.api.create_collection(
name=collection.name,
metadata=collection.metadata,
embedding_function=collection.embedding_function,
)
self.embedding_function = collection.embedding_function
trace("init")
self.on_state_change(EmbeddingStateMachineStates.initialize)
self.embeddings = {
"ids": [],
"embeddings": [],
"metadatas": [],
"documents": [],
}
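# self.embeddings is a shadow model of the expected collection contents; the rules
# below keep it in sync with the real collection and the invariants compare the two.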
@rule(target=embedding_ids, embedding_set=strategies.recordsets(collection_st))
def add_embeddings(self, embedding_set):
trace("add_embeddings")
self.on_state_change(EmbeddingStateMachineStates.add_embeddings)
normalized_embedding_set = invariants.wrap_all(embedding_set)
if len(normalized_embedding_set["ids"]) > 0:
trace("add_more_embeddings")
if set(normalized_embedding_set["ids"]).intersection(
set(self.embeddings["ids"])
):
with pytest.raises(errors.IDAlreadyExistsError):
self.collection.add(**embedding_set)
return multiple()
else:
self.collection.add(**embedding_set)
self._upsert_embeddings(embedding_set)
return multiple(*normalized_embedding_set["ids"])
@precondition(lambda self: len(self.embeddings["ids"]) > 20)
@rule(ids=st.lists(consumes(embedding_ids), min_size=1, max_size=20))
def delete_by_ids(self, ids):
trace("remove embeddings")
self.on_state_change(EmbeddingStateMachineStates.delete_by_ids)
indices_to_remove = [self.embeddings["ids"].index(id) for id in ids]
self.collection.delete(ids=ids)
self._remove_embeddings(set(indices_to_remove))
# Removing the precondition causes the tests to frequently fail as "unsatisfiable"
# Using a value < 5 causes retries and lowers the number of valid samples
@precondition(lambda self: len(self.embeddings["ids"]) >= 5)
@rule(
embedding_set=strategies.recordsets(
collection_strategy=collection_st,
id_strategy=embedding_ids,
min_size=1,
max_size=5,
)
)
def update_embeddings(self, embedding_set):
trace("update embeddings")
self.on_state_change(EmbeddingStateMachineStates.update_embeddings)
self.collection.update(**embedding_set)
self._upsert_embeddings(embedding_set)
# Using a value < 3 causes more retries and lowers the number of valid samples
@precondition(lambda self: len(self.embeddings["ids"]) >= 3)
@rule(
embedding_set=strategies.recordsets(
collection_strategy=collection_st,
id_strategy=st.one_of(embedding_ids, strategies.safe_text),
min_size=1,
max_size=5,
)
)
def upsert_embeddings(self, embedding_set):
trace("upsert embeddings")
self.on_state_change(EmbeddingStateMachineStates.upsert_embeddings)
self.collection.upsert(**embedding_set)
self._upsert_embeddings(embedding_set)
@invariant()
def count(self):
invariants.count(self.collection, self.embeddings) # type: ignore
@invariant()
def no_duplicates(self):
invariants.no_duplicates(self.collection)
@invariant()
def ann_accuracy(self):
invariants.ann_accuracy(
collection=self.collection, record_set=self.embeddings, min_recall=0.95, embedding_function=self.embedding_function # type: ignore
)
def _upsert_embeddings(self, embeddings: strategies.RecordSet):
embeddings = invariants.wrap_all(embeddings)
for idx, id in enumerate(embeddings["ids"]):
if id in self.embeddings["ids"]:
target_idx = self.embeddings["ids"].index(id)
if "embeddings" in embeddings and embeddings["embeddings"] is not None:
self.embeddings["embeddings"][target_idx] = embeddings[
"embeddings"
][idx]
else:
self.embeddings["embeddings"][target_idx] = self.embedding_function(
[embeddings["documents"][idx]]
)[0]
if "metadatas" in embeddings and embeddings["metadatas"] is not None:
self.embeddings["metadatas"][target_idx] = embeddings["metadatas"][
idx
]
if "documents" in embeddings and embeddings["documents"] is not None:
self.embeddings["documents"][target_idx] = embeddings["documents"][
idx
]
else:
# Add path
self.embeddings["ids"].append(id)
if "embeddings" in embeddings and embeddings["embeddings"] is not None:
self.embeddings["embeddings"].append(embeddings["embeddings"][idx])
else:
self.embeddings["embeddings"].append(
self.embedding_function([embeddings["documents"][idx]])[0]
)
if "metadatas" in embeddings and embeddings["metadatas"] is not None:
self.embeddings["metadatas"].append(embeddings["metadatas"][idx])
else:
self.embeddings["metadatas"].append(None)
if "documents" in embeddings and embeddings["documents"] is not None:
self.embeddings["documents"].append(embeddings["documents"][idx])
else:
self.embeddings["documents"].append(None)
def _remove_embeddings(self, indices_to_remove: Set[int]):
indices_list = list(indices_to_remove)
indices_list.sort(reverse=True)
for i in indices_list:
del self.embeddings["ids"][i]
del self.embeddings["embeddings"][i]
del self.embeddings["metadatas"][i]
del self.embeddings["documents"][i]
def on_state_change(self, new_state):
pass
def test_embeddings_state(caplog, api):
caplog.set_level(logging.ERROR)
run_state_machine_as_test(lambda: EmbeddingStateMachine(api))
print_traces()
def test_multi_add(api: API):
api.reset()
coll = api.create_collection(name="foo")
coll.add(ids=["a"], embeddings=[[0.0]])
assert coll.count() == 1
with pytest.raises(errors.IDAlreadyExistsError):
coll.add(ids=["a"], embeddings=[[0.0]])
assert coll.count() == 1
results = coll.get()
assert results["ids"] == ["a"]
coll.delete(ids=["a"])
assert coll.count() == 0
def test_dup_add(api: API):
api.reset()
coll = api.create_collection(name="foo")
with pytest.raises(errors.DuplicateIDError):
coll.add(ids=["a", "a"], embeddings=[[0.0], [1.1]])
with pytest.raises(errors.DuplicateIDError):
coll.upsert(ids=["a", "a"], embeddings=[[0.0], [1.1]])
# TODO: Use SQL escaping correctly internally
@pytest.mark.xfail(reason="We don't properly escape SQL internally, causing problems")
def test_escape_chars_in_ids(api: API):
api.reset()
id = "\x1f"
coll = api.create_collection(name="foo")
coll.add(ids=[id], embeddings=[[0.0]])
assert coll.count() == 1
coll.delete(ids=[id])
assert coll.count() == 0
| Ocean-master | oceandb/test/property/test_embeddings.py |
import pytest
import hypothesis.strategies as st
from hypothesis import given, settings
from oceandb.api import API
import oceandb.test.property.strategies as strategies
import oceandb.test.property.invariants as invariants
collection_st = st.shared(strategies.collections(with_hnsw_params=True), key="coll")
@given(collection=collection_st, embeddings=strategies.recordsets(collection_st))
@settings(deadline=None)
def test_add(
api: API, collection: strategies.Collection, embeddings: strategies.RecordSet
):
api.reset()
# TODO: Generative embedding functions
coll = api.create_collection(
name=collection.name,
metadata=collection.metadata,
embedding_function=collection.embedding_function,
)
coll.add(**embeddings)
embeddings = invariants.wrap_all(embeddings)
invariants.count(coll, embeddings)
n_results = max(1, (len(embeddings["ids"]) // 10))
invariants.ann_accuracy(
coll,
embeddings,
n_results=n_results,
embedding_function=collection.embedding_function,
)
# TODO: This test fails right now because the ids are not sorted by the input order
@pytest.mark.xfail(
reason="This is expected to fail right now. We should change the API to sort the \
ids by input order."
)
def test_out_of_order_ids(api: API):
api.reset()
ooo_ids = [
"40",
"05",
"8",
"6",
"10",
"01",
"00",
"3",
"04",
"20",
"02",
"9",
"30",
"11",
"13",
"2",
"0",
"7",
"06",
"5",
"50",
"12",
"03",
"4",
"1",
]
coll = api.create_collection("test", embedding_function=lambda x: [1, 2, 3])
coll.add(ids=ooo_ids, embeddings=[[1, 2, 3] for _ in range(len(ooo_ids))])
get_ids = coll.get(ids=ooo_ids)["ids"]
assert get_ids == ooo_ids
| Ocean-master | oceandb/test/property/test_add.py |
import logging
import multiprocessing
from typing import Generator, Callable
from hypothesis import given
import hypothesis.strategies as st
import pytest
import oceandb
from oceandb.api import API
from oceandb.config import Settings
import oceandb.test.property.strategies as strategies
import oceandb.test.property.invariants as invariants
from oceandb.test.property.test_embeddings import (
EmbeddingStateMachine,
EmbeddingStateMachineStates,
)
from hypothesis.stateful import run_state_machine_as_test, rule, precondition
import os
import shutil
import tempfile
CreatePersistAPI = Callable[[], API]
configurations = [
Settings(
ocean_api_impl="local",
ocean_db_impl="duckdb+parquet",
persist_directory=tempfile.gettempdir() + "/tests",
)
]
@pytest.fixture(scope="module", params=configurations)
def settings(request) -> Generator[Settings, None, None]:
configuration = request.param
yield configuration
save_path = configuration.persist_directory
# Remove if it exists
if os.path.exists(save_path):
shutil.rmtree(save_path)
collection_st = st.shared(strategies.collections(with_hnsw_params=True), key="coll")
@given(
collection_strategy=collection_st,
embeddings_strategy=strategies.recordsets(collection_st),
)
def test_persist(
settings: Settings,
collection_strategy: strategies.Collection,
embeddings_strategy: strategies.RecordSet,
):
api_1 = oceandb.Client(settings)
api_1.reset()
coll = api_1.create_collection(
name=collection_strategy.name,
metadata=collection_strategy.metadata,
embedding_function=collection_strategy.embedding_function,
)
coll.add(**embeddings_strategy)
invariants.count(coll, embeddings_strategy)
invariants.metadatas_match(coll, embeddings_strategy)
invariants.documents_match(coll, embeddings_strategy)
invariants.ids_match(coll, embeddings_strategy)
invariants.ann_accuracy(
coll,
embeddings_strategy,
embedding_function=collection_strategy.embedding_function,
)
api_1.persist()
del api_1
api_2 = oceandb.Client(settings)
coll = api_2.get_collection(
name=collection_strategy.name,
embedding_function=collection_strategy.embedding_function,
)
invariants.count(coll, embeddings_strategy)
invariants.metadatas_match(coll, embeddings_strategy)
invariants.documents_match(coll, embeddings_strategy)
invariants.ids_match(coll, embeddings_strategy)
invariants.ann_accuracy(
coll,
embeddings_strategy,
embedding_function=collection_strategy.embedding_function,
)
def load_and_check(settings: Settings, collection_name: str, embeddings_set, conn):
try:
api = oceandb.Client(settings)
coll = api.get_collection(
name=collection_name, embedding_function=lambda x: None
)
invariants.count(coll, embeddings_set)
invariants.metadatas_match(coll, embeddings_set)
invariants.documents_match(coll, embeddings_set)
invariants.ids_match(coll, embeddings_set)
invariants.ann_accuracy(coll, embeddings_set)
except Exception as e:
conn.send(e)
raise e
class PersistEmbeddingsStateMachineStates(EmbeddingStateMachineStates):
persist = "persist"
class PersistEmbeddingsStateMachine(EmbeddingStateMachine):
def __init__(self, api: API, settings: Settings):
self.api = api
self.settings = settings
self.last_persist_delay = 10
self.api.reset()
super().__init__(self.api)
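# last_persist_delay gates the persist rule below: it is reset to 10 on every persist
# and decremented on every other state change, so persist can only fire after roughly
# ten intervening operations.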
@precondition(lambda self: len(self.embeddings["ids"]) >= 1)
@precondition(lambda self: self.last_persist_delay <= 0)
@rule()
def persist(self):
self.on_state_change(PersistEmbeddingsStateMachineStates.persist)
self.api.persist()
collection_name = self.collection.name
# Create a new process and then inside the process run the invariants
# TODO: Once we switch off of duckdb and onto sqlite we can remove this
ctx = multiprocessing.get_context("spawn")
conn1, conn2 = multiprocessing.Pipe()
p = ctx.Process(
target=load_and_check,
args=(self.settings, collection_name, self.embeddings, conn2),
)
p.start()
p.join()
if conn1.poll():
e = conn1.recv()
raise e
def on_state_change(self, new_state):
if new_state == PersistEmbeddingsStateMachineStates.persist:
self.last_persist_delay = 10
else:
self.last_persist_delay -= 1
def test_persist_embeddings_state(caplog, settings: Settings):
caplog.set_level(logging.ERROR)
api = oceandb.Client(settings)
run_state_machine_as_test(
lambda: PersistEmbeddingsStateMachine(settings=settings, api=api)
)
| Ocean-master | oceandb/test/property/test_persist.py |
import sys
import os
import shutil
import subprocess
import tempfile
from typing import Generator, Tuple
from hypothesis import given, settings
import hypothesis.strategies as st
import pytest
import json
from urllib import request
from oceandb.api import API
import oceandb.test.property.strategies as strategies
import oceandb.test.property.invariants as invariants
from importlib.util import spec_from_file_location, module_from_spec
from packaging import version as packaging_version
import re
import multiprocessing
from oceandb import Client
from oceandb.config import Settings
MINIMUM_VERSION = "0.3.20"
COLLECTION_NAME_LOWERCASE_VERSION = "0.3.21"
version_re = re.compile(r"^[0-9]+\.[0-9]+\.[0-9]+$")
def _patch_uppercase_coll_name(
collection: strategies.Collection, embeddings: strategies.RecordSet
):
"""Old versions didn't handle uppercase characters in collection names"""
collection.name = collection.name.lower()
def _patch_empty_dict_metadata(
collection: strategies.Collection, embeddings: strategies.RecordSet
):
"""Old versions do the wrong thing when metadata is a single empty dict"""
if embeddings["metadatas"] == {}:
embeddings["metadatas"] = None
version_patches = [
("0.3.21", _patch_uppercase_coll_name),
("0.3.21", _patch_empty_dict_metadata),
]
def patch_for_version(
version, collection: strategies.Collection, embeddings: strategies.RecordSet
):
"""Override aspects of the collection and embeddings, before testing, to account for
breaking changes in old versions."""
for patch_version, patch in version_patches:
if packaging_version.Version(version) <= packaging_version.Version(
patch_version
):
patch(collection, embeddings)
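# For example, with the patches registered above, testing against version "0.3.20"
# applies both patches (0.3.20 <= 0.3.21), while testing against "0.3.22" applies none.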
def versions():
"""Returns the pinned minimum version and the latest version of oceandb."""
url = "https://pypi.org/pypi/oceandb/json"
data = json.load(request.urlopen(request.Request(url)))
versions = list(data["releases"].keys())
# Older versions on pypi contain "devXYZ" suffixes
versions = [v for v in versions if version_re.match(v)]
versions.sort(key=packaging_version.Version)
return [MINIMUM_VERSION, versions[-1]]
def configurations(versions):
return [
(
version,
Settings(
ocean_api_impl="local",
ocean_db_impl="duckdb+parquet",
persist_directory=tempfile.gettempdir() + "/tests/" + version + "/",
),
)
for version in versions
]
test_old_versions = versions()
base_install_dir = tempfile.gettempdir() + "/persistence_test_oceandb_versions"
# This fixture is not shared with the rest of the tests because it is unique in how it
# installs the versions of oceandb
@pytest.fixture(scope="module", params=configurations(test_old_versions))
def version_settings(request) -> Generator[Tuple[str, Settings], None, None]:
configuration = request.param
version = configuration[0]
install_version(version)
yield configuration
# Cleanup the installed version
path = get_path_to_version_install(version)
shutil.rmtree(path)
# Cleanup the persisted data
data_path = configuration[1].persist_directory
if os.path.exists(data_path):
shutil.rmtree(data_path)
def get_path_to_version_install(version):
return base_install_dir + "/" + version
def get_path_to_version_library(version):
return get_path_to_version_install(version) + "/oceandb/__init__.py"
def install_version(version):
# Check if already installed
version_library = get_path_to_version_library(version)
if os.path.exists(version_library):
return
path = get_path_to_version_install(version)
install(f"oceandb=={version}", path)
def install(pkg, path):
# -q -q to suppress pip output to ERROR level
# https://pip.pypa.io/en/stable/cli/pip/#quiet
print(f"Installing oceandb version {pkg} to {path}")
return subprocess.check_call(
[
sys.executable,
"-m",
"pip",
"-q",
"-q",
"install",
pkg,
"--target={}".format(path),
]
)
def switch_to_version(version):
module_name = "oceandb"
# Remove old version from sys.modules, except test modules
old_modules = {
n: m
for n, m in sys.modules.items()
if n == module_name or (n.startswith(module_name + "."))
}
for n in old_modules:
del sys.modules[n]
# Load the target version and override the path to the installed version
# https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly
path = get_path_to_version_library(version)
sys.path.insert(0, get_path_to_version_install(version))
spec = spec_from_file_location(module_name, path)
assert spec is not None and spec.loader is not None
module = module_from_spec(spec)
spec.loader.exec_module(module)
assert module.__version__ == version
sys.modules[module_name] = module
return module
def persist_generated_data_with_old_version(
version,
settings,
collection_strategy: strategies.Collection,
embeddings_strategy: strategies.RecordSet,
):
old_module = switch_to_version(version)
api: API = old_module.Client(settings)
api.reset()
coll = api.create_collection(
name=collection_strategy.name,
metadata=collection_strategy.metadata,
embedding_function=lambda x: None,
)
coll.add(**embeddings_strategy)
# We can't use the invariants module here because it uses the current version
# Just use some basic checks for sanity and manual testing where you break the new
# version
check_embeddings = invariants.wrap_all(embeddings_strategy)
# Check count
assert coll.count() == len(check_embeddings["embeddings"] or [])
# Check ids
result = coll.get()
actual_ids = result["ids"]
embedding_id_to_index = {id: i for i, id in enumerate(check_embeddings["ids"])}
actual_ids = sorted(actual_ids, key=lambda id: embedding_id_to_index[id])
assert actual_ids == check_embeddings["ids"]
api.persist()
del api
# Since we can't pickle the embedding function, we always generate record sets with embeddings
collection_st = st.shared(
strategies.collections(with_hnsw_params=True, has_embeddings=True), key="coll"
)
@given(
collection_strategy=collection_st,
embeddings_strategy=strategies.recordsets(collection_st),
)
@pytest.mark.skipif(
sys.version_info.major < 3
or (sys.version_info.major == 3 and sys.version_info.minor <= 7),
reason="The mininum supported versions of ocean do not work with python <= 3.7",
)
@settings(deadline=None)
def test_cycle_versions(
version_settings: Tuple[str, Settings],
collection_strategy: strategies.Collection,
embeddings_strategy: strategies.RecordSet,
):
# # Test backwards compatibility
# # For the current version, ensure that we can load a collection from
# # the previous versions
version, settings = version_settings
patch_for_version(version, collection_strategy, embeddings_strategy)
# Can't pickle a function, and we won't need them
collection_strategy.embedding_function = None
collection_strategy.known_metadata_keys = {}
# Run the task in a separate process to avoid polluting the current process
# with the old version. Using spawn instead of fork to avoid sharing the
# current process memory which would cause the old version to be loaded
ctx = multiprocessing.get_context("spawn")
p = ctx.Process(
target=persist_generated_data_with_old_version,
args=(version, settings, collection_strategy, embeddings_strategy),
)
p.start()
p.join()
# Switch to the current version (local working directory) and check the invariants
# are preserved for the collection
api = Client(settings)
coll = api.get_collection(
name=collection_strategy.name, embedding_function=lambda x: None
)
invariants.count(coll, embeddings_strategy)
invariants.metadatas_match(coll, embeddings_strategy)
invariants.documents_match(coll, embeddings_strategy)
invariants.ids_match(coll, embeddings_strategy)
invariants.ann_accuracy(coll, embeddings_strategy)
| Ocean-master | oceandb/test/property/test_cross_version_persist.py |
import pytest
import logging
import hypothesis.strategies as st
import oceandb.test.property.strategies as strategies
from oceandb.api import API
import oceandb.api.types as types
from hypothesis.stateful import (
Bundle,
RuleBasedStateMachine,
rule,
initialize,
multiple,
consumes,
run_state_machine_as_test,
MultipleResults,
)
from typing import Optional, Set
class CollectionStateMachine(RuleBasedStateMachine): # type: ignore
collections: Bundle
existing: Set[str]
collections = Bundle("collections")
def __init__(self, api: API):
super().__init__()
self.existing = set()
self.api = api
@initialize() # type: ignore
def initialize(self) -> None:
self.api.reset()
self.existing = set()
@rule(target=collections, coll=strategies.collections()) # type: ignore
def create_coll(
self, coll: strategies.Collection
) -> MultipleResults[strategies.Collection]:
if coll.name in self.existing:
with pytest.raises(Exception):
c = self.api.create_collection(
name=coll.name,
metadata=coll.metadata,
embedding_function=coll.embedding_function,
)
return multiple()
c = self.api.create_collection(
name=coll.name,
metadata=coll.metadata,
embedding_function=coll.embedding_function,
)
self.existing.add(coll.name)
assert c.name == coll.name
assert c.metadata == coll.metadata
return multiple(coll)
@rule(coll=collections) # type: ignore
def get_coll(self, coll: strategies.Collection) -> None:
if coll.name in self.existing:
c = self.api.get_collection(name=coll.name)
assert c.name == coll.name
assert c.metadata == coll.metadata
else:
with pytest.raises(Exception):
self.api.get_collection(name=coll.name)
@rule(coll=consumes(collections)) # type: ignore
def delete_coll(self, coll: strategies.Collection) -> None:
if coll.name in self.existing:
self.api.delete_collection(name=coll.name)
self.existing.remove(coll.name)
else:
with pytest.raises(Exception):
self.api.delete_collection(name=coll.name)
with pytest.raises(Exception):
self.api.get_collection(name=coll.name)
@rule() # type: ignore
def list_collections(self) -> None:
colls = self.api.list_collections()
assert len(colls) == len(self.existing)
for c in colls:
assert c.name in self.existing
@rule(
target=collections,
new_metadata=st.one_of(st.none(), strategies.collection_metadata),
coll=st.one_of(consumes(collections), strategies.collections()),
) # type: ignore
def get_or_create_coll(
self,
coll: strategies.Collection,
new_metadata: Optional[types.Metadata],
) -> MultipleResults[strategies.Collection]:
# In our current system, you can create with None but not update with None
# An update with none is a no-op for the update of that field
if coll.name not in self.existing:
coll.metadata = new_metadata
else:
coll.metadata = new_metadata if new_metadata is not None else coll.metadata
c = self.api.get_or_create_collection(
name=coll.name,
metadata=new_metadata,
embedding_function=coll.embedding_function,
)
assert c.name == coll.name
assert c.metadata == coll.metadata
self.existing.add(coll.name)
return multiple(coll)
@rule(
target=collections,
coll=consumes(collections),
new_metadata=strategies.collection_metadata,
new_name=st.one_of(st.none(), strategies.collection_name()),
) # type: ignore
def modify_coll(
self,
coll: strategies.Collection,
new_metadata: types.Metadata,
new_name: Optional[str],
) -> MultipleResults[strategies.Collection]:
if coll.name not in self.existing:
with pytest.raises(Exception):
c = self.api.get_collection(name=coll.name)
return multiple()
c = self.api.get_collection(name=coll.name)
if new_metadata is not None:
coll.metadata = new_metadata
if new_name is not None:
self.existing.remove(coll.name)
self.existing.add(new_name)
coll.name = new_name
c.modify(metadata=new_metadata, name=new_name)
c = self.api.get_collection(name=coll.name)
assert c.name == coll.name
assert c.metadata == coll.metadata
return multiple(coll)
def test_collections(caplog: pytest.LogCaptureFixture, api: API) -> None:
caplog.set_level(logging.ERROR)
run_state_machine_as_test(lambda: CollectionStateMachine(api))
| Ocean-master | oceandb/test/property/test_collections.py |
import math
from oceandb.test.property.strategies import RecordSet
from typing import Callable, Optional, Union, List, TypeVar
from typing_extensions import Literal
import numpy as np
from oceandb.api import types
from oceandb.api.models.Collection import Collection
from hypothesis import note
from hypothesis.errors import InvalidArgument
T = TypeVar("T")
def maybe_wrap(value: Union[T, List[T]]) -> Union[None, List[T]]:
"""Wrap a value in a list if it is not a list"""
if value is None:
return None
elif isinstance(value, List):
return value
else:
return [value]
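# Illustrative behavior: maybe_wrap(None) -> None, maybe_wrap("id1") -> ["id1"],
# and maybe_wrap(["id1", "id2"]) is returned unchanged.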
def wrap_all(embeddings: RecordSet) -> RecordSet:
"""Ensure that an embedding set has lists for all its values"""
if embeddings["embeddings"] is None:
embedding_list = None
elif isinstance(embeddings["embeddings"], list):
if len(embeddings["embeddings"]) > 0:
if isinstance(embeddings["embeddings"][0], list):
embedding_list = embeddings["embeddings"]
else:
embedding_list = [embeddings["embeddings"]]
else:
embedding_list = []
else:
raise InvalidArgument("embeddings must be a list, list of lists, or None")
return {
"ids": maybe_wrap(embeddings["ids"]), # type: ignore
"documents": maybe_wrap(embeddings["documents"]), # type: ignore
"metadatas": maybe_wrap(embeddings["metadatas"]), # type: ignore
"embeddings": embedding_list,
}
def count(collection: Collection, embeddings: RecordSet):
"""The given collection count is equal to the number of embeddings"""
count = collection.count()
embeddings = wrap_all(embeddings)
assert count == len(embeddings["ids"])
def _field_matches(
collection: Collection,
embeddings: RecordSet,
field_name: Union[Literal["documents"], Literal["metadatas"]],
):
"""
The actual embedding field is equal to the expected field
field_name: one of [documents, metadatas]
"""
result = collection.get(ids=embeddings["ids"], include=[field_name])
# The test_out_of_order_ids test in test_add.py fails because get() does not preserve
# input order; here we sort the returned field by id to restore the input order.
embedding_id_to_index = {id: i for i, id in enumerate(embeddings["ids"])}
actual_field = result[field_name]
# This assert should never fire: when metadatas/documents are included, a missing
# field comes back as [None, None, ...], never as a bare None.
assert actual_field is not None
actual_field = sorted(
enumerate(actual_field),
key=lambda index_and_field_value: embedding_id_to_index[
result["ids"][index_and_field_value[0]]
],
)
actual_field = [field_value for _, field_value in actual_field]
expected_field = embeddings[field_name]
if expected_field is None:
# Since a RecordSet is user input, convert the expected field to a list of Nones
# (one per entry), which is what the API returns.
expected_field = [None] * len(embeddings["ids"])
assert actual_field == expected_field
def ids_match(collection: Collection, embeddings: RecordSet):
"""The actual embedding ids is equal to the expected ids"""
embeddings = wrap_all(embeddings)
actual_ids = collection.get(ids=embeddings["ids"], include=[])["ids"]
# The test_out_of_order_ids test in test_add.py fails because get() does not preserve
# input order; here we sort the returned ids to restore the input order.
embedding_id_to_index = {id: i for i, id in enumerate(embeddings["ids"])}
actual_ids = sorted(actual_ids, key=lambda id: embedding_id_to_index[id])
assert actual_ids == embeddings["ids"]
def metadatas_match(collection: Collection, embeddings: RecordSet):
"""The actual embedding metadata is equal to the expected metadata"""
embeddings = wrap_all(embeddings)
_field_matches(collection, embeddings, "metadatas")
def documents_match(collection: Collection, embeddings: RecordSet):
"""The actual embedding documents is equal to the expected documents"""
embeddings = wrap_all(embeddings)
_field_matches(collection, embeddings, "documents")
def no_duplicates(collection: Collection):
ids = collection.get()["ids"]
assert len(ids) == len(set(ids))
# These match what the spec of hnswlib is
# This epsilon is used to prevent division by zero and the value is the same
# https://github.com/nmslib/hnswlib/blob/359b2ba87358224963986f709e593d799064ace6/python_bindings/bindings.cpp#L238
NORM_EPS = 1e-30
distance_functions = {
"l2": lambda x, y: np.linalg.norm(x - y) ** 2,
"cosine": lambda x, y: 1
- np.dot(x, y) / ((np.linalg.norm(x) + NORM_EPS) * (np.linalg.norm(y) + NORM_EPS)),
"ip": lambda x, y: 1 - np.dot(x, y),
}
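# Illustrative sanity check (assuming unit vectors): for x == y the cosine distance is
# ~0.0, for orthogonal vectors it is ~1.0; NORM_EPS only matters when a vector's norm
# is close to zero.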
def _exact_distances(
query: types.Embeddings,
targets: types.Embeddings,
distance_fn: Callable = lambda x, y: np.linalg.norm(x - y) ** 2,
):
"""Return the ordered indices and distances from each query to each target"""
np_query = np.array(query)
np_targets = np.array(targets)
# Compute the distance between each query and each target, using the distance function
distances = np.apply_along_axis(
lambda query: np.apply_along_axis(distance_fn, 1, np_targets, query),
1,
np_query,
)
# Sort the distances and return the indices
return np.argsort(distances), distances
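# Illustrative: when query == targets and there are no duplicate vectors, each row of
# the returned indices starts with that query's own index, since a vector's distance
# to itself is 0.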
def ann_accuracy(
collection: Collection,
record_set: RecordSet,
n_results: int = 1,
min_recall: float = 0.99,
embedding_function: Optional[types.EmbeddingFunction] = None,
):
"""Validate that the API performs nearest_neighbor searches correctly"""
record_set = wrap_all(record_set)
if len(record_set["ids"]) == 0:
return # nothing to test here
embeddings = record_set["embeddings"]
have_embeddings = embeddings is not None and len(embeddings) > 0
if not have_embeddings:
assert embedding_function is not None
assert record_set["documents"] is not None
# Compute the embeddings for the documents
embeddings = embedding_function(record_set["documents"])
# l2 is the default distance function
distance_function = distance_functions["l2"]
accuracy_threshold = 1e-6
if "hnsw:space" in collection.metadata:
space = collection.metadata["hnsw:space"]
# TODO: ip and cosine are numerically unstable in HNSW.
# The higher the dimensionality, the more noise is introduced, since each float element
# of the vector has noise added, and that noise is then carried into all normalization
# calculations. Higher dimensions therefore accumulate more noise, and thus more error.
dim = len(embeddings[0])
accuracy_threshold = accuracy_threshold * math.pow(10, int(math.log10(dim)))
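# e.g. dim=1536 scales the threshold to 1e-3 (1e-6 * 10**3), dim=128 to 1e-4 (1e-6 * 10**2)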
if space == "cosine":
distance_function = distance_functions["cosine"]
if space == "ip":
distance_function = distance_functions["ip"]
# Perform exact distance computation
indices, distances = _exact_distances(
embeddings, embeddings, distance_fn=distance_function
)
query_results = collection.query(
query_embeddings=record_set["embeddings"],
query_texts=record_set["documents"] if not have_embeddings else None,
n_results=n_results,
include=["embeddings", "documents", "metadatas", "distances"],
)
# Dict of ids to indices
id_to_index = {id: i for i, id in enumerate(record_set["ids"])}
missing = 0
for i, (indices_i, distances_i) in enumerate(zip(indices, distances)):
expected_ids = np.array(record_set["ids"])[indices_i[:n_results]]
missing += len(set(expected_ids) - set(query_results["ids"][i]))
# For each id in the query results, find the index in the embeddings set
# and assert that the embeddings are the same
for j, id in enumerate(query_results["ids"][i]):
# An unexpected ID may mean the true nth nearest neighbor was not returned by the ANN query
unexpected_id = id not in expected_ids
index = id_to_index[id]
correct_distance = np.allclose(
distances_i[index],
query_results["distances"][i][j],
atol=accuracy_threshold,
)
if unexpected_id:
# If the ID is unexpected, but the distance is correct, then we
# have a duplicate in the data. In this case, we should not reduce recall.
if correct_distance:
missing -= 1
else:
continue
else:
assert correct_distance
assert np.allclose(embeddings[index], query_results["embeddings"][i][j])
if record_set["documents"] is not None:
assert (
record_set["documents"][index] == query_results["documents"][i][j]
)
if record_set["metadatas"] is not None:
assert (
record_set["metadatas"][index] == query_results["metadatas"][i][j]
)
size = len(record_set["ids"])
recall = (size - missing) / size
try:
note(
f"recall: {recall}, missing {missing} out of {size}, accuracy threshold {accuracy_threshold}"
)
except InvalidArgument:
pass # it's ok if we're running outside hypothesis
assert recall >= min_recall
# Ensure that the query results are sorted by distance
for distance_result in query_results["distances"]:
assert np.allclose(np.sort(distance_result), distance_result)
| Ocean-master | oceandb/test/property/invariants.py |
from hypothesis import given, settings, HealthCheck
from oceandb.api import API
from oceandb.errors import NoDatapointsException
from oceandb.test.property import invariants
import oceandb.test.property.strategies as strategies
import hypothesis.strategies as st
import logging
import random
def _filter_where_clause(clause, mm):
"""Return true if the where clause is true for the given metadata map"""
key, expr = list(clause.items())[0]
# Handle the shorthand for equal: {key: val} where val is a simple value
if isinstance(expr, str) or isinstance(expr, int) or isinstance(expr, float):
return _filter_where_clause({key: {"$eq": expr}}, mm)
if key == "$and":
return all(_filter_where_clause(clause, mm) for clause in expr)
if key == "$or":
return any(_filter_where_clause(clause, mm) for clause in expr)
op, val = list(expr.items())[0]
if op == "$eq":
return key in mm and mm[key] == val
elif op == "$ne":
return key in mm and mm[key] != val
elif op == "$gt":
return key in mm and mm[key] > val
elif op == "$gte":
return key in mm and mm[key] >= val
elif op == "$lt":
return key in mm and mm[key] < val
elif op == "$lte":
return key in mm and mm[key] <= val
else:
raise ValueError("Unknown operator: {}".format(key))
def _filter_where_doc_clause(clause, doc):
key, expr = list(clause.items())[0]
if key == "$and":
return all(_filter_where_doc_clause(clause, doc) for clause in expr)
elif key == "$or":
return any(_filter_where_doc_clause(clause, doc) for clause in expr)
elif key == "$contains":
return expr in doc
else:
raise ValueError("Unknown operator: {}".format(key))
EMPTY_DICT = {}
EMPTY_STRING = ""
def _filter_embedding_set(recordset: strategies.RecordSet, filter: strategies.Filter):
"""Return IDs from the embedding set that match the given filter object"""
recordset = invariants.wrap_all(recordset)
ids = set(recordset["ids"])
filter_ids = filter["ids"]
if filter_ids is not None:
filter_ids = invariants.maybe_wrap(filter_ids)
assert filter_ids is not None
# If the filter ids is an empty list then we treat that as get all
if len(filter_ids) != 0:
ids = ids.intersection(filter_ids)
for i in range(len(recordset["ids"])):
if filter["where"]:
metadatas = recordset["metadatas"] or [EMPTY_DICT] * len(recordset["ids"])
if not _filter_where_clause(filter["where"], metadatas[i]):
ids.discard(recordset["ids"][i])
if filter["where_document"]:
documents = recordset["documents"] or [EMPTY_STRING] * len(recordset["ids"])
if not _filter_where_doc_clause(filter["where_document"], documents[i]):
ids.discard(recordset["ids"][i])
return list(ids)
collection_st = st.shared(
strategies.collections(add_filterable_data=True, with_hnsw_params=True),
key="coll",
)
recordset_st = st.shared(
strategies.recordsets(collection_st, max_size=1000), key="recordset"
)
@settings(
suppress_health_check=[
HealthCheck.function_scoped_fixture,
HealthCheck.large_base_example,
]
)
@given(
collection=collection_st,
recordset=recordset_st,
filters=st.lists(strategies.filters(collection_st, recordset_st), min_size=1),
)
def test_filterable_metadata_get(caplog, api: API, collection, recordset, filters):
caplog.set_level(logging.ERROR)
api.reset()
coll = api.create_collection(
name=collection.name,
metadata=collection.metadata,
embedding_function=collection.embedding_function,
)
coll.add(**recordset)
for filter in filters:
result_ids = coll.get(**filter)["ids"]
expected_ids = _filter_embedding_set(recordset, filter)
assert sorted(result_ids) == sorted(expected_ids)
@settings(
suppress_health_check=[
HealthCheck.function_scoped_fixture,
HealthCheck.large_base_example,
]
)
@given(
collection=collection_st,
recordset=recordset_st,
filters=st.lists(
strategies.filters(collection_st, recordset_st, include_all_ids=True),
min_size=1,
),
)
def test_filterable_metadata_query(
caplog,
api: API,
collection: strategies.Collection,
recordset: strategies.RecordSet,
filters,
):
caplog.set_level(logging.ERROR)
api.reset()
coll = api.create_collection(
name=collection.name,
metadata=collection.metadata,
embedding_function=collection.embedding_function,
)
coll.add(**recordset)
recordset = invariants.wrap_all(recordset)
total_count = len(recordset["ids"])
# Pick a random vector
if collection.has_embeddings:
random_query = recordset["embeddings"][random.randint(0, total_count - 1)]
else:
random_query = collection.embedding_function(
recordset["documents"][random.randint(0, total_count - 1)]
)
for filter in filters:
try:
result_ids = set(
coll.query(
query_embeddings=random_query,
n_results=total_count,
where=filter["where"],
where_document=filter["where_document"],
)["ids"][0]
)
except NoDatapointsException:
result_ids = set()
expected_ids = set(_filter_embedding_set(recordset, filter))
assert len(result_ids.intersection(expected_ids)) == len(result_ids)
| Ocean-master | oceandb/test/property/test_filtering.py |
from abc import ABC, abstractmethod
class Server(ABC):
@abstractmethod
def __init__(self, settings):
pass
| Ocean-master | oceandb/server/__init__.py |
import fastapi
from fastapi import FastAPI as _FastAPI
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from fastapi.routing import APIRoute
from fastapi import HTTPException, status
from uuid import UUID
import oceandb
import oceandb.server
import oceandb.api
from oceandb.errors import (
OceanError,
InvalidUUIDError,
InvalidDimensionException,
)
from oceandb.server.fastapi.types import (
AddEmbedding,
DeleteEmbedding,
GetEmbedding,
QueryEmbedding,
RawSql, # Results,
CreateCollection,
UpdateCollection,
UpdateEmbedding,
)
from starlette.requests import Request
import logging
from oceandb.telemetry import ServerContext, Telemetry
logger = logging.getLogger(__name__)
def use_route_names_as_operation_ids(app: _FastAPI) -> None:
"""
Simplify operation IDs so that generated API clients have simpler function
names.
Should be called only after all routes have been added.
"""
for route in app.routes:
if isinstance(route, APIRoute):
route.operation_id = route.name
async def catch_exceptions_middleware(request: Request, call_next):
try:
return await call_next(request)
except OceanError as e:
return JSONResponse(
content={"error": e.name(), "message": e.message()}, status_code=e.code()
)
except Exception as e:
logger.exception(e)
return JSONResponse(content={"error": repr(e)}, status_code=500)
def _uuid(uuid_str: str):
try:
return UUID(uuid_str)
except ValueError:
raise InvalidUUIDError(f"Could not parse {uuid_str} as a UUID")
class FastAPI(oceandb.server.Server):
def __init__(self, settings):
super().__init__(settings)
Telemetry.SERVER_CONTEXT = ServerContext.FASTAPI
self._app = fastapi.FastAPI(debug=True)
self._api: oceandb.api.API = oceandb.Client(settings)
self._app.middleware("http")(catch_exceptions_middleware)
self._app.add_middleware(
CORSMiddleware,
allow_headers=["*"],
allow_origins=settings.ocean_server_cors_allow_origins,
allow_methods=["*"],
)
self.router = fastapi.APIRouter()
self.router.add_api_route("/api/v1", self.root, methods=["GET"])
self.router.add_api_route("/api/v1/reset", self.reset, methods=["POST"])
self.router.add_api_route("/api/v1/version", self.version, methods=["GET"])
self.router.add_api_route("/api/v1/heartbeat", self.heartbeat, methods=["GET"])
self.router.add_api_route("/api/v1/persist", self.persist, methods=["POST"])
self.router.add_api_route("/api/v1/raw_sql", self.raw_sql, methods=["POST"])
self.router.add_api_route(
"/api/v1/collections", self.list_collections, methods=["GET"]
)
self.router.add_api_route(
"/api/v1/collections", self.create_collection, methods=["POST"]
)
self.router.add_api_route(
"/api/v1/collections/{collection_id}/add",
self.add,
methods=["POST"],
status_code=status.HTTP_201_CREATED,
)
self.router.add_api_route(
"/api/v1/collections/{collection_id}/update", self.update, methods=["POST"]
)
self.router.add_api_route(
"/api/v1/collections/{collection_id}/upsert", self.upsert, methods=["POST"]
)
self.router.add_api_route(
"/api/v1/collections/{collection_id}/get", self.get, methods=["POST"]
)
self.router.add_api_route(
"/api/v1/collections/{collection_id}/delete", self.delete, methods=["POST"]
)
self.router.add_api_route(
"/api/v1/collections/{collection_id}/count", self.count, methods=["GET"]
)
self.router.add_api_route(
"/api/v1/collections/{collection_id}/query",
self.get_nearest_neighbors,
methods=["POST"],
)
self.router.add_api_route(
"/api/v1/collections/{collection_name}/create_index",
self.create_index,
methods=["POST"],
)
self.router.add_api_route(
"/api/v1/collections/{collection_name}",
self.get_collection,
methods=["GET"],
)
self.router.add_api_route(
"/api/v1/collections/{collection_id}",
self.update_collection,
methods=["PUT"],
)
self.router.add_api_route(
"/api/v1/collections/{collection_name}",
self.delete_collection,
methods=["DELETE"],
)
self._app.include_router(self.router)
use_route_names_as_operation_ids(self._app)
def app(self):
return self._app
def root(self):
return {"nanosecond heartbeat": self._api.heartbeat()}
def heartbeat(self):
return self.root()
def persist(self):
self._api.persist()
def version(self):
return self._api.get_version()
def list_collections(self):
return self._api.list_collections()
def create_collection(self, collection: CreateCollection):
return self._api.create_collection(
name=collection.name,
metadata=collection.metadata,
get_or_create=collection.get_or_create,
)
def get_collection(self, collection_name: str):
return self._api.get_collection(collection_name)
def update_collection(self, collection_id: str, collection: UpdateCollection):
return self._api._modify(
id=_uuid(collection_id),
new_name=collection.new_name,
new_metadata=collection.new_metadata,
)
def delete_collection(self, collection_name: str):
return self._api.delete_collection(collection_name)
def add(self, collection_id: str, add: AddEmbedding):
try:
result = self._api._add(
collection_id=_uuid(collection_id),
embeddings=add.embeddings,
metadatas=add.metadatas,
documents=add.documents,
ids=add.ids,
increment_index=add.increment_index,
)
except InvalidDimensionException as e:
raise HTTPException(status_code=500, detail=str(e))
return result
def update(self, collection_id: str, add: UpdateEmbedding):
return self._api._update(
ids=add.ids,
collection_id=_uuid(collection_id),
embeddings=add.embeddings,
documents=add.documents,
metadatas=add.metadatas,
)
def upsert(self, collection_id: str, upsert: AddEmbedding):
return self._api._upsert(
collection_id=_uuid(collection_id),
ids=upsert.ids,
embeddings=upsert.embeddings,
documents=upsert.documents,
metadatas=upsert.metadatas,
increment_index=upsert.increment_index,
)
def get(self, collection_id: str, get: GetEmbedding):
return self._api._get(
collection_id=_uuid(collection_id),
ids=get.ids,
where=get.where,
where_document=get.where_document,
sort=get.sort,
limit=get.limit,
offset=get.offset,
include=get.include,
)
def delete(self, collection_id: str, delete: DeleteEmbedding):
return self._api._delete(
where=delete.where,
ids=delete.ids,
collection_id=_uuid(collection_id),
where_document=delete.where_document,
)
def count(self, collection_id: str):
return self._api._count(_uuid(collection_id))
def reset(self):
return self._api.reset()
def get_nearest_neighbors(self, collection_id: str, query: QueryEmbedding):
nnresult = self._api._query(
collection_id=_uuid(collection_id),
where=query.where,
where_document=query.where_document,
query_embeddings=query.query_embeddings,
n_results=query.n_results,
include=query.include,
)
return nnresult
def raw_sql(self, raw_sql: RawSql):
return self._api.raw_sql(raw_sql.raw_sql)
def create_index(self, collection_name: str):
return self._api.create_index(collection_name)
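# Illustrative usage (not part of this module): the wrapped app can be served with
# uvicorn, e.g.
#   server = FastAPI(settings)
#   uvicorn.run(server.app(), host="0.0.0.0", port=8000)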
| Ocean-master | oceandb/server/fastapi/__init__.py |
from pydantic import BaseModel
from typing import List, Union
from oceandb.api.types import Include
# These request types support both single-item and batch mode
class AddEmbedding(BaseModel):
embeddings: List
metadatas: Union[List, dict] = None
documents: Union[str, List] = None
ids: Union[str, List] = None
increment_index: bool = True
class UpdateEmbedding(BaseModel):
embeddings: List = None
metadatas: Union[List, dict] = None
documents: Union[str, List] = None
ids: Union[str, List] = None
increment_index: bool = True
class QueryEmbedding(BaseModel):
where: dict = {}
where_document: dict = {}
query_embeddings: List
n_results: int = 10
include: Include = ["metadatas", "documents", "distances"]
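# Illustrative request body for QueryEmbedding (values are hypothetical):
#   {"query_embeddings": [[0.1, 0.2, 0.3]], "n_results": 5, "where": {"key": "value"}}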
class ProcessEmbedding(BaseModel):
collection_name: str = None
training_dataset_name: str = None
unlabeled_dataset_name: str = None
class GetEmbedding(BaseModel):
ids: List = None
where: dict = None
where_document: dict = None
sort: str = None
limit: int = None
offset: int = None
include: Include = ["metadatas", "documents"]
class CountEmbedding(BaseModel):
collection_name: str = None
class RawSql(BaseModel):
raw_sql: str = None
class SpaceKeyInput(BaseModel):
collection_name: str
class DeleteEmbedding(BaseModel):
ids: List = None
where: dict = None
where_document: dict = None
class CreateCollection(BaseModel):
name: str
metadata: dict = None
get_or_create: bool = False
class UpdateCollection(BaseModel):
new_name: str = None
new_metadata: dict = None
| Ocean-master | oceandb/server/fastapi/types.py |
from oceandb.api.types import Documents, EmbeddingFunction, Embeddings
from typing import Optional
import torch
from .ImageBind.models import imagebind_model
from .ImageBind.models.imagebind_model import ModalityType
from .ImageBind.data import (
load_and_transform_text,
load_and_transform_vision_data,
load_and_transform_audio_data,
)
class SentenceTransformerEmbeddingFunction(EmbeddingFunction):
models = {}
# If you have a beefier machine, try "gtr-t5-large".
# for a full list of options: https://huggingface.co/sentence-transformers, https://www.sbert.net/docs/pretrained_models.html
def __init__(self, model_name: str = "all-MiniLM-L6-v2"):
if model_name not in self.models:
try:
from sentence_transformers import SentenceTransformer
except ImportError:
raise ValueError(
"The sentence_transformers python package is not installed. Please install it with `pip install sentence_transformers`"
)
self.models[model_name] = SentenceTransformer(model_name)
self._model = self.models[model_name]
def __call__(self, texts: Documents) -> Embeddings:
return self._model.encode(list(texts), convert_to_numpy=True).tolist()
#
class MultiModalEmbeddingFunction(EmbeddingFunction):
def __init__(
self,
modality: str = ModalityType, # type: ignore
model_path: str = "https://dl.fbaipublicfiles.com/imagebind/imagebind_huge.pth",
device: str = "cuda:0",
):
self._modality = modality
self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
self._model = imagebind_model.imagebind_huge(pretrained=True)
self._model.eval()
self._model.to(self.device)
def __call__(self, *args: Documents) -> Embeddings:
if self._modality == ModalityType.TEXT:
inputs = {ModalityType.TEXT: load_and_transform_text(args[0], self.device)}
print("Inputs:", inputs)
elif self._modality == ModalityType.VISION:
inputs = {
ModalityType.VISION: load_and_transform_vision_data(
args[0], self.device
)
}
elif self._modality == ModalityType.AUDIO:
inputs = {
ModalityType.AUDIO: load_and_transform_audio_data(args[0], self.device)
}
else:
raise ValueError("Invalid modality specified")
with torch.no_grad():
embeddings = self._model(inputs)
print("Embeddings:", embeddings)
# Convert the embeddings tensor to a NumPy array and then to a list of lists (embeddings)
embeddings_array = embeddings[self._modality].cpu().numpy()
print("Embeddings array:", embeddings_array)
# embeddings_list = embeddings_array.tolist()
# return embeddings_list
return [embedding.tolist() for embedding in embeddings_array]
"""
text_embedding_function = MultiModalEmbeddingFunction(modality=ModalityType.TEXT)
vision_embedding_function = MultiModalEmbeddingFunction(modality=ModalityType.VISION)
audio_embedding_function = MultiModalEmbeddingFunction(modality=ModalityType.AUDIO)
"""
class OpenAIEmbeddingFunction(EmbeddingFunction):
def __init__(
self, api_key: Optional[str] = None, model_name: str = "text-embedding-ada-002"
):
try:
import openai
except ImportError:
raise ValueError(
"The openai python package is not installed. Please install it with `pip install openai`"
)
if api_key is not None:
openai.api_key = api_key
# If the api key is still not set, raise an error
elif openai.api_key is None:
raise ValueError(
"Please provide an OpenAI API key. You can get one at https://platform.openai.com/account/api-keys"
)
self._client = openai.Embedding
self._model_name = model_name
def __call__(self, texts: Documents) -> Embeddings:
# replace newlines, which can negatively affect performance.
texts = [t.replace("\n", " ") for t in texts]
# Call the OpenAI Embedding API
embeddings = self._client.create(input=texts, engine=self._model_name)["data"]
# Sort resulting embeddings by index
sorted_embeddings = sorted(embeddings, key=lambda e: e["index"])
# Return just the embeddings
return [result["embedding"] for result in sorted_embeddings]
class CohereEmbeddingFunction(EmbeddingFunction):
def __init__(self, api_key: str, model_name: str = "large"):
try:
import cohere
except ImportError:
raise ValueError(
"The cohere python package is not installed. Please install it with `pip install cohere`"
)
self._client = cohere.Client(api_key)
self._model_name = model_name
def __call__(self, texts: Documents) -> Embeddings:
# Call Cohere Embedding API for each document.
return [
embeddings
for embeddings in self._client.embed(texts=texts, model=self._model_name)
]
class HuggingFaceEmbeddingFunction(EmbeddingFunction):
def __init__(
self, api_key: str, model_name: str = "sentence-transformers/all-MiniLM-L6-v2"
):
try:
import requests
except ImportError:
raise ValueError(
"The requests python package is not installed. Please install it with `pip install requests`"
)
self._api_url = f"https://api-inference.huggingface.co/pipeline/feature-extraction/{model_name}"
self._session = requests.Session()
self._session.headers.update({"Authorization": f"Bearer {api_key}"})
def __call__(self, texts: Documents) -> Embeddings:
# Call HuggingFace Embedding API for each document
return self._session.post(
self._api_url, json={"inputs": texts, "options": {"wait_for_model": True}}
).json()
class InstructorEmbeddingFunction(EmbeddingFunction):
    # If you have a GPU with at least 6GB of memory, try model_name = "hkunlp/instructor-xl" and device = "cuda".
    # For a full list of options, see: https://github.com/HKUNLP/instructor-embedding#model-list
def __init__(self, model_name: str = "hkunlp/instructor-base", device="cpu"):
try:
from InstructorEmbedding import INSTRUCTOR
except ImportError:
raise ValueError(
"The InstructorEmbedding python package is not installed. Please install it with `pip install InstructorEmbedding`"
)
self._model = INSTRUCTOR(model_name, device=device)
def __call__(self, texts: Documents) -> Embeddings:
return self._model.encode(texts).tolist()
class GooglePalmEmbeddingFunction(EmbeddingFunction):
"""To use this EmbeddingFunction, you must have the google.generativeai Python package installed and have a PaLM API key."""
def __init__(self, api_key: str, model_name: str = "models/embedding-gecko-001"):
if not api_key:
raise ValueError("Please provide a PaLM API key.")
if not model_name:
raise ValueError("Please provide the model name.")
try:
import google.generativeai as palm
except ImportError:
raise ValueError(
"The Google Generative AI python package is not installed. Please install it with `pip install google-generativeai`"
)
palm.configure(api_key=api_key)
self._palm = palm
self._model_name = model_name
def __call__(self, texts: Documents) -> Embeddings:
return [
self._palm.generate_embeddings(model=self._model_name, text=text)[
"embedding"
]
for text in texts
]
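# --- Illustrative usage sketch (added; not part of the original module) ---
# Every EmbeddingFunction above is a callable mapping a list of documents to a
# list of embedding vectors (one list of floats per input text). A minimal,
# hedged example using the SentenceTransformerEmbeddingFunction defined earlier
# in this file, assuming `sentence_transformers` is installed and the default
# model can be downloaded:
#
#   ef = SentenceTransformerEmbeddingFunction(model_name="all-MiniLM-L6-v2")
#   vectors = ef(["hello world", "each text yields one embedding vector"])
#   assert len(vectors) == 2 and isinstance(vectors[0], list)
#
# The API-backed variants (OpenAI, Cohere, HuggingFace, PaLM) expose the same
# call signature but require valid API keys.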
| Ocean-master | oceandb/utils/embedding_functions.py |
Ocean-master | oceandb/utils/__init__.py |
|
#!/usr/bin/env python3
# Portions Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torchaudio
import logging
# from ..multimodal_preprocessors import SimpleTokenizer
from .models.multimodal_preprocessors import SimpleTokenizer
from PIL import Image
from pytorchvideo import transforms as pv_transforms
from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler
from pytorchvideo.data.encoded_video import EncodedVideo
from torchvision import transforms
from torchvision.transforms._transforms_video import NormalizeVideo
DEFAULT_AUDIO_FRAME_SHIFT_MS = 10 # in milliseconds
BPE_PATH = "oceandb/utils/ImageBind/bpe_simple_vocab_16e6.txt.gz"
def waveform2melspec(waveform, sample_rate, num_mel_bins, target_length):
# Based on https://github.com/YuanGongND/ast/blob/d7d8b4b8e06cdaeb6c843cdb38794c1c7692234c/src/dataloader.py#L102
waveform -= waveform.mean()
fbank = torchaudio.compliance.kaldi.fbank(
waveform,
htk_compat=True,
sample_frequency=sample_rate,
use_energy=False,
window_type="hanning",
num_mel_bins=num_mel_bins,
dither=0.0,
frame_length=25,
frame_shift=DEFAULT_AUDIO_FRAME_SHIFT_MS,
)
# Convert to [mel_bins, num_frames] shape
fbank = fbank.transpose(0, 1)
# Pad to target_length
n_frames = fbank.size(1)
p = target_length - n_frames
    # if the gap p is too large (say >20%), log a warning
if abs(p) / n_frames > 0.2:
logging.warning(
"Large gap between audio n_frames(%d) and "
"target_length (%d). Is the audio_target_length "
"setting correct?",
n_frames,
target_length,
)
# cut and pad
if p > 0:
fbank = torch.nn.functional.pad(fbank, (0, p), mode="constant", value=0)
elif p < 0:
fbank = fbank[:, 0:target_length]
# Convert to [1, mel_bins, num_frames] shape, essentially like a 1
# channel image
fbank = fbank.unsqueeze(0)
return fbank
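# Illustrative sketch (added): waveform2melspec expects a [channels, samples]
# waveform and returns a [1, num_mel_bins, target_length] log-mel "image".
# With a 2-second, 16 kHz mono signal and the defaults used by
# load_and_transform_audio_data below (128 mel bins, target_length=204):
#
#   dummy = torch.randn(1, 2 * 16000)
#   spec = waveform2melspec(dummy, sample_rate=16000, num_mel_bins=128, target_length=204)
#   # spec.shape == torch.Size([1, 128, 204])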
def get_clip_timepoints(clip_sampler, duration):
# Read out all clips in this video
all_clips_timepoints = []
is_last_clip = False
end = 0.0
while not is_last_clip:
start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None)
all_clips_timepoints.append((start, end))
return all_clips_timepoints
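# Illustrative sketch (added): with clip_duration=2 and clips_per_video=3, a
# 6-second input is covered by three evenly spaced (start, end) pairs,
# approximately [(0.0, 2.0), (2.0, 4.0), (4.0, 6.0)]:
#
#   sampler = ConstantClipsPerVideoSampler(clip_duration=2, clips_per_video=3)
#   timepoints = get_clip_timepoints(sampler, duration=6.0)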
def load_and_transform_vision_data(image_paths, device):
if image_paths is None:
return None
    image_outputs = []
for image_path in image_paths:
data_transform = transforms.Compose(
[
transforms.Resize(
224, interpolation=transforms.InterpolationMode.BICUBIC
),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711),
),
]
)
with open(image_path, "rb") as fopen:
image = Image.open(fopen).convert("RGB")
image = data_transform(image).to(device)
        image_outputs.append(image)
    return torch.stack(image_outputs, dim=0)
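# Illustrative sketch (added): the vision loader takes a list of image paths and
# returns an [N, 3, 224, 224] tensor of normalized center crops. For example,
# with a throwaway RGB image written to a temporary file:
#
#   import os, tempfile
#   tmp = os.path.join(tempfile.gettempdir(), "imagebind_demo.jpg")
#   Image.new("RGB", (320, 240), color=(255, 0, 0)).save(tmp)
#   batch = load_and_transform_vision_data([tmp], device="cpu")
#   # batch.shape == torch.Size([1, 3, 224, 224])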
def load_and_transform_text(text, device):
if text is None:
return None
tokenizer = SimpleTokenizer(bpe_path=BPE_PATH)
tokens = [tokenizer(t).unsqueeze(0).to(device) for t in text]
tokens = torch.cat(tokens, dim=0)
return tokens
def load_and_transform_audio_data(
audio_paths,
device,
num_mel_bins=128,
target_length=204,
sample_rate=16000,
clip_duration=2,
clips_per_video=3,
mean=-4.268,
std=9.138,
):
if audio_paths is None:
return None
audio_outputs = []
clip_sampler = ConstantClipsPerVideoSampler(
clip_duration=clip_duration, clips_per_video=clips_per_video
)
for audio_path in audio_paths:
waveform, sr = torchaudio.load(audio_path)
if sample_rate != sr:
waveform = torchaudio.functional.resample(
waveform, orig_freq=sr, new_freq=sample_rate
)
all_clips_timepoints = get_clip_timepoints(
clip_sampler, waveform.size(1) / sample_rate
)
all_clips = []
for clip_timepoints in all_clips_timepoints:
waveform_clip = waveform[
:,
int(clip_timepoints[0] * sample_rate) : int(
clip_timepoints[1] * sample_rate
),
]
waveform_melspec = waveform2melspec(
waveform_clip, sample_rate, num_mel_bins, target_length
)
all_clips.append(waveform_melspec)
normalize = transforms.Normalize(mean=mean, std=std)
all_clips = [normalize(ac).to(device) for ac in all_clips]
all_clips = torch.stack(all_clips, dim=0)
audio_outputs.append(all_clips)
return torch.stack(audio_outputs, dim=0)
def crop_boxes(boxes, x_offset, y_offset):
"""
    Perform crop on the bounding boxes given the offsets.
    Args:
        boxes (ndarray or None): bounding boxes to perform crop. The dimension
is `num boxes` x 4.
x_offset (int): cropping offset in the x axis.
y_offset (int): cropping offset in the y axis.
Returns:
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
cropped_boxes = boxes.copy()
cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset
cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset
return cropped_boxes
def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None):
"""
Perform uniform spatial sampling on the images and corresponding boxes.
Args:
images (tensor): images to perform uniform crop. The dimension is
`num frames` x `channel` x `height` x `width`.
        size (int): size of height and width to crop the images.
spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width
is larger than height. Or 0, 1, or 2 for top, center, and bottom
crop if height is larger than width.
boxes (ndarray or None): optional. Corresponding boxes to images.
Dimension is `num boxes` x 4.
        scale_size (int): optional. If not None, resize the images to scale_size before
performing any crop.
Returns:
cropped (tensor): images with dimension of
`num frames` x `channel` x `size` x `size`.
cropped_boxes (ndarray or None): the cropped boxes with dimension of
`num boxes` x 4.
"""
assert spatial_idx in [0, 1, 2]
ndim = len(images.shape)
if ndim == 3:
images = images.unsqueeze(0)
height = images.shape[2]
width = images.shape[3]
if scale_size is not None:
if width <= height:
width, height = scale_size, int(height / width * scale_size)
else:
width, height = int(width / height * scale_size), scale_size
images = torch.nn.functional.interpolate(
images,
size=(height, width),
mode="bilinear",
align_corners=False,
)
y_offset = int(math.ceil((height - size) / 2))
x_offset = int(math.ceil((width - size) / 2))
if height > width:
if spatial_idx == 0:
y_offset = 0
elif spatial_idx == 2:
y_offset = height - size
else:
if spatial_idx == 0:
x_offset = 0
elif spatial_idx == 2:
x_offset = width - size
cropped = images[:, :, y_offset : y_offset + size, x_offset : x_offset + size]
cropped_boxes = crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None
if ndim == 3:
cropped = cropped.squeeze(0)
return cropped, cropped_boxes
class SpatialCrop(nn.Module):
"""
Convert the video into 3 smaller clips spatially. Must be used after the
temporal crops to get spatial crops, and should be used with
-2 in the spatial crop at the slowfast augmentation stage (so full
frames are passed in here). Will return a larger list with the
3x spatial crops as well.
"""
def __init__(self, crop_size: int = 224, num_crops: int = 3):
super().__init__()
self.crop_size = crop_size
if num_crops == 3:
self.crops_to_ext = [0, 1, 2]
self.flipped_crops_to_ext = []
elif num_crops == 1:
self.crops_to_ext = [1]
self.flipped_crops_to_ext = []
else:
raise NotImplementedError("Nothing else supported yet")
def forward(self, videos):
"""
Args:
videos: A list of C, T, H, W videos.
Returns:
videos: A list with 3x the number of elements. Each video converted
to C, T, H', W' by spatial cropping.
"""
assert isinstance(videos, list), "Must be a list of videos after temporal crops"
assert all([video.ndim == 4 for video in videos]), "Must be (C,T,H,W)"
res = []
for video in videos:
for spatial_idx in self.crops_to_ext:
res.append(uniform_crop(video, self.crop_size, spatial_idx)[0])
if not self.flipped_crops_to_ext:
continue
flipped_video = transforms.functional.hflip(video)
for spatial_idx in self.flipped_crops_to_ext:
res.append(uniform_crop(flipped_video, self.crop_size, spatial_idx)[0])
return res
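# Illustrative sketch (added): SpatialCrop turns every (C, T, H, W) clip in the
# input list into three 224x224 spatial crops, so the output list is 3x longer.
#
#   clips = [torch.randn(3, 2, 256, 320)]
#   crops = SpatialCrop(crop_size=224, num_crops=3)(clips)
#   # len(crops) == 3 and crops[0].shape == torch.Size([3, 2, 224, 224])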
def load_and_transform_video_data(
video_paths,
device,
clip_duration=2,
clips_per_video=5,
sample_rate=16000,
):
if video_paths is None:
return None
video_outputs = []
video_transform = transforms.Compose(
[
pv_transforms.ShortSideScale(224),
NormalizeVideo(
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711),
),
]
)
clip_sampler = ConstantClipsPerVideoSampler(
clip_duration=clip_duration, clips_per_video=clips_per_video
)
frame_sampler = pv_transforms.UniformTemporalSubsample(num_samples=clip_duration)
for video_path in video_paths:
video = EncodedVideo.from_path(
video_path,
decoder="decord",
decode_audio=False,
**{"sample_rate": sample_rate},
)
all_clips_timepoints = get_clip_timepoints(clip_sampler, video.duration)
all_video = []
for clip_timepoints in all_clips_timepoints:
# Read the clip, get frames
clip = video.get_clip(clip_timepoints[0], clip_timepoints[1])
if clip is None:
raise ValueError("No clip found")
video_clip = frame_sampler(clip["video"])
video_clip = video_clip / 255.0 # since this is float, need 0-1
all_video.append(video_clip)
all_video = [video_transform(clip) for clip in all_video]
all_video = SpatialCrop(224, num_crops=3)(all_video)
all_video = torch.stack(all_video, dim=0)
video_outputs.append(all_video)
return torch.stack(video_outputs, dim=0).to(device)
| Ocean-master | oceandb/utils/ImageBind/data.py |
Ocean-master | oceandb/utils/ImageBind/models/__init__.py |
|
#!/usr/bin/env python3
# Portions Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from functools import partial
from types import SimpleNamespace
import torch
import torch.nn as nn
from .helpers import (
EinOpsRearrange,
LearnableLogitScaling,
Normalize,
SelectElement,
SelectEOSAndProject,
)
from .multimodal_preprocessors import (
AudioPreprocessor,
IMUPreprocessor,
PadIm2Video,
PatchEmbedGeneric,
RGBDTPreprocessor,
SpatioTemporalPosEmbeddingHelper,
TextPreprocessor,
ThermalPreprocessor,
)
from .transformer import MultiheadAttention, SimpleTransformer
ModalityType = SimpleNamespace(
VISION="vision",
TEXT="text",
AUDIO="audio",
THERMAL="thermal",
DEPTH="depth",
IMU="imu",
)
class ImageBindModel(nn.Module):
def __init__(
self,
video_frames=2,
kernel_size=(2, 14, 14),
audio_kernel_size=16,
audio_stride=10,
out_embed_dim=768,
vision_embed_dim=1024,
vision_num_blocks=24,
vision_num_heads=16,
audio_embed_dim=768,
audio_num_blocks=12,
audio_num_heads=12,
audio_num_mel_bins=128,
audio_target_len=204,
audio_drop_path=0.1,
text_embed_dim=768,
text_num_blocks=12,
text_num_heads=12,
depth_embed_dim=384,
depth_kernel_size=16,
depth_num_blocks=12,
depth_num_heads=8,
depth_drop_path=0.0,
thermal_embed_dim=768,
thermal_kernel_size=16,
thermal_num_blocks=12,
thermal_num_heads=12,
thermal_drop_path=0.0,
imu_embed_dim=512,
imu_kernel_size=8,
imu_num_blocks=6,
imu_num_heads=8,
imu_drop_path=0.7,
):
super().__init__()
self.modality_preprocessors = self._create_modality_preprocessors(
video_frames,
vision_embed_dim,
kernel_size,
text_embed_dim,
audio_embed_dim,
audio_kernel_size,
audio_stride,
audio_num_mel_bins,
audio_target_len,
depth_embed_dim,
depth_kernel_size,
thermal_embed_dim,
thermal_kernel_size,
imu_embed_dim,
)
self.modality_trunks = self._create_modality_trunks(
vision_embed_dim,
vision_num_blocks,
vision_num_heads,
text_embed_dim,
text_num_blocks,
text_num_heads,
audio_embed_dim,
audio_num_blocks,
audio_num_heads,
audio_drop_path,
depth_embed_dim,
depth_num_blocks,
depth_num_heads,
depth_drop_path,
thermal_embed_dim,
thermal_num_blocks,
thermal_num_heads,
thermal_drop_path,
imu_embed_dim,
imu_num_blocks,
imu_num_heads,
imu_drop_path,
)
self.modality_heads = self._create_modality_heads(
out_embed_dim,
vision_embed_dim,
text_embed_dim,
audio_embed_dim,
depth_embed_dim,
thermal_embed_dim,
imu_embed_dim,
)
self.modality_postprocessors = self._create_modality_postprocessors(
out_embed_dim
)
def _create_modality_preprocessors(
self,
video_frames=2,
vision_embed_dim=1024,
kernel_size=(2, 14, 14),
text_embed_dim=768,
audio_embed_dim=768,
audio_kernel_size=16,
audio_stride=10,
audio_num_mel_bins=128,
audio_target_len=204,
depth_embed_dim=768,
depth_kernel_size=16,
thermal_embed_dim=768,
thermal_kernel_size=16,
imu_embed_dim=512,
):
rgbt_stem = PatchEmbedGeneric(
proj_stem=[
PadIm2Video(pad_type="repeat", ntimes=2),
nn.Conv3d(
in_channels=3,
kernel_size=kernel_size,
out_channels=vision_embed_dim,
stride=kernel_size,
bias=False,
),
]
)
rgbt_preprocessor = RGBDTPreprocessor(
img_size=[3, video_frames, 224, 224],
num_cls_tokens=1,
pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True),
rgbt_stem=rgbt_stem,
depth_stem=None,
)
text_preprocessor = TextPreprocessor(
context_length=77,
vocab_size=49408,
embed_dim=text_embed_dim,
causal_masking=True,
)
audio_stem = PatchEmbedGeneric(
proj_stem=[
nn.Conv2d(
in_channels=1,
kernel_size=audio_kernel_size,
stride=audio_stride,
out_channels=audio_embed_dim,
bias=False,
),
],
norm_layer=nn.LayerNorm(normalized_shape=audio_embed_dim),
)
audio_preprocessor = AudioPreprocessor(
img_size=[1, audio_num_mel_bins, audio_target_len],
num_cls_tokens=1,
pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True),
audio_stem=audio_stem,
)
depth_stem = PatchEmbedGeneric(
[
nn.Conv2d(
kernel_size=depth_kernel_size,
in_channels=1,
out_channels=depth_embed_dim,
stride=depth_kernel_size,
bias=False,
),
],
norm_layer=nn.LayerNorm(normalized_shape=depth_embed_dim),
)
depth_preprocessor = RGBDTPreprocessor(
img_size=[1, 224, 224],
num_cls_tokens=1,
pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True),
rgbt_stem=None,
depth_stem=depth_stem,
)
thermal_stem = PatchEmbedGeneric(
[
nn.Conv2d(
kernel_size=thermal_kernel_size,
in_channels=1,
out_channels=thermal_embed_dim,
stride=thermal_kernel_size,
bias=False,
),
],
norm_layer=nn.LayerNorm(normalized_shape=thermal_embed_dim),
)
thermal_preprocessor = ThermalPreprocessor(
img_size=[1, 224, 224],
num_cls_tokens=1,
pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True),
thermal_stem=thermal_stem,
)
imu_stem = PatchEmbedGeneric(
[
nn.Linear(
in_features=48,
out_features=imu_embed_dim,
bias=False,
),
],
norm_layer=nn.LayerNorm(normalized_shape=imu_embed_dim),
)
imu_preprocessor = IMUPreprocessor(
img_size=[6, 2000],
num_cls_tokens=1,
kernel_size=8,
embed_dim=imu_embed_dim,
pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True),
imu_stem=imu_stem,
)
modality_preprocessors = {
ModalityType.VISION: rgbt_preprocessor,
ModalityType.TEXT: text_preprocessor,
ModalityType.AUDIO: audio_preprocessor,
ModalityType.DEPTH: depth_preprocessor,
ModalityType.THERMAL: thermal_preprocessor,
ModalityType.IMU: imu_preprocessor,
}
return nn.ModuleDict(modality_preprocessors)
def _create_modality_trunks(
self,
vision_embed_dim=1024,
vision_num_blocks=24,
vision_num_heads=16,
text_embed_dim=768,
text_num_blocks=12,
text_num_heads=12,
audio_embed_dim=768,
audio_num_blocks=12,
audio_num_heads=12,
audio_drop_path=0.0,
depth_embed_dim=768,
depth_num_blocks=12,
depth_num_heads=12,
depth_drop_path=0.0,
thermal_embed_dim=768,
thermal_num_blocks=12,
thermal_num_heads=12,
thermal_drop_path=0.0,
imu_embed_dim=512,
imu_num_blocks=6,
imu_num_heads=8,
imu_drop_path=0.7,
):
def instantiate_trunk(
embed_dim, num_blocks, num_heads, pre_transformer_ln, add_bias_kv, drop_path
):
return SimpleTransformer(
embed_dim=embed_dim,
num_blocks=num_blocks,
ffn_dropout_rate=0.0,
drop_path_rate=drop_path,
attn_target=partial(
MultiheadAttention,
embed_dim=embed_dim,
num_heads=num_heads,
bias=True,
add_bias_kv=add_bias_kv,
),
pre_transformer_layer=nn.Sequential(
nn.LayerNorm(embed_dim, eps=1e-6)
if pre_transformer_ln
else nn.Identity(),
EinOpsRearrange("b l d -> l b d"),
),
post_transformer_layer=EinOpsRearrange("l b d -> b l d"),
)
modality_trunks = {}
modality_trunks[ModalityType.VISION] = instantiate_trunk(
vision_embed_dim,
vision_num_blocks,
vision_num_heads,
pre_transformer_ln=True,
add_bias_kv=False,
drop_path=0.0,
)
modality_trunks[ModalityType.TEXT] = instantiate_trunk(
text_embed_dim,
text_num_blocks,
text_num_heads,
pre_transformer_ln=False,
add_bias_kv=False,
drop_path=0.0,
)
modality_trunks[ModalityType.AUDIO] = instantiate_trunk(
audio_embed_dim,
audio_num_blocks,
audio_num_heads,
pre_transformer_ln=False,
add_bias_kv=True,
drop_path=audio_drop_path,
)
modality_trunks[ModalityType.DEPTH] = instantiate_trunk(
depth_embed_dim,
depth_num_blocks,
depth_num_heads,
pre_transformer_ln=False,
add_bias_kv=True,
drop_path=depth_drop_path,
)
modality_trunks[ModalityType.THERMAL] = instantiate_trunk(
thermal_embed_dim,
thermal_num_blocks,
thermal_num_heads,
pre_transformer_ln=False,
add_bias_kv=True,
drop_path=thermal_drop_path,
)
modality_trunks[ModalityType.IMU] = instantiate_trunk(
imu_embed_dim,
imu_num_blocks,
imu_num_heads,
pre_transformer_ln=False,
add_bias_kv=True,
drop_path=imu_drop_path,
)
return nn.ModuleDict(modality_trunks)
def _create_modality_heads(
self,
out_embed_dim,
vision_embed_dim,
text_embed_dim,
audio_embed_dim,
depth_embed_dim,
thermal_embed_dim,
imu_embed_dim,
):
modality_heads = {}
modality_heads[ModalityType.VISION] = nn.Sequential(
nn.LayerNorm(normalized_shape=vision_embed_dim, eps=1e-6),
SelectElement(index=0),
nn.Linear(vision_embed_dim, out_embed_dim, bias=False),
)
modality_heads[ModalityType.TEXT] = SelectEOSAndProject(
proj=nn.Sequential(
nn.LayerNorm(normalized_shape=text_embed_dim, eps=1e-6),
nn.Linear(text_embed_dim, out_embed_dim, bias=False),
)
)
modality_heads[ModalityType.AUDIO] = nn.Sequential(
nn.LayerNorm(normalized_shape=audio_embed_dim, eps=1e-6),
SelectElement(index=0),
nn.Linear(audio_embed_dim, out_embed_dim, bias=False),
)
modality_heads[ModalityType.DEPTH] = nn.Sequential(
nn.LayerNorm(normalized_shape=depth_embed_dim, eps=1e-6),
SelectElement(index=0),
nn.Linear(depth_embed_dim, out_embed_dim, bias=False),
)
modality_heads[ModalityType.THERMAL] = nn.Sequential(
nn.LayerNorm(normalized_shape=thermal_embed_dim, eps=1e-6),
SelectElement(index=0),
nn.Linear(thermal_embed_dim, out_embed_dim, bias=False),
)
modality_heads[ModalityType.IMU] = nn.Sequential(
nn.LayerNorm(normalized_shape=imu_embed_dim, eps=1e-6),
SelectElement(index=0),
nn.Dropout(p=0.5),
nn.Linear(imu_embed_dim, out_embed_dim, bias=False),
)
return nn.ModuleDict(modality_heads)
def _create_modality_postprocessors(self, out_embed_dim):
modality_postprocessors = {}
modality_postprocessors[ModalityType.VISION] = Normalize(dim=-1)
modality_postprocessors[ModalityType.TEXT] = nn.Sequential(
Normalize(dim=-1), LearnableLogitScaling(learnable=True)
)
modality_postprocessors[ModalityType.AUDIO] = nn.Sequential(
Normalize(dim=-1),
LearnableLogitScaling(logit_scale_init=20.0, learnable=False),
)
modality_postprocessors[ModalityType.DEPTH] = nn.Sequential(
Normalize(dim=-1),
LearnableLogitScaling(logit_scale_init=5.0, learnable=False),
)
modality_postprocessors[ModalityType.THERMAL] = nn.Sequential(
Normalize(dim=-1),
LearnableLogitScaling(logit_scale_init=10.0, learnable=False),
)
modality_postprocessors[ModalityType.IMU] = nn.Sequential(
Normalize(dim=-1),
LearnableLogitScaling(logit_scale_init=5.0, learnable=False),
)
return nn.ModuleDict(modality_postprocessors)
def forward(self, inputs):
outputs = {}
for modality_key, modality_value in inputs.items():
reduce_list = (
modality_value.ndim >= 5
) # Audio and Video inputs consist of multiple clips
if reduce_list:
B, S = modality_value.shape[:2]
modality_value = modality_value.reshape(
B * S, *modality_value.shape[2:]
)
if modality_value is not None:
modality_value = self.modality_preprocessors[modality_key](
**{modality_key: modality_value}
)
trunk_inputs = modality_value["trunk"]
head_inputs = modality_value["head"]
modality_value = self.modality_trunks[modality_key](**trunk_inputs)
modality_value = self.modality_heads[modality_key](
modality_value, **head_inputs
)
modality_value = self.modality_postprocessors[modality_key](
modality_value
)
if reduce_list:
modality_value = modality_value.reshape(B, S, -1)
modality_value = modality_value.mean(dim=1)
outputs[modality_key] = modality_value
return outputs
def imagebind_huge(pretrained=False):
model = ImageBindModel(
vision_embed_dim=1280,
vision_num_blocks=32,
vision_num_heads=16,
text_embed_dim=1024,
text_num_blocks=24,
text_num_heads=16,
out_embed_dim=1024,
audio_drop_path=0.1,
imu_drop_path=0.7,
)
if pretrained:
if not os.path.exists(".checkpoints/imagebind_huge.pth"):
print(
"Downloading imagebind weights to .checkpoints/imagebind_huge.pth ..."
)
os.makedirs(".checkpoints", exist_ok=True)
torch.hub.download_url_to_file(
"https://dl.fbaipublicfiles.com/imagebind/imagebind_huge.pth",
".checkpoints/imagebind_huge.pth",
progress=True,
)
model.load_state_dict(torch.load(".checkpoints/imagebind_huge.pth"))
return model
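# Illustrative sketch (added; not part of the original file): the model maps a
# dict keyed by ModalityType to a dict of L2-normalized embeddings of size
# out_embed_dim (1024 for imagebind_huge). With random token ids standing in
# for real BPE-tokenized text (context length 77, vocab size 49408):
#
#   model = imagebind_huge(pretrained=False).eval()
#   dummy_text = torch.randint(0, 49408, (2, 77))
#   with torch.no_grad():
#       out = model({ModalityType.TEXT: dummy_text})
#   # out[ModalityType.TEXT].shape == torch.Size([2, 1024])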
| Ocean-master | oceandb/utils/ImageBind/models/imagebind_model.py |
#!/usr/bin/env python3
# Portions Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Code modified from
# https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py ;
# https://github.com/facebookresearch/deit/blob/main/models.py
# and https://github.com/facebookresearch/vissl/blob/main/vissl/models/trunks/vision_transformer.py
from functools import partial
from typing import Callable, List
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, trunc_normal_
class Attention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version,
# can set manually to be compat with prev weights
self.scale = qk_scale or head_dim**-0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = (
self.qkv(x)
.reshape(B, N, 3, self.num_heads, C // self.num_heads)
.permute(2, 0, 3, 1, 4)
)
q, k, v = (
qkv[0],
qkv[1],
qkv[2],
) # make torchscript happy (cannot use tensor as tuple)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Mlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.0,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class MultiheadAttention(nn.MultiheadAttention):
def forward(self, x: torch.Tensor, attn_mask: torch.Tensor):
return super().forward(x, x, x, need_weights=False, attn_mask=attn_mask)[0]
class ViTAttention(Attention):
def forward(self, x: torch.Tensor, attn_mask: torch.Tensor):
assert attn_mask is None
return super().forward(x)
class BlockWithMasking(nn.Module):
def __init__(
self,
dim: int,
attn_target: Callable,
mlp_ratio: int = 4,
act_layer: Callable = nn.GELU,
norm_layer: Callable = nn.LayerNorm,
ffn_dropout_rate: float = 0.0,
drop_path: float = 0.0,
layer_scale_type: str = None,
layer_scale_init_value: float = 1e-4,
):
super().__init__()
assert not isinstance(
attn_target, nn.Module
), "attn_target should be a Callable. Otherwise attn_target is shared across blocks!"
self.attn = attn_target()
if drop_path > 0.0:
self.drop_path = DropPath(drop_path)
else:
self.drop_path = nn.Identity()
self.norm_1 = norm_layer(dim)
mlp_hidden_dim = int(mlp_ratio * dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=ffn_dropout_rate,
)
self.norm_2 = norm_layer(dim)
self.layer_scale_type = layer_scale_type
if self.layer_scale_type is not None:
assert self.layer_scale_type in [
"per_channel",
"scalar",
], f"Found Layer scale type {self.layer_scale_type}"
if self.layer_scale_type == "per_channel":
# one gamma value per channel
gamma_shape = [1, 1, dim]
elif self.layer_scale_type == "scalar":
# single gamma value for all channels
gamma_shape = [1, 1, 1]
# two gammas: for each part of the fwd in the encoder
self.layer_scale_gamma1 = nn.Parameter(
torch.ones(size=gamma_shape) * layer_scale_init_value,
requires_grad=True,
)
self.layer_scale_gamma2 = nn.Parameter(
torch.ones(size=gamma_shape) * layer_scale_init_value,
requires_grad=True,
)
def forward(self, x: torch.Tensor, attn_mask: torch.Tensor):
if self.layer_scale_type is None:
x = x + self.drop_path(self.attn(self.norm_1(x), attn_mask))
x = x + self.drop_path(self.mlp(self.norm_2(x)))
else:
x = (
x
+ self.drop_path(self.attn(self.norm_1(x), attn_mask))
* self.layer_scale_gamma1
)
x = x + self.drop_path(self.mlp(self.norm_2(x))) * self.layer_scale_gamma2
return x
_LAYER_NORM = partial(nn.LayerNorm, eps=1e-6)
class SimpleTransformer(nn.Module):
def __init__(
self,
attn_target: Callable,
embed_dim: int,
num_blocks: int,
block: Callable = BlockWithMasking,
pre_transformer_layer: Callable = None,
post_transformer_layer: Callable = None,
drop_path_rate: float = 0.0,
drop_path_type: str = "progressive",
norm_layer: Callable = _LAYER_NORM,
mlp_ratio: int = 4,
ffn_dropout_rate: float = 0.0,
layer_scale_type: str = None, # from cait; possible values are None, "per_channel", "scalar"
layer_scale_init_value: float = 1e-4, # from cait; float
weight_init_style: str = "jax", # possible values jax or pytorch
):
"""
Simple Transformer with the following features
1. Supports masked attention
2. Supports DropPath
3. Supports LayerScale
4. Supports Dropout in Attention and FFN
5. Makes few assumptions about the input except that it is a Tensor
"""
super().__init__()
self.pre_transformer_layer = pre_transformer_layer
if drop_path_type == "progressive":
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_blocks)]
elif drop_path_type == "uniform":
dpr = [drop_path_rate for i in range(num_blocks)]
else:
raise ValueError(f"Unknown drop_path_type: {drop_path_type}")
self.blocks = nn.Sequential(
*[
block(
dim=embed_dim,
attn_target=attn_target,
mlp_ratio=mlp_ratio,
ffn_dropout_rate=ffn_dropout_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
layer_scale_type=layer_scale_type,
layer_scale_init_value=layer_scale_init_value,
)
for i in range(num_blocks)
]
)
self.post_transformer_layer = post_transformer_layer
self.weight_init_style = weight_init_style
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
if self.weight_init_style == "jax":
# Based on MAE and official Jax ViT implementation
torch.nn.init.xavier_uniform_(m.weight)
elif self.weight_init_style == "pytorch":
# PyTorch ViT uses trunc_normal_
trunc_normal_(m.weight, std=0.02)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.LayerNorm)):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def forward(
self,
tokens: torch.Tensor,
attn_mask: torch.Tensor = None,
use_checkpoint: bool = False,
checkpoint_every_n: int = 1,
checkpoint_blk_ids: List[int] = None,
):
"""
Inputs
- tokens: data of shape N x L x D (or L x N x D depending on the attention implementation)
        - attn_mask: mask of shape L x L
Output
- x: data of shape N x L x D (or L x N x D depending on the attention implementation)
"""
if self.pre_transformer_layer:
tokens = self.pre_transformer_layer(tokens)
if use_checkpoint and checkpoint_blk_ids is None:
checkpoint_blk_ids = [
blk_id
for blk_id in range(len(self.blocks))
if blk_id % checkpoint_every_n == 0
]
if checkpoint_blk_ids:
checkpoint_blk_ids = set(checkpoint_blk_ids)
for blk_id, blk in enumerate(self.blocks):
if use_checkpoint and blk_id in checkpoint_blk_ids:
tokens = checkpoint.checkpoint(
blk, tokens, attn_mask, use_reentrant=False
)
else:
tokens = blk(tokens, attn_mask=attn_mask)
if self.post_transformer_layer:
tokens = self.post_transformer_layer(tokens)
return tokens
| Ocean-master | oceandb/utils/ImageBind/models/transformer.py |
#!/usr/bin/env python3
# Portions Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import gzip
import html
import io
import math
from functools import lru_cache
from typing import Callable, List, Optional
import ftfy
import numpy as np
import regex as re
import torch
import torch.nn as nn
from iopath.common.file_io import g_pathmgr
from timm.models.layers import trunc_normal_
from .helpers import cast_if_src_dtype, VerboseNNModule
def get_sinusoid_encoding_table(n_position, d_hid):
"""Sinusoid position encoding table"""
# TODO: make it with torch instead of numpy
def get_position_angle_vec(position):
return [
position / np.power(10000, 2 * (hid_j // 2) / d_hid)
for hid_j in range(d_hid)
]
sinusoid_table = np.array(
[get_position_angle_vec(pos_i) for pos_i in range(n_position)]
)
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
return torch.FloatTensor(sinusoid_table).unsqueeze(0)
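# Illustrative sketch (added): the table has shape (1, n_position, d_hid), with
# sines in the even feature dimensions and cosines in the odd ones; position 0
# is therefore [0, 1, 0, 1, ...].
#
#   table = get_sinusoid_encoding_table(n_position=16, d_hid=8)
#   # table.shape == torch.Size([1, 16, 8])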
def interpolate_pos_encoding_2d(target_spatial_size, pos_embed):
N = pos_embed.shape[1]
if N == target_spatial_size:
return pos_embed
dim = pos_embed.shape[-1]
# nn.functional.interpolate doesn't work with bfloat16 so we cast to float32
pos_embed, updated = cast_if_src_dtype(pos_embed, torch.bfloat16, torch.float32)
pos_embed = nn.functional.interpolate(
pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(
0, 3, 1, 2
),
scale_factor=math.sqrt(target_spatial_size / N),
mode="bicubic",
)
if updated:
pos_embed, _ = cast_if_src_dtype(pos_embed, torch.float32, torch.bfloat16)
pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return pos_embed
def interpolate_pos_encoding(
npatch_per_img,
pos_embed,
patches_layout,
input_shape=None,
first_patch_idx=1,
):
assert first_patch_idx == 0 or first_patch_idx == 1, "there is 1 CLS token or none"
N = pos_embed.shape[1] - first_patch_idx # since it's 1 if cls_token exists
if npatch_per_img == N:
return pos_embed
assert (
patches_layout[-1] == patches_layout[-2]
), "Interpolation of pos embed not supported for non-square layouts"
class_emb = pos_embed[:, :first_patch_idx]
pos_embed = pos_embed[:, first_patch_idx:]
if input_shape is None or patches_layout[0] == 1:
# simple 2D pos embedding, no temporal component
pos_embed = interpolate_pos_encoding_2d(npatch_per_img, pos_embed)
elif patches_layout[0] > 1:
# pos embed has a temporal component
assert len(input_shape) == 4, "temporal interpolation not supported"
# we only support 2D interpolation in this case
num_frames = patches_layout[0]
num_spatial_tokens = patches_layout[1] * patches_layout[2]
pos_embed = pos_embed.view(1, num_frames, num_spatial_tokens, -1)
# interpolate embedding for zeroth frame
pos_embed = interpolate_pos_encoding_2d(
npatch_per_img, pos_embed[0, 0, ...].unsqueeze(0)
)
else:
raise ValueError("This type of interpolation isn't implemented")
return torch.cat((class_emb, pos_embed), dim=1)
def _get_pos_embedding(
npatch_per_img,
pos_embed,
patches_layout,
input_shape,
first_patch_idx=1,
):
pos_embed = interpolate_pos_encoding(
npatch_per_img,
pos_embed,
patches_layout,
input_shape=input_shape,
first_patch_idx=first_patch_idx,
)
return pos_embed
class PatchEmbedGeneric(nn.Module):
"""
PatchEmbed from Hydra
"""
def __init__(self, proj_stem, norm_layer: Optional[nn.Module] = None):
super().__init__()
if len(proj_stem) > 1:
self.proj = nn.Sequential(*proj_stem)
else:
# Special case to be able to load pre-trained models that were
# trained with a standard stem
self.proj = proj_stem[0]
self.norm_layer = norm_layer
def get_patch_layout(self, img_size):
with torch.no_grad():
dummy_img = torch.zeros(
[
1,
]
+ img_size
)
dummy_out = self.proj(dummy_img)
embed_dim = dummy_out.shape[1]
patches_layout = tuple(dummy_out.shape[2:])
num_patches = np.prod(patches_layout)
return patches_layout, num_patches, embed_dim
def forward(self, x):
x = self.proj(x)
# B C (T) H W -> B (T)HW C
x = x.flatten(2).transpose(1, 2)
if self.norm_layer is not None:
x = self.norm_layer(x)
return x
class SpatioTemporalPosEmbeddingHelper(VerboseNNModule):
def __init__(
self,
patches_layout: List,
num_patches: int,
num_cls_tokens: int,
embed_dim: int,
learnable: bool,
) -> None:
super().__init__()
self.num_cls_tokens = num_cls_tokens
self.patches_layout = patches_layout
self.num_patches = num_patches
self.num_tokens = num_cls_tokens + num_patches
self.learnable = learnable
if self.learnable:
self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, embed_dim))
trunc_normal_(self.pos_embed, std=0.02)
else:
self.register_buffer(
"pos_embed", get_sinusoid_encoding_table(self.num_tokens, embed_dim)
)
def get_pos_embedding(self, vision_input, all_vision_tokens):
input_shape = vision_input.shape
pos_embed = _get_pos_embedding(
all_vision_tokens.size(1) - self.num_cls_tokens,
pos_embed=self.pos_embed,
patches_layout=self.patches_layout,
input_shape=input_shape,
first_patch_idx=self.num_cls_tokens,
)
return pos_embed
class RGBDTPreprocessor(VerboseNNModule):
def __init__(
self,
rgbt_stem: PatchEmbedGeneric,
depth_stem: PatchEmbedGeneric,
img_size: List = (3, 224, 224),
num_cls_tokens: int = 1,
pos_embed_fn: Callable = None,
use_type_embed: bool = False,
init_param_style: str = "openclip",
) -> None:
super().__init__()
stem = rgbt_stem if rgbt_stem is not None else depth_stem
(
self.patches_layout,
self.num_patches,
self.embed_dim,
) = stem.get_patch_layout(img_size)
self.rgbt_stem = rgbt_stem
self.depth_stem = depth_stem
self.use_pos_embed = pos_embed_fn is not None
self.use_type_embed = use_type_embed
self.num_cls_tokens = num_cls_tokens
if self.use_pos_embed:
self.pos_embedding_helper = pos_embed_fn(
patches_layout=self.patches_layout,
num_cls_tokens=num_cls_tokens,
num_patches=self.num_patches,
embed_dim=self.embed_dim,
)
if self.num_cls_tokens > 0:
self.cls_token = nn.Parameter(
torch.zeros(1, self.num_cls_tokens, self.embed_dim)
)
if self.use_type_embed:
self.type_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
self.init_parameters(init_param_style)
@torch.no_grad()
def init_parameters(self, init_param_style):
if init_param_style == "openclip":
# OpenCLIP style initialization
scale = self.embed_dim**-0.5
if self.use_pos_embed:
nn.init.normal_(self.pos_embedding_helper.pos_embed)
self.pos_embedding_helper.pos_embed *= scale
if self.num_cls_tokens > 0:
nn.init.normal_(self.cls_token)
self.cls_token *= scale
elif init_param_style == "vit":
self.cls_token.data.fill_(0)
else:
raise ValueError(f"Unknown init {init_param_style}")
if self.use_type_embed:
nn.init.normal_(self.type_embed)
def tokenize_input_and_cls_pos(self, input, stem, mask):
# tokens is of shape B x L x D
tokens = stem(input)
assert tokens.ndim == 3
assert tokens.shape[2] == self.embed_dim
B = tokens.shape[0]
if self.num_cls_tokens > 0:
class_tokens = self.cls_token.expand(
B, -1, -1
) # stole class_tokens impl from Phil Wang, thanks
tokens = torch.cat((class_tokens, tokens), dim=1)
if self.use_pos_embed:
pos_embed = self.pos_embedding_helper.get_pos_embedding(input, tokens)
tokens = tokens + pos_embed
if self.use_type_embed:
tokens = tokens + self.type_embed.expand(B, -1, -1)
return tokens
def forward(self, vision=None, depth=None, patch_mask=None):
if patch_mask is not None:
raise NotImplementedError()
if vision is not None:
vision_tokens = self.tokenize_input_and_cls_pos(
vision, self.rgbt_stem, patch_mask
)
if depth is not None:
depth_tokens = self.tokenize_input_and_cls_pos(
depth, self.depth_stem, patch_mask
)
# aggregate tokens
if vision is not None and depth is not None:
final_tokens = vision_tokens + depth_tokens
else:
final_tokens = vision_tokens if vision is not None else depth_tokens
return_dict = {
"trunk": {
"tokens": final_tokens,
},
"head": {},
}
return return_dict
class AudioPreprocessor(RGBDTPreprocessor):
def __init__(self, audio_stem: PatchEmbedGeneric, **kwargs) -> None:
super().__init__(rgbt_stem=audio_stem, depth_stem=None, **kwargs)
def forward(self, audio=None):
return super().forward(vision=audio)
class ThermalPreprocessor(RGBDTPreprocessor):
def __init__(self, thermal_stem: PatchEmbedGeneric, **kwargs) -> None:
super().__init__(rgbt_stem=thermal_stem, depth_stem=None, **kwargs)
def forward(self, thermal=None):
return super().forward(vision=thermal)
def build_causal_attention_mask(context_length):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(context_length, context_length, requires_grad=False)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
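# Illustrative sketch (added): the mask is additive and strictly upper
# triangular, so token i can only attend to positions <= i. For
# context_length=4:
#
#   build_causal_attention_mask(4)
#   # tensor([[0., -inf, -inf, -inf],
#   #         [0.,   0., -inf, -inf],
#   #         [0.,   0.,   0., -inf],
#   #         [0.,   0.,   0.,   0.]])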
class TextPreprocessor(VerboseNNModule):
def __init__(
self,
vocab_size: int,
context_length: int,
embed_dim: int,
causal_masking: bool,
supply_seq_len_to_head: bool = True,
num_cls_tokens: int = 0,
init_param_style: str = "openclip",
) -> None:
super().__init__()
self.vocab_size = vocab_size
self.context_length = context_length
self.token_embedding = nn.Embedding(vocab_size, embed_dim)
self.pos_embed = nn.Parameter(
torch.empty(1, self.context_length + num_cls_tokens, embed_dim)
)
self.causal_masking = causal_masking
if self.causal_masking:
mask = build_causal_attention_mask(self.context_length)
# register the mask as a buffer so it can be moved to the right device
self.register_buffer("mask", mask)
self.supply_seq_len_to_head = supply_seq_len_to_head
self.num_cls_tokens = num_cls_tokens
self.embed_dim = embed_dim
if num_cls_tokens > 0:
assert self.causal_masking is False, "Masking + CLS token isn't implemented"
self.cls_token = nn.Parameter(
torch.zeros(1, self.num_cls_tokens, embed_dim)
)
self.init_parameters(init_param_style)
@torch.no_grad()
def init_parameters(self, init_param_style="openclip"):
# OpenCLIP style initialization
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.pos_embed, std=0.01)
if init_param_style == "openclip":
# OpenCLIP style initialization
scale = self.embed_dim**-0.5
if self.num_cls_tokens > 0:
nn.init.normal_(self.cls_token)
self.cls_token *= scale
elif init_param_style == "vit":
self.cls_token.data.fill_(0)
else:
raise ValueError(f"Unknown init {init_param_style}")
def forward(self, text):
# text tokens are of shape B x L x D
text_tokens = self.token_embedding(text)
# concat CLS tokens if any
if self.num_cls_tokens > 0:
B = text_tokens.shape[0]
class_tokens = self.cls_token.expand(
B, -1, -1
) # stole class_tokens impl from Phil Wang, thanks
text_tokens = torch.cat((class_tokens, text_tokens), dim=1)
text_tokens = text_tokens + self.pos_embed
return_dict = {
"trunk": {
"tokens": text_tokens,
},
"head": {},
}
# Compute sequence length after adding CLS tokens
if self.supply_seq_len_to_head:
text_lengths = text.argmax(dim=-1)
return_dict["head"] = {
"seq_len": text_lengths,
}
if self.causal_masking:
return_dict["trunk"].update({"attn_mask": self.mask})
return return_dict
class Im2Video(nn.Module):
"""Convert an image into a trivial video."""
def __init__(self, time_dim=2):
super().__init__()
self.time_dim = time_dim
def forward(self, x):
if x.ndim == 4:
# B, C, H, W -> B, C, T, H, W
return x.unsqueeze(self.time_dim)
elif x.ndim == 5:
return x
else:
raise ValueError(f"Dimension incorrect {x.shape}")
class PadIm2Video(Im2Video):
def __init__(self, ntimes, pad_type, time_dim=2):
super().__init__(time_dim=time_dim)
assert ntimes > 0
assert pad_type in ["zero", "repeat"]
self.ntimes = ntimes
self.pad_type = pad_type
def forward(self, x):
x = super().forward(x)
if x.shape[self.time_dim] == 1:
if self.pad_type == "repeat":
new_shape = [1] * len(x.shape)
new_shape[self.time_dim] = self.ntimes
x = x.repeat(new_shape)
elif self.pad_type == "zero":
padarg = [0, 0] * len(x.shape)
padarg[2 * self.time_dim + 1] = self.ntimes - x.shape[self.time_dim]
x = nn.functional.pad(x, padarg)
return x
# Modified from github.com/openai/CLIP
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = (
list(range(ord("!"), ord("~") + 1))
+ list(range(ord("¡"), ord("¬") + 1))
+ list(range(ord("®"), ord("ÿ") + 1))
)
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r"\s+", " ", text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str, context_length=77):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
with g_pathmgr.open(bpe_path, "rb") as fh:
bpe_bytes = io.BytesIO(fh.read())
merges = gzip.open(bpe_bytes).read().decode("utf-8").split("\n")
merges = merges[1 : 49152 - 256 - 2 + 1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v + "</w>" for v in vocab]
for merge in merges:
vocab.append("".join(merge))
vocab.extend(["<|startoftext|>", "<|endoftext|>"])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {
"<|startoftext|>": "<|startoftext|>",
"<|endoftext|>": "<|endoftext|>",
}
self.pat = re.compile(
r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
re.IGNORECASE,
)
self.context_length = context_length
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + (token[-1] + "</w>",)
pairs = get_pairs(word)
if not pairs:
return token + "</w>"
while True:
bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = " ".join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = "".join(self.byte_encoder[b] for b in token.encode("utf-8"))
bpe_tokens.extend(
self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")
)
return bpe_tokens
def decode(self, tokens):
text = "".join([self.decoder[token] for token in tokens])
text = (
bytearray([self.byte_decoder[c] for c in text])
.decode("utf-8", errors="replace")
.replace("</w>", " ")
)
return text
def __call__(self, texts, context_length=None):
if not context_length:
context_length = self.context_length
if isinstance(texts, str):
texts = [texts]
sot_token = self.encoder["<|startoftext|>"]
eot_token = self.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + self.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
tokens = tokens[:context_length]
result[i, : len(tokens)] = torch.tensor(tokens)
if len(result) == 1:
return result[0]
return result
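# Illustrative sketch (added): assuming the bundled BPE vocabulary is available
# at the path used by the data module (oceandb/utils/ImageBind/bpe_simple_vocab_16e6.txt.gz),
# the tokenizer pads/truncates to its context length and returns int64 ids:
#
#   tok = SimpleTokenizer(bpe_path="oceandb/utils/ImageBind/bpe_simple_vocab_16e6.txt.gz")
#   ids = tok("a photo of a dog")
#   # ids.shape == torch.Size([77]); ids[0] is <|startoftext|>, trailing zeros are padding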
class IMUPreprocessor(VerboseNNModule):
def __init__(
self,
kernel_size: int,
imu_stem: PatchEmbedGeneric,
embed_dim: int,
img_size: List = (6, 2000),
num_cls_tokens: int = 1,
pos_embed_fn: Callable = None,
init_param_style: str = "openclip",
) -> None:
super().__init__()
self.imu_stem = imu_stem
self.embed_dim = embed_dim
self.use_pos_embed = pos_embed_fn is not None
self.num_cls_tokens = num_cls_tokens
self.kernel_size = kernel_size
self.pos_embed = nn.Parameter(
torch.empty(1, (img_size[1] // kernel_size) + num_cls_tokens, embed_dim)
)
if self.num_cls_tokens > 0:
self.cls_token = nn.Parameter(
torch.zeros(1, self.num_cls_tokens, self.embed_dim)
)
self.init_parameters(init_param_style)
@torch.no_grad()
def init_parameters(self, init_param_style):
nn.init.normal_(self.pos_embed, std=0.01)
if init_param_style == "openclip":
# OpenCLIP style initialization
scale = self.embed_dim**-0.5
if self.num_cls_tokens > 0:
nn.init.normal_(self.cls_token)
self.cls_token *= scale
elif init_param_style == "vit":
self.cls_token.data.fill_(0)
else:
raise ValueError(f"Unknown init {init_param_style}")
def tokenize_input_and_cls_pos(self, input, stem):
# tokens is of shape B x L x D
tokens = stem.norm_layer(stem.proj(input))
assert tokens.ndim == 3
assert tokens.shape[2] == self.embed_dim
B = tokens.shape[0]
if self.num_cls_tokens > 0:
class_tokens = self.cls_token.expand(
B, -1, -1
) # stole class_tokens impl from Phil Wang, thanks
tokens = torch.cat((class_tokens, tokens), dim=1)
if self.use_pos_embed:
tokens = tokens + self.pos_embed
return tokens
def forward(self, imu):
# Patchify
imu = imu.unfold(
-1,
self.kernel_size,
self.kernel_size,
).permute(0, 2, 1, 3)
imu = imu.reshape(imu.size(0), imu.size(1), -1)
imu_tokens = self.tokenize_input_and_cls_pos(
imu,
self.imu_stem,
)
return_dict = {
"trunk": {
"tokens": imu_tokens,
},
"head": {},
}
return return_dict
| Ocean-master | oceandb/utils/ImageBind/models/multimodal_preprocessors.py |
#!/usr/bin/env python3
# Portions Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import einops
import numpy as np
import torch
import torch.nn as nn
class Normalize(nn.Module):
def __init__(self, dim: int) -> None:
super().__init__()
self.dim = dim
def forward(self, x):
return torch.nn.functional.normalize(x, dim=self.dim, p=2)
class LearnableLogitScaling(nn.Module):
def __init__(
self,
logit_scale_init: float = 1 / 0.07,
learnable: bool = True,
max_logit_scale: float = 100,
) -> None:
super().__init__()
self.max_logit_scale = max_logit_scale
self.logit_scale_init = logit_scale_init
self.learnable = learnable
log_logit_scale = torch.ones([]) * np.log(self.logit_scale_init)
if learnable:
self.log_logit_scale = nn.Parameter(log_logit_scale)
else:
self.register_buffer("log_logit_scale", log_logit_scale)
def forward(self, x):
return torch.clip(self.log_logit_scale.exp(), max=self.max_logit_scale) * x
def extra_repr(self):
st = f"logit_scale_init={self.logit_scale_init},learnable={self.learnable}, max_logit_scale={self.max_logit_scale}"
return st
class EinOpsRearrange(nn.Module):
def __init__(self, rearrange_expr: str, **kwargs) -> None:
super().__init__()
self.rearrange_expr = rearrange_expr
self.kwargs = kwargs
def forward(self, x):
assert isinstance(x, torch.Tensor)
return einops.rearrange(x, self.rearrange_expr, **self.kwargs)
class VerboseNNModule(nn.Module):
"""
Wrapper around nn.Module that prints registered buffers and parameter names.
"""
@staticmethod
def get_readable_tensor_repr(name: str, tensor: torch.Tensor) -> str:
st = (
"("
+ name
+ "): "
+ "tensor("
+ str(tuple(tensor[1].shape))
+ ", requires_grad="
+ str(tensor[1].requires_grad)
+ ")\n"
)
return st
def extra_repr(self) -> str:
named_modules = set()
for p in self.named_modules():
named_modules.update([p[0]])
named_modules = list(named_modules)
string_repr = ""
for p in self.named_parameters():
name = p[0].split(".")[0]
if name not in named_modules:
string_repr += self.get_readable_tensor_repr(name, p)
for p in self.named_buffers():
name = p[0].split(".")[0]
string_repr += self.get_readable_tensor_repr(name, p)
return string_repr
def cast_if_src_dtype(
tensor: torch.Tensor, src_dtype: torch.dtype, tgt_dtype: torch.dtype
):
updated = False
if tensor.dtype == src_dtype:
tensor = tensor.to(dtype=tgt_dtype)
updated = True
return tensor, updated
class QuickGELU(nn.Module):
# From https://github.com/openai/CLIP/blob/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1/clip/model.py#L166
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class SelectElement(nn.Module):
def __init__(self, index) -> None:
super().__init__()
self.index = index
def forward(self, x):
assert x.ndim >= 3
return x[:, self.index, ...]
class SelectEOSAndProject(nn.Module):
"""
Text Pooling used in OpenCLIP
"""
def __init__(self, proj: nn.Module) -> None:
super().__init__()
self.proj = proj
def forward(self, x, seq_len):
assert x.ndim == 3
# x is of shape B x L x D
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), seq_len]
x = self.proj(x)
return x
| Ocean-master | oceandb/utils/ImageBind/models/helpers.py |
# type: ignore
from oceandb.api.types import (
Documents,
Embeddings,
IDs,
Metadatas,
Where,
WhereDocument,
)
from oceandb.db import DB
from oceandb.db.index.hnswlib import Hnswlib, delete_all_indexes
from oceandb.errors import (
NoDatapointsException,
)
import uuid
import numpy.typing as npt
import json
from typing import Dict, Optional, Sequence, List, Tuple, cast
import clickhouse_connect
from clickhouse_connect.driver.client import Client
from clickhouse_connect import common
import logging
from uuid import UUID
logger = logging.getLogger(__name__)
COLLECTION_TABLE_SCHEMA = [{"uuid": "UUID"}, {"name": "String"}, {"metadata": "String"}]
EMBEDDING_TABLE_SCHEMA = [
{"collection_uuid": "UUID"},
{"uuid": "UUID"},
{"embedding": "Array(Float64)"},
{"document": "Nullable(String)"},
{"id": "Nullable(String)"},
{"metadata": "Nullable(String)"},
]
def db_array_schema_to_clickhouse_schema(table_schema):
return_str = ""
for element in table_schema:
for k, v in element.items():
return_str += f"{k} {v}, "
return return_str
def db_schema_to_keys() -> List[str]:
keys = []
for element in EMBEDDING_TABLE_SCHEMA:
keys.append(list(element.keys())[0])
return keys
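# Illustrative sketch (added): the helpers above flatten the schema dicts into
# the column definitions and key lists used by the SQL below.
#
#   db_array_schema_to_clickhouse_schema(COLLECTION_TABLE_SCHEMA)
#   # -> "uuid UUID, name String, metadata String, "   (note the trailing ", ")
#   db_schema_to_keys()
#   # -> ['collection_uuid', 'uuid', 'embedding', 'document', 'id', 'metadata']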
class Clickhouse(DB):
#
# INIT METHODS
#
def __init__(self, settings):
self._conn = None
self._settings = settings
def _init_conn(self):
common.set_setting("autogenerate_session_id", False)
self._conn = clickhouse_connect.get_client(
host=self._settings.clickhouse_host,
port=int(self._settings.clickhouse_port),
)
self._create_table_collections(self._conn)
self._create_table_embeddings(self._conn)
def _get_conn(self) -> Client:
if self._conn is None:
self._init_conn()
return self._conn
def _create_table_collections(self, conn):
conn.command(
f"""CREATE TABLE IF NOT EXISTS collections (
{db_array_schema_to_clickhouse_schema(COLLECTION_TABLE_SCHEMA)}
) ENGINE = MergeTree() ORDER BY uuid"""
)
def _create_table_embeddings(self, conn):
conn.command(
f"""CREATE TABLE IF NOT EXISTS embeddings (
{db_array_schema_to_clickhouse_schema(EMBEDDING_TABLE_SCHEMA)}
) ENGINE = MergeTree() ORDER BY collection_uuid"""
)
index_cache = {}
def _index(self, collection_id):
"""Retrieve an HNSW index instance for the given collection"""
if collection_id not in self.index_cache:
coll = self.get_collection_by_id(collection_id)
collection_metadata = coll[2]
index = Hnswlib(collection_id, self._settings, collection_metadata)
self.index_cache[collection_id] = index
return self.index_cache[collection_id]
def _delete_index(self, collection_id):
"""Delete an index from the cache"""
index = self._index(collection_id)
index.delete()
del self.index_cache[collection_id]
#
# UTILITY METHODS
#
def persist(self):
raise NotImplementedError(
"Clickhouse is a persistent database, this method is not needed"
)
def get_collection_uuid_from_name(self, name: str) -> UUID:
res = self._get_conn().query(
f"""
SELECT uuid FROM collections WHERE name = '{name}'
"""
)
return res.result_rows[0][0]
def _create_where_clause(
self,
collection_uuid: str,
ids: Optional[List[str]] = None,
where: Where = {},
where_document: WhereDocument = {},
):
where_clauses: List[str] = []
self._format_where(where, where_clauses)
if len(where_document) > 0:
where_document_clauses = []
self._format_where_document(where_document, where_document_clauses)
where_clauses.extend(where_document_clauses)
if ids is not None:
where_clauses.append(f" id IN {tuple(ids)}")
where_clauses.append(f"collection_uuid = '{collection_uuid}'")
where_str = " AND ".join(where_clauses)
where_str = f"WHERE {where_str}"
return where_str
#
# COLLECTION METHODS
#
def create_collection(
self, name: str, metadata: Optional[Dict] = None, get_or_create: bool = False
) -> Sequence:
# poor man's unique constraint
dupe_check = self.get_collection(name)
if len(dupe_check) > 0:
if get_or_create:
if dupe_check[0][2] != metadata:
self.update_collection(
dupe_check[0][0], new_name=name, new_metadata=metadata
)
dupe_check = self.get_collection(name)
logger.info(
f"collection with name {name} already exists, returning existing collection"
)
return dupe_check
else:
raise ValueError(f"Collection with name {name} already exists")
collection_uuid = uuid.uuid4()
data_to_insert = [[collection_uuid, name, json.dumps(metadata)]]
self._get_conn().insert(
"collections", data_to_insert, column_names=["uuid", "name", "metadata"]
)
return [[collection_uuid, name, metadata]]
def get_collection(self, name: str):
res = (
self._get_conn()
.query(
f"""
SELECT * FROM collections WHERE name = '{name}'
"""
)
.result_rows
)
# json.loads the metadata
return [[x[0], x[1], json.loads(x[2])] for x in res]
def get_collection_by_id(self, collection_uuid: str):
res = (
self._get_conn()
.query(
f"""
SELECT * FROM collections WHERE uuid = '{collection_uuid}'
"""
)
.result_rows
)
# json.loads the metadata
return [[x[0], x[1], json.loads(x[2])] for x in res][0]
def list_collections(self) -> Sequence:
res = self._get_conn().query("SELECT * FROM collections").result_rows
return [[x[0], x[1], json.loads(x[2])] for x in res]
def update_collection(
self,
id: UUID,
new_name: Optional[str] = None,
new_metadata: Optional[Dict] = None,
):
if new_name is not None:
self._get_conn().command(
"ALTER TABLE collections UPDATE name = %(new_name)s WHERE uuid = %(uuid)s",
parameters={"new_name": new_name, "uuid": id},
)
if new_metadata is not None:
self._get_conn().command(
"ALTER TABLE collections UPDATE metadata = %(new_metadata)s WHERE uuid = %(uuid)s",
parameters={"new_metadata": json.dumps(new_metadata), "uuid": id},
)
def delete_collection(self, name: str):
collection_uuid = self.get_collection_uuid_from_name(name)
self._get_conn().command(
f"""
DELETE FROM embeddings WHERE collection_uuid = '{collection_uuid}'
"""
)
self._delete_index(collection_uuid)
self._get_conn().command(
f"""
DELETE FROM collections WHERE name = '{name}'
"""
)
#
# ITEM METHODS
#
def add(self, collection_uuid, embeddings, metadatas, documents, ids):
data_to_insert = [
[
collection_uuid,
uuid.uuid4(),
embedding,
json.dumps(metadatas[i]) if metadatas else None,
documents[i] if documents else None,
ids[i],
]
for i, embedding in enumerate(embeddings)
]
column_names = [
"collection_uuid",
"uuid",
"embedding",
"metadata",
"document",
"id",
]
self._get_conn().insert("embeddings", data_to_insert, column_names=column_names)
return [x[1] for x in data_to_insert] # return uuids
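    # _update below builds a single "ALTER TABLE embeddings UPDATE ..." statement whose
    # per-id clauses are comma separated (hence the trailing comma on all but the last
    # clause) and whose values are bound through the `parameters` dict.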
def _update(
self,
collection_uuid,
ids: IDs,
embeddings: Optional[Embeddings],
metadatas: Optional[Metadatas],
documents: Optional[Documents],
):
updates = []
parameters = {}
for i in range(len(ids)):
update_fields = []
parameters[f"i{i}"] = ids[i]
if embeddings is not None:
update_fields.append(f"embedding = %(e{i})s")
parameters[f"e{i}"] = embeddings[i]
if metadatas is not None:
update_fields.append(f"metadata = %(m{i})s")
parameters[f"m{i}"] = json.dumps(metadatas[i])
if documents is not None:
update_fields.append(f"document = %(d{i})s")
parameters[f"d{i}"] = documents[i]
update_statement = f"""
UPDATE
{",".join(update_fields)}
WHERE
id = %(i{i})s AND
collection_uuid = '{collection_uuid}'{"" if i == len(ids) - 1 else ","}
"""
updates.append(update_statement)
update_clauses = ("").join(updates)
self._get_conn().command(
f"ALTER TABLE embeddings {update_clauses}", parameters=parameters
)
def update(
self,
collection_uuid,
ids: IDs,
embeddings: Optional[Embeddings] = None,
metadatas: Optional[Metadatas] = None,
documents: Optional[Documents] = None,
):
# Verify all IDs exist
existing_items = self.get(collection_uuid=collection_uuid, ids=ids)
if len(existing_items) != len(ids):
raise ValueError(
f"Could not find {len(ids) - len(existing_items)} items for update"
)
# Update the db
self._update(collection_uuid, ids, embeddings, metadatas, documents)
# Update the index
if embeddings is not None:
            # `get` currently returns items in arbitrary order.

# TODO if we fix `get`, we can remove this explicit mapping.
uuid_mapping = {r[4]: r[1] for r in existing_items}
update_uuids = [uuid_mapping[id] for id in ids]
index = self._index(collection_uuid)
index.add(update_uuids, embeddings, update=True)
def _get(self, where={}, columns: Optional[List] = None):
select_columns = db_schema_to_keys() if columns is None else columns
val = (
self._get_conn()
.query(f"""SELECT {",".join(select_columns)} FROM embeddings {where}""")
.result_rows
)
for i in range(len(val)):
# We know val has index abilities, so cast it for typechecker
val = cast(list, val)
val[i] = list(val[i])
# json.load the metadata
if "metadata" in select_columns:
metadata_column_index = select_columns.index("metadata")
db_metadata = val[i][metadata_column_index]
val[i][metadata_column_index] = (
json.loads(db_metadata) if db_metadata else None
)
return val
def _format_where(self, where, result):
for key, value in where.items():
def has_key_and(clause):
return f"(JSONHas(metadata,'{key}') = 1 AND {clause})"
# Shortcut for $eq
if type(value) == str:
result.append(
has_key_and(f" JSONExtractString(metadata,'{key}') = '{value}'")
)
elif type(value) == int:
result.append(
has_key_and(f" JSONExtractInt(metadata,'{key}') = {value}")
)
elif type(value) == float:
result.append(
has_key_and(f" JSONExtractFloat(metadata,'{key}') = {value}")
)
# Operator expression
elif type(value) == dict:
operator, operand = list(value.items())[0]
if operator == "$gt":
return result.append(
has_key_and(f" JSONExtractFloat(metadata,'{key}') > {operand}")
)
elif operator == "$lt":
return result.append(
has_key_and(f" JSONExtractFloat(metadata,'{key}') < {operand}")
)
elif operator == "$gte":
return result.append(
has_key_and(f" JSONExtractFloat(metadata,'{key}') >= {operand}")
)
elif operator == "$lte":
return result.append(
has_key_and(f" JSONExtractFloat(metadata,'{key}') <= {operand}")
)
elif operator == "$ne":
if type(operand) == str:
return result.append(
has_key_and(
f" JSONExtractString(metadata,'{key}') != '{operand}'"
)
)
return result.append(
has_key_and(f" JSONExtractFloat(metadata,'{key}') != {operand}")
)
elif operator == "$eq":
if type(operand) == str:
return result.append(
has_key_and(
f" JSONExtractString(metadata,'{key}') = '{operand}'"
)
)
return result.append(
has_key_and(f" JSONExtractFloat(metadata,'{key}') = {operand}")
)
else:
raise ValueError(
f"Expected one of $gt, $lt, $gte, $lte, $ne, $eq, got {operator}"
)
elif type(value) == list:
all_subresults = []
for subwhere in value:
subresults = []
self._format_where(subwhere, subresults)
all_subresults.append(subresults[0])
if key == "$or":
result.append(f"({' OR '.join(all_subresults)})")
elif key == "$and":
result.append(f"({' AND '.join(all_subresults)})")
else:
raise ValueError(f"Expected one of $or, $and, got {key}")
def _format_where_document(self, where_document, results):
operator = list(where_document.keys())[0]
if operator == "$contains":
results.append(f"position(document, '{where_document[operator]}') > 0")
elif operator == "$and" or operator == "$or":
all_subresults = []
for subwhere in where_document[operator]:
subresults = []
self._format_where_document(subwhere, subresults)
all_subresults.append(subresults[0])
if operator == "$or":
results.append(f"({' OR '.join(all_subresults)})")
if operator == "$and":
results.append(f"({' AND '.join(all_subresults)})")
else:
raise ValueError(f"Expected one of $contains, $and, $or, got {operator}")
def get(
self,
where: Where = {},
collection_name: Optional[str] = None,
collection_uuid: Optional[UUID] = None,
ids: Optional[IDs] = None,
sort: Optional[str] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
where_document: WhereDocument = {},
columns: Optional[List[str]] = None,
) -> Sequence:
if collection_name is None and collection_uuid is None:
raise TypeError(
"Arguments collection_name and collection_uuid cannot both be None"
)
if collection_name is not None:
collection_uuid = self.get_collection_uuid_from_name(collection_name)
where_str = self._create_where_clause(
# collection_uuid must be defined at this point, cast it for typechecker
cast(str, collection_uuid),
ids=ids,
where=where,
where_document=where_document,
)
if sort is not None:
where_str += f" ORDER BY {sort}"
else:
where_str += " ORDER BY collection_uuid" # stable ordering
if limit is not None or isinstance(limit, int):
where_str += f" LIMIT {limit}"
if offset is not None or isinstance(offset, int):
where_str += f" OFFSET {offset}"
val = self._get(where=where_str, columns=columns)
return val
def count(self, collection_uuid: UUID):
where_string = f"WHERE collection_uuid = '{collection_uuid}'"
return (
self._get_conn()
.query(f"SELECT COUNT() FROM embeddings {where_string}")
.result_rows[0][0]
)
def _delete(self, where_str: Optional[str] = None) -> List:
deleted_uuids = (
self._get_conn()
.query(f"""SELECT uuid FROM embeddings {where_str}""")
.result_rows
)
self._get_conn().command(
f"""
DELETE FROM
embeddings
{where_str}
"""
)
return [res[0] for res in deleted_uuids] if len(deleted_uuids) > 0 else []
def delete(
self,
where: Where = {},
collection_uuid: Optional[str] = None,
ids: Optional[IDs] = None,
where_document: WhereDocument = {},
) -> List:
where_str = self._create_where_clause(
# collection_uuid must be defined at this point, cast it for typechecker
cast(str, collection_uuid),
ids=ids,
where=where,
where_document=where_document,
)
deleted_uuids = self._delete(where_str)
index = self._index(collection_uuid)
index.delete_from_index(deleted_uuids)
return deleted_uuids
def get_by_ids(self, ids: list, columns: Optional[List] = None):
columns = columns + ["uuid"] if columns else ["uuid"]
select_columns = db_schema_to_keys() if columns is None else columns
response = (
self._get_conn()
.query(
f"""
SELECT {",".join(select_columns)} FROM embeddings WHERE uuid IN ({[id.hex for id in ids]})
"""
)
.result_rows
)
# sort db results by the order of the uuids
response = sorted(response, key=lambda obj: ids.index(obj[len(columns) - 1]))
return response
def get_nearest_neighbors(
self,
collection_uuid: UUID,
where: Where,
where_document: WhereDocument,
embeddings: Embeddings,
n_results: int,
) -> Tuple[List[List[uuid.UUID]], npt.NDArray]:
# Either the collection name or the collection uuid must be provided
if collection_uuid is None:
raise TypeError("Argument collection_uuid cannot be None")
if len(where) != 0 or len(where_document) != 0:
results = self.get(
collection_uuid=collection_uuid,
where=where,
where_document=where_document,
)
if len(results) > 0:
ids = [x[1] for x in results]
else:
raise NoDatapointsException(
f"No datapoints found for the supplied filter {json.dumps(where)}"
)
else:
ids = None
index = self._index(collection_uuid)
uuids, distances = index.get_nearest_neighbors(embeddings, n_results, ids)
return uuids, distances
def create_index(self, collection_uuid: str):
"""Create an index for a collection_uuid and optionally scoped to a dataset.
Args:
collection_uuid (str): The collection_uuid to create an index for
dataset (str, optional): The dataset to scope the index to. Defaults to None.
Returns:
None
"""
get = self.get(collection_uuid=collection_uuid)
uuids = [x[1] for x in get]
embeddings = [x[2] for x in get]
index = self._index(collection_uuid)
index.add(uuids, embeddings)
def add_incremental(self, collection_uuid, uuids, embeddings):
index = self._index(collection_uuid)
index.add(uuids, embeddings)
def reset_indexes(self):
delete_all_indexes(self._settings)
self.index_cache = {}
def reset(self):
conn = self._get_conn()
conn.command("DROP TABLE collections")
conn.command("DROP TABLE embeddings")
self._create_table_collections(conn)
self._create_table_embeddings(conn)
self.reset_indexes()
def raw_sql(self, sql):
return self._get_conn().query(sql).result_rows
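# Minimal usage sketch (not executed; assumes a reachable ClickHouse server and an Ocean
# `settings` object with clickhouse_host / clickhouse_port configured):
#
#   db = Clickhouse(settings)
#   collection = db.create_collection("docs", metadata={"hnsw:space": "cosine"})
#   db.add(collection[0][0], embeddings=[[0.1, 0.2]], metadatas=[{"k": "v"}],
#          documents=["hello"], ids=["id1"])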
| Ocean-master | oceandb/db/clickhouse.py |
from abc import ABC, abstractmethod
from typing import Dict, List, Sequence, Optional, Tuple
from uuid import UUID
import numpy.typing as npt
from oceandb.api.types import (
Embeddings,
Documents,
IDs,
Metadatas,
Where,
WhereDocument,
)
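# DB is the abstract storage interface implemented by the ClickHouse and DuckDB
# backends in oceandb.db.clickhouse and oceandb.db.duckdb.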
class DB(ABC):
@abstractmethod
def __init__(self):
pass
@abstractmethod
def create_collection(
self, name: str, metadata: Optional[Dict] = None, get_or_create: bool = False
) -> Sequence:
pass
@abstractmethod
def get_collection(self, name: str) -> Sequence:
pass
@abstractmethod
def list_collections(self) -> Sequence:
pass
@abstractmethod
def update_collection(
self,
id: UUID,
new_name: Optional[str] = None,
new_metadata: Optional[Dict] = None,
):
pass
@abstractmethod
def delete_collection(self, name: str):
pass
@abstractmethod
def get_collection_uuid_from_name(self, collection_name: str) -> UUID:
pass
@abstractmethod
def add(
self,
collection_uuid: UUID,
embeddings: Embeddings,
metadatas: Optional[Metadatas],
documents: Optional[Documents],
ids: List[UUID],
) -> List[UUID]:
pass
@abstractmethod
def add_incremental(
self, collection_uuid: UUID, ids: List[UUID], embeddings: Embeddings
):
pass
@abstractmethod
def get(
self,
where: Where = {},
collection_name: Optional[str] = None,
collection_uuid: Optional[UUID] = None,
ids: Optional[IDs] = None,
sort: Optional[str] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
where_document: WhereDocument = {},
columns: Optional[List[str]] = None,
) -> Sequence:
pass
@abstractmethod
def update(
self,
collection_uuid: UUID,
ids: IDs,
embeddings: Optional[Embeddings] = None,
metadatas: Optional[Metadatas] = None,
documents: Optional[Documents] = None,
):
pass
@abstractmethod
def count(self, collection_id: UUID):
pass
@abstractmethod
def delete(
self,
where: Where = {},
collection_uuid: Optional[UUID] = None,
ids: Optional[IDs] = None,
where_document: WhereDocument = {},
) -> List:
pass
@abstractmethod
def reset(self):
pass
@abstractmethod
def get_nearest_neighbors(
self,
collection_uuid: UUID,
where,
embeddings,
n_results,
where_document,
) -> Tuple[List[List[UUID]], npt.NDArray]:
pass
@abstractmethod
def get_by_ids(self, uuids, columns=None) -> Sequence:
pass
@abstractmethod
def raw_sql(self, raw_sql):
pass
@abstractmethod
def create_index(self, collection_uuid: UUID):
pass
@abstractmethod
def persist(self):
pass
| Ocean-master | oceandb/db/__init__.py |
from oceandb.api.types import Documents, Embeddings, IDs, Metadatas
from oceandb.db.clickhouse import (
Clickhouse,
db_array_schema_to_clickhouse_schema,
EMBEDDING_TABLE_SCHEMA,
db_schema_to_keys,
COLLECTION_TABLE_SCHEMA,
)
from typing import List, Optional, Sequence, Dict
import pandas as pd
import json
import duckdb
import uuid
import os
import logging
import atexit
logger = logging.getLogger(__name__)
def clickhouse_to_duckdb_schema(table_schema):
for item in table_schema:
if "embedding" in item:
item["embedding"] = "DOUBLE[]"
# capitalize the key
item[list(item.keys())[0]] = item[list(item.keys())[0]].upper()
if "NULLABLE" in item[list(item.keys())[0]]:
item[list(item.keys())[0]] = (
item[list(item.keys())[0]].replace("NULLABLE(", "").replace(")", "")
)
if "UUID" in item[list(item.keys())[0]]:
item[list(item.keys())[0]] = "STRING"
if "FLOAT64" in item[list(item.keys())[0]]:
item[list(item.keys())[0]] = "DOUBLE"
return table_schema
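# Illustrative example (not executed): applied to EMBEDDING_TABLE_SCHEMA this rewrites
# {"embedding": "Array(Float64)"} to {"embedding": "DOUBLE[]"}, strips NULLABLE(...)
# wrappers and maps UUID -> STRING, since DuckDB lacks ClickHouse's native types.
# Note that the conversion mutates the passed schema list in place.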
# TODO: inherits ClickHouse for convenience of copying behavior, not
# because it's logically a subtype. Factoring out the common behavior
# to a third superclass they both extend would be preferable.
class DuckDB(Clickhouse):
# duckdb has a different way of connecting to the database
def __init__(self, settings):
self._conn = duckdb.connect()
self._create_table_collections()
self._create_table_embeddings()
self._settings = settings
# https://duckdb.org/docs/extensions/overview
self._conn.execute("LOAD 'json';")
def _create_table_collections(self):
self._conn.execute(
f"""CREATE TABLE collections (
{db_array_schema_to_clickhouse_schema(clickhouse_to_duckdb_schema(COLLECTION_TABLE_SCHEMA))}
) """
)
# duckdb has different types, so we want to convert the clickhouse schema to duckdb schema
def _create_table_embeddings(self):
self._conn.execute(
f"""CREATE TABLE embeddings (
{db_array_schema_to_clickhouse_schema(clickhouse_to_duckdb_schema(EMBEDDING_TABLE_SCHEMA))}
) """
)
#
# UTILITY METHODS
#
def get_collection_uuid_from_name(self, name):
return self._conn.execute(
"SELECT uuid FROM collections WHERE name = ?", [name]
).fetchall()[0][0]
#
# COLLECTION METHODS
#
def create_collection(
self, name: str, metadata: Optional[Dict] = None, get_or_create: bool = False
) -> Sequence:
# poor man's unique constraint
dupe_check = self.get_collection(name)
if len(dupe_check) > 0:
if get_or_create is True:
if dupe_check[0][2] != metadata:
self.update_collection(
dupe_check[0][0], new_name=name, new_metadata=metadata
)
dupe_check = self.get_collection(name)
logger.info(
f"collection with name {name} already exists, returning existing collection"
)
return dupe_check
else:
raise ValueError(f"Collection with name {name} already exists")
collection_uuid = uuid.uuid4()
self._conn.execute(
"""INSERT INTO collections (uuid, name, metadata) VALUES (?, ?, ?)""",
[str(collection_uuid), name, json.dumps(metadata)],
)
return [[str(collection_uuid), name, metadata]]
def get_collection(self, name: str) -> Sequence:
res = self._conn.execute(
"""SELECT * FROM collections WHERE name = ?""", [name]
).fetchall()
# json.loads the metadata
return [[x[0], x[1], json.loads(x[2])] for x in res]
def get_collection_by_id(self, uuid: str) -> Sequence:
res = self._conn.execute(
"""SELECT * FROM collections WHERE uuid = ?""", [uuid]
).fetchone()
return [res[0], res[1], json.loads(res[2])]
def list_collections(self) -> Sequence:
res = self._conn.execute("""SELECT * FROM collections""").fetchall()
return [[x[0], x[1], json.loads(x[2])] for x in res]
def delete_collection(self, name: str):
collection_uuid = self.get_collection_uuid_from_name(name)
self._conn.execute(
"""DELETE FROM embeddings WHERE collection_uuid = ?""", [collection_uuid]
)
self._delete_index(collection_uuid)
self._conn.execute("""DELETE FROM collections WHERE name = ?""", [name])
def update_collection(
self, id: uuid.UUID, new_name: str, new_metadata: Optional[Dict] = None
):
if new_name is not None:
self._conn.execute(
"""UPDATE collections SET name = ? WHERE uuid = ?""",
[new_name, id],
)
if new_metadata is not None:
self._conn.execute(
"""UPDATE collections SET metadata = ? WHERE uuid = ?""",
[json.dumps(new_metadata), id],
)
#
# ITEM METHODS
#
    # duckdb inserts use executemany with (?, ?, ...) positional placeholders,
    # unlike the clickhouse client's insert API
def add(self, collection_uuid, embeddings, metadatas, documents, ids):
data_to_insert = [
[
collection_uuid,
str(uuid.uuid4()),
embedding,
json.dumps(metadatas[i]) if metadatas else None,
documents[i] if documents else None,
ids[i],
]
for i, embedding in enumerate(embeddings)
]
insert_string = "collection_uuid, uuid, embedding, metadata, document, id"
self._conn.executemany(
f"""
INSERT INTO embeddings ({insert_string}) VALUES (?,?,?,?,?,?)""",
data_to_insert,
)
return [uuid.UUID(x[1]) for x in data_to_insert] # return uuids
def count(self, collection_uuid):
where_string = f"WHERE collection_uuid = '{collection_uuid}'"
return self._conn.query(
f"SELECT COUNT() FROM embeddings {where_string}"
).fetchall()[0][0]
def _format_where(self, where, result):
for key, value in where.items():
# Shortcut for $eq
if type(value) == str:
result.append(f" json_extract_string(metadata,'$.{key}') = '{value}'")
if type(value) == int:
result.append(
f" CAST(json_extract(metadata,'$.{key}') AS INT) = {value}"
)
if type(value) == float:
result.append(
f" CAST(json_extract(metadata,'$.{key}') AS DOUBLE) = {value}"
)
# Operator expression
elif type(value) == dict:
operator, operand = list(value.items())[0]
if operator == "$gt":
result.append(
f" CAST(json_extract(metadata,'$.{key}') AS DOUBLE) > {operand}"
)
elif operator == "$lt":
result.append(
f" CAST(json_extract(metadata,'$.{key}') AS DOUBLE) < {operand}"
)
elif operator == "$gte":
result.append(
f" CAST(json_extract(metadata,'$.{key}') AS DOUBLE) >= {operand}"
)
elif operator == "$lte":
result.append(
f" CAST(json_extract(metadata,'$.{key}') AS DOUBLE) <= {operand}"
)
elif operator == "$ne":
if type(operand) == str:
return result.append(
f" json_extract_string(metadata,'$.{key}') != '{operand}'"
)
return result.append(
f" CAST(json_extract(metadata,'$.{key}') AS DOUBLE) != {operand}"
)
elif operator == "$eq":
if type(operand) == str:
return result.append(
f" json_extract_string(metadata,'$.{key}') = '{operand}'"
)
return result.append(
f" CAST(json_extract(metadata,'$.{key}') AS DOUBLE) = {operand}"
)
else:
raise ValueError(f"Operator {operator} not supported")
elif type(value) == list:
all_subresults = []
for subwhere in value:
subresults = []
self._format_where(subwhere, subresults)
all_subresults.append(subresults[0])
if key == "$or":
result.append(f"({' OR '.join(all_subresults)})")
elif key == "$and":
result.append(f"({' AND '.join(all_subresults)})")
else:
raise ValueError(
f"Operator {key} not supported with a list of where clauses"
)
def _format_where_document(self, where_document, results):
operator = list(where_document.keys())[0]
if operator == "$contains":
results.append(f"position('{where_document[operator]}' in document) > 0")
elif operator == "$and" or operator == "$or":
all_subresults = []
for subwhere in where_document[operator]:
subresults = []
self._format_where_document(subwhere, subresults)
all_subresults.append(subresults[0])
if operator == "$or":
results.append(f"({' OR '.join(all_subresults)})")
if operator == "$and":
results.append(f"({' AND '.join(all_subresults)})")
else:
raise ValueError(f"Operator {operator} not supported")
def _get(self, where, columns: Optional[List] = None):
select_columns = db_schema_to_keys() if columns is None else columns
val = self._conn.execute(
f"""SELECT {",".join(select_columns)} FROM embeddings {where}"""
).fetchall()
for i in range(len(val)):
val[i] = list(val[i])
if "collection_uuid" in select_columns:
collection_uuid_column_index = select_columns.index("collection_uuid")
val[i][collection_uuid_column_index] = uuid.UUID(
val[i][collection_uuid_column_index]
)
if "uuid" in select_columns:
uuid_column_index = select_columns.index("uuid")
val[i][uuid_column_index] = uuid.UUID(val[i][uuid_column_index])
if "metadata" in select_columns:
metadata_column_index = select_columns.index("metadata")
val[i][metadata_column_index] = (
json.loads(val[i][metadata_column_index])
if val[i][metadata_column_index]
else None
)
return val
def _update(
self,
collection_uuid,
ids: IDs,
embeddings: Optional[Embeddings],
metadatas: Optional[Metadatas],
documents: Optional[Documents],
):
update_data = []
for i in range(len(ids)):
data = []
update_data.append(data)
if embeddings is not None:
data.append(embeddings[i])
if metadatas is not None:
data.append(json.dumps(metadatas[i]))
if documents is not None:
data.append(documents[i])
data.append(ids[i])
update_fields = []
if embeddings is not None:
update_fields.append("embedding = ?")
if metadatas is not None:
update_fields.append("metadata = ?")
if documents is not None:
update_fields.append("document = ?")
update_statement = f"""
UPDATE
embeddings
SET
{", ".join(update_fields)}
WHERE
id = ? AND
collection_uuid = '{collection_uuid}';
"""
self._conn.executemany(update_statement, update_data)
def _delete(self, where_str: Optional[str] = None) -> List:
uuids_deleted = self._conn.execute(
f"""SELECT uuid FROM embeddings {where_str}"""
).fetchall()
self._conn.execute(
f"""
DELETE FROM
embeddings
{where_str}
"""
).fetchall()[0]
return [uuid.UUID(x[0]) for x in uuids_deleted]
def get_by_ids(self, ids: List, columns: Optional[List] = None):
# select from duckdb table where ids are in the list
if not isinstance(ids, list):
raise TypeError(f"Expected ids to be a list, got {ids}")
if not ids:
# create an empty pandas dataframe
return pd.DataFrame()
columns = columns + ["uuid"] if columns else ["uuid"]
select_columns = db_schema_to_keys() if columns is None else columns
response = self._conn.execute(
f"""
SELECT
{",".join(select_columns)}
FROM
embeddings
WHERE
uuid IN ({','.join([("'" + str(x) + "'") for x in ids])})
"""
).fetchall()
# sort db results by the order of the uuids
response = sorted(
response, key=lambda obj: ids.index(uuid.UUID(obj[len(columns) - 1]))
)
return response
def raw_sql(self, sql):
return self._conn.execute(sql).df()
# TODO: This method should share logic with clickhouse impl
def reset(self):
self._conn.execute("DROP TABLE collections")
self._conn.execute("DROP TABLE embeddings")
self._create_table_collections()
self._create_table_embeddings()
self.reset_indexes()
def __del__(self):
logger.info("Exiting: Cleaning up .ocean directory")
self.reset_indexes()
def persist(self):
raise NotImplementedError(
"Set ocean_db_impl='duckdb+parquet' to get persistence functionality"
)
class PersistentDuckDB(DuckDB):
_save_folder = None
def __init__(self, settings):
super().__init__(settings=settings)
if settings.persist_directory == ".ocean":
raise ValueError(
"You cannot use ocean's cache directory .ocean/, please set a different directory"
)
self._save_folder = settings.persist_directory
self.load()
# https://docs.python.org/3/library/atexit.html
atexit.register(self.persist)
def set_save_folder(self, path):
self._save_folder = path
def get_save_folder(self):
return self._save_folder
def persist(self):
"""
Persist the database to disk
"""
logger.info(
f"Persisting DB to disk, putting it in the save folder: {self._save_folder}"
)
if self._conn is None:
return
if not os.path.exists(self._save_folder):
os.makedirs(self._save_folder)
        # if the db is empty, don't save
        if self._conn.query("SELECT COUNT() FROM embeddings").fetchall()[0][0] == 0:
return
self._conn.execute(
f"""
COPY
(SELECT * FROM embeddings)
TO '{self._save_folder}/ocean-embeddings.parquet'
(FORMAT PARQUET);
"""
)
self._conn.execute(
f"""
COPY
(SELECT * FROM collections)
TO '{self._save_folder}/ocean-collections.parquet'
(FORMAT PARQUET);
"""
)
def load(self):
"""
Load the database from disk
"""
if not os.path.exists(self._save_folder):
os.makedirs(self._save_folder)
# load in the embeddings
if not os.path.exists(f"{self._save_folder}/ocean-embeddings.parquet"):
logger.info(f"No existing DB found in {self._save_folder}, skipping load")
else:
path = self._save_folder + "/ocean-embeddings.parquet"
self._conn.execute(
f"INSERT INTO embeddings SELECT * FROM read_parquet('{path}');"
)
logger.info(
f"""loaded in {self._conn.query("SELECT COUNT() FROM embeddings").fetchall()[0][0]} embeddings"""
)
# load in the collections
if not os.path.exists(f"{self._save_folder}/ocean-collections.parquet"):
logger.info(f"No existing DB found in {self._save_folder}, skipping load")
else:
path = self._save_folder + "/ocean-collections.parquet"
self._conn.execute(
f"INSERT INTO collections SELECT * FROM read_parquet('{path}');"
)
logger.info(
f"""loaded in {self._conn.query("SELECT COUNT() FROM collections").fetchall()[0][0]} collections"""
)
def __del__(self):
# No-op for duckdb with persistence since the base class will delete the indexes
pass
def reset(self):
super().reset()
# empty the save folder
import shutil
import os
shutil.rmtree(self._save_folder)
os.mkdir(self._save_folder)
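# Minimal usage sketch (not executed; assumes an Ocean `settings` object, and for the
# persistent variant a persist_directory other than ".ocean"):
#
#   db = DuckDB(settings)              # in-memory
#   db = PersistentDuckDB(settings)    # parquet-backed, persisted automatically at exit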
| Ocean-master | oceandb/db/duckdb.py |
from abc import ABC, abstractmethod
class Index(ABC):
@abstractmethod
def __init__(self, id, settings, metadata):
pass
@abstractmethod
def delete(self):
pass
@abstractmethod
def delete_from_index(self, ids):
pass
@abstractmethod
def add(self, ids, embeddings, update=False):
pass
@abstractmethod
def get_nearest_neighbors(self, embedding, n_results, ids):
pass
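# Hnswlib in oceandb/db/index/hnswlib.py is the concrete implementation of this interface.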
| Ocean-master | oceandb/db/index/__init__.py |
import os
import pickle
import time
from typing import Dict
from oceandb.api.types import IndexMetadata
import hnswlib
from oceandb.db.index import Index
from oceandb.errors import (
NoIndexException,
InvalidDimensionException,
NotEnoughElementsException,
)
import logging
import re
from uuid import UUID
import multiprocessing
logger = logging.getLogger(__name__)
valid_params = {
"hnsw:space": r"^(l2|cosine|ip)$",
"hnsw:construction_ef": r"^\d+$",
"hnsw:search_ef": r"^\d+$",
"hnsw:M": r"^\d+$",
"hnsw:num_threads": r"^\d+$",
"hnsw:resize_factor": r"^\d+(\.\d+)?$",
}
class HnswParams:
space: str
construction_ef: int
search_ef: int
M: int
num_threads: int
resize_factor: float
def __init__(self, metadata):
metadata = metadata or {}
# Convert all values to strings for future compatibility.
metadata = {k: str(v) for k, v in metadata.items()}
for param, value in metadata.items():
if param.startswith("hnsw:"):
if param not in valid_params:
raise ValueError(f"Unknown HNSW parameter: {param}")
if not re.match(valid_params[param], value):
raise ValueError(
f"Invalid value for HNSW parameter: {param} = {value}"
)
self.space = metadata.get("hnsw:space", "l2")
self.construction_ef = int(metadata.get("hnsw:construction_ef", 100))
self.search_ef = int(metadata.get("hnsw:search_ef", 10))
self.M = int(metadata.get("hnsw:M", 16))
self.num_threads = int(
metadata.get("hnsw:num_threads", multiprocessing.cpu_count())
)
self.resize_factor = float(metadata.get("hnsw:resize_factor", 1.2))
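# Illustrative example (not executed): collection metadata such as
#   {"hnsw:space": "cosine", "hnsw:M": 32, "hnsw:construction_ef": 200}
# is validated against `valid_params` above; anything omitted falls back to the defaults
# (l2 space, M=16, construction_ef=100, search_ef=10, resize_factor=1.2).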
def hexid(id):
"""Backwards compatibility for old indexes which called uuid.hex on UUID ids"""
return id.hex if isinstance(id, UUID) else id
def delete_all_indexes(settings):
if os.path.exists(f"{settings.persist_directory}/index"):
for file in os.listdir(f"{settings.persist_directory}/index"):
os.remove(f"{settings.persist_directory}/index/{file}")
class Hnswlib(Index):
_id: str
_index: hnswlib.Index
_index_metadata: IndexMetadata
_params: HnswParams
_id_to_label: Dict[str, int]
_label_to_id: Dict[int, UUID]
def __init__(self, id, settings, metadata):
self._save_folder = settings.persist_directory + "/index"
self._params = HnswParams(metadata)
self._id = id
self._index = None
# Mapping of IDs to HNSW integer labels
self._id_to_label = {}
self._label_to_id = {}
self._load()
def _init_index(self, dimensionality):
# more comments available at the source: https://github.com/nmslib/hnswlib
index = hnswlib.Index(
space=self._params.space, dim=dimensionality
) # possible options are l2, cosine or ip
index.init_index(
max_elements=1000,
ef_construction=self._params.construction_ef,
M=self._params.M,
)
index.set_ef(self._params.search_ef)
index.set_num_threads(self._params.num_threads)
self._index = index
self._index_metadata = {
"dimensionality": dimensionality,
"elements": 0,
"time_created": time.time(),
}
self._save()
def _check_dimensionality(self, data):
"""Assert that the given data matches the index dimensionality"""
dim = len(data[0])
idx_dim = self._index.dim
if dim != idx_dim:
raise InvalidDimensionException(
f"Dimensionality of ({dim}) does not match index dimensionality ({idx_dim})"
)
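    # HNSW labels are allocated sequentially (1, 2, ...) via _index_metadata["elements"];
    # _id_to_label / _label_to_id keep the mapping between caller ids and integer labels,
    # and the index capacity grows by hnsw:resize_factor when the element count exceeds it.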
def add(self, ids, embeddings, update=False):
"""Add or update embeddings to the index"""
dim = len(embeddings[0])
if self._index is None:
self._init_index(dim)
# Check dimensionality
self._check_dimensionality(embeddings)
labels = []
for id in ids:
if hexid(id) in self._id_to_label:
if update:
labels.append(self._id_to_label[hexid(id)])
else:
raise ValueError(f"ID {id} already exists in index")
else:
self._index_metadata["elements"] += 1
next_label = self._index_metadata["elements"]
self._id_to_label[hexid(id)] = next_label
self._label_to_id[next_label] = id
labels.append(next_label)
if self._index_metadata["elements"] > self._index.get_max_elements():
new_size = max(
self._index_metadata["elements"] * self._params.resize_factor, 1000
)
self._index.resize_index(int(new_size))
self._index.add_items(embeddings, labels)
self._save()
def delete(self):
        # delete files, don't throw an error if they don't exist
try:
os.remove(f"{self._save_folder}/id_to_uuid_{self._id}.pkl")
os.remove(f"{self._save_folder}/uuid_to_id_{self._id}.pkl")
os.remove(f"{self._save_folder}/index_{self._id}.bin")
os.remove(f"{self._save_folder}/index_metadata_{self._id}.pkl")
except Exception:
pass
self._index = None
self._collection_uuid = None
self._id_to_label = {}
self._label_to_id = {}
def delete_from_index(self, ids):
if self._index is not None:
for id in ids:
label = self._id_to_label[hexid(id)]
self._index.mark_deleted(label)
del self._label_to_id[label]
del self._id_to_label[hexid(id)]
self._save()
def _save(self):
# create the directory if it doesn't exist
if not os.path.exists(f"{self._save_folder}"):
os.makedirs(f"{self._save_folder}")
if self._index is None:
return
self._index.save_index(f"{self._save_folder}/index_{self._id}.bin")
# pickle the mappers
# Use old filenames for backwards compatibility
with open(f"{self._save_folder}/id_to_uuid_{self._id}.pkl", "wb") as f:
pickle.dump(self._label_to_id, f, pickle.HIGHEST_PROTOCOL)
with open(f"{self._save_folder}/uuid_to_id_{self._id}.pkl", "wb") as f:
pickle.dump(self._id_to_label, f, pickle.HIGHEST_PROTOCOL)
with open(f"{self._save_folder}/index_metadata_{self._id}.pkl", "wb") as f:
pickle.dump(self._index_metadata, f, pickle.HIGHEST_PROTOCOL)
logger.debug(f"Index saved to {self._save_folder}/index.bin")
def _exists(self):
return
def _load(self):
if not os.path.exists(f"{self._save_folder}/index_{self._id}.bin"):
return
# unpickle the mappers
with open(f"{self._save_folder}/id_to_uuid_{self._id}.pkl", "rb") as f:
self._label_to_id = pickle.load(f)
with open(f"{self._save_folder}/uuid_to_id_{self._id}.pkl", "rb") as f:
self._id_to_label = pickle.load(f)
with open(f"{self._save_folder}/index_metadata_{self._id}.pkl", "rb") as f:
self._index_metadata = pickle.load(f)
p = hnswlib.Index(
space=self._params.space, dim=self._index_metadata["dimensionality"]
)
self._index = p
self._index.load_index(
f"{self._save_folder}/index_{self._id}.bin",
max_elements=self._index_metadata["elements"],
)
self._index.set_ef(self._params.search_ef)
self._index.set_num_threads(self._params.num_threads)
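    # When `ids` are supplied, the query is restricted to their labels via hnswlib's
    # filter callback, and k is clamped to the number of candidate labels.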
def get_nearest_neighbors(self, query, k, ids=None):
if self._index is None:
raise NoIndexException(
"Index not found, please create an instance before querying"
)
# Check dimensionality
self._check_dimensionality(query)
if k > self._index_metadata["elements"]:
raise NotEnoughElementsException(
f"Number of requested results {k} cannot be greater than number of elements in index {self._index_metadata['elements']}"
)
s2 = time.time()
# get ids from uuids as a set, if they are available
labels = {}
if ids is not None:
labels = {self._id_to_label[hexid(id)] for id in ids}
if len(labels) < k:
k = len(labels)
filter_function = None
if len(labels) != 0:
filter_function = lambda label: label in labels # NOQA: E731
logger.debug(f"time to pre process our knn query: {time.time() - s2}")
s3 = time.time()
database_labels, distances = self._index.knn_query(
query, k=k, filter=filter_function
)
logger.debug(f"time to run knn query: {time.time() - s3}")
ids = [
[self._label_to_id[label] for label in labels] for labels in database_labels
]
return ids, distances
| Ocean-master | oceandb/db/index/hnswlib.py |
from typing import Callable, Dict, List, Optional
from oceandb.api import API
from oceandb.api.types import (
CrossModalRetrieval,
Documents,
Embeddings,
Embedding,
IDs,
Include,
Metadatas,
ModalitySpecificSearching,
MultiModalFusion,
Where,
WhereDocument,
)
import pandas as pd
import requests
import json
from typing import Sequence, List, Any
from oceandb.api.models.Collection import Collection
from oceandb.telemetry import Telemetry
import oceandb.errors as errors
from uuid import UUID
class FastAPI(API):
def __init__(self, settings, telemetry_client: Telemetry):
url_prefix = "https" if settings.ocean_server_ssl_enabled else "http"
self._api_url = f"{url_prefix}://{settings.ocean_server_host}:{settings.ocean_server_http_port}/api/v1"
self._telemetry_client = telemetry_client
def heartbeat(self):
"""Returns the current server time in nanoseconds to check if the server is alive"""
resp = requests.get(self._api_url)
raise_ocean_error(resp)
return int(resp.json()["nanosecond heartbeat"])
def list_collections(self) -> Sequence[Collection]:
"""Returns a list of all collections"""
resp = requests.get(self._api_url + "/collections")
raise_ocean_error(resp)
json_collections = resp.json()
collections = []
for json_collection in json_collections:
collections.append(Collection(self, **json_collection))
return collections
def create_collection(
self,
name: str,
metadata: Optional[Dict] = None,
embedding_function: Optional[Callable] = None,
get_or_create: bool = False,
) -> Collection:
"""Creates a collection"""
resp = requests.post(
self._api_url + "/collections",
data=json.dumps(
{"name": name, "metadata": metadata, "get_or_create": get_or_create}
),
)
raise_ocean_error(resp)
resp_json = resp.json()
return Collection(
client=self,
id=resp_json["id"],
name=resp_json["name"],
embedding_function=embedding_function,
metadata=resp_json["metadata"],
)
def get_collection(
self,
name: str,
embedding_function: Optional[Callable] = None,
) -> Collection:
"""Returns a collection"""
resp = requests.get(self._api_url + "/collections/" + name)
raise_ocean_error(resp)
resp_json = resp.json()
return Collection(
client=self,
name=resp_json["name"],
id=resp_json["id"],
embedding_function=embedding_function,
metadata=resp_json["metadata"],
)
def get_or_create_collection(
self,
name: str,
metadata: Optional[Dict] = None,
embedding_function: Optional[Callable] = None,
) -> Collection:
"""Get a collection, or return it if it exists"""
return self.create_collection(
name, metadata, embedding_function, get_or_create=True
)
def _modify(self, id: UUID, new_name: str, new_metadata: Optional[Dict] = None):
"""Updates a collection"""
resp = requests.put(
self._api_url + "/collections/" + str(id),
data=json.dumps({"new_metadata": new_metadata, "new_name": new_name}),
)
raise_ocean_error(resp)
return resp.json()
def delete_collection(self, name: str):
"""Deletes a collection"""
resp = requests.delete(self._api_url + "/collections/" + name)
raise_ocean_error(resp)
def _count(self, collection_id: UUID):
"""Returns the number of embeddings in the database"""
resp = requests.get(
self._api_url + "/collections/" + str(collection_id) + "/count"
)
raise_ocean_error(resp)
return resp.json()
def _peek(self, collection_id, limit=10):
return self._get(
collection_id,
limit=limit,
include=["embeddings", "documents", "metadatas"],
)
def _get(
self,
collection_id: UUID,
ids: Optional[IDs] = None,
where: Optional[Where] = {},
sort: Optional[str] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
page: Optional[int] = None,
page_size: Optional[int] = None,
where_document: Optional[WhereDocument] = {},
include: Include = ["metadatas", "documents"],
):
"""Gets embeddings from the database"""
if page and page_size:
offset = (page - 1) * page_size
limit = page_size
resp = requests.post(
self._api_url + "/collections/" + str(collection_id) + "/get",
data=json.dumps(
{
"ids": ids,
"where": where,
"sort": sort,
"limit": limit,
"offset": offset,
"where_document": where_document,
"include": include,
}
),
)
raise_ocean_error(resp)
return resp.json()
def _delete(self, collection_id: UUID, ids=None, where={}, where_document={}):
"""Deletes embeddings from the database"""
resp = requests.post(
self._api_url + "/collections/" + str(collection_id) + "/delete",
data=json.dumps(
{"where": where, "ids": ids, "where_document": where_document}
),
)
raise_ocean_error(resp)
return resp.json()
def _add(
self,
ids,
collection_id: UUID,
embeddings,
metadatas=None,
documents=None,
increment_index=True,
):
"""
Adds a batch of embeddings to the database
- pass in column oriented data lists
- by default, the index is progressively built up as you add more data. If for ingestion performance reasons you want to disable this, set increment_index to False
- and then manually create the index yourself with collection.create_index()
"""
resp = requests.post(
self._api_url + "/collections/" + str(collection_id) + "/add",
data=json.dumps(
{
"ids": ids,
"embeddings": embeddings,
"metadatas": metadatas,
"documents": documents,
"increment_index": increment_index,
}
),
)
raise_ocean_error(resp)
return True
def _update(
self,
collection_id: UUID,
ids: IDs,
embeddings: Optional[Embeddings] = None,
metadatas: Optional[Metadatas] = None,
documents: Optional[Documents] = None,
):
"""
Updates a batch of embeddings in the database
- pass in column oriented data lists
"""
resp = requests.post(
self._api_url + "/collections/" + str(collection_id) + "/update",
data=json.dumps(
{
"ids": ids,
"embeddings": embeddings,
"metadatas": metadatas,
"documents": documents,
}
),
)
resp.raise_for_status()
return True
def _upsert(
self,
collection_id: UUID,
ids: IDs,
embeddings: Embeddings,
metadatas: Optional[Metadatas] = None,
documents: Optional[Documents] = None,
increment_index: bool = True,
):
"""
Updates a batch of embeddings in the database
- pass in column oriented data lists
"""
resp = requests.post(
self._api_url + "/collections/" + str(collection_id) + "/upsert",
data=json.dumps(
{
"ids": ids,
"embeddings": embeddings,
"metadatas": metadatas,
"documents": documents,
"increment_index": increment_index,
}
),
)
resp.raise_for_status()
return True
def _query(
self,
collection_id: UUID,
query_embeddings,
n_results=10,
where={},
where_document={},
include: Include = ["metadatas", "documents", "distances"],
):
"""Gets the nearest neighbors of a single embedding"""
resp = requests.post(
self._api_url + "/collections/" + str(collection_id) + "/query",
data=json.dumps(
{
"query_embeddings": query_embeddings,
"n_results": n_results,
"where": where,
"where_document": where_document,
"include": include,
}
),
)
raise_ocean_error(resp)
body = resp.json()
return body
def search(
self, search_function: str, query_embeddings: List[Embedding], index_data: Any
) -> List[str]:
if search_function == "cross_modal_retrieval":
search_instance = CrossModalRetrieval() # type: ignore
elif search_function == "multi_modal_fusion":
search_instance = MultiModalFusion() # type: ignore
elif search_function == "modality_specific_searching":
search_instance = ModalitySpecificSearching() # type: ignore
else:
raise ValueError("Invalid search function specified")
result_ids = search_instance.search(query_embeddings, index_data)
return result_ids # type: ignore
def reset(self):
"""Resets the database"""
resp = requests.post(self._api_url + "/reset")
raise_ocean_error(resp)
        return resp.json()
def persist(self):
"""Persists the database"""
resp = requests.post(self._api_url + "/persist")
raise_ocean_error(resp)
        return resp.json()
def raw_sql(self, sql):
"""Runs a raw SQL query against the database"""
resp = requests.post(
self._api_url + "/raw_sql", data=json.dumps({"raw_sql": sql})
)
raise_ocean_error(resp)
return pd.DataFrame.from_dict(resp.json())
def create_index(self, collection_name: str):
"""Creates an index for the given space key"""
resp = requests.post(
self._api_url + "/collections/" + collection_name + "/create_index"
)
raise_ocean_error(resp)
return resp.json()
def get_version(self):
"""Returns the version of the server"""
resp = requests.get(self._api_url + "/version")
raise_ocean_error(resp)
return resp.json()
def raise_ocean_error(resp):
"""Raises an error if the response is not ok, using a OceanError if possible"""
if resp.ok:
return
ocean_error = None
try:
body = resp.json()
if "error" in body:
if body["error"] in errors.error_types:
ocean_error = errors.error_types[body["error"]](body["message"])
except BaseException:
pass
if ocean_error:
raise ocean_error
try:
resp.raise_for_status()
except requests.HTTPError:
raise (Exception(resp.text))
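# Minimal usage sketch (not executed; assumes an Ocean server reachable at the configured
# host/port and suitable `settings` / `telemetry_client` objects):
#
#   api = FastAPI(settings, telemetry_client)
#   api.heartbeat()                       # raises through raise_ocean_error on HTTP failure
#   col = api.create_collection("docs")   # returns an oceandb Collection bound to this client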
| Ocean-master | oceandb/api/fastapi.py |
import json
import time
from uuid import UUID
from typing import Dict, List, Optional, Sequence, Callable, cast
from oceandb import __version__
import oceandb.errors as errors
from oceandb.api import API
from oceandb.db import DB
from oceandb.api.types import (
Documents,
Embeddings,
GetResult,
IDs,
Include,
Metadatas,
QueryResult,
Where,
WhereDocument,
)
from oceandb.api.models.Collection import Collection
import re
from oceandb.telemetry import Telemetry
from oceandb.telemetry.events import CollectionAddEvent, CollectionDeleteEvent
# mimics s3 bucket requirements for naming
def check_index_name(index_name):
msg = (
"Expected collection name that "
"(1) contains 3-63 characters, "
"(2) starts and ends with an alphanumeric character, "
"(3) otherwise contains only alphanumeric characters, underscores or hyphens (-), "
"(4) contains no two consecutive periods (..) and "
"(5) is not a valid IPv4 address, "
f"got {index_name}"
)
if len(index_name) < 3 or len(index_name) > 63:
raise ValueError(msg)
if not re.match("^[a-zA-Z0-9][a-zA-Z0-9._-]*[a-zA-Z0-9]$", index_name):
raise ValueError(msg)
if ".." in index_name:
raise ValueError(msg)
if re.match("^[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}$", index_name):
raise ValueError(msg)
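# Illustrative examples (not executed): "my_collection" and "logs-2023" pass the checks
# above, while "ab" (too short), "-bad-start" (non-alphanumeric start) and "192.168.0.1"
# (an IPv4 address) are rejected.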
class LocalAPI(API):
def __init__(self, settings, db: DB, telemetry_client: Telemetry):
self._db = db
self._telemetry_client = telemetry_client
def heartbeat(self):
"""Ping the database to ensure it is alive"""
return int(1000 * time.time_ns())
#
# COLLECTION METHODS
#
def create_collection(
self,
name: str,
metadata: Optional[Dict] = None,
embedding_function: Optional[Callable] = None,
get_or_create: bool = False,
) -> Collection:
"""Create a new collection with the given name and metadata.
Args:
name: The name of the collection to create
metadata: Optional metadata to associate with the collection
embedding_function: Optional function to use to embed documents
get_or_create: If True, return the existing collection if it exists
Returns:
The newly created collection
Raises:
ValueError: If the collection already exists and get_or_create is False
ValueError: If the collection name is invalid
Examples:
>>> client.create_collection("my_collection")
collection(name="my_collection", metadata={})
>>> client.create_collection("my_collection", metadata={"foo": "bar"})
collection(name="my_collection", metadata={"foo": "bar"})
"""
check_index_name(name)
res = self._db.create_collection(name, metadata, get_or_create)
return Collection(
client=self,
name=name,
embedding_function=embedding_function,
id=res[0][0],
metadata=res[0][2],
)
def get_or_create_collection(
self,
name: str,
metadata: Optional[Dict] = None,
embedding_function: Optional[Callable] = None,
) -> Collection:
"""Get or create a collection with the given name and metadata.
Args:
name: The name of the collection to get or create
metadata: Optional metadata to associate with the collection
embedding_function: Optional function to use to embed documents
Returns:
The collection
Examples:
>>> client.get_or_create_collection("my_collection")
collection(name="my_collection", metadata={})
"""
return self.create_collection(
name, metadata, embedding_function, get_or_create=True
)
def get_collection(
self,
name: str,
embedding_function: Optional[Callable] = None,
) -> Collection:
"""Get a collection with the given name.
Args:
name: The name of the collection to get
embedding_function: Optional function to use to embed documents
Returns:
The collection
Raises:
ValueError: If the collection does not exist
Examples:
>>> client.get_collection("my_collection")
collection(name="my_collection", metadata={})
"""
res = self._db.get_collection(name)
if len(res) == 0:
raise ValueError(f"Collection {name} does not exist")
return Collection(
client=self,
name=name,
id=res[0][0],
embedding_function=embedding_function,
metadata=res[0][2],
)
def list_collections(self) -> Sequence[Collection]:
"""List all collections.
Returns:
A list of collections
Examples:
>>> client.list_collections()
[collection(name="my_collection", metadata={})]
"""
collections = []
db_collections = self._db.list_collections()
for db_collection in db_collections:
collections.append(
Collection(
client=self,
id=db_collection[0],
name=db_collection[1],
metadata=db_collection[2],
)
)
return collections
def _modify(
self,
id: UUID,
new_name: Optional[str] = None,
new_metadata: Optional[Dict] = None,
):
if new_name is not None:
check_index_name(new_name)
self._db.update_collection(id, new_name, new_metadata)
def delete_collection(self, name: str):
"""Delete a collection with the given name.
Args:
name: The name of the collection to delete
Raises:
ValueError: If the collection does not exist
Examples:
>>> client.delete_collection("my_collection")
"""
return self._db.delete_collection(name)
#
# ITEM METHODS
#
def _add(
self,
ids,
collection_id: UUID,
embeddings: Embeddings,
metadatas: Optional[Metadatas] = None,
documents: Optional[Documents] = None,
increment_index: bool = True,
):
existing_ids = self._get(collection_id, ids=ids, include=[])["ids"]
if len(existing_ids) > 0:
raise errors.IDAlreadyExistsError(
f"IDs {existing_ids} already exist in collection {collection_id}"
)
added_uuids = self._db.add(
collection_id,
embeddings=embeddings,
metadatas=metadatas,
documents=documents,
ids=ids,
)
if increment_index:
self._db.add_incremental(collection_id, added_uuids, embeddings)
self._telemetry_client.capture(CollectionAddEvent(str(collection_id), len(ids)))
        return True  # NIT: should this return the ids of the successfully added items?
def _update(
self,
collection_id: UUID,
ids: IDs,
embeddings: Optional[Embeddings] = None,
metadatas: Optional[Metadatas] = None,
documents: Optional[Documents] = None,
):
self._db.update(collection_id, ids, embeddings, metadatas, documents)
return True
def _upsert(
self,
collection_id: UUID,
ids: IDs,
embeddings: Embeddings,
metadatas: Optional[Metadatas] = None,
documents: Optional[Documents] = None,
increment_index: bool = True,
):
# Determine which ids need to be added and which need to be updated based on the ids already in the collection
existing_ids = set(self._get(collection_id, ids=ids, include=[])["ids"])
ids_to_add = []
ids_to_update = []
embeddings_to_add: Embeddings = []
embeddings_to_update: Embeddings = []
metadatas_to_add: Optional[Metadatas] = [] if metadatas else None
metadatas_to_update: Optional[Metadatas] = [] if metadatas else None
documents_to_add: Optional[Documents] = [] if documents else None
documents_to_update: Optional[Documents] = [] if documents else None
for i, id in enumerate(ids):
if id in existing_ids:
ids_to_update.append(id)
if embeddings is not None:
embeddings_to_update.append(embeddings[i])
if metadatas is not None:
metadatas_to_update.append(metadatas[i])
if documents is not None:
documents_to_update.append(documents[i])
else:
ids_to_add.append(id)
if embeddings is not None:
embeddings_to_add.append(embeddings[i])
if metadatas is not None:
metadatas_to_add.append(metadatas[i])
if documents is not None:
documents_to_add.append(documents[i])
if len(ids_to_add) > 0:
self._add(
ids_to_add,
collection_id,
embeddings_to_add,
metadatas_to_add,
documents_to_add,
increment_index=increment_index,
)
if len(ids_to_update) > 0:
self._update(
collection_id,
ids_to_update,
embeddings_to_update,
metadatas_to_update,
documents_to_update,
)
self._db.update(collection_id, ids, embeddings, metadatas, documents)
return True
def _get(
self,
collection_id: UUID,
ids: Optional[IDs] = None,
where: Optional[Where] = {},
sort: Optional[str] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
page: Optional[int] = None,
page_size: Optional[int] = None,
where_document: Optional[WhereDocument] = {},
include: Include = ["embeddings", "metadatas", "documents"],
):
if where is None:
where = {}
if where_document is None:
where_document = {}
if page and page_size:
offset = (page - 1) * page_size
limit = page_size
include_embeddings = "embeddings" in include
include_documents = "documents" in include
include_metadatas = "metadatas" in include
# Remove plural from include since db columns are singular
db_columns = [column[:-1] for column in include] + ["id"]
column_index = {
column_name: index for index, column_name in enumerate(db_columns)
}
db_result = self._db.get(
collection_uuid=collection_id,
ids=ids,
where=where,
sort=sort,
limit=limit,
offset=offset,
where_document=where_document,
columns=db_columns,
)
get_result = GetResult(
ids=[],
embeddings=[] if include_embeddings else None,
documents=[] if include_documents else None,
metadatas=[] if include_metadatas else None,
)
for entry in db_result:
if include_embeddings:
cast(List, get_result["embeddings"]).append(
entry[column_index["embedding"]]
)
if include_documents:
cast(List, get_result["documents"]).append(
entry[column_index["document"]]
)
if include_metadatas:
cast(List, get_result["metadatas"]).append(
entry[column_index["metadata"]]
)
get_result["ids"].append(entry[column_index["id"]])
return get_result
def _delete(self, collection_id, ids=None, where=None, where_document=None):
if where is None:
where = {}
if where_document is None:
where_document = {}
deleted_uuids = self._db.delete(
collection_uuid=collection_id,
where=where,
ids=ids,
where_document=where_document,
)
self._telemetry_client.capture(
CollectionDeleteEvent(str(collection_id), len(deleted_uuids))
)
return deleted_uuids
def _count(self, collection_id):
return self._db.count(collection_id)
def reset(self):
"""Reset the database. This will delete all collections and items.
Returns:
True if the database was reset successfully
"""
self._db.reset()
return True
def _query(
self,
collection_id,
query_embeddings,
n_results=10,
where={},
where_document={},
include: Include = ["documents", "metadatas", "distances"],
):
uuids, distances = self._db.get_nearest_neighbors(
collection_uuid=collection_id,
where=where,
where_document=where_document,
embeddings=query_embeddings,
n_results=n_results,
)
include_embeddings = "embeddings" in include
include_documents = "documents" in include
include_metadatas = "metadatas" in include
include_distances = "distances" in include
query_result = QueryResult(
ids=[],
embeddings=[] if include_embeddings else None,
documents=[] if include_documents else None,
metadatas=[] if include_metadatas else None,
distances=[] if include_distances else None,
)
for i in range(len(uuids)):
embeddings = []
documents = []
ids = []
metadatas = []
# Remove plural from include since db columns are singular
db_columns = [
column[:-1] for column in include if column != "distances"
] + ["id"]
column_index = {
column_name: index for index, column_name in enumerate(db_columns)
}
db_result = self._db.get_by_ids(uuids[i], columns=db_columns)
for entry in db_result:
if include_embeddings:
embeddings.append(entry[column_index["embedding"]])
if include_documents:
documents.append(entry[column_index["document"]])
if include_metadatas:
metadatas.append(
json.loads(entry[column_index["metadata"]])
if entry[column_index["metadata"]]
else None
)
ids.append(entry[column_index["id"]])
if include_embeddings:
cast(List, query_result["embeddings"]).append(embeddings)
if include_documents:
cast(List, query_result["documents"]).append(documents)
if include_metadatas:
cast(List, query_result["metadatas"]).append(metadatas)
if include_distances:
cast(List, query_result["distances"]).append(distances[i].tolist())
query_result["ids"].append(ids)
return query_result
def raw_sql(self, raw_sql):
return self._db.raw_sql(raw_sql)
def create_index(self, collection_name: str):
collection_uuid = self._db.get_collection_uuid_from_name(collection_name)
self._db.create_index(collection_uuid=collection_uuid)
return True
def _peek(self, collection_id: UUID, n=10):
return self._get(
collection_id=collection_id,
limit=n,
include=["embeddings", "documents", "metadatas"],
)
def persist(self):
"""Persist the database to disk.
Returns:
True if the database was persisted successfully
"""
self._db.persist()
return True
def get_version(self):
"""Get the version of Ocean.
Returns:
The version of Ocean
"""
return __version__
| Ocean-master | oceandb/api/local.py |
from abc import ABC, abstractmethod
from typing import Callable, Sequence, Optional, Dict
import pandas as pd
from uuid import UUID
from oceandb.api.models.Collection import Collection
from oceandb.api.types import (
Documents,
Embeddings,
IDs,
Include,
Metadatas,
Where,
QueryResult,
GetResult,
WhereDocument,
)
from oceandb.telemetry import Telemetry
class API(ABC):
@abstractmethod
def __init__(self, telemetry_client: Telemetry):
pass
@abstractmethod
def heartbeat(self) -> int:
"""Returns the current server time in nanoseconds to check if the server is alive
Args:
None
Returns:
int: The current server time in nanoseconds
"""
pass
@abstractmethod
def list_collections(self) -> Sequence[Collection]:
"""Returns all collections in the database
Args:
None
Returns:
dict: A dictionary of collections
"""
pass
@abstractmethod
def create_collection(
self,
name: str,
metadata: Optional[Dict] = None,
get_or_create: bool = False,
embedding_function: Optional[Callable] = None,
) -> Collection:
"""Creates a new collection in the database
Args:
name The name of the collection to create. The name must be unique.
metadata: A dictionary of metadata to associate with the collection. Defaults to None.
get_or_create: If True, will return the collection if it already exists,
and update the metadata (if applicable). Defaults to False.
embedding_function: A function that takes documents and returns an embedding. Defaults to None.
Returns:
dict: the created collection
"""
pass
@abstractmethod
def delete_collection(
self,
name: str,
):
"""Deletes a collection from the database
Args:
name: The name of the collection to delete
"""
@abstractmethod
def get_or_create_collection(
self, name: str, metadata: Optional[Dict] = None
) -> Collection:
"""Calls create_collection with get_or_create=True.
If the collection exists, but with different metadata, the metadata will be replaced.
Args:
name: The name of the collection to create. The name must be unique.
metadata: A dictionary of metadata to associate with the collection. Defaults to None.
Returns:
the created collection
"""
pass
@abstractmethod
def get_collection(
self,
name: Optional[str] = None,
embedding_function: Optional[Callable] = None,
) -> Collection:
"""Gets a collection from the database by either name or uuid
Args:
name: The name of the collection to get. Defaults to None.
embedding_function: A function that takes documents and returns an embedding. Should be the same as the one used to create the collection. Defaults to None.
Returns:
dict: the requested collection
"""
pass
def _modify(
self,
id: UUID,
new_name: Optional[str] = None,
new_metadata: Optional[Dict] = None,
):
"""Modify a collection in the database - can update the name and/or metadata
Args:
            id: The id of the collection to modify
new_name: The new name of the collection. Defaults to None.
new_metadata: The new metadata to associate with the collection. Defaults to None.
"""
pass
@abstractmethod
def _add(
self,
ids: IDs,
collection_id: UUID,
embeddings: Optional[Embeddings],
metadatas: Optional[Metadatas] = None,
documents: Optional[Documents] = None,
increment_index: bool = True,
):
"""Add embeddings to the data store. This is the most general way to add embeddings to the database.
⚠️ It is recommended to use the more specific methods below when possible.
Args:
collection_id: The collection to add the embeddings to
embedding: The sequence of embeddings to add
metadata: The metadata to associate with the embeddings. Defaults to None.
documents: The documents to associate with the embeddings. Defaults to None.
ids: The ids to associate with the embeddings. Defaults to None.
"""
pass
@abstractmethod
def _update(
self,
collection_id: UUID,
ids: IDs,
embeddings: Optional[Embeddings] = None,
metadatas: Optional[Metadatas] = None,
documents: Optional[Documents] = None,
):
"""Add embeddings to the data store. This is the most general way to add embeddings to the database.
⚠️ It is recommended to use the more specific methods below when possible.
Args:
collection_id: The collection to add the embeddings to
embedding: The sequence of embeddings to add
"""
pass
@abstractmethod
def _upsert(
self,
collection_id: UUID,
ids: IDs,
embeddings: Optional[Embeddings] = None,
metadatas: Optional[Metadatas] = None,
documents: Optional[Documents] = None,
increment_index: bool = True,
):
"""Add or update entries in the embedding store.
If an entry with the same id already exists, it will be updated, otherwise it will be added.
Args:
collection_id: The collection to add the embeddings to
ids: The ids to associate with the embeddings. Defaults to None.
embeddings: The sequence of embeddings to add
metadatas: The metadata to associate with the embeddings. Defaults to None.
documents: The documents to associate with the embeddings. Defaults to None.
increment_index: If True, will incrementally add to the ANN index of the collection. Defaults to True.
"""
pass
@abstractmethod
def _count(self, collection_id: UUID) -> int:
"""Returns the number of embeddings in the database
Args:
collection_id: The collection to count the embeddings in.
Returns:
int: The number of embeddings in the collection
"""
pass
@abstractmethod
def _peek(self, collection_id: UUID, n: int = 10) -> GetResult:
pass
@abstractmethod
def _get(
self,
collection_id: UUID,
ids: Optional[IDs] = None,
where: Optional[Where] = {},
sort: Optional[str] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
page: Optional[int] = None,
page_size: Optional[int] = None,
where_document: Optional[WhereDocument] = {},
include: Include = ["embeddings", "metadatas", "documents"],
) -> GetResult:
"""Gets embeddings from the database. Supports filtering, sorting, and pagination.
⚠️ This method should not be used directly.
Args:
where: A dictionary of key-value pairs to filter the embeddings by. Defaults to {}.
sort: The column to sort the embeddings by. Defaults to None.
limit: The maximum number of embeddings to return. Defaults to None.
offset: The number of embeddings to skip before returning. Defaults to None.
page: The page number to return. Defaults to None.
page_size: The number of embeddings to return per page. Defaults to None.
Returns:
            GetResult: A GetResult object containing the requested embeddings, documents, and metadata
"""
pass
@abstractmethod
def _delete(
self,
collection_id: UUID,
ids: Optional[IDs],
where: Optional[Where] = {},
where_document: Optional[WhereDocument] = {},
):
"""Deletes embeddings from the database
⚠️ This method should not be used directly.
Args:
where: A dictionary of key-value pairs to filter the embeddings by. Defaults to {}.
"""
pass
@abstractmethod
def _query(
self,
collection_id: UUID,
query_embeddings: Embeddings,
n_results: int = 10,
where: Where = {},
where_document: WhereDocument = {},
include: Include = ["embeddings", "metadatas", "documents", "distances"],
) -> QueryResult:
"""Gets the nearest neighbors of a single embedding
⚠️ This method should not be used directly.
Args:
embedding: The embedding to find the nearest neighbors of
n_results: The number of nearest neighbors to return. Defaults to 10.
where: A dictionary of key-value pairs to filter the embeddings by. Defaults to {}.
"""
pass
@abstractmethod
def reset(self) -> bool:
"""Resets the database
⚠️ This is destructive and will delete all data in the database.
Args:
None
Returns:
True if the reset was successful
"""
pass
@abstractmethod
def raw_sql(self, sql: str) -> pd.DataFrame:
"""Runs a raw SQL query against the database
⚠️ This method should not be used directly.
Args:
sql: The SQL query to run
Returns:
pd.DataFrame: A pandas dataframe containing the results of the query
"""
pass
@abstractmethod
def create_index(self, collection_name: Optional[str] = None) -> bool:
"""Creates an index for the given collection
⚠️ This method should not be used directly.
Args:
collection_name: The collection to create the index for. Uses the client's collection if None. Defaults to None.
Returns:
bool: True if the index was created successfully
"""
pass
@abstractmethod
def persist(self):
"""Persist the database to disk"""
pass
| Ocean-master | oceandb/api/__init__.py |
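A minimal usage sketch of the abstract client surface defined above, written against any concrete implementation of API (for example the local client shown earlier); it assumes the package imports as laid out above, and the collection name and metadata are illustrative only.

from oceandb.api import API

def bootstrap(client: API):
    # Works against any concrete API implementation.
    print("server time (ns):", client.heartbeat())
    collection = client.create_collection("papers", metadata={"owner": "demo"}, get_or_create=True)
    print("collections:", [c.name for c in client.list_collections()])
    return collection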
from typing import Optional, Union, Dict, Sequence, TypeVar, List
from typing_extensions import Literal, TypedDict, Protocol
import oceandb.errors as errors
from abc import ABC, abstractmethod
# from ..utils.ImageBind.imagebind_model import ModalityType
from ..utils.ImageBind.models.imagebind_model import ModalityType
import numpy as np
# use better cosine
from sklearn.metrics.pairwise import cosine_similarity
ID = str
IDs = List[ID]
Number = Union[int, float]
Embedding = List[Number]
Embeddings = List[Embedding]
Metadata = Dict[str, Union[str, int, float]]
Metadatas = List[Metadata]
Document = str
Documents = List[Document]
Parameter = TypeVar("Parameter", Embedding, Document, Metadata, ID)
T = TypeVar("T")
OneOrMany = Union[T, List[T]]
Include = List[Literal["documents", "embeddings", "metadatas", "distances"]]
# Grammar for where expressions
LiteralValue = Union[str, int, float]
LogicalOperator = Literal["$and", "$or"]
WhereOperator = Literal["$gt", "$gte", "$lt", "$lte", "$ne", "$eq"]
OperatorExpression = Dict[Union[WhereOperator, LogicalOperator], LiteralValue]
Where = Dict[
Union[str, LogicalOperator], Union[LiteralValue, OperatorExpression, List["Where"]]
]
WhereDocumentOperator = Literal["$contains", LogicalOperator]
WhereDocument = Dict[WhereDocumentOperator, Union[str, List["WhereDocument"]]]
class GetResult(TypedDict):
ids: List[ID]
embeddings: Optional[List[Embedding]]
documents: Optional[List[Document]]
metadatas: Optional[List[Metadata]]
class QueryResult(TypedDict):
ids: List[IDs]
embeddings: Optional[List[List[Embedding]]]
documents: Optional[List[List[Document]]]
metadatas: Optional[List[List[Metadata]]]
distances: Optional[List[List[float]]]
class IndexMetadata(TypedDict):
dimensionality: int
elements: int
time_created: float
class EmbeddingFunction(Protocol):
def __call__(self, texts: Documents) -> Embeddings:
...
def maybe_cast_one_to_many(
target: OneOrMany[Parameter],
) -> List[Parameter]:
"""Infers if target is Embedding, Metadata, or Document and casts it to a many object if its one"""
if isinstance(target, Sequence):
# One Document or ID
if isinstance(target, str) and target is not None:
return [target] # type: ignore
# One Embedding
if isinstance(target[0], (int, float)):
return [target] # type: ignore
# One Metadata dict
if isinstance(target, dict):
return [target]
# Already a sequence
return target # type: ignore
def validate_ids(ids: IDs) -> IDs:
"""Validates ids to ensure it is a list of strings"""
if not isinstance(ids, list):
raise ValueError(f"Expected IDs to be a list, got {ids}")
for id in ids:
if not isinstance(id, str):
raise ValueError(f"Expected ID to be a str, got {id}")
if len(ids) != len(set(ids)):
dups = set([x for x in ids if ids.count(x) > 1])
raise errors.DuplicateIDError(
f"Expected IDs to be unique, found duplicates for: {dups}"
)
return ids
def validate_metadata(metadata: Metadata) -> Metadata:
"""Validates metadata to ensure it is a dictionary of strings to strings, ints, or floats"""
if not isinstance(metadata, dict):
raise ValueError(f"Expected metadata to be a dict, got {metadata}")
for key, value in metadata.items():
if not isinstance(key, str):
raise ValueError(f"Expected metadata key to be a str, got {key}")
if not isinstance(value, (str, int, float)):
raise ValueError(
f"Expected metadata value to be a str, int, or float, got {value}"
)
return metadata
def validate_metadatas(metadatas: Metadatas) -> Metadatas:
"""Validates metadatas to ensure it is a list of dictionaries of strings to strings, ints, or floats"""
if not isinstance(metadatas, list):
raise ValueError(f"Expected metadatas to be a list, got {metadatas}")
for metadata in metadatas:
validate_metadata(metadata)
return metadatas
def validate_where(where: Where) -> Where:
"""
Validates where to ensure it is a dictionary of strings to strings, ints, floats or operator expressions,
or in the case of $and and $or, a list of where expressions
"""
if not isinstance(where, dict):
raise ValueError(f"Expected where to be a dict, got {where}")
for key, value in where.items():
if not isinstance(key, str):
raise ValueError(f"Expected where key to be a str, got {key}")
if (
key != "$and"
and key != "$or"
and not isinstance(value, (str, int, float, dict))
):
raise ValueError(
f"Expected where value to be a str, int, float, or operator expression, got {value}"
)
if key == "$and" or key == "$or":
if not isinstance(value, list):
raise ValueError(
f"Expected where value for $and or $or to be a list of where expressions, got {value}"
)
if len(value) <= 1:
raise ValueError(
f"Expected where value for $and or $or to be a list with at least two where expressions, got {value}"
)
for where_expression in value:
validate_where(where_expression)
# Value is a operator expression
if isinstance(value, dict):
# Ensure there is only one operator
if len(value) != 1:
raise ValueError(
f"Expected operator expression to have exactly one operator, got {value}"
)
for operator, operand in value.items():
# Only numbers can be compared with gt, gte, lt, lte
if operator in ["$gt", "$gte", "$lt", "$lte"]:
if not isinstance(operand, (int, float)):
raise ValueError(
f"Expected operand value to be an int or a float for operator {operator}, got {operand}"
)
if operator not in ["$gt", "$gte", "$lt", "$lte", "$ne", "$eq"]:
raise ValueError(
f"Expected where operator to be one of $gt, $gte, $lt, $lte, $ne, $eq, got {operator}"
)
if not isinstance(operand, (str, int, float)):
raise ValueError(
f"Expected where operand value to be a str, int, or float, got {operand}"
)
return where
def validate_where_document(where_document: WhereDocument) -> WhereDocument:
"""
Validates where_document to ensure it is a dictionary of WhereDocumentOperator to strings, or in the case of $and and $or,
a list of where_document expressions
"""
if not isinstance(where_document, dict):
raise ValueError(
f"Expected where document to be a dictionary, got {where_document}"
)
if len(where_document) != 1:
raise ValueError(
f"Expected where document to have exactly one operator, got {where_document}"
)
for operator, operand in where_document.items():
if operator not in ["$contains", "$and", "$or"]:
raise ValueError(
f"Expected where document operator to be one of $contains, $and, $or, got {operator}"
)
if operator == "$and" or operator == "$or":
if not isinstance(operand, list):
raise ValueError(
f"Expected document value for $and or $or to be a list of where document expressions, got {operand}"
)
if len(operand) <= 1:
raise ValueError(
f"Expected document value for $and or $or to be a list with at least two where document expressions, got {operand}"
)
for where_document_expression in operand:
validate_where_document(where_document_expression)
# Value is a $contains operator
elif not isinstance(operand, str):
raise ValueError(
f"Expected where document operand value for operator $contains to be a str, got {operand}"
)
return where_document
def validate_include(include: Include, allow_distances: bool) -> Include:
"""Validates include to ensure it is a list of strings. Since get does not allow distances, allow_distances is used
to control if distances is allowed"""
if not isinstance(include, list):
raise ValueError(f"Expected include to be a list, got {include}")
for item in include:
if not isinstance(item, str):
raise ValueError(f"Expected include item to be a str, got {item}")
allowed_values = ["embeddings", "documents", "metadatas"]
if allow_distances:
allowed_values.append("distances")
if item not in allowed_values:
raise ValueError(
f"Expected include item to be one of {', '.join(allowed_values)}, got {item}"
)
return include
class SearchFunction(ABC):
@abstractmethod
def search(
self, query_embeddings: List[Embedding], index_data: dict
) -> List[List[ID]]:
pass
class CrossModalRetrieval(SearchFunction):
"""
Use the provided MultiModalEmbeddingFunction to compute embeddings for the query.
Select the corresponding embeddings of the other modality.
Perform similarity search using the computed embeddings.
Return the results.
"""
def __init__(self, modality: str):
self.modality = modality
def search(
self, query_embeddings: List[Embedding], index_data: dict
) -> List[List[ID]]:
other_modality = [m for m in ModalityType if m != self.modality][0] # type: ignore
# Get the embeddings for the other modality
other_embeddings = index_data[other_modality]
# Perform similarity search
distances = cosine_similarity(query_embeddings, other_embeddings) # type: ignore
sorted_indices = np.argsort(distances, axis=1)[:, ::-1]
# Get the result IDs
result_ids = index_data[f"{other_modality}_ids"][sorted_indices]
return result_ids.tolist()
class MultiModalFusion(SearchFunction):
def __init__(self, fusion_type: str):
self.fusion_type = fusion_type
def search(
self, query_embeddings: List[Embedding], index_data: dict
) -> List[List[ID]]:
if self.fusion_type == "early":
combined_query_embeddings = self.early_fusion(query_embeddings) # type: ignore
combined_index_embeddings = self.early_fusion(index_data)
elif self.fusion_type == "late":
return self.late_fusion(query_embeddings, index_data)
else:
raise ValueError("Invalid fusion_type specified")
distances = cosine_similarity(
combined_query_embeddings, combined_index_embeddings
)
sorted_indices = np.argsort(distances, axis=1)[:, ::-1]
# Get the result IDs
result_ids = index_data["ids"][sorted_indices]
return result_ids.tolist()
@staticmethod
def early_fusion(embeddings: dict) -> np.ndarray:
combined_embeddings = np.hstack(list(embeddings.values()))
return combined_embeddings
def late_fusion(
self, query_embeddings: List[Embedding], index_data: dict
) -> List[List[ID]]:
result_scores = []
for modality, embeddings in query_embeddings.items(): # type: ignore
distances = cosine_similarity(embeddings, index_data[modality])
result_scores.append(distances)
combined_scores = np.mean(result_scores, axis=0)
sorted_indices = np.argsort(combined_scores, axis=1)[:, ::-1]
# Get the result IDs
result_ids = index_data["ids"][sorted_indices]
return result_ids.tolist()
class ModalitySpecificSearching(SearchFunction):
def __init__(self, modality: str):
self.modality = modality
def search(
self, query_embeddings: List[Embedding], index_data: dict
) -> List[List[ID]]:
# Get the embeddings for the target modality
modality_embeddings = index_data[self.modality]
# Perform similarity search
distances = cosine_similarity(query_embeddings, modality_embeddings) # type: ignore
sorted_indices = np.argsort(distances, axis=1)[:, ::-1]
# Get the result IDs
result_ids = index_data[f"{self.modality}_ids"][sorted_indices]
return result_ids.tolist()
# to do -> better cosine perhaps torch nn.CosineSimilarity + exception handling
| Ocean-master | oceandb/api/types.py |
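A short, hedged exercise of the validators above, assuming the oceandb package and its ImageBind and scikit-learn dependencies are importable; the field names in the filters are illustrative.

from oceandb.api.types import (
    maybe_cast_one_to_many,
    validate_ids,
    validate_where,
    validate_where_document,
)

# A single id or a single embedding is promoted to a one-element list.
assert maybe_cast_one_to_many("id1") == ["id1"]
assert maybe_cast_one_to_many([0.1, 0.2]) == [[0.1, 0.2]]
validate_ids(["id1", "id2"])  # raises DuplicateIDError if ids repeat
# Nested boolean filters need at least two sub-expressions under $and/$or.
validate_where({"$and": [{"price": {"$gte": 4}}, {"color": "red"}]})
validate_where_document({"$contains": "hello"})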
from typing import TYPE_CHECKING, Optional, cast, List, Dict, Tuple
from pydantic import BaseModel, PrivateAttr
from uuid import UUID
from oceandb.api.types import (
CrossModalRetrieval,
Embedding,
Include,
Metadata,
Document,
ModalitySpecificSearching,
MultiModalFusion,
Where,
IDs,
EmbeddingFunction,
GetResult,
QueryResult,
ID,
OneOrMany,
WhereDocument,
maybe_cast_one_to_many,
validate_ids,
validate_include,
validate_metadatas,
validate_where,
validate_where_document,
)
import logging
logger = logging.getLogger(__name__)
if TYPE_CHECKING:
from oceandb.api import API
class Collection(BaseModel):
name: str
id: UUID
metadata: Optional[Dict] = None
_client: "API" = PrivateAttr()
_embedding_function: Optional[EmbeddingFunction] = PrivateAttr()
def __init__(
self,
client: "API",
name: str,
id: UUID,
embedding_function: Optional[EmbeddingFunction] = None,
metadata: Optional[Dict] = None,
):
self._client = client
if embedding_function is not None:
self._embedding_function = embedding_function
else:
import oceandb.utils.embedding_functions as ef
logger.warning(
"No embedding_function provided, using default embedding function: SentenceTransformerEmbeddingFunction"
)
self._embedding_function = ef.SentenceTransformerEmbeddingFunction()
super().__init__(name=name, metadata=metadata, id=id)
def __repr__(self):
return f"Collection(name={self.name})"
def count(self) -> int:
"""The total number of embeddings added to the database
Returns:
int: The total number of embeddings added to the database
"""
return self._client._count(collection_id=self.id)
def add(
self,
ids: OneOrMany[ID],
embeddings: Optional[OneOrMany[Embedding]] = None,
metadatas: Optional[OneOrMany[Metadata]] = None,
documents: Optional[OneOrMany[Document]] = None,
increment_index: bool = True,
):
"""Add embeddings to the data store.
Args:
ids: The ids of the embeddings you wish to add
            embeddings: The embeddings to add. If None, embeddings will be computed based on the documents using the embedding_function set for the Collection. Optional.
            metadatas: The metadata to associate with the embeddings. When querying, you can filter on this metadata. Optional.
            documents: The documents to associate with the embeddings. Optional.
Returns:
None
Raises:
ValueError: If you don't provide either embeddings or documents
ValueError: If the length of ids, embeddings, metadatas, or documents don't match
ValueError: If you don't provide an embedding function and don't provide embeddings
ValueError: If you provide both embeddings and documents
ValueError: If you provide an id that already exists
"""
ids, embeddings, metadatas, documents = self._validate_embedding_set(
ids, embeddings, metadatas, documents
)
self._client._add(
ids, self.id, embeddings, metadatas, documents, increment_index
)
def get(
self,
ids: Optional[OneOrMany[ID]] = None,
where: Optional[Where] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
where_document: Optional[WhereDocument] = None,
include: Include = ["metadatas", "documents"],
) -> GetResult:
"""Get embeddings and their associate data from the data store. If no ids or where filter is provided returns
all embeddings up to limit starting at offset.
Args:
ids: The ids of the embeddings to get. Optional.
where: A Where type dict used to filter results by. E.g. {"color" : "red", "price": 4.20}. Optional.
limit: The number of documents to return. Optional.
offset: The offset to start returning results from. Useful for paging results with limit. Optional.
            where_document: A WhereDocument type dict used to filter by the documents. E.g. {"$contains": "hello"}. Optional.
include: A list of what to include in the results. Can contain "embeddings", "metadatas", "documents". Ids are always included. Defaults to ["metadatas", "documents"]. Optional.
Returns:
GetResult: A GetResult object containing the results.
"""
where = validate_where(where) if where else None
where_document = (
validate_where_document(where_document) if where_document else None
)
ids = validate_ids(maybe_cast_one_to_many(ids)) if ids else None
include = validate_include(include, allow_distances=False)
return self._client._get(
self.id,
ids,
where,
None,
limit,
offset,
where_document=where_document,
include=include,
)
def peek(self, limit: int = 10) -> GetResult:
"""Get the first few results in the database up to limit
Args:
limit: The number of results to return.
Returns:
GetResult: A GetResult object containing the results.
"""
return self._client._peek(self.id, limit)
def query(
self,
search_function: Optional[str] = None,
query_embeddings: Optional[OneOrMany[Embedding]] = None,
query_texts: Optional[OneOrMany[Document]] = None,
n_results: int = 10,
where: Optional[Where] = None,
where_document: Optional[WhereDocument] = None,
include: Include = ["metadatas", "documents", "distances"],
) -> QueryResult:
"""Get the n_results nearest neighbor embeddings for provided query_embeddings or query_texts.
Args:
            query_embeddings: The embeddings to get the closest neighbors of. Optional.
            query_texts: The document texts to get the closest neighbors of. Optional.
n_results: The number of neighbors to return for each query_embedding or query_text. Optional.
where: A Where type dict used to filter results by. E.g. {"color" : "red", "price": 4.20}. Optional.
            where_document: A WhereDocument type dict used to filter by the documents. E.g. {"$contains": "hello"}. Optional.
include: A list of what to include in the results. Can contain "embeddings", "metadatas", "documents", "distances". Ids are always included. Defaults to ["metadatas", "documents", "distances"]. Optional.
Returns:
QueryResult: A QueryResult object containing the results.
Raises:
ValueError: If you don't provide either query_embeddings or query_texts
ValueError: If you provide both query_embeddings and query_texts
"""
where = validate_where(where) if where else None
where_document = (
validate_where_document(where_document) if where_document else None
)
query_embeddings = (
maybe_cast_one_to_many(query_embeddings)
if query_embeddings is not None
else None
)
query_texts = (
maybe_cast_one_to_many(query_texts) if query_texts is not None else None
)
include = validate_include(include, allow_distances=True)
# If neither query_embeddings nor query_texts are provided, or both are provided, raise an error
if (query_embeddings is None and query_texts is None) or (
query_embeddings is not None and query_texts is not None
):
raise ValueError(
"You must provide either query embeddings or query texts, but not both"
)
# If query_embeddings are not provided, we need to compute them from the query_texts
if query_embeddings is None:
if self._embedding_function is None:
raise ValueError(
"You must provide embeddings or a function to compute them"
)
# We know query texts is not None at this point, cast for the typechecker
query_embeddings = self._embedding_function(
cast(List[Document], query_texts)
)
if where is None:
where = {}
if where_document is None:
where_document = {}
if search_function:
if search_function == "cross_modal_retrieval":
CrossModalRetrieval() # type: ignore
elif search_function == "multi_modal_fusion":
MultiModalFusion() # type: ignore
elif search_function == "modality_specific_searching":
ModalitySpecificSearching() # type: ignore
else:
raise ValueError("Invalid search function specified")
result_ids = self._client.search(
search_function, query_embeddings, index_data
)
return self._client._fetch_results(
collection_id=self.id, result_ids=result_ids, include=include
)
else:
return self._client._query(
collection_id=self.id,
query_embeddings=query_embeddings,
n_results=n_results,
where=where,
where_document=where_document,
include=include,
)
# def fetch_results(self, collection_id: UUID, result_ids: List[str], include: List[str]) -> QueryResult:
# return self._fetch_results(collection_id, result_ids, include)
def modify(self, name: Optional[str] = None, metadata=None):
"""Modify the collection name or metadata
Args:
name: The updated name for the collection. Optional.
metadata: The updated metadata for the collection. Optional.
Returns:
None
"""
self._client._modify(id=self.id, new_name=name, new_metadata=metadata)
if name:
self.name = name
if metadata:
self.metadata = metadata
def update(
self,
ids: OneOrMany[ID],
embeddings: Optional[OneOrMany[Embedding]] = None,
metadatas: Optional[OneOrMany[Metadata]] = None,
documents: Optional[OneOrMany[Document]] = None,
):
"""Update the embeddings, metadatas or documents for provided ids.
Args:
ids: The ids of the embeddings to update
embeddings: The embeddings to add. If None, embeddings will be computed based on the documents using the embedding_function set for the Collection. Optional.
metadatas: The metadata to associate with the embeddings. When querying, you can filter on this metadata. Optional.
documents: The documents to associate with the embeddings. Optional.
Returns:
None
"""
ids, embeddings, metadatas, documents = self._validate_embedding_set(
ids, embeddings, metadatas, documents, require_embeddings_or_documents=False
)
self._client._update(self.id, ids, embeddings, metadatas, documents)
def upsert(
self,
ids: OneOrMany[ID],
embeddings: Optional[OneOrMany[Embedding]] = None,
metadatas: Optional[OneOrMany[Metadata]] = None,
documents: Optional[OneOrMany[Document]] = None,
increment_index: bool = True,
):
"""Update the embeddings, metadatas or documents for provided ids, or create them if they don't exist.
Args:
ids: The ids of the embeddings to update
embeddings: The embeddings to add. If None, embeddings will be computed based on the documents using the embedding_function set for the Collection. Optional.
metadatas: The metadata to associate with the embeddings. When querying, you can filter on this metadata. Optional.
documents: The documents to associate with the embeddings. Optional.
"""
ids, embeddings, metadatas, documents = self._validate_embedding_set(
ids, embeddings, metadatas, documents
)
self._client._upsert(
collection_id=self.id,
ids=ids,
embeddings=embeddings,
metadatas=metadatas,
documents=documents,
increment_index=increment_index,
)
def delete(
self,
ids: Optional[IDs] = None,
where: Optional[Where] = None,
where_document: Optional[WhereDocument] = None,
):
"""Delete the embeddings based on ids and/or a where filter
Args:
ids: The ids of the embeddings to delete
            where: A Where type dict used to filter the deletion by. E.g. {"color" : "red", "price": 4.20}. Optional.
            where_document: A WhereDocument type dict used to filter the deletion by the document content. E.g. {"$contains": "hello"}. Optional.
Returns:
None
"""
ids = validate_ids(maybe_cast_one_to_many(ids)) if ids else None
where = validate_where(where) if where else None
where_document = (
validate_where_document(where_document) if where_document else None
)
return self._client._delete(self.id, ids, where, where_document)
def create_index(self):
self._client.create_index(self.name)
def _validate_embedding_set(
self,
ids,
embeddings,
metadatas,
documents,
require_embeddings_or_documents=True,
) -> Tuple[
IDs,
Optional[List[Embedding]],
Optional[List[Metadata]],
Optional[List[Document]],
]:
ids = validate_ids(maybe_cast_one_to_many(ids))
embeddings = (
maybe_cast_one_to_many(embeddings) if embeddings is not None else None
)
metadatas = (
validate_metadatas(maybe_cast_one_to_many(metadatas))
if metadatas is not None
else None
)
documents = maybe_cast_one_to_many(documents) if documents is not None else None
# Check that one of embeddings or documents is provided
if require_embeddings_or_documents:
if embeddings is None and documents is None:
raise ValueError(
"You must provide either embeddings or documents, or both"
)
# Check that, if they're provided, the lengths of the arrays match the length of ids
if embeddings is not None and len(embeddings) != len(ids):
raise ValueError(
f"Number of embeddings {len(embeddings)} must match number of ids {len(ids)}"
)
if metadatas is not None and len(metadatas) != len(ids):
raise ValueError(
f"Number of metadatas {len(metadatas)} must match number of ids {len(ids)}"
)
if documents is not None and len(documents) != len(ids):
raise ValueError(
f"Number of documents {len(documents)} must match number of ids {len(ids)}"
)
# If document embeddings are not provided, we need to compute them
if embeddings is None and documents is not None:
if self._embedding_function is None:
raise ValueError(
"You must provide embeddings or a function to compute them"
)
embeddings = self._embedding_function(documents)
return ids, embeddings, metadatas, documents
| Ocean-master | oceandb/api/models/Collection.py |
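A hedged end-to-end sketch of the Collection surface above, written as a function so it can run against any concrete client; the document texts and metadata are illustrative, and embedding them relies on the collection's default embedding function.

from oceandb.api import API

def collection_smoke_test(client: API) -> None:
    collection = client.create_collection("demo", get_or_create=True)
    collection.add(
        ids=["a", "b"],
        documents=["first document", "second document"],
        metadatas=[{"source": "example"}, {"source": "example"}],
    )
    assert collection.count() == 2
    hits = collection.query(query_texts=["first"], n_results=1, include=["documents", "distances"])
    print(hits["ids"], hits["distances"])
    collection.update(ids=["a"], metadatas=[{"source": "updated"}])
    collection.delete(ids=["b"])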
from dataclasses import dataclass
from typing import ClassVar
from oceandb.telemetry import TelemetryEvent
@dataclass
class ClientStartEvent(TelemetryEvent):
name: ClassVar[str] = "client_start"
@dataclass
class ServerStartEvent(TelemetryEvent):
name: ClassVar[str] = "server_start"
@dataclass
class CollectionAddEvent(TelemetryEvent):
name: ClassVar[str] = "collection_add"
collection_uuid: str
add_amount: int
@dataclass
class CollectionDeleteEvent(TelemetryEvent):
name: ClassVar[str] = "collection_delete"
collection_uuid: str
delete_amount: int
| Ocean-master | oceandb/telemetry/events.py |
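A tiny sketch of how the event dataclasses above serialize: the ClassVar name identifies the event, and properties (defined on TelemetryEvent in the next file) is simply dataclasses.asdict of the payload; the uuid string here is illustrative.

from oceandb.telemetry.events import CollectionAddEvent

event = CollectionAddEvent(collection_uuid="illustrative-uuid", add_amount=3)
assert CollectionAddEvent.name == "collection_add"
assert event.properties == {"collection_uuid": "illustrative-uuid", "add_amount": 3}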
from abc import abstractmethod
from dataclasses import asdict, dataclass
import os
from typing import Callable, ClassVar
import uuid
import time
from threading import Event, Thread
import oceandb
from pathlib import Path
from oceandb.config import TELEMETRY_WHITELISTED_SETTINGS, Settings
from enum import Enum
class ServerContext(Enum):
NONE = "None"
FASTAPI = "FastAPI"
@dataclass
class TelemetryEvent:
name: ClassVar[str]
@property
def properties(self) -> dict:
return asdict(self)
class RepeatedTelemetry:
def __init__(self, interval, function):
self.interval = interval
self.function = function
self.start = time.time()
self.event = Event()
self.thread = Thread(target=self._target)
self.thread.daemon = True
self.thread.start()
def _target(self):
while not self.event.wait(self._time):
self.function()
@property
def _time(self):
return self.interval - ((time.time() - self.start) % self.interval)
def stop(self):
self.event.set()
self.thread.join()
class Telemetry:
USER_ID_PATH = str(Path.home() / ".cache" / "ocean" / "telemetry_user_id")
UNKNOWN_USER_ID = "UNKNOWN"
SERVER_CONTEXT: ServerContext = ServerContext.NONE
_curr_user_id = None
@abstractmethod
def __init__(self, settings: Settings):
pass
@abstractmethod
def capture(self, event: TelemetryEvent):
pass
# Schedule a function that creates a TelemetryEvent to be called every `every_seconds` seconds.
def schedule_event_function(
self, event_function: Callable[..., TelemetryEvent], every_seconds: int
):
RepeatedTelemetry(every_seconds, lambda: self.capture(event_function()))
@property
def context(self) -> dict:
ocean_version = oceandb.__version__
settings = oceandb.get_settings()
telemetry_settings = {}
for whitelisted in TELEMETRY_WHITELISTED_SETTINGS:
telemetry_settings[whitelisted] = settings[whitelisted]
self._context = {
"ocean_version": ocean_version,
"server_context": self.SERVER_CONTEXT.value,
**telemetry_settings,
}
return self._context
@property
def user_id(self) -> str:
if self._curr_user_id:
return self._curr_user_id
# File access may fail due to permissions or other reasons. We don't want to crash so we catch all exceptions.
try:
if not os.path.exists(self.USER_ID_PATH):
os.makedirs(os.path.dirname(self.USER_ID_PATH), exist_ok=True)
with open(self.USER_ID_PATH, "w") as f:
new_user_id = str(uuid.uuid4())
f.write(new_user_id)
self._curr_user_id = new_user_id
else:
with open(self.USER_ID_PATH, "r") as f:
self._curr_user_id = f.read()
except Exception:
self._curr_user_id = self.UNKNOWN_USER_ID
return self._curr_user_id
| Ocean-master | oceandb/telemetry/__init__.py |
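A small standalone sketch of the drift-free scheduling math in RepeatedTelemetry._time above: each wait is shortened by however long the previous tick overran, so calls stay aligned to start + k * interval instead of accumulating delay; the interval value is illustrative.

import time

start, interval = time.time(), 30.0

def next_wait(now: float) -> float:
    # Mirrors RepeatedTelemetry._time: time remaining until the next aligned tick.
    return interval - ((now - start) % interval)

# Immediately after start the full interval remains; 10 s in, only 20 s remain.
assert abs(next_wait(start) - 30.0) < 1e-6
assert abs(next_wait(start + 10.0) - 20.0) < 1e-6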
import posthog
import logging
import sys
from oceandb.config import Settings
from oceandb.telemetry import Telemetry, TelemetryEvent
logger = logging.getLogger(__name__)
class Posthog(Telemetry):
def __init__(self, settings: Settings):
if not settings.anonymized_telemetry or "pytest" in sys.modules:
posthog.disabled = True
else:
logger.info(
"Anonymized telemetry enabled. See https://docs.tryocean.com/telemetry for more information."
)
posthog.project_api_key = "phc_YeUxaojbKk5KPi8hNlx1bBKHzuZ4FDtl67kH1blv8Bh"
posthog_logger = logging.getLogger("posthog")
# Silence posthog's logging
posthog_logger.disabled = True
def capture(self, event: TelemetryEvent):
try:
posthog.capture(
self.user_id,
event.name,
{**(event.properties), "ocean_context": self.context},
)
except Exception as e:
logger.error(f"Failed to send telemetry event {event.name}: {e}")
| Ocean-master | oceandb/telemetry/posthog.py |
import torch
from nanox.model.encoder import NanoXGraphEncoder
from nanox.nanox import NanoXModel
# Initialize the encoder
encoder = NanoXGraphEncoder()
# Initialize the model
model = NanoXModel(encoder)
# Define the batched data
batched_data = torch.rand(10, 512) # Example data
# Forward pass through the model
output = model(batched_data)
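# Note (hedged): NanoXGraphEncoder.forward indexes batched_data by key ("x", "in_degree",
# "out_degree", "bias", "spatial_position", "edge_input", "attn_edge_type"), so a dict of
# tensors is the faithful input format; the shapes below are illustrative only.
n_graph, n_node, max_dist = 2, 16, 5
graph_batch = {
    "x": torch.randint(1, 10, (n_graph, n_node, 1)),                      # atom features
    "in_degree": torch.randint(0, 8, (n_graph, n_node)),
    "out_degree": torch.randint(0, 8, (n_graph, n_node)),
    "spatial_position": torch.randint(0, 4, (n_graph, n_node, n_node)),
    "bias": torch.zeros(n_graph, n_node + 1, n_node + 1),                 # includes the graph token slot
    "edge_input": torch.randint(0, 3, (n_graph, n_node, n_node, max_dist, 1)),
    "attn_edge_type": torch.randint(0, 3, (n_graph, n_node, n_node, 1)),
}
# output = model(graph_batch)  # uncomment once the encoder is constructed with its full config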
| NanoX-main | example.py |
from nanox.model.encoder import NanoXGraphEncoder
from nanox.nanox import NanoXModel, NanoX
| NanoX-main | nanox/__init__.py |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import LayerNorm
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
import copy
from types import SimpleNamespace
import torch.distributed as dist
from torch.hub import load_state_dict_from_url
#utils
def init_nanox_params(module):
#init weights
def normal_(data):
#fsdp => module params will be on cuda => back to cpu
data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))
if isinstance(module, nn.Linear):
normal_(module.weight.data)
if module.bias is not None:
            module.bias.data.zero_()
if isinstance(module, nn.Embedding):
normal_(module.weight.data)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, MultiheadAttention):
normal_(module.q_proj.weight.data)
        normal_(module.k_proj.weight.data)
normal_(module.v_proj.weight.data)
def init_params(module, n_layers):
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=0.02 / math.sqrt(n_layers))
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=0.02)
##############
class MultiwayNetwork(nn.Module):
def __init__(self, module, dim=1):
super().__init__()
self.dim = dim
self.A = module
self.B = copy.deepcopy(module)
self.B.reset_parameters()
self.split_position = -1
def forward(self, x, **kwargs):
if self.split_position == -1:
return self.A(x, **kwargs)
if self.split_position == 0:
return self.B(x, **kwargs)
x1, x2 = torch.split(
x,
[self.split_position, x.size(self.dim) - self.split_position],
dim=self.dim,
)
# x1, x2 = x[:self.split_position], x[self.split_position:]
y1, y2 = self.A(x1, **kwargs), self.B(x2, **kwargs)
return torch.cat([y1, y2], dim=self.dim)
class MutliwayEmbedding(MultiwayNetwork):
def __init__(self, modules, dim=1):
super(MultiwayNetwork, self).__init__()
self.dim = dim
assert len(modules) == 2
self.A = modules[0]
self.B = modules[1]
self.split_position = -1
class XPOS(nn.Module):
def __init__(
self, head_dim, scale_base=512
):
super().__init__()
self.head_dim = head_dim
self.scale_base = scale_base
self.register_buffer(
"scale", (torch.arange(0, head_dim, 2) + 0.4 * head_dim) / (1.4 * head_dim)
)
def forward(self, x, offset=0, downscale=False):
length = x.shape[1]
min_pos = -(length + offset) // 2
max_pos = length + offset + min_pos
scale = self.scale ** torch.arange(min_pos, max_pos, 1).to(self.scale).div(self.scale_base)[:, None]
sin, cos = fixed_pos_embedding(scale)
if scale.shape[0] > length:
scale = scale[-length:]
sin = sin[-length:]
cos = cos[-length:]
if downscale:
scale = 1 / scale
x = apply_rotary_pos_emb(x, sin, cos, scale)
return x
def MultiwayWrapper(args, module, dim=1):
if args.multiway:
return MultiwayNetwork(module, dim=dim)
return module
def set_split_position(position):
def apply_fn(module):
if hasattr(module, "split_position"):
module.split_position = position
return apply_fn
def fixed_pos_embedding(x):
seq_len, dim = x.shape
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim) / dim))
sinusoid_inp = (
torch.einsum("i , j -> i j", torch.arange(0, seq_len, dtype=torch.float), inv_freq).to(x)
)
return torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)
def rotate_every_two(x):
x1 = x[:, :, ::2]
x2 = x[:, :, 1::2]
x = torch.stack((-x2, x1), dim=-1)
return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')\
def duplicate_interleave(m):
"""
A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy.
"""
dim0 = m.shape[0]
m = m.view(-1, 1) # flatten the matrix
m = m.repeat(1, 2) # repeat all elements into the 2nd dimension
m = m.view(dim0, -1) # reshape into a matrix, interleaving the copy
return m
def apply_rotary_pos_emb(x, sin, cos, scale=1):
sin, cos = map(lambda t: duplicate_interleave(t * scale), (sin, cos))
# einsum notation for lambda t: repeat(t[offset:x.shape[1]+offset,:], "n d -> () n () (d j)", j=2)
return (x * cos) + (rotate_every_two(x) * sin)
class MultiheadAttention(nn.Module):
def __init__(
self,
args,
embed_dim,
num_heads,
dropout=0.0,
self_attention=False,
encoder_decoder_attention=False,
subln=False,
):
super().__init__()
self.args = args
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.scaling = self.head_dim**-0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert self.self_attention ^ self.encoder_decoder_attention
self.k_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.v_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.q_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.out_proj = MultiwayWrapper(
args, nn.Linear(embed_dim, embed_dim, bias=True)
)
self.inner_attn_ln = (
MultiwayWrapper(args, LayerNorm(self.embed_dim, eps=args.layernorm_eps))
if subln and self.self_attention
else None
)
self.dropout_module = torch.nn.Dropout(dropout)
self.xpos = (
XPOS(self.head_dim, args.xpos_scale_base)
if args.xpos_rel_pos and self.self_attention
else None
)
def reset_parameters(self):
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.out_proj.weight)
nn.init.constant_(self.out_proj.bias, 0.0)
def forward(
self,
query,
key,
value,
incremental_state=None,
key_padding_mask=None,
attn_mask=None,
rel_pos=None,
is_first_step=False,
):
bsz, tgt_len, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
key_bsz, src_len, _ = key.size()
assert key_bsz == bsz, f"{query.size(), key.size()}"
assert value is not None
        assert (bsz, src_len) == value.shape[:2]
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
q = q.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2)
k = k.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2)
v = v.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2)
q = q.reshape(bsz * self.num_heads, tgt_len, self.head_dim)
k = k.reshape(bsz * self.num_heads, src_len, self.head_dim)
v = v.reshape(bsz * self.num_heads, src_len, self.head_dim)
if incremental_state is not None:
if "prev_key" in incremental_state:
prev_key = incremental_state["prev_key"].view(
bsz * self.num_heads, -1, self.head_dim
)
prev_value = incremental_state["prev_value"].view(
bsz * self.num_heads, -1, self.head_dim
)
k = torch.cat([prev_key, k], dim=1)
v = torch.cat([prev_value, v], dim=1)
incremental_state["prev_key"] = k.view(
bsz, self.num_heads, -1, self.head_dim
)
incremental_state["prev_value"] = v.view(
bsz, self.num_heads, -1, self.head_dim
)
src_len = k.size(1)
if self.xpos is not None:
if incremental_state is not None and not is_first_step:
offset = src_len - 1
else:
offset = 0
k = self.xpos(k, offset=0, downscale=True)
q = self.xpos(q, offset=offset, downscale=False)
attn_weights = torch.bmm(q, k.transpose(1, 2))
if attn_mask is not None:
attn_weights = torch.nan_to_num(attn_weights)
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if rel_pos is not None:
rel_pos = rel_pos.view(attn_weights.size())
attn_weights = attn_weights + rel_pos
attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).type_as(
attn_weights
)
attn_probs = self.dropout_module(attn_weights)
attn = torch.bmm(attn_probs, v)
attn = attn.transpose(0, 1).reshape(tgt_len, bsz, embed_dim).transpose(0, 1)
if self.inner_attn_ln is not None:
attn = self.inner_attn_ln(attn)
attn = self.out_proj(attn)
attn_weights = attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
return attn, attn_weights
PRETRAINED_MODEL_URLS = {
"pcqm4mv1_graphormer_base":"https://ml2md.blob.core.windows.net/graphormer-ckpts/checkpoint_best_pcqm4mv1.pt",
"pcqm4mv2_graphormer_base":"https://ml2md.blob.core.windows.net/graphormer-ckpts/checkpoint_best_pcqm4mv2.pt",
"oc20is2re_graphormer3d_base":"https://szheng.blob.core.windows.net/graphormer/modelzoo/oc20is2re/checkpoint_last_oc20_is2re.pt", # this pretrained model is temporarily unavailable
"pcqm4mv1_graphormer_base_for_molhiv":"https://ml2md.blob.core.windows.net/graphormer-ckpts/checkpoint_base_preln_pcqm4mv1_for_hiv.pt",
}
def load_pretrained_model(model_name):
if model_name not in PRETRAINED_MODEL_URLS:
raise ValueError(f"IN load_pretrained_model => UNKOWN model name {model_name}")
if not dist.is_initialized():
return load_state_dict_from_url(PRETRAINED_MODEL_URLS[model_name], progress=True)["model"]
else:
pretrained_model = load_state_dict_from_url(PRETRAINED_MODEL_URLS[model_name], progress=True, file_name=f"{model_name}_{dist.get_rank()}")["model"]
dist.barrier()
return pretrained_model
#### model
class NanoNodeFeature(nn.Module):
    # compute a feature embedding for each node in the graph
def __init__(
self,
num_heads,
num_atoms,
num_in_degree,
num_out_degree,
hidden_dim,
n_layers
):
super(NanoNodeFeature, self).__init__()
self.num_heads = num_heads
self.num_atoms = num_atoms
#graph token
self.atom_encoder = nn.Embedding(num_atoms + 1, hidden_dim, padding_idx=0)
self.in_degree_encoder = nn.Embedding(num_in_degree, hidden_dim, padding_idx=0)
self.out_degree_encoder = nn.Embedding(
num_out_degree, hidden_dim, padding_idx=0
)
self.graph_token = nn.Embedding(1, hidden_dim)
self.apply(lambda module: init_params(module, n_layers=n_layers))
def forward(self, batched_data):
x, in_degree, out_degree = (
batched_data["x"],
batched_data["in_degree"],
batched_data["out_degree"],
)
n_graph, n_node = x.size()[:2]
#node feature + graph token
node_feature = self.atom_encoder(x).sum(dim=-2) # [n_graph, n_node, n_hidden]
        node_feature = (
            node_feature
            + self.in_degree_encoder(in_degree)
            + self.out_degree_encoder(out_degree)
        )
graph_token_feature = self.graph_token.weight.unsqueeze(0).repeat(n_graph, 1, 1)
graph_node_feature = torch.cat([graph_token_feature, node_feature], dim=1)
return graph_node_feature
class NanoBias(nn.Module):
#compute attn bias for each head
def __init__(
self,
num_heads,
num_atoms,
num_edges,
num_spatial,
        num_edge_distance,
hidden_dim,
edge_type,
multi_hop_max_dist,
n_layers,
):
super(NanoBias, self).__init__()
self.num_heads = num_heads
self.multi_hop_max_dist = multi_hop_max_dist
self.edge_encoder = nn.Embedding(num_edges + 1, num_heads, padding_idx=0)
self.edge_type = edge_type
if self.edge_type == "multihop":
self.edge_dis_encoder = nn.Embedding(
                num_edge_distance * num_heads * num_heads, 1
)
self.spatial_encoder = nn.Embedding(num_spatial, num_heads, padding_idx=0)
self.graph_token_virtual_distance = nn.Embedding(1, num_heads)
self.apply(lambda module: init_params(module, n_layers=n_layers))
def forward(self, batched_data):
bias, spatial_position, x = (
batched_data["bias"],
batched_data["spatial_position"],
batched_data["x"],
)
edge_input, attn_edge_type = (
batched_data["edge_input"],
batched_data["attn_edge_type"],
)
n_graph, n_node = x.size()[:2]
graph_bias = bias.clone()
graph_bias = graph_bias.unsqueeze(1).repeat(
1, self.num_heads, 1, 1
) # [n_graph, n_head, n_node+1, n_node+1]
#spatial position
#[n_graph, n_node, n_node, n_head] -> [n_graph, n_head, n_node, n_node]
        spatial_position_bias = self.spatial_encoder(spatial_position).permute(0, 3, 1, 2)
        graph_bias[:, :, 1:, 1:] = graph_bias[:, :, 1:, 1:] + spatial_position_bias
#reset spatial position here
reshaped = self.graph_token_virtual_distance.weight.view(1, self.num_heads, 1)
graph_bias[:, :, 1:, 0] = graph_bias[:, :, 1:, 0] + reshaped
graph_bias[:, :, 0, :] = graph_bias[:, :, 0, :] + reshaped
#edge feature
if self.edge_type == "multi_hop":
spatial_position_ = spatial_position.clone()
spatial_position_[spatial_position_ == 0] = 1 #set pad to 1
spatial_position_ = torch.where(spatial_position_ > 1, spatial_position_ - 1, spatial_position_)
if self.multi_hop_max_dist > 0:
spatial_position_ = spatial_position_.clamp(0, self.multi_hop_max_dist)
edge_input = edge_input[:, :, :, : self.multi_hop_max_dist, :]
#[n_graph, n_node, n_node, max_dist, n_head]
edge_input = self.edge_encoder(edge_input).mean(-2)
max_dist = edge_input.size(-2)
edge_input_flat = edge_input.permute(3, 0, 1, 2, 4).reshape(
max_dist, -1, self.num_heads
)
edge_input_flat = torch.bmm(
edge_input_flat,
self.edge_dis_encoder.weight.reshape(
-1, self.num_heads, self.num_heads
)[:max_dist, :, :],
)
edge_input = edge_input_flat.reshape(
                max_dist, n_graph, n_node, n_node, self.num_heads
).permute(1, 2, 3, 0, 4)
edge_input = (
edge_input.sum(-2) / (spatial_position_.float().unsqueeze(-1))
).permute(0, 3, 1, 2)
else:
#[n_graph, n_node, n_node, n_head] -> [n_graph, n_head, n_node, n_node, ]
edge_input = self.edge_encoder(attn_edge_type).mean(-2).permute(0, 3, 1, 2)
graph_bias[:, :, 1:, 1:] = graph_bias[:, :, 1:, 1:] + edge_input
graph_bias = graph_bias + bias.unsqueeze(1) # reset
return graph_bias
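# Shape notes for the bias computed above: graph_bias is
# [n_graph, num_heads, n_node + 1, n_node + 1], where index 0 along the last two axes is the
# virtual graph token. The spatial-distance embedding fills the node-node block, the learned
# virtual-token distance fills row/column 0, edge features are added on top, and the original
# bias mask is re-added at the end (the `# reset` above) so masked positions stay masked.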
class NanoXGraphEncoderLayer(nn.Module):
def __init__(self, embedding_dim=768,
ffn_embedding_dim=3072,
num_heads=8,
dropout=0.1,
                 activation_fn=F.relu, pre_layernorm=False, export=False):
super().__init__()
self.embedding_dim = embedding_dim
self.num_heads = num_heads
self.dropout = dropout
self.pre_layernorm = pre_layernorm
self.dropout_module = nn.Dropout(dropout)
self.activation_fn = activation_fn
        # The MultiheadAttention defined above expects an `args` config object and a mode flag;
        # since the layer receives neither, a minimal default (no multiway projections, no xPos)
        # is assumed here.
        attn_args = SimpleNamespace(multiway=False, layernorm_eps=1e-5, xpos_rel_pos=False, xpos_scale_base=512)
        self.self_attn = MultiheadAttention(attn_args, embedding_dim, num_heads, dropout=dropout, self_attention=True)
self.self_attn_layer_norm = nn.LayerNorm(embedding_dim)
self.fc1 = nn.Linear(embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, embedding_dim)
self.final_layer_norm = nn.LayerNorm(embedding_dim)
    def forward(self, x, self_attn_bias=None, self_attn_mask=None, self_attn_padding_mask=None):
residual = x
if self.pre_layernorm:
x = self.self_attn_layer_norm(x)
        # The graph attention bias is routed through the attention module's rel_pos hook,
        # which reshapes it to (batch * heads, nodes, nodes) before adding it to the logits.
        x, attn = self.self_attn(x, x, x, attn_mask=self_attn_mask, key_padding_mask=self_attn_padding_mask, rel_pos=self_attn_bias)
x = self.dropout_module(x)
x = residual + x
if not self.pre_layernorm:
x = self.self_attn_layer_norm(x)
residual = x
if self.pre_layernorm:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.dropout_module(x)
x = self.fc2(x)
x = residual + x
if not self.pre_layernorm:
x = self.final_layer_norm(x)
return x, attn
#############
class NanoXGraphEncoder(nn.Module):
def __init__(self,
num_atoms,
num_in_degree,
num_out_degree,
num_edges, num_spatial,
num_edge_distance,
edge_type,
multi_hop_max_dist,
num_encoder_layers=12,
embedding_dim=768,
ffn_embedding_dim=768,
num_attention_heads=32,
dropout=0.1,
activation_fn=nn.ReLU(),
embed_scale=None,
freeze_embeddings=False,
n_trans_layers_to_freeze=0,
export=False,
traceable=False,
q_noise=0.0,
qn_block_size=8):
super().__init__()
self.dropout_module = nn.Dropout(dropout)
self.embedding_dim = embedding_dim
self.traceable = traceable
self.graph_node_feature = NanoNodeFeature(num_heads=num_attention_heads,
num_atoms=num_atoms,
num_in_degree=num_in_degree,
num_out_degree=num_out_degree,
hidden_dim=embedding_dim,
n_layers=num_encoder_layers)
self.graph_attn_bias = NanoBias(num_heads=num_attention_heads,
num_atoms=num_atoms,
num_edges=num_edges,
num_spatial=num_spatial,
num_edge_distance=num_edge_distance,
edge_type=edge_type,
multi_hop_max_dist=multi_hop_max_dist,
hidden_dim=embedding_dim,
n_layers=num_encoder_layers)
        self.embed_scale = embed_scale
if q_noise > 0:
self.quant_noise = nn.Linear(self.embedding_dim, self.embedding_dim, bias=False)
else:
self.quant_noise = None
self.emb_layer_norm = nn.LayerNorm(self.embedding_dim, elementwise_affine=export)
self.layers = nn.ModuleList([NanoXGraphEncoderLayer(embedding_dim=embedding_dim,
ffn_embedding_dim=ffn_embedding_dim,
num_heads=num_attention_heads,
dropout=dropout,
activation_fn=activation_fn,
export=export) for _ in range(num_encoder_layers)])
if freeze_embeddings:
for layer in range(n_trans_layers_to_freeze):
for param in self.layers[layer].parameters():
                    param.requires_grad = False
def forward(self, batched_data,
                perturb=None,
last_state_only=False,
token_embedding=None,
mask=None):
data_x = batched_data["x"]
n_graph, n_node = data_x.size()[:2]
padding_mask = (data_x[:, :, 0]).eq(0)
padding_mask_cls = torch.zeros(n_graph, 1, device=padding_mask.device,
dtype=padding_mask.dtype)
padding_mask = torch.cat((padding_mask_cls, padding_mask), dim=1)
if token_embedding is not None:
x = token_embedding
else:
x = self.graph_node_feature(batched_data)
        if perturb is not None:
            x[:, 1:, :] *= perturb
attn_bias = self.graph_attn_bias(batched_data)
if self.embed_scale is not None:
x = x * self.embed_scale
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.emb_layer_norm is not None:
x = self.emb_layer_norm(x)
x = self.dropout_module(x)
        x = x.transpose(0, 1)
inner_states = [x] if not last_state_only else []
for layer in self.layers:
x, _ = layer(x, self_attn_padding_mask=padding_mask, self_attn_mask=mask,
self_attn_bias=attn_bias)
if not last_state_only:
inner_states.append(x)
graph_representation = x[0, :, :]
if self.traceable:
return torch.stack(inner_states), graph_representation
else:
return inner_states, graph_representation
######################
class NanoXModel(nn.Module):
def __init__(self, encoder, encoder_embed_dim=1024, pretrained_model_name="none", load_pretrained_model_output_layer=True):
super().__init__()
self.encoder = encoder
self.encoder_embed_dim = encoder_embed_dim
if pretrained_model_name != "none":
self.load_state_dict(load_pretrained_model(pretrained_model_name))
if not load_pretrained_model_output_layer:
self.encoder.reset_output_layer_parameters()
def max_nodes(self):
return self.encoder.max_nodes
def forward(self, batched_data, **kwargs):
return self.encoder(batched_data, **kwargs)
class NanoX(nn.Module):
def __init__(self, max_nodes=512, share_input_output_embed=False, remove_head=False, activation_fn=nn.GELU()):
super().__init__()
self.max_nodes = max_nodes
self.graph_encoder = NanoXGraphEncoder()
self.share_input_output_embed = share_input_output_embed
self.embed_out = None
self.lm_output_learned_bias = None
self.load_softmax = not remove_head
self.masked_lm_pooler = nn.Linear(1024, 1024)
self.lm_head_transform_weight = nn.Linear(1024, 1024)
self.activation_fn = activation_fn
self.layer_norm = LayerNorm(1024)
self.lm_output_learned_bias = None
if self.load_softmax:
self.lm_output_learned_bias = nn.Parameter(torch.zeros(1))
if not self.share_input_output_embed:
self.embed_out = nn.Linear(1024, 1000, bias=False)
def reset_output_layer_parameters(self):
self.lm_output_learned_bias = nn.Parameter(torch.zeros(1))
if self.embed_out is not None:
self.embed_out.reset_parameters()
def forward(self, batched_data, perturb=None, masked_tokens=None, **unused):
inner_states, graph_representation = self.graph_encoder(batched_data, perturb=perturb)
x = inner_states[-1].transpose(0, 1)
if masked_tokens is not None:
raise NotImplementedError
x = self.layer_norm(self.activation_fn(self.lm_head_transform_weight(x)))
if self.share_input_output_embed and hasattr(self.graph_encoder.embed_tokens, "weight"):
x = F.linear(x, self.graph_encoder.embed_tokens.weight)
elif self.embed_out is not None:
x = self.embed_out(x)
if self.lm_output_learned_bias is not None:
x = x + self.lm_output_learned_bias
return x
def max_nodes(self):
return self.max_nodes
def upgrade_state_dict_named(self, state_dict, name):
if not self.load_softmax:
for k in list(state_dict.keys()):
if "embed_out.weight" in k or "lm_output_learned_bias" in k:
del state_dict[k]
        return state_dict
| NanoX-main | nanox/nanox.py |
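A hedged sketch exercising the XPOS rotary scaling defined in the file above, assuming torch is installed and the module imports as nanox.nanox; the batch, sequence, and head sizes are illustrative.

import torch
from nanox.nanox import XPOS

# Per-head tensors of shape (batch * heads, seq_len, head_dim), as used inside MultiheadAttention.
xpos = XPOS(head_dim=64, scale_base=512)
q = torch.randn(8, 32, 64)
q_rot = xpos(q, offset=0, downscale=False)   # queries get the forward scaling
k_rot = xpos(q, offset=0, downscale=True)    # keys get the inverse scaling
assert q_rot.shape == q.shape == k_rot.shape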
import math
import torch
import torch.nn.functional as F
from torch import nn
try:
from apex.normalization import FusedLayerNorm as LayerNorm
except ModuleNotFoundError:
from torch.nn import LayerNorm
import copy
def MultiwayWrapper(args, module, dim=1):
if args.multiway:
return MultiwayNetwork(module, dim=dim)
return module
def set_split_position(position):
def apply_fn(module):
if hasattr(module, "split_position"):
module.split_position = position
return apply_fn
class MultiwayNetwork(nn.Module):
def __init__(self, module, dim=1):
super().__init__()
self.dim = dim
self.A = module
self.B = copy.deepcopy(module)
self.B.reset_parameters()
self.split_position = -1
def forward(self, x, **kwargs):
if self.split_position == -1:
return self.A(x, **kwargs)
if self.split_position == 0:
return self.B(x, **kwargs)
x1, x2 = torch.split(
x,
[self.split_position, x.size(self.dim) - self.split_position],
dim=self.dim,
)
# x1, x2 = x[:self.split_position], x[self.split_position:]
y1, y2 = self.A(x1, **kwargs), self.B(x2, **kwargs)
return torch.cat([y1, y2], dim=self.dim)
class MutliwayEmbedding(MultiwayNetwork):
def __init__(self, modules, dim=1):
super(MultiwayNetwork, self).__init__()
self.dim = dim
assert len(modules) == 2
self.A = modules[0]
self.B = modules[1]
self.split_position = -1
def fixed_pos_embedding(x):
seq_len, dim = x.shape
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim) / dim))
sinusoid_inp = (
torch.einsum("i , j -> i j", torch.arange(0, seq_len, dtype=torch.float), inv_freq).to(x)
)
return torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)
def rotate_every_two(x):
x1 = x[:, :, ::2]
x2 = x[:, :, 1::2]
x = torch.stack((-x2, x1), dim=-1)
return x.flatten(-2) # in einsum notation: rearrange(x, '... d j -> ... (d j)')\
def duplicate_interleave(m):
"""
A simple version of `torch.repeat_interleave` for duplicating a matrix while interleaving the copy.
"""
dim0 = m.shape[0]
m = m.view(-1, 1) # flatten the matrix
m = m.repeat(1, 2) # repeat all elements into the 2nd dimension
m = m.view(dim0, -1) # reshape into a matrix, interleaving the copy
return m
def apply_rotary_pos_emb(x, sin, cos, scale=1):
sin, cos = map(lambda t: duplicate_interleave(t * scale), (sin, cos))
# einsum notation for lambda t: repeat(t[offset:x.shape[1]+offset,:], "n d -> () n () (d j)", j=2)
return (x * cos) + (rotate_every_two(x) * sin)
class XPOS(nn.Module):
def __init__(
self, head_dim, scale_base=512
):
super().__init__()
self.head_dim = head_dim
self.scale_base = scale_base
self.register_buffer(
"scale", (torch.arange(0, head_dim, 2) + 0.4 * head_dim) / (1.4 * head_dim)
)
def forward(self, x, offset=0, downscale=False):
length = x.shape[1]
min_pos = -(length + offset) // 2
max_pos = length + offset + min_pos
scale = self.scale ** torch.arange(min_pos, max_pos, 1).to(self.scale).div(self.scale_base)[:, None]
sin, cos = fixed_pos_embedding(scale)
if scale.shape[0] > length:
scale = scale[-length:]
sin = sin[-length:]
cos = cos[-length:]
if downscale:
scale = 1 / scale
x = apply_rotary_pos_emb(x, sin, cos, scale)
return x
class MultiheadAttention(nn.Module):
def __init__(
self,
args,
embed_dim,
num_heads,
dropout=0.0,
self_attention=False,
encoder_decoder_attention=False,
subln=False,
):
super().__init__()
self.args = args
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.scaling = self.head_dim**-0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert self.self_attention ^ self.encoder_decoder_attention
self.k_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.v_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.q_proj = MultiwayWrapper(args, nn.Linear(embed_dim, embed_dim, bias=True))
self.out_proj = MultiwayWrapper(
args, nn.Linear(embed_dim, embed_dim, bias=True)
)
self.inner_attn_ln = (
MultiwayWrapper(args, LayerNorm(self.embed_dim, eps=args.layernorm_eps))
if subln and self.self_attention
else None
)
self.dropout_module = torch.nn.Dropout(dropout)
self.xpos = (
XPOS(self.head_dim, args.xpos_scale_base)
if args.xpos_rel_pos and self.self_attention
else None
)
def reset_parameters(self):
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.out_proj.weight)
nn.init.constant_(self.out_proj.bias, 0.0)
def forward(
self,
query,
key,
value,
incremental_state=None,
key_padding_mask=None,
attn_mask=None,
rel_pos=None,
is_first_step=False,
):
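# query/key/value are expected as (batch, seq_len, embed_dim); heads are split out and
# flattened into the batch dimension for batched matrix-multiply attention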
bsz, tgt_len, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim, f"query dim {embed_dim} != {self.embed_dim}"
key_bsz, src_len, _ = key.size()
assert key_bsz == bsz, f"{query.size(), key.size()}"
assert value is not None
assert (bsz, src_len) == value.shape[:2]
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
q = q.view(bsz, tgt_len, self.num_heads, self.head_dim).transpose(1, 2)
k = k.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2)
v = v.view(bsz, src_len, self.num_heads, self.head_dim).transpose(1, 2)
q = q.reshape(bsz * self.num_heads, tgt_len, self.head_dim)
k = k.reshape(bsz * self.num_heads, src_len, self.head_dim)
v = v.reshape(bsz * self.num_heads, src_len, self.head_dim)
if incremental_state is not None:
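# incremental decoding: prepend cached keys/values from previous steps, then update the cache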
if "prev_key" in incremental_state:
prev_key = incremental_state["prev_key"].view(
bsz * self.num_heads, -1, self.head_dim
)
prev_value = incremental_state["prev_value"].view(
bsz * self.num_heads, -1, self.head_dim
)
k = torch.cat([prev_key, k], dim=1)
v = torch.cat([prev_value, v], dim=1)
incremental_state["prev_key"] = k.view(
bsz, self.num_heads, -1, self.head_dim
)
incremental_state["prev_value"] = v.view(
bsz, self.num_heads, -1, self.head_dim
)
src_len = k.size(1)
if self.xpos is not None:
if incremental_state is not None and not is_first_step:
offset = src_len - 1
else:
offset = 0
k = self.xpos(k, offset=0, downscale=True)
q = self.xpos(q, offset=offset, downscale=False)
attn_weights = torch.bmm(q, k.transpose(1, 2))
if attn_mask is not None:
attn_weights = torch.nan_to_num(attn_weights)
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if rel_pos is not None:
rel_pos = rel_pos.view(attn_weights.size())
attn_weights = attn_weights + rel_pos
attn_weights = F.softmax(attn_weights, dim=-1, dtype=torch.float32).type_as(
attn_weights
)
attn_probs = self.dropout_module(attn_weights)
attn = torch.bmm(attn_probs, v)
attn = attn.transpose(0, 1).reshape(tgt_len, bsz, embed_dim).transpose(0, 1)
if self.inner_attn_ln is not None:
attn = self.inner_attn_ln(attn)
attn = self.out_proj(attn)
attn_weights = attn_weights.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
return attn, attn_weights | NanoX-main | nanox/model/multihead_attention.py |
from torch.hub import load_state_dict_from_url
import torch.distributed as dist
PRETRAINED_MODEL_URLS = {
"pcqm4mv1_graphormer_base":"https://ml2md.blob.core.windows.net/graphormer-ckpts/checkpoint_best_pcqm4mv1.pt",
"pcqm4mv2_graphormer_base":"https://ml2md.blob.core.windows.net/graphormer-ckpts/checkpoint_best_pcqm4mv2.pt",
"oc20is2re_graphormer3d_base":"https://szheng.blob.core.windows.net/graphormer/modelzoo/oc20is2re/checkpoint_last_oc20_is2re.pt", # this pretrained model is temporarily unavailable
"pcqm4mv1_graphormer_base_for_molhiv":"https://ml2md.blob.core.windows.net/graphormer-ckpts/checkpoint_base_preln_pcqm4mv1_for_hiv.pt",
}
def load_pretrained_model(model_name):
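# fetch a pretrained checkpoint by name; in distributed runs each rank downloads to a
# rank-specific file and all ranks synchronize on a barrier before continuing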
if model_name not in PRETRAINED_MODEL_URLS:
raise ValueError(f"IN load_pretrained_model => UNKOWN model name {model_name}")
if not dist.is_initialized():
return load_state_dict_from_url(PRETRAINED_MODEL_URLS[model_name], progress=True)["model"]
else:
pretrained_model = load_state_dict_from_url(PRETRAINED_MODEL_URLS[model_name], progress=True, file_name=f"{model_name}_{dist.get_rank()}")["model"]
dist.barrier()
return pretrained_model
| NanoX-main | nanox/model/pretrained.py |
from types import SimpleNamespace
import torch.nn as nn
from nanox.model.multihead_attention import MultiheadAttention
class NanoXGraphEncoderLayer(nn.Module):
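# a standard transformer encoder block (self-attention + feed-forward) with optional pre-layernorm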
def __init__(self, embedding_dim=768,
ffn_embedding_dim=3072,
num_heads=8,
dropout=0.1,
activation_fn=nn.ReLU(), pre_layernorm=False):
super().__init__()
self.embedding_dim = embedding_dim
self.num_heads = num_heads
self.dropout = dropout
self.pre_layernorm = pre_layernorm
self.dropout_module = nn.Dropout(dropout)
self.activation_fn = activation_fn
# NOTE: nanox's MultiheadAttention requires an `args` config; a minimal stand-in with
# multiway and xPos disabled is assumed here
attention_args = SimpleNamespace(multiway=False, xpos_rel_pos=False, xpos_scale_base=512, layernorm_eps=1e-5)
self.self_attn = MultiheadAttention(attention_args, embed_dim=embedding_dim, num_heads=num_heads, dropout=dropout, self_attention=True)
self.self_attn_layer_norm = nn.LayerNorm(embedding_dim)
self.fc1 = nn.Linear(embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, embedding_dim)
self.final_layer_norm = nn.LayerNorm(embedding_dim)
def forward(self, x, self_attn_bias=None, self_attn_mask=None, self_attn_padding_mask=None):
residual = x
if self.pre_layernorm:
x = self.self_attn_layer_norm(x)
x, attn = self.self_attn(x, x, x, attn_mask=self_attn_mask, key_padding_mask=self_attn_padding_mask, rel_pos=self_attn_bias)
x = self.dropout_module(x)
x = residual + x
if not self.pre_layernorm:
x = self.self_attn_layer_norm(x)
residual = x
if self.pre_layernorm:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.dropout_module(x)
x = self.fc2(x)
x = residual + x
if not self.pre_layernorm:
x = self.final_layer_norm(x)
return x, attn | NanoX-main | nanox/model/encoder_layer.py |
NanoX-main | nanox/model/__init__.py |
import torch
import torch.nn as nn
from nanox.model.layers import NanoNodeFeature, NanoBias
from nanox.model.encoder_layer import NanoXGraphEncoderLayer
from nanox.model.multihead_attention import MultiheadAttention
def init_nanox_params(module):
#init weights
def normal_(data):
# FSDP: module params may already be on CUDA, so initialize on CPU and copy back to the original device
data.copy_(data.cpu().normal_(mean=0.0, std=0.02).to(data.device))
if isinstance(module, nn.Linear):
normal_(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
normal_(module.weight.data)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, MultiheadAttention):
normal_(module.q_proj.weight.data)
normal_(module.k_proj.weight.data)
normal_(module.v_proj.weight.data)
class NanoXGraphEncoder(nn.Module):
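# graph transformer encoder: node-feature embedding plus per-head attention bias,
# followed by a stack of NanoXGraphEncoderLayer blocks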
def __init__(self,
num_atoms,
num_in_degree,
num_out_degree,
num_edges, num_spatial,
num_edge_distance,
edge_type,
multi_hop_max_dist,
num_encoder_layers=12,
embedding_dim=768,
ffn_embedding_dim=768,
num_attention_heads=32,
dropout=0.1,
activation_fn=nn.ReLU(),
embed_scale=None,
freeze_embeddings=False,
n_trans_layers_to_freeze=0,
export=False,
traceable=False,
q_noise=0.0,
qn_block_size=8):
super().__init__()
self.dropout_module = nn.Dropout(dropout)
self.embedding_dim = embedding_dim
self.traceable = traceable
self.graph_node_feature = NanoNodeFeature(num_heads=num_attention_heads,
num_atoms=num_atoms,
num_in_degree=num_in_degree,
num_out_degree=num_out_degree,
hidden_dim=embedding_dim,
n_layers=num_encoder_layers)
self.graph_attn_bias = NanoBias(num_heads=num_attention_heads,
num_atoms=num_atoms,
num_edges=num_edges,
num_spatial=num_spatial,
num_edge_distance=num_edge_distance,
edge_type=edge_type,
multi_hop_max_dist=multi_hop_max_dist,
hidden_dim=embedding_dim,
n_layers=num_encoder_layers)
self.embed_scale = embed_scale
if q_noise > 0:
self.quant_noise = nn.Linear(self.embedding_dim, self.embedding_dim, bias=False)
else:
self.quant_noise = None
self.emb_layer_norm = nn.LayerNorm(self.embedding_dim, elementwise_affine=export)
self.layers = nn.ModuleList([NanoXGraphEncoderLayer(embedding_dim=embedding_dim,
ffn_embedding_dim=ffn_embedding_dim,
num_heads=num_attention_heads,
dropout=dropout,
activation_fn=activation_fn) for _ in range(num_encoder_layers)])
if freeze_embeddings:
for layer in range(n_trans_layers_to_freeze):
for param in self.layers[layer].parameters():
param.requires_grad = False
def forward(self, batched_data,
perturb=None,
last_state_only=False,
token_embedding=None,
mask=None):
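# build a padding mask that includes the graph token, embed the nodes, compute the attention
# bias, run the encoder layers, and return all inner states plus the graph-token representation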
data_x = batched_data["x"]
n_graph, n_node = data_x.size()[:2]
padding_mask = (data_x[:, :, 0]).eq(0)
padding_mask_cls = torch.zeros(n_graph, 1, device=padding_mask.device,
dtype=padding_mask.dtype)
padding_mask = torch.cat((padding_mask_cls, padding_mask), dim=1)
if token_embedding is not None:
x = token_embedding
else:
x = self.graph_node_feature(batched_data)
if perturb is not None:
x[:, 1:, :] += perturb
attn_bias = self.graph_attn_bias(batched_data)
if self.embed_scale is not None:
x = x * self.embed_scale
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.emb_layer_norm is not None:
x = self.emb_layer_norm(x)
x = self.dropout_module(x)
x = x.transpose(0, 1)
inner_states = [x] if not last_state_only else []
for layer in self.layers:
x, _ = layer(x, self_attn_padding_mask=padding_mask, self_attn_mask=mask,
self_attn_bias=attn_bias)
if not last_state_only:
inner_states.append(x)
graph_representation = x[0, :, :]
if self.traceable:
return torch.stack(inner_states), graph_representation
else:
return inner_states, graph_representation | NanoX-main | nanox/model/encoder.py |
import math
import torch
import torch.nn as nn
def init_params(module, n_layers):
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=0.02 / math.sqrt(n_layers))
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=0.02)
class NanoNodeFeature(nn.Module):
# compute the input feature for each node in the graph
def __init__(
self,
num_heads,
num_atoms,
num_in_degree,
num_out_degree,
hidden_dim,
n_layers
):
super(NanoNodeFeature, self).__init__()
self.num_heads = num_heads
self.num_atoms = num_atoms
#graph token
self.atom_encoder = nn.Embedding(num_atoms + 1, hidden_dim, padding_idx=0)
self.in_degree_encoder = nn.Embedding(num_in_degree, hidden_dim, padding_idx=0)
self.out_degree_encoder = nn.Embedding(
num_out_degree, hidden_dim, padding_idx=0
)
self.graph_token = nn.Embedding(1, hidden_dim)
self.apply(lambda module: init_params(module, n_layers=n_layers))
def forward(self, batched_data):
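# sum the embeddings of each node's atom features, add in-/out-degree embeddings,
# and prepend a learnable graph token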
x, in_degree, out_degree = (
batched_data["x"],
batched_data["in_degree"],
batched_data["out_degree"],
)
n_graph, n_node = x.size()[:2]
#node feature + graph token
node_feature = self.atom_encoder(x).sum(dim=-2) # [n_graph, n_node, n_hidden]
node_feature = (
node_feature
+ self.in_degree_encoder(in_degree)
+ self.out_degree_encoder(out_degree)
)
graph_token_feature = self.graph_token.weight.unsqueeze(0).repeat(n_graph, 1, 1)
graph_node_feature = torch.cat([graph_token_feature, node_feature], dim=1)
return graph_node_feature
class NanoBias(nn.Module):
#compute attn bias for each head
def __init__(
self,
num_heads,
num_atoms,
num_edges,
num_spatial,
num_edge_distance,
hidden_dim,
edge_type,
multi_hop_max_dist,
n_layers,
):
super(NanoBias, self).__init__()
self.num_heads = num_heads
self.multi_hop_max_dist = multi_hop_max_dist
self.edge_encoder = nn.Embedding(num_edges + 1, num_heads, padding_idx=0)
self.edge_type = edge_type
if self.edge_type == "multihop":
self.edge_dis_encoder = nn.Embedding(
num_edge_distance * num_heads * num_heads, 1
)
self.spatial_encoder = nn.Embedding(num_spatial, num_heads, padding_idx=0)
self.graph_token_virtual_distance = nn.Embedding(1, num_heads)
self.apply(lambda module: init_params(module, n_layers=n_layers))
def forward(self, batched_data):
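# build the per-head attention bias from the spatial (shortest-path) encoding, a virtual
# distance between the graph token and nodes, and edge-feature encodings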
bias, spatial_position, x = (
batched_data["bias"],
batched_data["spatial_position"],
batched_data["x"],
)
edge_input, attn_edge_type = (
batched_data["edge_input"],
batched_data["attn_edge_type"],
)
n_graph, n_node = x.size()[:2]
graph_bias = bias.clone()
graph_bias = graph_bias.unsqueeze(1).repeat(
1, self.num_heads, 1, 1
) # [n_graph, n_head, n_node+1, n_node+1]
#spatial position
#[n_graph, n_node, n_node, n_head] -> [n_graph, n_head, n_node, n_node]
spatial_position_bias = self.spatial_encoder(spatial_position).permute(0, 3, 1, 2)
graph_bias[:, :, 1:, 1:] = graph_bias[:, :, 1:, 1:] + spatial_position_bias
#reset spatial position here
reshaped = self.graph_token_virtual_distance.weight.view(1, self.num_heads, 1)
graph_bias[:, :, 1:, 0] = graph_bias[:, :, 1:, 0] + reshaped
graph_bias[:, :, 0, :] = graph_bias[:, :, 0, :] + reshaped
#edge feature
if self.edge_type == "multi_hop":
spatial_position_ = spatial_position.clone()
spatial_position_[spatial_position_ == 0] = 1 #set pad to 1
spatial_position_ = torch.where(spatial_position_ > 1, spatial_position_ - 1, spatial_position_)
if self.multi_hop_max_dist > 0:
spatial_position_ = spatial_position_.clamp(0, self.multi_hop_max_dist)
edge_input = edge_input[:, :, :, : self.multi_hop_max_dist, :]
#[n_graph, n_node, n_node, max_dist, n_head]
edge_input = self.edge_encoder(edge_input).mean(-2)
max_dist = edge_input.size(-2)
edge_input_flat = edge_input.permute(3, 0, 1, 2, 4).reshape(
max_dist, -1, self.num_heads
)
edge_input_flat = torch.bmm(
edge_input_flat,
self.edge_dis_encoder.weight.reshape(
-1, self.num_heads, self.num_heads
)[:max_dist, :, :],
)
edge_input = edge_input_flat.reshape(
max_dist, n_graph, n_node, n_node, self.num_heads
).permute(1, 2, 3, 0, 4)
edge_input = (
edge_input.sum(-2) / (spatial_position_.float().unsqueeze(-1))
).permute(0, 3, 1, 2)
else:
#[n_graph, n_node, n_node, n_head] -> [n_graph, n_head, n_node, n_node, ]
edge_input = self.edge_encoder(attn_edge_type).mean(-2).permute(0, 3, 1, 2)
graph_bias[:, :, 1:, 1:] = graph_bias[:, :, 1:, 1:] + edge_input
graph_bias = graph_bias + bias.unsqueeze(1) # reset
return graph_bias
| NanoX-main | nanox/model/layers.py |
import torch
from saycan.model import SayCan
model = SayCan().cuda()
x = torch.randint(0, 256, (1, 1024)).cuda()
model(x) # (1, 1024, 20000) | SayCan-main | example.py |