python_code
stringlengths 0
992k
| repo_name
stringlengths 8
46
| file_path
stringlengths 5
162
|
---|---|---|
import json
import os
from collections import defaultdict
import numpy as np
if __name__ == '__main__':
base_dir = "vqa/reviews/coco2014_val80"
review_files = [x for x in os.listdir(base_dir) if x.endswith('.jsonl') and x.startswith('gpt4_text')]
for review_file in sorted(review_files):
config = review_file.replace('gpt4_text_', '').replace('.jsonl', '')
scores = defaultdict(list)
print(f'GPT-4 vs. {config}')
with open(os.path.join(base_dir, review_file)) as f:
for review_str in f:
review = json.loads(review_str)
scores[review['category']].append(review['tuple'])
scores['all'].append(review['tuple'])
for k, v in scores.items():
stats = np.asarray(v).mean(0).tolist()
stats = [round(x, 3) for x in stats]
print(k, stats, round(stats[1]/stats[0]*100, 1))
print('=================================')
| EXA-1-master | exa/models/LLaVA-main/llava/eval/summarize_gpt_review.py |
import argparse
import json
import os
import openai
import tqdm
import ray
import time
@ray.remote(num_cpus=4)
def get_eval(content: str, max_tokens: int):
while True:
try:
response = openai.ChatCompletion.create(
model='gpt-4',
messages=[{
'role': 'system',
'content': 'You are a helpful and precise assistant for checking the quality of the answer.'
}, {
'role': 'user',
'content': content,
}],
temperature=0.2, # TODO: figure out which temperature is best for evaluation
max_tokens=max_tokens,
)
break
except openai.error.RateLimitError:
pass
except Exception as e:
print(e)
time.sleep(1)
print('success!')
return response['choices'][0]['message']['content']
def parse_score(review):
try:
score_pair = review.split('\n')[0]
score_pair = score_pair.replace(',', ' ')
sp = score_pair.split(' ')
if len(sp) == 2:
return [float(sp[0]), float(sp[1])]
else:
print('error', review)
return [-1, -1]
except Exception as e:
print(e)
print('error', review)
return [-1, -1]
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='ChatGPT-based QA evaluation.')
parser.add_argument('-q', '--question')
# parser.add_argument('-a', '--answer')
parser.add_argument('-a', '--answer-list', nargs='+', default=[])
parser.add_argument('-r', '--rule')
parser.add_argument('-o', '--output')
parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output')
args = parser.parse_args()
ray.init()
f_q = open(os.path.expanduser(args.question))
f_ans1 = open(os.path.expanduser(args.answer_list[0]))
f_ans2 = open(os.path.expanduser(args.answer_list[1]))
rule_dict = json.load(open(os.path.expanduser(args.rule), 'r'))
review_file = open(f'{args.output}', 'w')
js_list = []
handles = []
idx = 0
for ques_js, ans1_js, ans2_js in zip(f_q, f_ans1, f_ans2):
# if idx == 1:
# break
ques = json.loads(ques_js)
ans1 = json.loads(ans1_js)
ans2 = json.loads(ans2_js)
category = json.loads(ques_js)['category']
if category in rule_dict:
rule = rule_dict[category]
else:
rule = rule_dict['default']
prompt = rule['prompt']
role = rule['role']
content = (f'[Question]\n{ques["text"]}\n\n'
f'[{role} 1]\n{ans1["text"]}\n\n[End of {role} 1]\n\n'
f'[{role} 2]\n{ans2["text"]}\n\n[End of {role} 2]\n\n'
f'[System]\n{prompt}\n\n')
js_list.append({
'id': idx+1,
'question_id': ques['question_id'],
'answer1_id': ans1['answer_id'],
'answer2_id': ans2['answer_id'],
'category': category})
idx += 1
handles.append(get_eval.remote(content, args.max_tokens))
# To avoid the rate limit set by OpenAI
time.sleep(1)
reviews = ray.get(handles)
for idx, review in enumerate(reviews):
scores = parse_score(review)
js_list[idx]['content'] = review
js_list[idx]['tuple'] = scores
review_file.write(json.dumps(js_list[idx]) + '\n')
review_file.close()
| EXA-1-master | exa/models/LLaVA-main/llava/eval/eval_gpt_review.py |
import argparse
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoConfig
import torch
import os
import json
from tqdm import tqdm
import shortuuid
from llava.conversation import conv_templates
from llava.utils import disable_torch_init
from transformers import CLIPVisionModel, CLIPImageProcessor, StoppingCriteria
from PIL import Image
import random
import math
def split_list(lst, n):
"""Split a list into n (roughly) equal-sized chunks"""
chunk_size = math.ceil(len(lst) / n) # integer division
return [lst[i:i+chunk_size] for i in range(0, len(lst), chunk_size)]
def get_chunk(lst, n, k):
chunks = split_list(lst, n)
return chunks[k]
DEFAULT_IMAGE_TOKEN = "", "<audio>", "</audio>"],
extra_ids=0,
model_max_length=1984
)
self.audio_idx, self.audio_end_idx = self.tokenizer.convert_tokens_to_ids(["<audio>", "</audio>"])
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids([""])
self.audio_tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
def tokenize_texts(self, texts):
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
# Add image and audio tokens to text as "<s>  <audio> </audio> text </s>"
media_tokens = torch.tensor([[self.im_idx, self.im_end_idx, self.audio_idx, self.audio_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], media_tokens, texts[:, 1:]], dim=1), texts
def tokenize_images(self, images):
return self.processor(images=images, return_tensors="pt").pixel_values
def tokenize_audio(self, audios):
return self.audio_tokenizer(audios, return_tensors="pt", padding=True, truncation=True).input_ids
def tokenize(self, target_texts):
text_tokens_list, only_text_tokens_list = [], []
max_length = 0
for target_text in target_texts:
text_tokens, only_text_tokens = self.tokenize_texts(target_text)
text_tokens_list.append(text_tokens)
only_text_tokens_list.append(only_text_tokens)
max_length = max(max_length, text_tokens.shape[1])
padded_text_tokens_list = []
padded_only_text_tokens_list = []
for text_tokens, only_text_tokens in zip(text_tokens_list, only_text_tokens_list):
padded_text_tokens = torch.cat([text_tokens, torch.full((1, max_length - text_tokens.shape[1]), self.tokenizer.pad_token_id, dtype=torch.long)], dim=1)
padded_only_text_tokens = torch.cat([only_text_tokens, torch.full((1, max_length - only_text_tokens.shape[1]), self.tokenizer.pad_token_id, dtype=torch.long)], dim=1)
padded_text_tokens_list.append(padded_text_tokens)
padded_only_text_tokens_list.append(padded_only_text_tokens)
attention_mask = torch.stack(padded_text_tokens_list) != self.tokenizer.pad_token_id
return {
"text_tokens": torch.stack(padded_text_tokens_list),
"labels": torch.stack(padded_only_text_tokens_list),
"attention_mask": attention_mask,
}
class Kosmos(Module):
def __init__(self, modalities=["text", "image", "audio"]):
super().__init__()
# Instantiate Clip Vit-l/14
self.modalities = modalities
self.clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K").vision_model
self.audio_model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
self.embed = bitsandbytes.nn.modules.Embedding(
32002,
2048,
padding_idx=1
)
self.embed_positions= PositionalEmbedding(
2048,
2048,
1
)
self.output_projection = torch.nn.Linear(
2048, 32002, bias=False
)
torch.nn.init.normal_(
self.output_projection.weight, mean=0, std=2048**-0.5
)
# Config following KOSMOS-1 paper (https://arxiv.org/pdf/2302.14045.pdf)
self.config = DecoderConfig(
decoder_layers=24,
decoder_embed_dim=2048,
decoder_ffn_embed_dim=8192,
decoder_attention_heads=32,
dropout=0.1,
activation_fn="gelu",
attention_dropout=0.1,
vocab_size=64007,
subln=True,
xpos_rel_pos=True,
max_rel_pos=2048
)
self.decoder = Decoder(
self.config,
embed_tokens=self.embed,
embed_positions=self.embed_positions,
output_projection=self.output_projection
)
self.perceive = PerceiverResampler(
dim = 1024,
depth = 2,
dim_head = 64,
heads = 8,
num_latents = 64,
num_media_embeds = 257
)
self.image_proj = torch.nn.Linear(1024, 2048, bias=False)
torch.nn.init.normal_(
self.image_proj.weight, mean=0, std=2048**-0.5
)
#add audio
self.audio_model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
self.audio_proj = torch.nn.Linear(768, 2048, bias=False)
torch.nn.init.normal_(
self.audio_proj.weight, mean=0, std=2048 ** -0.5
)
def forward(self, text_tokens, images=None, audios=None, **kwargs):
if "image" in self.modalities and images is not None:
images = self.clip_model(pixel_values=images)["last_hidden_state"]
images = self.perceive(images).squeeze(1)
images = self.image_proj(images)
if "audio" in self.modalities and audios is not None:
audios = self.audio_model(input_ids=audios).last_hidden_state
audios = audios.mean(dim=1)
audios = self.audio_proj(audios)
model_input = self.decoder.forward_embedding(text_tokens)[1]
if "image" in self.modalities and images is not None and "audio" in self.modalities and audios is not None:
model_input = torch.cat([model_input[:, 0:3], images, audios, model_input[:, 3:]], dim=1)
elif "image" in self.modalities and images is not None:
model_input = torch.cat([model_input[:, 0:3], images, model_input[:, 3:]], dim=1)
elif "audio" in self.modalities and audios is not None:
model_input = torch.cat([model_input[:, 0:3], audios, model_input[:, 3:]], dim=1)
model_input = self.decoder.forward_embedding(model_input, token_embedding=model_input)[0]
return self.decoder(model_input, passed_x=model_input)[0]
import time
import torch
from accelerate.utils import set_seed
from datasets import load_dataset
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader
from transformers import get_scheduler, default_data_collator, get_linear_schedule_with_warmup
from torch.optim import AdamW
# from kosmos import Kosmos, KosmosTokenizer
from accelerate import Accelerator
from rich.progress import Progress
from datasets import Image
from bitsandbytes.optim import AdamW8bit
from lion_pytorch import Lion
from torch.nn.parallel import DataParallel, DistributedDataParallel
import torch.distributed as dist
AWS_ACCESS_KEY_ID= 'AKIA5K4H36GT5EVDX2MA'
AWS_SECRET_ACCESS_KEY= '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'
WANDB_API_KEY= '0fc08bb0e90314a2bb602afa0b2e6cf56abc3f49'
#logging
import boto3
#training
import wandb
from torch.utils.tensorboard import SummaryWriter
def save_model_to_s3(model, bucket_name, key_prefix, step):
s3 = boto3.client('s3', aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
model_path = f"checkpoint_at_step_{step}.pt"
torch.save(model.state_dict(), model_path)
s3.upload_file(model_path, bucket_name, f"{key_prefix}/{model_path}")
def count_number_of_parameters(model, only_trainable: bool = True) -> int:
if only_trainable:
num_params: int = sum(p.numel()
for p in model.parameters() if p.requires_grad)
else:
num_params: int = sum(p.numel() for p in model.parameters() if p)
return int(num_params)
# def load_alpaca_cot_dataset(data_dir: str) -> DatasetDict:
# data_dir = Path(data_dir)
# dataset = {"train": [], "validation": []}
# for split in dataset.keys():
# for file in (data_dir / split).glob("*json"):
# with open(file, "r") as f:
# data = json.load(f)
# dataset[split].extend(data)
# return DatasetDict({split: Dataset.from_dict({"data": data}) for split, data in dataset.items()})
def prep_sample(sample):
instruction = sample["instruction"]
input_text = sample["input"]
output_text = sample["output"]
text = f"Instruction: {instruction} Input: {input_text} Output: {output_text}"
return {
"target_text": text
}
def train(args):
if args.use_ddp:
dist.init_process_group(backend="nccl")
accelerator = Accelerator(
mixed_precision="fp16"
)
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
#v1
model = Kosmos()
# if args.use_ddp:
# model = DistributedDataParallel(model)
# else:
# model = DataParallel(model)
model = model.to(accelerator.device)
#device count
if torch.cuda.device_count() > 1:
print(f"Let's use ${torch.cuda.device_count()} GPUS")
optimizer = Lion(model.parameters(), lr=args.learning_rate / 3, weight_decay=args.weight_decay * 3)
lr_scheduler = get_linear_schedule_with_warmup(
optimizer=optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=args.max_steps,
)
tokenizer = KosmosTokenizer(modalities=["text"])
# dataset = load_dataset("QingyiSi/Alpaca-CoT", split="train[:1%]")
# dataset = load_dataset("yahma/alpaca-cleaned", split="train[:1%]")
dataset = load_dataset("yahma/alpaca-cleaned", split="train")
# dataset = dataset.map(prep_sample, num_proc=8)
dataset = dataset.map(prep_sample, num_proc=8)
# dataset = dataset.map(lambda sample: tokenizer(sample["target_text"]), batched=True, batch_size=128, remove_columns=["instruction", "input", "output"])
dataset = dataset.map(lambda sample: (print(sample), tokenizer.tokenize(sample))[1], batched=True, batch_size=128, remove_columns=["instruction", "input", "output"], input_columns=["target_text"])
train_dataloader = DataLoader(
dataset, collate_fn=default_data_collator, batch_size=args.batch_size, pin_memory=True
)
#====================> load data #====================> load data #====================> load data #====================> load data
model, train_dataloader, optimizer, lr_scheduler = accelerator.prepare(model, train_dataloader, optimizer,
lr_scheduler)
model.train()
accelerator.register_for_checkpointing(lr_scheduler)
model.clip_model.requires_grad_(False)
model.clip_model.encoder.layers[-1].requires_grad_(True)
accelerator.print(
f"Number of parameters: {count_number_of_parameters(model):,}")
accelerator.print(
f"Number of trainable parameters: {count_number_of_parameters(model, only_trainable=True):,}")
# Log model and optimizer parameters to wandb
accelerator.init_trackers(project_name="kosmos")
#wandb
wandb.init(project="kosmos", config=args)
#init tensorboard writer
tb_writer = SummaryWriter()
train_loader = iter(train_dataloader)
epoch_loss = 0
total_loss = 0
start_time = time.time()
with Progress() as progress:
task = progress.add_task("[red]Training...", total=args.max_steps)
for step in range(0, args.max_steps):
batch_start = time.time()
batch = {key: value for key, value in next(train_loader).items() if key != "images"}
outputs = model(**batch, self_attn_padding_mask=batch["attention_mask"])
# Shift so that tokens < n predict n
outputs = torch.cat([outputs[:, :1], outputs[:, 67:]], dim=1).contiguous()
# shift_logits = outputs[..., :-1, :].contiguous()
# shift_labels = batch["labels"][..., 1:].contiguous()
# Flatten the tokens
loss_fct = CrossEntropyLoss()
one_hot_labels = torch.nn.functional.one_hot(batch["labels"][:, 1:], num_classes=32002).float()
loss = loss_fct(outputs[:,:-1], one_hot_labels)
epoch_loss += loss.detach().float()
accelerator.backward(loss)
optimizer.step()
optimizer.zero_grad()
batch_end = time.time()
logs = {
"loss": loss.item(),
"perplexity": torch.exp(loss).item(),
"lr": lr_scheduler.get_last_lr()[0],
"examples": args.batch_size * (step + 1),
"examples_per_second": args.batch_size / (batch_end - batch_start),
}
if step % args.log_every == args.log_every - 1:
#log metrics to wandb
wandb.log(logs, step=step)
#log metrics to tensorboard
# Log metrics to TensorBoard
tb_writer.add_scalar("loss", logs["loss"], step)
tb_writer.add_scalar("perplexity", logs["perplexity"], step)
tb_writer.add_scalar("lr", logs["lr"], step)
tb_writer.add_scalar("examples", logs["examples"], step)
tb_writer.add_scalar("examples_per_second", logs["examples_per_second"], step)
#accelerator
accelerator.log(logs, step=step)
progress.update(task, advance=1, description=f"Step Loss: {loss.item():.5f} "
f"| Mean Loss: {(total_loss + epoch_loss) / step:.5f} "
f"| Mean PPL: {torch.exp((total_loss + epoch_loss) / step):.2f} "
f"| Examples: {args.batch_size * (step + 1)} "
f"| Examples/s: {args.batch_size / (batch_end - batch_start):.2f} "
f"| Elapsed: {time.strftime('%H:%M:%S', time.gmtime(time.time() - start_time))}")
if step % args.save_every == args.save_every - 1:
train_epoch_loss = epoch_loss / args.save_every
total_loss += epoch_loss
epoch_loss = 0
accelerator.log({
"train_ppl": torch.exp(train_epoch_loss),
"train_epoch_loss": train_epoch_loss,
}, step=step)
progress.print(f"Saving checkpoint at step {step}...")
accelerator.save_state(
f"{args.checkpoint_dir}/checkpoint_at_step_{step}/")
#save the model weights to s3
save_model_to_s3(model, "kosmostraining", "kosmosv1/checkpoints", step)
print(f"Saved to s3: {save_model_to_s3} ")
#finish tensorboard writer
tb_writer.close()
#finish wnabd run
wandb.finish()
class Args:
def __init__(self):
self.checkpoint_dir = "checkpoints"
self.learning_rate = 1e-5
self.weight_decay = 0.01
self.warmup_steps = 0
self.max_steps = 100000
self.batch_size = 4
self.log_every = 1
self.save_every = 100
self.seed = None
self.use_ddp = False
args = Args()
train(args) | EXA-1-master | exa/models/KOSMOS_reimplementation-main/training/notebookExperiments/main.py |
import torch
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
from torchscale.component.embedding import PositionalEmbedding
from transformers import T5Tokenizer, CLIPProcessor, CLIPModel, PreTrainedTokenizerFast
from tokenizers import SentencePieceBPETokenizer
from flamingo_pytorch import PerceiverResampler
from PIL import Image
from torch.nn import Embedding, Module
import bitsandbytes
class KosmosTokenizer:
def __init__(self):
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
# T5 uses SentencePiece tokenizer
# self.tokenizer = T5Tokenizer.from_pretrained(
# "t5-large",
# additional_special_tokens=[""],
# extra_ids=0,
# model_max_length=1984
# )
tokenizer = SentencePieceBPETokenizer.from_file("l")
self.tokenizer = PreTrainedTokenizerFast(tokenizer_object=tokenizer)
self.tokenizer.ad_special_tokens([""])
self.tokenizer.model_max_length= 1984
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids([""])
def tokenize_texts(self, texts):
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
# Add image tokens to text as "<s>  text </s>"
image_tokens = torch.tensor([[self.im_idx, self.im_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], image_tokens, texts[:, 1:]], dim=1), texts
def tokenize_images(self, images):
return self.processor(images=images, return_tensors="pt").pixel_values
def tokenize(self, sample):
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
dummy_image_features = torch.ones((text_tokens.shape[0], 64))
attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
return {
"text_tokens": text_tokens,
"images": self.tokenize_images(sample["image"]),
"labels": only_text_tokens,
"attention_mask": attention_mask,
}
class Kosmos(Module):
def __init__(self):
super().__init__()
# Instantiate Clip Vit-l/14
self.clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K").vision_model
self.embed = bitsandbytes.nn.modules.Embedding(
32002,
2048,
padding_idx=1
)
self.embed_positions= PositionalEmbedding(
2048,
2048,
1
)
self.output_projection = torch.nn.Linear(
2048, 32002, bias=False
)
torch.nn.init.normal_(
self.output_projection.weight, mean=0, std=2048**-0.5
)
# Config following KOSMOS-1 paper (https://arxiv.org/pdf/2302.14045.pdf)
self.config = DecoderConfig(
decoder_layers=24,
decoder_embed_dim=2048,
decoder_ffn_embed_dim=8192,
decoder_attention_heads=32,
dropout=0.1,
activation_fn="gelu",
attention_dropout=0.1,
vocab_size=64007,
subln=True,
xpos_rel_pos=True,
max_rel_pos=2048
)
self.decoder = Decoder(
self.config,
embed_tokens=self.embed,
embed_positions=self.embed_positions,
output_projection=self.output_projection
)
self.perceive = PerceiverResampler(
dim = 1024,
depth = 2,
dim_head = 64,
heads = 8,
num_latents = 64,
num_media_embeds = 257
)
self.image_proj = torch.nn.Linear(1024, 2048, bias=False)
torch.nn.init.normal_(
self.image_proj.weight, mean=0, std=2048**-0.5
)
def forward(self, text_tokens, images, **kwargs):
images = self.clip_model(pixel_values=images)["last_hidden_state"]
images = self.perceive(images).squeeze(1)
images = self.image_proj(images)
model_input = self.decoder.forward_embedding(text_tokens)[1]
model_input = torch.cat([model_input[:, 0:2], images, model_input[:, 2:]], dim=1)
model_input = self.decoder.forward_embedding(model_input, token_embedding=model_input)[0]
return self.decoder(model_input, passed_x=model_input)[0] | EXA-1-master | exa/models/KOSMOS_reimplementation-main/model/kosmosSP.py |
import torch
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
from torchscale.component.embedding import PositionalEmbedding
from transformers import T5Tokenizer, CLIPProcessor, CLIPModel, PreTrainedTokenizerFast
from tokenizers import SentencePieceBPETokenizer
from flamingo_pytorch import PerceiverResampler
from PIL import Image
from torch.nn import Embedding, Module
import bitsandbytes
class KosmosTokenizer:
def __init__(self):
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
# T5 uses SentencePiece tokenizer
self.tokenizer = T5Tokenizer.from_pretrained(
"t5-large",
additional_special_tokens=[""],
extra_ids=0,
model_max_length=1984
)
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids([""])
def tokenize_texts(self, texts):
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
# Add image tokens to text as "<s>  text </s>"
image_tokens = torch.tensor([[self.im_idx, self.im_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], image_tokens, texts[:, 1:]], dim=1), texts
def tokenize_images(self, images):
return self.processor(images=images, return_tensors="pt").pixel_values
def tokenize(self, sample):
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
dummy_image_features = torch.ones((text_tokens.shape[0], 64))
attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
return {
"text_tokens": text_tokens,
"images": self.tokenize_images(sample["image"]),
"labels": only_text_tokens,
"attention_mask": attention_mask,
}
class Kosmos(Module):
def __init__(self):
super().__init__()
# Instantiate Clip Vit-l/14
self.clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K").vision_model
self.embed = bitsandbytes.nn.modules.Embedding(
32002,
2048,
padding_idx=1
)
self.embed_positions= PositionalEmbedding(
2048,
2048,
1
)
self.output_projection = torch.nn.Linear(
2048, 32002, bias=False
)
torch.nn.init.normal_(
self.output_projection.weight, mean=0, std=2048**-0.5
)
# Config following KOSMOS-1 paper (https://arxiv.org/pdf/2302.14045.pdf)
self.config = DecoderConfig(
decoder_layers=24,
decoder_embed_dim=2048,
decoder_ffn_embed_dim=8192,
decoder_attention_heads=32,
dropout=0.1,
activation_fn="gelu",
attention_dropout=0.1,
vocab_size=64007,
subln=True,
xpos_rel_pos=True,
max_rel_pos=2048
)
self.decoder = Decoder(
self.config,
embed_tokens=self.embed,
embed_positions=self.embed_positions,
output_projection=self.output_projection
)
self.perceive = PerceiverResampler(
dim = 1024,
depth = 2,
dim_head = 64,
heads = 8,
num_latents = 64,
num_media_embeds = 257
)
self.image_proj = torch.nn.Linear(1024, 2048, bias=False)
torch.nn.init.normal_(
self.image_proj.weight, mean=0, std=2048**-0.5
)
def forward(self, text_tokens, images, **kwargs):
images = self.clip_model(pixel_values=images)["last_hidden_state"]
images = self.perceive(images).squeeze(1)
images = self.image_proj(images)
model_input = self.decoder.forward_embedding(text_tokens)[1]
model_input = torch.cat([model_input[:, 0:2], images, model_input[:, 2:]], dim=1)
model_input = self.decoder.forward_embedding(model_input, token_embedding=model_input)[0]
return self.decoder(model_input, passed_x=model_input)[0] | EXA-1-master | exa/models/KOSMOS_reimplementation-main/model/kosmos.py |
import torch
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
from torchscale.component.embedding import PositionalEmbedding
from transformers import T5Tokenizer, CLIPProcessor, CLIPModel, PreTrainedTokenizerFast
from tokenizers import SentencePieceBPETokenizer
from transformers import Data2VecForCTC, Wav2Vec2Processor
from flamingo_pytorch import PerceiverResampler
from PIL import Image
from torch.nn import Embedding, Module
import bitsandbytes
#video
#preprecoess videos and tokenize them -> projection layer to transform the video features into the required embedding dimension
from torchvision import transforms
from torchvision.models.video import r3d_18
class KosmosTokenizer:
def __init__(self):
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
self.audio_tokenizer = Wav2Vec2Processor.from_pretrained("facebook/data2vec-audio-base-960h")
#video
self.tokenizer = T5Tokenizer.from_pretrained(
"t5-large",
additional_special_tokens=["", "<audio>", "</audio>", "<video>", "</video>"],
extra_ids=0,
model_max_length=1984
)
self.video_transform = transforms.Compose([
transforms.Resize((112, 112)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.43216, 0.394666, 0.37645], std=[0.22803, 0.22145, 0.216989])
])
self.vid_idx, self.vid_end_ix = self.tokenizer.convert_tokens_to_ids(["<video>", "</video>"])
self.audio_idx, self.audio_end_idx = self.tokenizer.convert_tokens_to_ids(["<audio>", "</audio>"])
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids([""])
def tokenize_texts(self, texts):
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
# Add image and audio tokens to text as "<s>  <audio> </audio> text </s>"
media_tokens = torch.tensor([[self.im_idx, self.im_end_idx, self.audio_idx, self.audio_end_idx, self.vid_idx, self.vid_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], media_tokens, texts[:, 1:]], dim=1), texts
def tokenize_images(self, images):
return self.processor(images=images, return_tensors="pt").pixel_values
def tokenize_audio(self, audios):
return self.audio_tokenizer(audios, return_tensors="pt", padding=True, truncation=True).input_values
def tokenize_videos(self, videos):
processed_videos = []
for video in videos:
video_frames = [self.video_transform(frame) for frame in video]
processed_videos.append(torch.stack(video_frames))
return torch.stack(processed_videos)
def tokenize(self, sample):
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
dummy_image_features = torch.ones((text_tokens.shape[0], 64))
attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
return {
"text_tokens": text_tokens,
"images": self.tokenize_images(sample["image"]),
"labels": only_text_tokens,
"attention_mask": attention_mask,
"audios": self.tokenize_audio(sample["audio"]),
"videos": self.tokenize_videos(sample["video"])
}
class Kosmos(Module):
def __init__(self):
super().__init__()
# Instantiate Clip Vit-l/14
self.clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K").vision_model
#audio model
self.audio_model = Data2VecForCTC.from_pretrained("facebook/data2vec-audio-base-960h")
#video
self.video_model = r3d_18(pretrained=True)
self.video_model = torch.nn.Sequential(*list(self.video_model.children())[:-1])
self.embed = bitsandbytes.nn.modules.Embedding(
32002,
2048,
padding_idx=1
)
self.embed_positions= PositionalEmbedding(
2048,
2048,
1
)
self.output_projection = torch.nn.Linear(
2048, 32002, bias=False
)
torch.nn.init.normal_(
self.output_projection.weight, mean=0, std=2048**-0.5
)
# Config following KOSMOS-1 paper (https://arxiv.org/pdf/2302.14045.pdf)
self.config = DecoderConfig(
decoder_layers=24,
decoder_embed_dim=2048,
decoder_ffn_embed_dim=8192,
decoder_attention_heads=32,
dropout=0.1,
activation_fn="gelu",
attention_dropout=0.1,
vocab_size=64007,
subln=True,
xpos_rel_pos=True,
max_rel_pos=2048
)
self.decoder = Decoder(
self.config,
embed_tokens=self.embed,
embed_positions=self.embed_positions,
output_projection=self.output_projection
)
self.perceive = PerceiverResampler(
dim = 1024,
depth = 2,
dim_head = 64,
heads = 8,
num_latents = 64,
num_media_embeds = 257
)
self.image_proj = torch.nn.Linear(1024, 2048, bias=False)
torch.nn.init.normal_(
self.image_proj.weight, mean=0, std=2048**-0.5
)
self.audio_proj = torch.nn.Linear(768, 2048, bias=False)
torch.nn.init.normal_(
self.audio_proj.weight, mean=0, std=2048 ** -0.5
)
self.video_proj = torch.nn.Linear(512, 2048, bias=False)
torch.nn.init.normal_(
self.video_proj.weight, mean=0, std=2048 ** -0.5
)
def forward(self, text_tokens, images, audios, **kwargs):
images = self.clip_model(pixel_values=images)["last_hidden_state"]
images = self.perceive(images).squeeze(1)
images = self.image_proj(images)
# Process audio tokens
audios = self.audio_model(audios).logits
audios = audios.mean(dim=1)
audios = self.audio_proj(audios)
#process video tokens
videos = videos.transpose(1, 2).contigous()
videos = self.video_model(videos)
videos = videos.view(videos.size(0), -1)
videos = self.video_proj(videos)
model_input = self.decoder.forward_embedding(text_tokens)[1]
model_input = torch.cat([model_input[:, 0:6], images, audios, videos, model_input[:, 6:]], dim=1)
model_input = self.decoder.forward_embedding(model_input, token_embedding=model_input)[0]
return self.decoder(model_input, passed_x=model_input)[0] | EXA-1-master | exa/models/KOSMOS_reimplementation-main/model/video/kosmos_video.py |
import torch
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
from torchscale.component.embedding import PositionalEmbedding
from transformers import T5Tokenizer, CLIPProcessor, CLIPModel, PreTrainedTokenizerFast
from tokenizers import SentencePieceBPETokenizer
from transformers import Data2VecForCTC, Wav2Vec2Processor
from flamingo_pytorch import PerceiverResampler
from PIL import Image
from torch.nn import Embedding, Module
import bitsandbytes
#video
#preprecoess videos and tokenize them -> projection layer to transform the video features into the required embedding dimension
import torchvision
class KosmosTokenizer:
def __init__(self, modalities=["text", "image", "audio", "video"]):
self.modalities = modalities
if "text" in modalities:
self.tokenizer = T5Tokenizer.from_pretrained(
"t5-large",
additional_special_tokens=["", "<audio>", "</audio>", "<video>", "</video>"],
extra_ids=0,
model_max_length=1984
)
self.audio_idx, self.audio_end_idx = self.tokenizer.convert_tokens_to_ids(["<audio>", "</audio>"])
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids([""])
self.vid_idx, self.vid_end_idx = self.tokenizer.convert_tokens_to_ids(["<video>", "</video>"])
if "image" in modalities:
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
if "audio" in modalities:
self.audio_tokenizer = Wav2Vec2Processor.from_pretrained("facebook/data2vec-audio-base-960h")
def tokenize_texts(self, texts):
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
# Add image and audio tokens to text as "<s>  <audio> </audio> text </s>"
media_tokens = torch.tensor([[self.im_idx, self.im_end_idx, self.audio_idx, self.audio_end_idx, self.vid_idx, self.vid_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], media_tokens, texts[:, 1:]], dim=1), texts
def tokenize_images(self, images):
return self.processor(images=images, return_tensors="pt").pixel_values
def tokenize_audio(self, audios):
return self.audio_tokenizer(audios, return_tensors="pt", padding=True, truncation=True).input_values
def tokenize_videos(self, videos):
processed_videos = []
for video in videos:
video_frames = [self.video_transform(frame) for frame in video]
processed_videos.append(torch.stack(video_frames))
return torch.stack(processed_videos)
def tokenize(self, sample):
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
tokenized_data = {
"text_tokens": text_tokens,
"labels": only_text_tokens,
"attention_mask": attention_mask
}
if "image" in self.modalities and "image" in sample:
tokenized_data["images"] = self.tokenize_images(sample["image"])
if "audio" in self.modalities and "audio" in sample:
tokenized_data["audios"] = self.tokenize_audio(sample["audio"])
if "video" in self.modalities and "video" in sample:
tokenized_data["videos"] = self.tokenize_videos(sample["video"])
return tokenized_data
class Kosmos(Module):
def __init__(self, modalities=["text", "image", "audio", "video"]):
super().__init__()
self.modalities = modalities
if "image" in modalities:
self.clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K").vision_model
self.perceive = PerceiverResampler(
dim=1024,
depth=2,
dim_head=64,
heads=8,
num_latents=64,
num_media_embeds=257
)
self.image_proj = torch.nn.Linear(1024, 2048, bias=False)
torch.nn.init.normal_(
self.image_proj.weight, mean=0, std=2048**-0.5
)
if "audio" in modalities:
self.audio_model = Data2VecForCTC.from_pretrained("facebook/data2vec-audio-base-960h")
self.audio_proj = torch.nn.Linear(768, 2048, bias=False)
torch.nn.init.normal_(
self.audio_proj.weight, mean=0, std=2048**-0.5
)
if "video" in modalities:
# Load video model and preprocessor here
self.video_model = torchvision.models.video.r3d_18(pretrained=True)
self.video_proj = torch.nn.Linear(512, 2048, bias=False)
torch.nn.init.normal_(
self.video_proj.weight, mean=0, std=2048**-0.5
)
self.embed = bitsandbytes.nn.modules.Embedding(
32002,
2048,
padding_idx=1
)
self.embed_positions= PositionalEmbedding(
2048,
2048,
1
)
self.output_projection = torch.nn.Linear(
2048, 32002, bias=False
)
torch.nn.init.normal_(
self.output_projection.weight, mean=0, std=2048**-0.5
)
# Config following KOSMOS-1 paper (https://arxiv.org/pdf/2302.14045.pdf)
self.config = DecoderConfig(
decoder_layers=24,
decoder_embed_dim=2048,
decoder_ffn_embed_dim=8192,
decoder_attention_heads=32,
dropout=0.1,
activation_fn="gelu",
attention_dropout=0.1,
vocab_size=64007,
subln=True,
xpos_rel_pos=True,
max_rel_pos=2048
)
self.decoder = Decoder(
self.config,
embed_tokens=self.embed,
embed_positions=self.embed_positions,
output_projection=self.output_projection
)
self.perceive = PerceiverResampler(
dim = 1024,
depth = 2,
dim_head = 64,
heads = 8,
num_latents = 64,
num_media_embeds = 257
)
self.image_proj = torch.nn.Linear(1024, 2048, bias=False)
torch.nn.init.normal_(
self.image_proj.weight, mean=0, std=2048**-0.5
)
self.audio_proj = torch.nn.Linear(768, 2048, bias=False)
torch.nn.init.normal_(
self.audio_proj.weight, mean=0, std=2048 ** -0.5
)
self.video_proj = torch.nn.Linear(512, 2048, bias=False)
torch.nn.init.normal_(
self.video_proj.weight, mean=0, std=2048 ** -0.5
)
def forward(self, text_tokens, **kwargs):
model_input = self.decoder.forward_embedding(text_tokens)[1]
processed_modalities = [model_input[:, 0:6]]
if "images" in kwargs:
images = self.clip_model(pixel_values=kwargs["images"])["last_hidden_state"]
images = self.perceive(images).squeeze(1)
images = self.image_proj(images)
processed_modalities.append(images)
if "audios" in kwargs:
audios = self.audio_model(kwargs["audios"]).logits
audios = audios.mean(dim=1)
audios = self.audio_proj(audios)
processed_modalities.append(audios)
if "video" in self.modalities and "videos" in kwargs:
videos = kwargs["videos"].transpose(1, 2).contiguous()
videos = self.video_model(videos)
videos = videos.view(videos.size(0), -1)
videos = self.video_proj(videos)
processed_modalities.append(videos)
processed_modalities.append(model_input[:, 6:])
model_input = torch.cat(processed_modalities, dim=1)
model_input = self.decoder.forward_embedding(model_input, token_embedding=model_input)[0]
return self.decoder(model_input, passed_x=model_input)[0]
"""
You can initialize the KosmosTokenizer and Kosmos classes with any combination of modalities, such as:
tokenizer = KosmosTokenizer(modalities=["text", "image"])
model = Kosmos(modalities=["text", "image"])
Copy code
or
tokenizer = KosmosTokenizer(modalities=["text", "image", "audio", "video"])
model = Kosmos(modalities=["text", "image", "audio", "video"])
Copy code
The classes will handle the specified modalities during tokenization and processing.
""" | EXA-1-master | exa/models/KOSMOS_reimplementation-main/model/video/kosmos_conditional.py |
import torch
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
from torchscale.component.embedding import PositionalEmbedding
from transformers import T5Tokenizer, CLIPProcessor, CLIPModel, PreTrainedTokenizerFast
from tokenizers import SentencePieceBPETokenizer
from transformers import Wav2Vec2Tokenizer
from transformers import Wav2Vec2Model
from flamingo_pytorch import PerceiverResampler
from PIL import Image
from torch.nn import Embedding, Module
import bitsandbytes
class KosmosTokenizer:
def __init__(self):
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
# T5 uses SentencePiece tokenizer
self.tokenizer = T5Tokenizer.from_pretrained(
"t5-large",
additional_special_tokens=["", "<audio>", "</audio>"],
extra_ids=0,
model_max_length=1984
)
self.audio_idx, self.audio_end_idx = self.tokenizer.convert_tokens_to_ids(["<audio>", "</audio>"])
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids([""])
self.audio_tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
def tokenize_texts(self, texts):
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
# Add image and audio tokens to text as "<s>  <audio> </audio> text </s>"
media_tokens = torch.tensor([[self.im_idx, self.im_end_idx, self.audio_idx, self.audio_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], media_tokens, texts[:, 1:]], dim=1), texts
def tokenize_images(self, images):
return self.processor(images=images, return_tensors="pt").pixel_values
def tokenize_audio(self, audios):
return self.audio_tokenizer(audios, return_tensors="pt", padding=True, truncation=True).input_ids
def tokenize(self, sample):
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
dummy_image_features = torch.ones((text_tokens.shape[0], 64))
attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
return {
"text_tokens": text_tokens,
"images": self.tokenize_images(sample["image"]),
"labels": only_text_tokens,
"attention_mask": attention_mask,
"audios": self.tokenize_audio(sample["audio"]),
}
class Kosmos(Module):
def __init__(self):
super().__init__()
# Instantiate Clip Vit-l/14
self.clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K").vision_model
self.audio_model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
self.embed = bitsandbytes.nn.modules.Embedding(
32002,
2048,
padding_idx=1
)
self.embed_positions= PositionalEmbedding(
2048,
2048,
1
)
self.output_projection = torch.nn.Linear(
2048, 32002, bias=False
)
torch.nn.init.normal_(
self.output_projection.weight, mean=0, std=2048**-0.5
)
# Config following KOSMOS-1 paper (https://arxiv.org/pdf/2302.14045.pdf)
self.config = DecoderConfig(
decoder_layers=24,
decoder_embed_dim=2048,
decoder_ffn_embed_dim=8192,
decoder_attention_heads=32,
dropout=0.1,
activation_fn="gelu",
attention_dropout=0.1,
vocab_size=64007,
subln=True,
xpos_rel_pos=True,
max_rel_pos=2048
)
self.decoder = Decoder(
self.config,
embed_tokens=self.embed,
embed_positions=self.embed_positions,
output_projection=self.output_projection
)
self.perceive = PerceiverResampler(
dim = 1024,
depth = 2,
dim_head = 64,
heads = 8,
num_latents = 64,
num_media_embeds = 257
)
self.image_proj = torch.nn.Linear(1024, 2048, bias=False)
torch.nn.init.normal_(
self.image_proj.weight, mean=0, std=2048**-0.5
)
#add audio
self.audio_model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
self.audio_proj = torch.nn.Linear(768, 2048, bias=False)
torch.nn.init.normal_(
self.audio_proj.weight, mean=0, std=2048 ** -0.5
)
def forward(self, text_tokens, images, audios, **kwargs):
images = self.clip_model(pixel_values=images)["last_hidden_state"]
images = self.perceive(images).squeeze(1)
images = self.image_proj(images)
#process audio tokens
audios = self.audio_model(input_ids=audios).last_hidden_state
audios = audios.mean(dim=1)
audios = self.audio_proj(audios)
model_input = self.decoder.forward_embedding(text_tokens)[1]
model_input = torch.cat([model_input[:, 0:3], images, audios, model_input[:, 3:]], dim=1)
model_input = self.decoder.forward_embedding(model_input, token_embedding=model_input)[0]
return self.decoder(model_input, passed_x=model_input)[0] | EXA-1-master | exa/models/KOSMOS_reimplementation-main/model/audio/kosmos_audio.py |
import torch
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
from torchscale.component.embedding import PositionalEmbedding
from transformers import T5Tokenizer, CLIPProcessor, CLIPModel, PreTrainedTokenizerFast
from tokenizers import SentencePieceBPETokenizer
from transformers import Data2VecForCTC, Wav2Vec2Processor
from flamingo_pytorch import PerceiverResampler
from PIL import Image
from torch.nn import Embedding, Module
import bitsandbytes
class KosmosTokenizer:
def __init__(self):
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
self.audio_tokenizer = Wav2Vec2Processor.from_pretrained("facebook/data2vec-audio-base-960h")
self.tokenizer = T5Tokenizer.from_pretrained(
"t5-large",
additional_special_tokens=["", "<audio>", "</audio>"],
extra_ids=0,
model_max_length=1984
)
self.audio_idx, self.audio_end_idx = self.tokenizer.convert_tokens_to_ids(["<audio>", "</audio>"])
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids([""])
def tokenize_texts(self, texts):
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
# Add image and audio tokens to text as "<s>  <audio> </audio> text </s>"
media_tokens = torch.tensor([[self.im_idx, self.im_end_idx, self.audio_idx, self.audio_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], media_tokens, texts[:, 1:]], dim=1), texts
def tokenize_images(self, images):
return self.processor(images=images, return_tensors="pt").pixel_values
def tokenize_audio(self, audios):
return self.audio_tokenizer(audios, return_tensors="pt", padding=True, truncation=True).input_values
def tokenize(self, sample):
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
dummy_image_features = torch.ones((text_tokens.shape[0], 64))
attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
return {
"text_tokens": text_tokens,
"images": self.tokenize_images(sample["image"]),
"labels": only_text_tokens,
"attention_mask": attention_mask,
"audios": self.tokenize_audio(sample["audio"]),
}
class Kosmos(Module):
def __init__(self):
super().__init__()
# Instantiate Clip Vit-l/14
self.clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K").vision_model
#audio model
self.audio_model = Data2VecForCTC.from_pretrained("facebook/data2vec-audio-base-960h")
self.embed = bitsandbytes.nn.modules.Embedding(
32002,
2048,
padding_idx=1
)
self.embed_positions= PositionalEmbedding(
2048,
2048,
1
)
self.output_projection = torch.nn.Linear(
2048, 32002, bias=False
)
torch.nn.init.normal_(
self.output_projection.weight, mean=0, std=2048**-0.5
)
# Config following KOSMOS-1 paper (https://arxiv.org/pdf/2302.14045.pdf)
self.config = DecoderConfig(
decoder_layers=24,
decoder_embed_dim=2048,
decoder_ffn_embed_dim=8192,
decoder_attention_heads=32,
dropout=0.1,
activation_fn="gelu",
attention_dropout=0.1,
vocab_size=64007,
subln=True,
xpos_rel_pos=True,
max_rel_pos=2048
)
self.decoder = Decoder(
self.config,
embed_tokens=self.embed,
embed_positions=self.embed_positions,
output_projection=self.output_projection
)
self.perceive = PerceiverResampler(
dim = 1024,
depth = 2,
dim_head = 64,
heads = 8,
num_latents = 64,
num_media_embeds = 257
)
self.image_proj = torch.nn.Linear(1024, 2048, bias=False)
torch.nn.init.normal_(
self.image_proj.weight, mean=0, std=2048**-0.5
)
self.audio_proj = torch.nn.Linear(768, 2048, bias=False)
torch.nn.init.normal_(
self.audio_proj.weight, mean=0, std=2048 ** -0.5
)
def forward(self, text_tokens, images, audios, **kwargs):
images = self.clip_model(pixel_values=images)["last_hidden_state"]
images = self.perceive(images).squeeze(1)
images = self.image_proj(images)
# Process audio tokens
audios = self.audio_model(audios).logits
audios = audios.mean(dim=1)
audios = self.audio_proj(audios)
model_input = self.decoder.forward_embedding(text_tokens)[1]
model_input = torch.cat([model_input[:, 0:3], images, audios, model_input[:, 3:]], dim=1)
model_input = self.decoder.forward_embedding(model_input, token_embedding=model_input)[0]
return self.decoder(model_input, passed_x=model_input)[0] | EXA-1-master | exa/models/KOSMOS_reimplementation-main/model/audio/kosmos_audio_data2vec.py |
import torch
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
from torchscale.component.embedding import PositionalEmbedding
from transformers import T5Tokenizer, CLIPProcessor, CLIPModel, PreTrainedTokenizerFast
from tokenizers import SentencePieceBPETokenizer
from transformers import Wav2Vec2Tokenizer
from transformers import Wav2Vec2Model
from flamingo_pytorch import PerceiverResampler
from torch.nn import Module
import bitsandbytes
class KosmosTokenizer:
def __init__(self, modalities=["text", "image", "audio"]):
self.modalities = modalities
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
# T5 uses SentencePiece tokenizer
self.tokenizer = T5Tokenizer.from_pretrained(
"t5-large",
additional_special_tokens=["", "<audio>", "</audio>"],
extra_ids=0,
model_max_length=1984
)
self.audio_idx, self.audio_end_idx = self.tokenizer.convert_tokens_to_ids(["<audio>", "</audio>"])
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids([""])
self.audio_tokenizer = Wav2Vec2Tokenizer.from_pretrained("facebook/wav2vec2-base-960h")
def tokenize_texts(self, texts):
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
# Add image and audio tokens to text as "<s>  <audio> </audio> text </s>"
media_tokens = torch.tensor([[self.im_idx, self.im_end_idx, self.audio_idx, self.audio_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], media_tokens, texts[:, 1:]], dim=1), texts
def tokenize_images(self, images):
return self.processor(images=images, return_tensors="pt").pixel_values
def tokenize_audio(self, audios):
return self.audio_tokenizer(audios, return_tensors="pt", padding=True, truncation=True).input_ids
def tokenize(self, sample):
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
if "image" in self.modalities:
images = self.tokenize_images(sample["image"])
else:
images = None
if "audio" in self.modalities:
audios = self.tokenize_audio(sample["audio"])
else:
audios = None
return {
"text_tokens": text_tokens,
"images": images,
"labels": only_text_tokens,
"attention_mask": attention_mask,
"audios": audios,
}
class Kosmos(Module):
def __init__(self, modalities=["text", "image", "audio"]):
super().__init__()
# Instantiate Clip Vit-l/14
self.modalities = modalities
self.clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K").vision_model
self.audio_model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
self.embed = bitsandbytes.nn.modules.Embedding(
32002,
2048,
padding_idx=1
)
self.embed_positions= PositionalEmbedding(
2048,
2048,
1
)
self.output_projection = torch.nn.Linear(
2048, 32002, bias=False
)
torch.nn.init.normal_(
self.output_projection.weight, mean=0, std=2048**-0.5
)
# Config following KOSMOS-1 paper (https://arxiv.org/pdf/2302.14045.pdf)
self.config = DecoderConfig(
decoder_layers=24,
decoder_embed_dim=2048,
decoder_ffn_embed_dim=8192,
decoder_attention_heads=32,
dropout=0.1,
activation_fn="gelu",
attention_dropout=0.1,
vocab_size=64007,
subln=True,
xpos_rel_pos=True,
max_rel_pos=2048
)
self.decoder = Decoder(
self.config,
embed_tokens=self.embed,
embed_positions=self.embed_positions,
output_projection=self.output_projection
)
self.perceive = PerceiverResampler(
dim = 1024,
depth = 2,
dim_head = 64,
heads = 8,
num_latents = 64,
num_media_embeds = 257
)
self.image_proj = torch.nn.Linear(1024, 2048, bias=False)
torch.nn.init.normal_(
self.image_proj.weight, mean=0, std=2048**-0.5
)
#add audio
self.audio_model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
self.audio_proj = torch.nn.Linear(768, 2048, bias=False)
torch.nn.init.normal_(
self.audio_proj.weight, mean=0, std=2048 ** -0.5
)
def forward(self, text_tokens, images, audios, **kwargs):
if "image" in self.modalities:
images = self.clip_model(pixel_values=images)["last_hidden_state"]
images = self.perceive(images).squeeze(1)
images = self.image_proj(images)
if "audio" in self.modalities:
audios = self.audio_model(input_ids=audios).last_hidden_state
audios = audios.mean(dim=1)
audios = self.audio_proj(audios)
model_input = self.decoder.forward_embedding(text_tokens)[1]
model_input = torch.cat([model_input[:, 0:3], images, audios, model_input[:, 3:]], dim=1)
model_input = self.decoder.forward_embedding(model_input, token_embedding=model_input)[0]
return self.decoder(model_input, passed_x=model_input)[0]
| EXA-1-master | exa/models/KOSMOS_reimplementation-main/model/audio/kosmos_conditional.py |
"""
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright © 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for software and other kinds of works.
The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too.
When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights.
Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions.
Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and modification follow.
TERMS AND CONDITIONS
0. Definitions.
“This License” refers to version 3 of the GNU General Public License.
“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks.
“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations.
To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work.
A “covered work” means either the unmodified Program or a work based on the Program.
To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well.
To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion.
1. Source Code.
The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work.
A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language.
The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it.
The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work.
The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.
The Corresponding Source for a work in source code form is that same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures.
When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified it, and giving a relevant date.
b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”.
c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so.
A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways:
a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b.
d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d.
A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work.
A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product.
“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made.
If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM).
The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying.
7. Additional Terms.
“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or authors of the material; or
e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors.
All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11).
However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice.
Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or run a copy of the Program. Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License.
An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it.
11. Patents.
A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. The work thus licensed is called the contributor's “contributor version”.
A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version.
In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party.
If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. “Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it.
A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation.
If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program.
Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
"""
import os
import requests
import torch
from torch.nn import Module
from torchvision import transforms
from torchvision.models.video import r3d_18
from transformers import (
AutoModel,
AutoTokenizer,
CLIPModel,
CLIPProcessor,
Wav2Vec2ForCTC,
T5Tokenizer,
Wav2Vec2Processor,
)
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
from torchscale.component.embedding import PositionalEmbedding
import bitsandbytes
from flamingo_pytorch import PerceiverResampler
from concurrent.futures import ThreadPoolExecutor
class BaseTokenizer:
def tokenize(self, data):
raise NotImplementedError('This method should be implemented in a subclass')
def process(self, data):
raise NotImplemented("This method should be implemented in a subclass")
def embed(self, data):
raise NotImplemented("This method should be implemented in a subclass")
class ModalityDetector:
def __init__(self, method, input_data, user_input=None):
self.method = method
self.input_data = input_data
self.user_input = user_input
def get_modality(self):
if self.method == "file_extension":
return self.detect_modality_from_file_extension()
elif self.method == "content_based":
return self.detect_modality_from_content()
elif self.method == "user_input":
return self.user_input
def detect_modality_from_file_extension(self):
_, file_extension = os.path.splitext(self.input_data)
file_extension = file_extension.lower()
if file_extension in ['.jpg', '.jpeg', '.png', '.bmp']:
return 'image'
elif file_extension in ['.wav', '.mp3', '.ogg']:
return 'audio'
elif file_extension in [".txt", '.md', '.json']:
return 'text'
elif file_extension in ['.mp4', '.avi', '.mkv', '.mov']:
return 'video'
elif file_extension in ['.csv']:
return 'csv'
elif file_extension in ['.pdf']:
return 'pdf'
#add more modalities
def detect_modality_from_content(self):
#model that detects modalities or algo
pass
class TokenizerFactory:
def create_tokenizer(self, modality):
# Fetch models from Hugging Face API
api_url = "https://huggingface.co/api/models"
response = requests.get(api_url)
if response.status_code != 200:
raise ValueError("Failed to fetch models from Hugging Face API")
models = response.json()
# Filter models based on modality and sort by likes
matching_models = sorted(
[model for model in models if modality in model["tags"]],
key=lambda x: x["likes"],
reverse=True
)
if not matching_models:
raise ValueError(f"No matching tokenizer found for modality '{modality}'")
# Select the most liked tokenizer and instantiate it
selected_model = matching_models[0]["modelId"]
tokenizer = AutoTokenizer.from_pretrained(selected_model)
return tokenizer
class KosmosEmbedder(torch.nn.Module):
def __init__(self):
super().__init__()
self.models = {}
self.tokenizers = {}
self.projections = {}
def load_model(self, modality):
if modality not in self.models:
tokenizer = AutoTokenizer.from_pretrained(modality)
model = AutoModel.from_pretrained(modality)
proj = torch.nn.Linear(model.config.hidden_size, 2048)
self.tokenizers[modality] = tokenizer
self.models[modality] = model
self.projections[modality] = proj
def embed(self, modality, data):
self.load_model(modality)
tokenizer = self.tokenizers[modality]
model = self.models[modality]
proj = self.projections[modality]
tokens = tokenizer(data, return_tensors="pt", padding=True, truncation=True)
output = model(**tokens)
embed = proj(output.last_hidden_state)
return embed
class ModalityProcessor:
def __init__(self, modality_detector):
self.modality_detecor = modality_detector
self.modalities = {}
self.tokenizer_factory = TokenizerFactory(self.modality_detector)
self.executor = ThreadPoolExecutor()
def process(self, modality, data):
modality = self.modality_detector.get_modality()
if modality in self.modalities:
tokenizer = self.modalities[modality]
else:
tokenizer = self.tokenizer_factory.create_tokenizer(modality)
self.modalities[modality] = tokenizer
tokens = tokenizer(data, return_tensors="pt", padding=True, truncation=True)
return tokens
def process_parallel(self, modality_data_list):
results = []
for modality_data in modality_data_list:
modality = modality_data["modality"]
data = modality_data["data"]
result = self.executor.submit(self.process, modality, data)
results.append(result)
return [result.result() for result in results]
class KosmosTokenizer:
def __init__(self):
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
self.audio_tokenizer = Wav2Vec2Processor.from_pretrained("facebook/data2vec-audio-base-960h")
self.tokenizer = T5Tokenizer.from_pretrained(
"t5-large",
additional_special_tokens=["", "<audio>", "</audio>", "<video>", "</video>", "<any>", "</any>"],
extra_ids=0,
model_max_length=1984
)
self.video_transform = transforms.Compose([
transforms.Resize((112, 112)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.43216, 0.394666, 0.37645], std=[0.22803, 0.22145, 0.216989])
])
self.vid_idx, self.vid_end_ix = self.tokenizer.convert_tokens_to_ids(["<video>", "</video>"])
self.audio_idx, self.audio_end_idx = self.tokenizer.convert_tokens_to_ids(["<audio>", "</audio>"])
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids([""])
self.any_idx, self.any_end_idx = self.tokenizer.convert_tokens_to_ids(["<any>", "</any>"])
def tokenize_texts(self, texts):
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
media_tokens = torch.tensor([[self.im_idx, self.im_end_idx, self.audio_idx, self.audio_end_idx, self.vid_idx, self.vid_end_idx, self.any_idx, self.any_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], media_tokens, texts[:, 1:]], dim=1), texts
def tokenize_images(self, images):
return self.processor(images=images, return_tensors="pt").pixel_values
def tokenize_audio(self, audios):
return self.audio_tokenizer(audios, return_tensors="pt", padding=True, truncation=True).input_values
def tokenize_videos(self, videos):
if not videos:
return None
processed_videos = []
for video in videos:
video_frames = [self.video_transform(frame) for frame in video]
processed_videos.append(torch.stack(video_frames))
return torch.stack(processed_videos)
def tokenize(self, sample):
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
dummy_image_features = torch.ones((text_tokens.shape[0], 64))
attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
return {
"text_tokens": text_tokens,
"images": self.tokenize_images(sample["image"]),
"labels": only_text_tokens,
"attention_mask": attention_mask,
"audios": self.tokenize_audio(sample["audio"]),
"videos": self.tokenize_videos(sample["video"])
}
class Kosmos(Module):
def __init__(self, modality, modality_detector):
super().__init__()
self.clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K").vision_model
self.audio_model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
self.video_model = r3d_18(pretrained=True)
self.video_model = torch.nn.Sequential(*list(self.video_model.children())[:-1])
self.modality_detector = modality_detector
self.tokenizer = KosmosTokenizer()
self.processor = ModalityProcessor(modality_detector)
self.embedder = KosmosEmbedder(modality)
self.embed = bitsandbytes.nn.modules.Embedding(
32002,
2048,
padding_idx=1
)
self.embed_positions= PositionalEmbedding(
2048,
2048,
1
)
self.output_projection = torch.nn.Linear(
2048, 32002, bias=False
)
torch.nn.init.normal_(
self.output_projection.weight, mean=0, std=2048**-0.5
)
self.config = DecoderConfig(
decoder_layers=24,
decoder_embed_dim=2048,
decoder_ffn_embed_dim=8192,
decoder_attention_heads=32,
dropout=0.1,
activation_fn="gelu",
attention_dropout=0.1,
vocab_size=64007,
subln=True,
xpos_rel_pos=True,
max_rel_pos=2048
)
self.decoder = Decoder(
self.config,
embed_tokens=self.embed,
embed_positions=self.embed_positions,
output_projection=self.output_projection
)
self.perceive = PerceiverResampler(
dim = 1024,
depth = 2,
dim_head = 64,
heads = 8,
num_latents = 64,
num_media_embeds = 257
)
self.image_proj = torch.nn.Linear(1024, 2048, bias=False)
torch.nn.init.normal_(
self.image_proj.weight, mean=0, std=2048**-0.5
)
self.audio_proj = torch.nn.Linear(768, 2048, bias=False)
torch.nn.init.normal_(
self.audio_proj.weight, mean=0, std=2048 ** -0.5
)
self.video_proj = torch.nn.Linear(512, 2048, bias=False)
torch.nn.init.normal_(
self.video_proj.weight, mean=0, std=2048 ** -0.5
)
def forward(self, text_tokens, images, audios, videos, any_modality, **kwargs):
images = self.clip_model(pixel_values=images)["last_hidden_state"]
images = self.perceive(images).squeeze(1)
images = self.image_proj(images)
audios = self.audio_model(audios).logits
audios = audios.mean(dim=1)
audios = self.audio_proj(audios)
if videos is not None:
videos = videos.transpose(1, 2).contiguous()
videos = self.video_model(videos)
videos = videos.view(videos.size(0), -1)
videos = self.video_proj(videos)
any_embeddings = []
for modality_data in any_modality:
modality = modality_data["modality"]
data = modality_data["data"]
tokens = self.processor.processor(modality, data)
embed = self.embedder(modality)(tokens)
any_embeddings.append(embed)
any_embeddings = torch.stack(any_embeddings)
model_input = self.decoder.forward_embedding(text_tokens)[1]
model_input = torch.cat([model_input[:, 0:6], images, audios, videos, any_embeddings, model_input[:, 6:]], dim=1)
model_input = self.decoder.forward_embedding(model_input, token_embedding=model_input)[0]
return self.decoder(model_input, passed_x=model_input)[0] | EXA-1-master | exa/models/KOSMOS_reimplementation-main/model/allModalities/kosmos3.py |
import os
import requests
import torch
from torch.nn import Module
from torchvision import transforms
from torchvision.models.video import r3d_18
from transformers import (
AutoModel,
AutoTokenizer,
CLIPModel,
CLIPProcessor,
Wav2Vec2ForCTC,
T5Tokenizer,
Wav2Vec2Processor,
)
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
from torchscale.component.embedding import PositionalEmbedding
import bitsandbytes
from flamingo_pytorch import PerceiverResampler
from concurrent.futures import ThreadPoolExecutor
class BaseTokenizer:
def tokenize(self, data):
raise NotImplementedError('This method should be implemented in a subclass')
def process(self, data):
raise NotImplemented("This method should be implemented in a subclass")
def embed(self, data):
raise NotImplemented("This method should be implemented in a subclass")
class ModalityDetector:
def __init__(self, method, input_data, user_input=None):
self.method = method
self.input_data = input_data
self.user_input = user_input
def get_modality(self):
if self.method == "file_extension":
return self.detect_modality_from_file_extension()
elif self.method == "content_based":
return self.detect_modality_from_content()
elif self.method == "user_input":
return self.user_input
def detect_modality_from_file_extension(self):
_, file_extension = os.path.splitext(self.input_data)
file_extension = file_extension.lower()
if file_extension in ['.jpg', '.jpeg', '.png', '.bmp']:
return 'image'
elif file_extension in ['.wav', '.mp3', '.ogg']:
return 'audio'
elif file_extension in [".txt", '.md', '.json']:
return 'text'
def detect_modality_from_content(self):
pass
class TokenizerFactory:
def create_tokenizer(self, modality):
# Fetch models from Hugging Face API
api_url = "https://huggingface.co/api/models"
response = requests.get(api_url)
if response.status_code != 200:
raise ValueError("Failed to fetch models from Hugging Face API")
models = response.json()
# Filter models based on modality and sort by likes
matching_models = sorted(
[model for model in models if modality in model["tags"]],
key=lambda x: x["likes"],
reverse=True
)
if not matching_models:
raise ValueError(f"No matching tokenizer found for modality '{modality}'")
# Select the most liked tokenizer and instantiate it
selected_model = matching_models[0]["modelId"]
tokenizer = AutoTokenizer.from_pretrained(selected_model)
return tokenizer
class ModalityProcessor:
def __init__(self, modality_detector):
self.modality_detector = modality_detector
self.modalities = {}
self.tokenizer_factory = TokenizerFactory(self.modality_detector)
def processor(self, modality, data):
modality = self.modality_detector.get_modality()
if modality in self.modalities:
tokenizer = self.modalities[modality]
else:
tokenizer = self.tokenizer_factory.create_tokenizer(modality)
self.modalities[modality] = tokenizer
tokens = tokenizer(data, return_tensors="pt", padding=True, truncation=True)
return tokens
class KosmosEmbedder(torch.nn.Module):
def __init__(self, modality):
super().__init__()
self.modality = modality
self.tokenizer = AutoTokenizer.from_pretrained(modality)
self.model = AutoModel.from_pretrained(modality)
self.proj = torch.nn.Linear(self.model.config.hidden_size, 2048)
def forward(self, data):
tokens = self.tokenizer(data, return_tensors="pt", padding=True, truncation=True)
output = self.model(**tokens)
embed = self.proj(output.last_hidden_state)
return embed
class KosmosTokenizer:
def __init__(self):
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
self.audio_tokenizer = Wav2Vec2Processor.from_pretrained("facebook/data2vec-audio-base-960h")
self.tokenizer = T5Tokenizer.from_pretrained(
"t5-large",
additional_special_tokens=["", "<audio>", "</audio>", "<video>", "</video>", "<any>", "</any>"],
extra_ids=0,
model_max_length=1984
)
self.video_transform = transforms.Compose([
transforms.Resize((112, 112)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.43216, 0.394666, 0.37645], std=[0.22803, 0.22145, 0.216989])
])
self.vid_idx, self.vid_end_ix = self.tokenizer.convert_tokens_to_ids(["<video>", "</video>"])
self.audio_idx, self.audio_end_idx = self.tokenizer.convert_tokens_to_ids(["<audio>", "</audio>"])
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids([""])
self.any_idx, self.any_end_idx = self.tokenizer.convert_tokens_to_ids(["<any>", "</any>"])
def tokenize_texts(self, texts):
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
media_tokens = torch.tensor([[self.im_idx, self.im_end_idx, self.audio_idx, self.audio_end_idx, self.vid_idx, self.vid_end_idx, self.any_idx, self.any_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], media_tokens, texts[:, 1:]], dim=1), texts
def tokenize_images(self, images):
return self.processor(images=images, return_tensors="pt").pixel_values
def tokenize_audio(self, audios):
return self.audio_tokenizer(audios, return_tensors="pt", padding=True, truncation=True).input_values
def tokenize_videos(self, videos):
if not videos:
return None
processed_videos = []
for video in videos:
video_frames = [self.video_transform(frame) for frame in video]
processed_videos.append(torch.stack(video_frames))
return torch.stack(processed_videos)
def tokenize(self, sample):
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
dummy_image_features = torch.ones((text_tokens.shape[0], 64))
attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
return {
"text_tokens": text_tokens,
"images": self.tokenize_images(sample["image"]),
"labels": only_text_tokens,
"attention_mask": attention_mask,
"audios": self.tokenize_audio(sample["audio"]),
"videos": self.tokenize_videos(sample["video"])
}
class Kosmos(Module):
def __init__(self, modality, modality_detector):
super().__init__()
self.clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K").vision_model
self.audio_model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
self.video_model = r3d_18(pretrained=True)
self.video_model = torch.nn.Sequential(*list(self.video_model.children())[:-1])
self.modality_detector = modality_detector
self.tokenizer = KosmosTokenizer()
self.processor = ModalityProcessor(modality_detector)
self.embedder = KosmosEmbedder(modality)
self.embed = bitsandbytes.nn.modules.Embedding(
32002,
2048,
padding_idx=1
)
self.embed_positions= PositionalEmbedding(
2048,
2048,
1
)
self.output_projection = torch.nn.Linear(
2048, 32002, bias=False
)
torch.nn.init.normal_(
self.output_projection.weight, mean=0, std=2048**-0.5
)
self.config = DecoderConfig(
decoder_layers=24,
decoder_embed_dim=2048,
decoder_ffn_embed_dim=8192,
decoder_attention_heads=32,
dropout=0.1,
activation_fn="gelu",
attention_dropout=0.1,
vocab_size=64007,
subln=True,
xpos_rel_pos=True,
max_rel_pos=2048
)
self.decoder = Decoder(
self.config,
embed_tokens=self.embed,
embed_positions=self.embed_positions,
output_projection=self.output_projection
)
self.perceive = PerceiverResampler(
dim = 1024,
depth = 2,
dim_head = 64,
heads = 8,
num_latents = 64,
num_media_embeds = 257
)
self.image_proj = torch.nn.Linear(1024, 2048, bias=False)
torch.nn.init.normal_(
self.image_proj.weight, mean=0, std=2048**-0.5
)
self.audio_proj = torch.nn.Linear(768, 2048, bias=False)
torch.nn.init.normal_(
self.audio_proj.weight, mean=0, std=2048 ** -0.5
)
self.video_proj = torch.nn.Linear(512, 2048, bias=False)
torch.nn.init.normal_(
self.video_proj.weight, mean=0, std=2048 ** -0.5
)
def forward(self, text_tokens, images, audios, videos, any_modality, **kwargs):
images = self.clip_model(pixel_values=images)["last_hidden_state"]
images = self.perceive(images).squeeze(1)
images = self.image_proj(images)
audios = self.audio_model(audios).logits
audios = audios.mean(dim=1)
audios = self.audio_proj(audios)
if videos is not None:
videos = videos.transpose(1, 2).contiguous()
videos = self.video_model(videos)
videos = videos.view(videos.size(0), -1)
videos = self.video_proj(videos)
any_embeddings = []
for modality_data in any_modality:
modality = modality_data["modality"]
data = modality_data["data"]
tokens = self.processor.processor(modality, data)
embed = self.embedder(modality)(tokens)
any_embeddings.append(embed)
any_embeddings = torch.stack(any_embeddings)
model_input = self.decoder.forward_embedding(text_tokens)[1]
model_input = torch.cat([model_input[:, 0:6], images, audios, videos, any_embeddings, model_input[:, 6:]], dim=1)
model_input = self.decoder.forward_embedding(model_input, token_embedding=model_input)[0]
return self.decoder(model_input, passed_x=model_input)[0] | EXA-1-master | exa/models/KOSMOS_reimplementation-main/model/allModalities/kosmos2.py |
import os
import torch
from torch.nn import Module
from torchvision import transforms
from torchvision.models.video import r3d_18
from transformers import (
AutoModel,
AutoTokenizer,
CLIPModel,
CLIPProcessor,
Data2VecForCTC,
T5Tokenizer,
Wav2Vec2Processor,
list_models
)
# Add additional imports
from torchscale.architecture.config import DecoderConfig
from torchscale.architecture.decoder import Decoder
from torchscale.component.embedding import PositionalEmbedding
import bitsandbytes
from flamingo_pytorch import PerceiverResampler
# Import the ModalityDetector and other required classes
# from modality_detector import ModalityDetector, ModalityProcessor, TokenizerFactory
# from kosmos import Kosmos, KosmosEmbedder, KosmosTokenizer
#baseclass should contain the core methods for tokenizing processing and embedding input data
class BaseTokenizer:
def tokenize(self, data):
raise NotImplementedError('This method should be implemented in a subclass')
def process(self, data):
raise NotImplemented("This method should be implemented in a subclass")
def embed(self, data):
raise NotImplemented("This method should be implemented in a subclass")
class ModalityDetector:
def __init__(self, method, input_data, user_input=None):
self.method = method
self.input_data = input_data
self.user_input = user_input
def get_modality(self):
if self.method == "file_extension":
return self.detect_modality_from_file_extension()
elif self.method == "content_based":
return self.detect_modality_from_content()
elif self.method == "user_input":
return self.user_input
def detect_modality_from_file_extension(self):
_, file_extension = os.path.splitext(self.input_data)
file_extension = file_extension.lower()
if file_extension in ['.jpg', '.jpeg', '.png', '.bmp']:
return 'image'
elif file_extension in ['.wav', '.mp3', '.ogg']:
return 'audio'
elif file_extension in [".txt", '.md', '.json']:
return 'text'
def detect_modality_from_content(self):
# implement logic to determine modality based on content analysis
# this part requires a content-based modality detection model or algo
pass
class TokenizerFactory:
def __init__(self, modality_detector):
self.modality_detector = modality_detector
def create_tokenizer(self, modality):
modality = self.modality_detector.get_modality()
# search for pretrained tokenizers for the given modality
matching_models = list_models(filter=modality)
if not matching_models:
raise ValueError(f"No matching Tokenizer for modality")
# select the first matching tokenizer and instante it [make selection more favorable with most liked]
selected_model = matching_models[0]
tokenizer = AutoTokenizer.from_pretrained(selected_model)
return tokenizer
class ModalityProcessor:
def __init__(self, modality_detector):
self.modality_detector = modality_detector
self.modalities = {}
self.tokenizer_factory = TokenizerFactory(self.modality_detector)
def processor(self, modality, data):
modality = self.modality_detector.get_modality()
# Check if the modality is already registered
if modality in self.modalities:
tokenizer = self.modalities[modality]
else:
tokenizer = self.tokenizer_factory.create_tokenizer(modality)
self.modalities[modality] = tokenizer
tokens = tokenizer(data, return_tensors="pt", padding=True, truncation=True)
return tokens
class KosmosEmbedder(torch.nn.Module):
def __init__(self, modality):
super().__init__()
self.modality = modality
self.tokenizer = AutoTokenizer.from_pretrained(modality)
self.model = AutoModel.from_pretrained(modality)
self.proj = torch.nn.Linear(self.model.config.hidden_size, 2048)
def forward(self, data):
tokens = self.tokenizer(data, return_tensors="pt", padding=True, truncation=True)
output = self.model(**tokens)
embed = self.proj(output.last_hidden_state)
return embed
class KosmosTokenizer:
def __init__(self):
self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K")
self.audio_tokenizer = Wav2Vec2Processor.from_pretrained("facebook/data2vec-audio-base-960h")
#video
self.tokenizer = T5Tokenizer.from_pretrained(
"t5-large",
additional_special_tokens=["", "<audio>", "</audio>", "<video>", "</video>", "<any>", "</any>"],
extra_ids=0,
model_max_length=1984
)
self.video_transform = transforms.Compose([
transforms.Resize((112, 112)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.43216, 0.394666, 0.37645], std=[0.22803, 0.22145, 0.216989])
])
self.vid_idx, self.vid_end_ix = self.tokenizer.convert_tokens_to_ids(["<video>", "</video>"])
self.audio_idx, self.audio_end_idx = self.tokenizer.convert_tokens_to_ids(["<audio>", "</audio>"])
self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids([""])
self.any_idx, self.any_end_idx = self.tokenizer.convert_tokens_to_ids(["<any>", "</any>"])
def tokenize_texts(self, texts):
texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids
# Add image and audio tokens to text as "<s>  <audio> </audio> text </s>"
# media_tokens = torch.tensor([[self.im_idx, self.im_end_idx, self.audio_idx, self.audio_end_idx, self.vid_idx, self.vid_end_idx, self.any_idx, self.any_end_idx]] * texts.shape[0])
# return torch.cat([texts[:, 0:1], media_tokens, texts[:, 1:]], dim=1), texts
media_tokens = torch.tensor([[self.im_idx, self.im_end_idx, self.audio_idx, self.audio_end_idx, self.vid_idx, self.vid_end_idx, self.any_idx, self.any_end_idx]] * texts.shape[0])
return torch.cat([texts[:, 0:1], media_tokens, texts[:, 1:]], dim=1), texts
def tokenize_images(self, images):
return self.processor(images=images, return_tensors="pt").pixel_values
def tokenize_audio(self, audios):
return self.audio_tokenizer(audios, return_tensors="pt", padding=True, truncation=True).input_values
def tokenize_videos(self, videos):
processed_videos = []
for video in videos:
video_frames = [self.video_transform(frame) for frame in video]
processed_videos.append(torch.stack(video_frames))
return torch.stack(processed_videos)
def tokenize(self, sample):
text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"])
attention_mask = text_tokens != self.tokenizer.pad_token_id
dummy_image_features = torch.ones((text_tokens.shape[0], 64))
attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1)
return {
"text_tokens": text_tokens,
"images": self.tokenize_images(sample["image"]),
"labels": only_text_tokens,
"attention_mask": attention_mask,
"audios": self.tokenize_audio(sample["audio"]),
"videos": self.tokenize_videos(sample["video"])
}
class Kosmos(Module):
def __init__(self, modality, modality_detector):
super().__init__()
# Instantiate Clip Vit-l/14
self.clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K").vision_model
#audio model
self.audio_model = Data2VecForCTC.from_pretrained("facebook/data2vec-audio-base-960h")
#video
self.video_model = r3d_18(pretrained=True)
self.video_model = torch.nn.Sequential(*list(self.video_model.children())[:-1])
self.modality_detector = modality_detector
self.tokenizer = KosmosTokenizer()
self.processor = ModalityProcessor(modality_detector)
self.embedder = KosmosEmbedder(modality)
self.embed = bitsandbytes.nn.modules.Embedding(
32002,
2048,
padding_idx=1
)
self.embed_positions= PositionalEmbedding(
2048,
2048,
1
)
self.output_projection = torch.nn.Linear(
2048, 32002, bias=False
)
torch.nn.init.normal_(
self.output_projection.weight, mean=0, std=2048**-0.5
)
# Config following KOSMOS-1 paper (https://arxiv.org/pdf/2302.14045.pdf)
self.config = DecoderConfig(
decoder_layers=24,
decoder_embed_dim=2048,
decoder_ffn_embed_dim=8192,
decoder_attention_heads=32,
dropout=0.1,
activation_fn="gelu",
attention_dropout=0.1,
vocab_size=64007,
subln=True,
xpos_rel_pos=True,
max_rel_pos=2048
)
self.decoder = Decoder(
self.config,
embed_tokens=self.embed,
embed_positions=self.embed_positions,
output_projection=self.output_projection
)
self.perceive = PerceiverResampler(
dim = 1024,
depth = 2,
dim_head = 64,
heads = 8,
num_latents = 64,
num_media_embeds = 257
)
self.image_proj = torch.nn.Linear(1024, 2048, bias=False)
torch.nn.init.normal_(
self.image_proj.weight, mean=0, std=2048**-0.5
)
self.audio_proj = torch.nn.Linear(768, 2048, bias=False)
torch.nn.init.normal_(
self.audio_proj.weight, mean=0, std=2048 ** -0.5
)
self.video_proj = torch.nn.Linear(512, 2048, bias=False)
torch.nn.init.normal_(
self.video_proj.weight, mean=0, std=2048 ** -0.5
)
def forward(self, text_tokens, images, audios, videos, any_modality, **kwargs):
modality = self.modality_detector.get_modality(data)
images = self.clip_model(pixel_values=images)["last_hidden_state"]
images = self.perceive(images).squeeze(1)
images = self.image_proj(images)
# Process audio tokens
audios = self.audio_model(audios).logits
audios = audios.mean(dim=1)
audios = self.audio_proj(audios)
#process video tokens
videos = videos.transpose(1, 2).contigous()
videos = self.video_model(videos)
videos = videos.view(videos.size(0), -1)
videos = self.video_proj(videos)
#process any modality
any_embeddings = []
for modality_data in any_modality:
modality = modality_data["modality"]
data = modality_data["data"]
tokens = self.processor.processor(modality, data)
embed = self.embedder(modality)(tokens)
any_embeddings.append(embed)
any_embeddings = torch.stack(any_embeddings)
#v1
# Concatenate text tokens and media tokens
# model_input = self.decoder.forward_embedding(text_tokens)[1]
# model_input = torch.cat([model_input[:, 0:6], images, audios, videos, model_input[:, 6:]], dim=1)
# model_input = self.decoder.forward_embedding(model_input, token_embedding=model_input)[0]
#v2 any modality tokens
model_input = self.decoder.forward_embedding(text_tokens)[1]
model_input = torch.cat([model_input[:, 0:6], images, audios, videos, any_embeddings, model_input[:, 6:]], dim=1)
model_input = self.decoder.forward_embedding(model_input, token_embedding=model_input)[0]
return self.decoder(model_input, passed_x=model_input)[0]
# return self.decoder(model_input, passed_x=model_input)[0] | EXA-1-master | exa/models/KOSMOS_reimplementation-main/model/allModalities/kosmos.py |
from setuptools import find_packages, setup
setup(
name='gato-tf',
version='0.0.2',
description='Unofficial Gato: A Generalist Agent',
url='https://github.com/OrigamiDream/gato.git',
author='OrigamiDream',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
install_requires=[
'tensorflow>=2.11',
],
keywords=[
'deep learning',
'gato',
'tensorflow',
'generalist agent'
]
)
| EXA-1-master | exa/models/gato/setup.py |
import tensorflow as tf
from tensorflow.keras.optimizers import schedules, AdamW
from gato import GatoConfig
from gato.models import Gato
# Load and preprocess your dataset
def load_and_preprocess_dataset():
# Load and preprocess your dataset here
# Return the dataset as a tf.data.Dataset object
pass
# Initialize GATO model
config = GatoConfig()
gato_model = Gato(config)
# Set up the optimizer, learning rate scheduler, and loss function
learning_rate_schedule = schedules.CosineDecayRestarts(
initial_learning_rate=config.max_learning_rate,
first_decay_steps=1000000,
t_mul=1.0,
m_mul=0.1,
alpha=config.min_learning_rate / config.max_learning_rate,
)
optimizer = AdamW(
learning_rate=learning_rate_schedule,
weight_decay=config.weight_decay,
beta_1=config.beta_1,
beta_2=config.beta_2,
epsilon=config.epsilon,
)
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
# Training loop
num_epochs = 10
train_dataset = load_and_preprocess_dataset()
for epoch in range(num_epochs):
print(f"Epoch {epoch + 1}/{num_epochs}")
for batch, (inputs, targets) in enumerate(train_dataset):
with tf.GradientTape() as tape:
logits = gato_model(inputs, training=True)
loss_value = loss_object(targets, logits)
grads = tape.gradient(loss_value, gato_model.trainable_weights)
optimizer.apply_gradients(zip(grads, gato_model.trainable_weights))
if batch % 100 == 0:
print(f"Batch {batch}: Loss = {loss_value}")
# Save the trained model weights
gato_model.save_weights("gato_trained_weights.h5") | EXA-1-master | exa/models/gato/train.py |
import copy
from typing import Dict, Any
class GatoConfig:
@staticmethod
def large():
return GatoConfig(num_transformer_blocks=24,
num_attention_heads=16,
layer_width=2048,
feedforward_hidden_size=8192,
key_value_size=128)
@staticmethod
def baseline():
return GatoConfig(num_transformer_blocks=12,
num_attention_heads=12,
layer_width=1536,
feedforward_hidden_size=6144,
key_value_size=128)
@staticmethod
def small():
return GatoConfig(num_transformer_blocks=8,
num_attention_heads=24,
layer_width=768,
feedforward_hidden_size=3072,
key_value_size=32)
def __init__(self, **kwargs):
self.input_dim = kwargs.pop('input_dim', 768)
self.img_patch_size = kwargs.pop('img_patch_size', 16)
# Section 2.3. Training
self.token_sequence_length = kwargs.pop('token_sequence_length', 1024)
# Section 2.1. Tokenization
# Text - SentencePiece
self.vocabulary_size = kwargs.pop('vocabulary_size', 32000)
# Discrete values
self.actions_size = kwargs.pop('actions_size', 1024)
# Continuous values
self.continuous_values_size = kwargs.pop('continuous_values_size', 1024)
# Appendix C.1. Transformer Hyperparameters
self.num_transformer_blocks = kwargs.pop('num_transformer_blocks', 8)
self.num_attention_heads = kwargs.pop('num_attention_heads', 24)
self.layer_width = kwargs.pop('layer_width', 768)
self.feedforward_hidden_size = kwargs.pop('feedforward_hidden_size', 3072)
self.key_value_size = kwargs.pop('key_value_size', 32)
# Appendix E. Regularization
self.dropout_rate = kwargs.pop('dropout_rate', 0.1)
# Appendix C.2. Embedding Function
self.num_group_norm_groups = kwargs.pop('num_group_norm_groups', 32)
# Appendix C.3. Position Encodings > Patch Position Encodings
self.discretize_depth = kwargs.pop('discretize_depth', 128)
# Appendix C.3. Position Encodings > Local Observation Position Encodings
self.local_position_encoding_size = kwargs.pop('local_position_encoding_size', 512)
@property
def embedding_input_size(self):
return self.vocabulary_size + self.continuous_values_size + self.actions_size + 1
@property
def output_target_size(self):
return self.vocabulary_size + self.actions_size
def to_dict(self) -> Dict[str, Any]:
output = copy.deepcopy(self.__dict__)
return output
@classmethod
def from_dict(cls, config_dict: Dict[str, Any]) -> "GatoConfig":
config = cls(**config_dict)
return config
| EXA-1-master | exa/models/gato/gato/config.py |
from gato.config import GatoConfig
from gato.models import Gato
| EXA-1-master | exa/models/gato/gato/__init__.py |
import tensorflow as tf
from tensorflow.keras import layers, models
from gato import GatoConfig
from typing import Dict, Any, Union
def _randomized_positions(from_v, to_v):
pos = tf.random.uniform(from_v.shape, minval=0, maxval=1, dtype=tf.float32)
pos = pos * tf.cast(to_v - from_v, dtype=tf.float32)
pos = tf.cast(pos, dtype=tf.int32)
return pos
def _rounded_mean_positions(from_v, to_v):
pos = tf.cast(from_v + to_v, tf.float32)
pos = pos / 2
pos = tf.round(pos)
return pos
def _broadcast(row_pos, col_pos, row_ones, col_ones):
# broadcast (5,) to (20,) with column-axis
row_pos = tf.expand_dims(row_pos, 1)
row_pos = tf.matmul(row_pos, col_ones, transpose_b=True)
row_pos = tf.reshape(row_pos, (-1,))
row_pos = tf.stop_gradient(row_pos)
# broadcast (4,) to (20,) with row-axis
col_pos = tf.expand_dims(col_pos, 1)
col_pos = tf.matmul(row_ones, col_pos, transpose_b=True)
col_pos = tf.reshape(col_pos, (-1,))
col_pos = tf.stop_gradient(col_pos)
return row_pos, col_pos
class PatchPositionEncoding(layers.Layer):
def __init__(self,
config: Union[GatoConfig, Dict[str, Any]],
trainable=True, name=None, *args, **kwargs):
"""
Appendix C.3. Position Encodings
"""
super(PatchPositionEncoding, self).__init__(trainable=trainable, name=name, *args, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.embedding_dim = self.config.layer_width
self.discretize_depth = self.config.discretize_depth
self.patch_size = self.config.img_patch_size
self.row_embedding = layers.Embedding(self.discretize_depth, self.embedding_dim, name='row_embedding')
self.col_embedding = layers.Embedding(self.discretize_depth, self.embedding_dim, name='col_embedding')
def _discretize(self, pos):
return tf.round(pos * self.discretize_depth)
def _discretize_interval(self, interval):
pos_from, pos_to = interval
return self._discretize(pos_from), self._discretize(pos_to)
def call(self, inputs, *args, **kwargs):
# Appendix C.3. Position Encodings; Figure 15 | Patch position encodings.
training = kwargs['training'] if 'training' in kwargs else False
# input_ids must already be embedded by the resnet embedding function.
# row_pos and col_pos must be intervals which is tuple of (pos_from, pos_to)
# row_pos and col_pos must be normalized between [0, 1] to show their relativity.
input_ids, (row_pos, col_pos) = inputs
row_pos_from, row_pos_to = self._discretize_interval(row_pos)
col_pos_from, col_pos_to = self._discretize_interval(col_pos)
if training:
# > During training a random index is uniformly sampled from the quantized interval.
row_pos = row_pos_from + _randomized_positions(row_pos_from, row_pos_to)
col_pos = col_pos_from + _randomized_positions(col_pos_from, col_pos_to)
else:
# > During evaluation we deterministically take the (rounded) mean of the interval.
row_pos = _rounded_mean_positions(row_pos_from, row_pos_to)
col_pos = _rounded_mean_positions(col_pos_from, col_pos_to)
col_pos = tf.cast(col_pos, dtype=tf.int32)
row_pos = tf.cast(row_pos, dtype=tf.int32)
# > Once row and column position encoding are retrieved from the embedding table,
# > they are added onto the token embedding produced by the resnet embedding function.
return input_ids + self.row_embedding(row_pos) + self.col_embedding(col_pos)
def get_config(self):
config = super(PatchPositionEncoding, self).get_config()
config.update({
'config': self.config.to_dict(),
})
return config
class ResidualUnit(layers.Layer):
def __init__(self, num_groups: int, filters: int, trainable=True, name=None, *args, **kwargs):
super(ResidualUnit, self).__init__(trainable=trainable, name=name, *args, **kwargs)
self.num_groups = num_groups
self.filters = filters
self.gn1 = self.gn2 = None
self.conv1 = self.conv2 = None
self.conv_proj = self.gn_proj = None
def build(self, input_shape):
self.gn1 = layers.GroupNormalization(groups=self.num_groups, name='gn1')
self.gn2 = layers.GroupNormalization(groups=self.num_groups, name='gn2')
self.conv1 = layers.Conv2D(filters=self.filters // 2, kernel_size=(3, 3), strides=(1, 1),
use_bias=False, padding='same', name='conv1')
self.conv2 = layers.Conv2D(filters=self.filters, kernel_size=(3, 3), strides=(2, 2),
use_bias=False, padding='same', name='conv2')
self.conv_proj = layers.Conv2D(filters=self.filters, kernel_size=(1, 1), strides=(2, 2),
use_bias=False, padding='same', name='conv_proj')
self.gn_proj = layers.GroupNormalization(groups=self.num_groups, name='gn_proj')
def call(self, inputs, *args, **kwargs):
# Supplementary Material B. Agent Data Tokenization Details; Figure 16
# > This block uses the v2 ResNet architecture, GroupNorm (instead of LayerNorm) normalization,
# > and GELU (instead RELU) activation functions.
x = inputs
residual = self.conv_proj(self.gn_proj(x))
x = tf.nn.gelu(self.gn1(x))
x = self.conv1(x)
x = tf.nn.gelu(self.gn2(x))
x = self.conv2(x)
return x + residual
class ResidualEmbedding(layers.Layer):
def __init__(self, config: Union[GatoConfig, Dict[str, Any]], trainable=True, name=None, *args, **kwargs):
"""
Appendix C.2. Embedding Function
"""
super(ResidualEmbedding, self).__init__(trainable=trainable, name=name, *args, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.root_conv = self.conv_proj = None
self.residual_units = None
self.num_patches = None
def build(self, input_shape):
if self.config.input_dim != self.config.layer_width:
self.conv_proj = layers.Conv2D(filters=self.config.layer_width,
kernel_size=(1, 1),
strides=(1, 1),
padding='same',
use_bias=False,
name='conv_proj')
self.root_conv = models.Sequential([
layers.Conv2D(filters=96, kernel_size=(7, 7), strides=(2, 2),
use_bias=False, padding='same', name='conv_root'),
layers.GroupNormalization(groups=self.config.num_group_norm_groups, name='gn_root'),
layers.Activation('gelu', name='act_root')
])
self.residual_units = [ResidualUnit(num_groups=self.config.num_group_norm_groups,
filters=96 * 2 ** (i + 1),
name='residual_unit_{}'.format(i + 1))
for i in range(3)]
def call(self, inputs, *args, **kwargs):
# Section 2.1 Tokenization.
x = self.root_conv(inputs)
# NOTE: Page 3-4, Section 2.2 Embedding input tokens and setting output targets
# > Tokens belonging to image patches for any time-step are embedded
# > using a single ResNet block to obtain a vector per patch.
# I don't think that transforming single 16x16 patch into feature map
# with depth 768 at once does not give advantages coming from inductive bias.
# This is currently discussing in issue #2
for block in self.residual_units:
x = block(x)
if self.conv_proj is not None:
x = self.conv_proj(x)
x = tf.reshape(x, shape=(-1, inputs.shape[1], self.config.layer_width))
return x
def get_config(self):
config = super(ResidualEmbedding, self).get_config()
config.update({
'config': self.config.to_dict()
})
return config
class LocalPositionEncoding(layers.Layer):
def __init__(self, config: Union[GatoConfig, Dict[str, Any]], trainable=True, name=None, *args, **kwargs):
"""
Appendix C.3. Position Encodings > Local Observation Position Encodings
"""
super(LocalPositionEncoding, self).__init__(trainable=trainable, name=name, *args, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.embedding = None
def build(self, input_shape):
self.embedding = layers.Embedding(self.config.token_sequence_length, self.config.layer_width)
self.built = True
def call(self, inputs, *args, **kwargs):
# Appendix C.3. Position Encodings > Local Observation Position Encodings; Figure 18 | Local position encodings.
# > Note that no position encodings are added to action tokens.
# So I added `obs_mask` to mask the action token into zeros.
obs_pos, obs_mask = inputs
embed = self.embedding(obs_pos)
ones = tf.ones((embed.shape[0], 1, self.config.layer_width), dtype=tf.float32)
obs_mask = tf.cast(obs_mask, dtype=tf.float32)
obs_mask = tf.matmul(obs_mask, ones, transpose_a=True)
return embed * obs_mask
def get_config(self):
config = super(LocalPositionEncoding, self).get_config()
config.update({
'config': self.config.to_dict()
})
return config
class DiscreteEmbedding(layers.Layer):
def __init__(self, config: Union[GatoConfig, Dict[str, Any]], trainable=True, name=None, *args, **kwargs):
super(DiscreteEmbedding, self).__init__(trainable=trainable, name=name, *args, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.embedding = None
def build(self, input_shape):
# Appendix C.1. Transformer Hyperparameters
# Shared Embedding
with tf.name_scope('discrete_shared_embedding'):
self.embedding = layers.Embedding(self.config.embedding_input_size,
self.config.layer_width,
name='discrete_embedding')
self.built = True
def call(self, inputs, *args, **kwargs):
return self.embedding(inputs)
def get_config(self):
config = super(DiscreteEmbedding, self).get_config()
config.update({
'config': self.config.to_dict()
})
return config
| EXA-1-master | exa/models/gato/gato/models/embedding.py |
import tensorflow as tf
from gato.models.transformer import TransformerBlock
from gato.models.embedding import PatchPositionEncoding, ResidualEmbedding, LocalPositionEncoding, DiscreteEmbedding
from gato.models.tokenizers import ContinuousValueTokenizer
from tensorflow.keras import models
from gato import GatoConfig
from typing import Dict, Any, Union
class Gato(models.Model):
def __init__(self, config: Union[GatoConfig, Dict[str, Any]], trainable: bool = True, name: str = 'Gato', **kwargs):
super(Gato, self).__init__(trainable=trainable, name=name, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.image_embedding = PatchEmbedding(config, trainable=trainable, name='ImagePatchEmbedding')
self.discrete_embedding = DiscreteEmbedding(config, trainable=trainable, name='DiscreteEmbedding')
self.continuous_encoding = ContinuousValueTokenizer(config, name='ContinuousValueEncoding')
self.transformer = Transformer(config, trainable=trainable, name='Transformers')
self.local_pos_encoding = LocalPositionEncoding(config, trainable=trainable, name='LocalPositionEncoding')
def call(self, inputs, training=None, mask=None):
# input_ids with (B, L, 768)
# encoding with (B, L) or (B,)
# row_pos and col_pos with tuple of (pos_from, pos_to)
# obs_pos and obs_mask with (B, L) or (B,)
input_ids, (encoding, row_pos, col_pos), (obs_pos, obs_mask) = inputs
# Encoding flags for embed masks
# 0 - image
# 1 - continuous
# 2 - discrete (actions, texts)
encoding = tf.one_hot(encoding, depth=3, dtype=tf.float32)
ones = tf.ones((input_ids.shape[0], 1, self.config.layer_width), dtype=tf.float32)
image_embed = self.image_embedding((input_ids, (row_pos, col_pos)), training=training)
image_embed *= tf.matmul(encoding[..., 0], ones, transpose_a=True) # image patch masking
# continuous value takes from first value of input_ids
continuous_embed = self.continuous_encoding(input_ids[..., 0])
continuous_embed = self.discrete_embedding(continuous_embed)
continuous_embed *= tf.matmul(encoding[..., 1], ones, transpose_a=True) # continuous value masking
discrete_embed = self.discrete_embedding(input_ids[..., 0])
discrete_embed *= tf.matmul(encoding[..., 2], ones, transpose_a=True) # discrete value masking
# Appendix C.3. Position Encodings > Local Observation Position Encodings
# add local observation position encodings
embed = image_embed + continuous_embed + discrete_embed
embed += self.local_pos_encoding((obs_pos, obs_mask))
hidden_states = self.transformer(embed)
return hidden_states
def get_config(self):
return super(Gato, self).get_config()
class Transformer(models.Model):
def __init__(self,
config: Union[GatoConfig, Dict[str, Any]],
trainable: bool = True,
name: str = None,
**kwargs):
super(Transformer, self).__init__(trainable=trainable, name=name, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.encoders = [TransformerBlock(config=self.config, trainable=trainable, name='EncoderBlock{}'.format(idx))
for idx in range(self.config.num_transformer_blocks)]
def call(self, inputs, training=None, mask=None):
x = inputs
for encoder in self.encoders:
x = encoder(x)
return x
def get_config(self):
return super(Transformer, self).get_config()
class PatchEmbedding(models.Model):
def __init__(self,
config: Union[GatoConfig, Dict[str, Any]],
trainable: bool = True,
name: str = None,
**kwargs):
super(PatchEmbedding, self).__init__(trainable=trainable, name=name, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.residual_embedding = ResidualEmbedding(config, trainable=trainable, name='ResidualEmbedding')
self.pos_encoding = PatchPositionEncoding(config, trainable=trainable, name='PatchPositionEncoding')
def call(self, inputs, training=None, mask=None):
input_ids, (row_pos, col_pos) = inputs
patch_size = self.config.img_patch_size
depth = self.config.input_dim // (patch_size * patch_size)
x = tf.reshape(input_ids, (-1, input_ids.shape[1], patch_size, patch_size, depth))
x = self.residual_embedding(x)
x = self.pos_encoding((x, (row_pos, col_pos)))
return x
def get_config(self):
return super(PatchEmbedding, self).get_config()
| EXA-1-master | exa/models/gato/gato/models/__init__.py |
import tensorflow as tf
from tensorflow.keras import layers, models, activations
from gato import GatoConfig
from typing import Dict, Any, Union
class TransformerBlock(layers.Layer):
def __init__(self,
config: Union[GatoConfig, Dict[str, Any]],
trainable: bool = True,
name: str = None,
*args, **kwargs):
super(TransformerBlock, self).__init__(trainable, name, *args, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.attention = self.feed_forward = self.dropout = None
self.layer_norm1 = self.layer_norm2 = None
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
hidden_size = input_shape[-1]
self.attention = layers.MultiHeadAttention(num_heads=self.config.num_attention_heads,
key_dim=self.config.key_value_size,
value_dim=self.config.key_value_size,
dropout=self.config.dropout_rate,
name='attention')
self.dropout = layers.Dropout(self.config.dropout_rate, name='attention_dropout')
self.feed_forward = models.Sequential(layers=[
layers.Dense(units=self.config.feedforward_hidden_size,
activation='linear',
name='dense_intermediate'),
# Appendix C.1. Transformer Hyperparameters
# Activation Function: GEGLU
layers.Lambda(lambda x: activations.gelu(x, approximate=False), name='gelu'),
layers.Dropout(self.config.dropout_rate, name='dropout_intermediate'),
layers.Dense(units=hidden_size,
activation='linear',
name='dense'),
layers.Dropout(self.config.dropout_rate, name='dropout'),
], name='feed_forward')
self.layer_norm1 = layers.LayerNormalization(epsilon=1e-6, name='layer_norm1')
self.layer_norm2 = layers.LayerNormalization(epsilon=1e-6, name='layer_norm2')
def call(self, inputs, *args, **kwargs):
# Appendix C.1. Transformer Hyperparameters
# Layer Normalization: Pre-Norm
residual = inputs
x = self.layer_norm1(inputs)
x = self.attention(x, x, x)
x = self.dropout(x)
x = x + residual
residual = x
x = self.layer_norm2(inputs)
x = self.feed_forward(x)
x = x + residual
return x
def get_config(self):
config = super(TransformerBlock, self).get_config()
config.update({
'config': self.config.to_dict(),
})
return config
| EXA-1-master | exa/models/gato/gato/models/transformer.py |
import tensorflow as tf
from gato import GatoConfig
from tensorflow.keras import models
from typing import Union, Dict, Any
def mu_law_encode(x, mu=100, m=256):
# Appendix B. Agent Data Tokenization Details
sign = tf.math.sign(x)
numerator = tf.math.log(tf.abs(x) * mu + 1.0)
denominator = tf.math.log(m * mu + 1.0)
return (numerator / denominator) * sign
def tokenize_continuous_values(x, mu=100, m=256, bins=1024, shift=None):
# Appendix B. Agent Data Tokenization Details
# > Finally, they are discretized using bins of uniform width on the domain [-1, 1].
c = mu_law_encode(x, mu, m)
# > We use 1024 bins and shift the resulting integers
# > so they are not overlapping with the ones used for text tokens.
c = (c + 1) * (bins / 2)
c = tf.cast(c, tf.int32)
if shift is not None:
c += shift
return c
class ContinuousValueTokenizer(models.Model):
def __init__(self,
config: Union[GatoConfig, Dict[str, Any]],
mu=100, m=256, bins=1024,
trainable=False, name=None, **kwargs):
super(ContinuousValueTokenizer, self).__init__(trainable=trainable, name=name, **kwargs)
if isinstance(config, dict):
config = GatoConfig(**config)
self.config = config
self.mu = mu
self.m = m
self.bins = bins
def call(self, inputs, training=None, mask=None):
return tokenize_continuous_values(inputs, self.mu, self.m, self.bins, shift=self.config.vocabulary_size)
def get_config(self):
return super(ContinuousValueTokenizer, self).get_config()
| EXA-1-master | exa/models/gato/gato/models/tokenizers.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import find_packages, setup
setup(
name="segment_anything",
version="1.0",
install_requires=[],
packages=find_packages(exclude="notebooks"),
extras_require={
"all": ["matplotlib", "pycocotools", "opencv-python", "onnx", "onnxruntime"],
"dev": ["flake8", "isort", "black", "mypy"],
},
)
| EXA-1-master | exa/models/segment-anything-main/setup.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from segment_anything.modeling import Sam
from typing import Optional, Tuple
from .utils.transforms import ResizeLongestSide
class SamPredictor:
def __init__(
self,
sam_model: Sam,
) -> None:
"""
Uses SAM to calculate the image embedding for an image, and then
allow repeated, efficient mask prediction given prompts.
Arguments:
sam_model (Sam): The model to use for mask prediction.
"""
super().__init__()
self.model = sam_model
self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)
self.reset_image()
def set_image(
self,
image: np.ndarray,
image_format: str = "RGB",
) -> None:
"""
Calculates the image embeddings for the provided image, allowing
masks to be predicted with the 'predict' method.
Arguments:
image (np.ndarray): The image for calculating masks. Expects an
image in HWC uint8 format, with pixel values in [0, 255].
image_format (str): The color format of the image, in ['RGB', 'BGR'].
"""
assert image_format in [
"RGB",
"BGR",
], f"image_format must be in ['RGB', 'BGR'], is {image_format}."
if image_format != self.model.image_format:
image = image[..., ::-1]
# Transform the image to the form expected by the model
input_image = self.transform.apply_image(image)
input_image_torch = torch.as_tensor(input_image, device=self.device)
input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]
self.set_torch_image(input_image_torch, image.shape[:2])
@torch.no_grad()
def set_torch_image(
self,
transformed_image: torch.Tensor,
original_image_size: Tuple[int, ...],
) -> None:
"""
Calculates the image embeddings for the provided image, allowing
masks to be predicted with the 'predict' method. Expects the input
image to be already transformed to the format expected by the model.
Arguments:
transformed_image (torch.Tensor): The input image, with shape
1x3xHxW, which has been transformed with ResizeLongestSide.
original_image_size (tuple(int, int)): The size of the image
before transformation, in (H, W) format.
"""
assert (
len(transformed_image.shape) == 4
and transformed_image.shape[1] == 3
and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size
), f"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}."
self.reset_image()
self.original_size = original_image_size
self.input_size = tuple(transformed_image.shape[-2:])
input_image = self.model.preprocess(transformed_image)
self.features = self.model.image_encoder(input_image)
self.is_image_set = True
def predict(
self,
point_coords: Optional[np.ndarray] = None,
point_labels: Optional[np.ndarray] = None,
box: Optional[np.ndarray] = None,
mask_input: Optional[np.ndarray] = None,
multimask_output: bool = True,
return_logits: bool = False,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Predict masks for the given input prompts, using the currently set image.
Arguments:
point_coords (np.ndarray or None): A Nx2 array of point prompts to the
model. Each point is in (X,Y) in pixels.
point_labels (np.ndarray or None): A length N array of labels for the
point prompts. 1 indicates a foreground point and 0 indicates a
background point.
box (np.ndarray or None): A length 4 array given a box prompt to the
model, in XYXY format.
mask_input (np.ndarray): A low resolution mask input to the model, typically
coming from a previous prediction iteration. Has form 1xHxW, where
for SAM, H=W=256.
multimask_output (bool): If true, the model will return three masks.
For ambiguous input prompts (such as a single click), this will often
produce better masks than a single prediction. If only a single
mask is needed, the model's predicted quality score can be used
to select the best mask. For non-ambiguous prompts, such as multiple
input prompts, multimask_output=False can give better results.
return_logits (bool): If true, returns un-thresholded masks logits
instead of a binary mask.
Returns:
(np.ndarray): The output masks in CxHxW format, where C is the
number of masks, and (H, W) is the original image size.
(np.ndarray): An array of length C containing the model's
predictions for the quality of each mask.
(np.ndarray): An array of shape CxHxW, where C is the number
of masks and H=W=256. These low resolution logits can be passed to
a subsequent iteration as mask input.
"""
if not self.is_image_set:
raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
# Transform input prompts
coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None
if point_coords is not None:
assert (
point_labels is not None
), "point_labels must be supplied if point_coords is supplied."
point_coords = self.transform.apply_coords(point_coords, self.original_size)
coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)
labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)
coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]
if box is not None:
box = self.transform.apply_boxes(box, self.original_size)
box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)
box_torch = box_torch[None, :]
if mask_input is not None:
mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)
mask_input_torch = mask_input_torch[None, :, :, :]
masks, iou_predictions, low_res_masks = self.predict_torch(
coords_torch,
labels_torch,
box_torch,
mask_input_torch,
multimask_output,
return_logits=return_logits,
)
masks_np = masks[0].detach().cpu().numpy()
iou_predictions_np = iou_predictions[0].detach().cpu().numpy()
low_res_masks_np = low_res_masks[0].detach().cpu().numpy()
return masks_np, iou_predictions_np, low_res_masks_np
@torch.no_grad()
def predict_torch(
self,
point_coords: Optional[torch.Tensor],
point_labels: Optional[torch.Tensor],
boxes: Optional[torch.Tensor] = None,
mask_input: Optional[torch.Tensor] = None,
multimask_output: bool = True,
return_logits: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Predict masks for the given input prompts, using the currently set image.
Input prompts are batched torch tensors and are expected to already be
transformed to the input frame using ResizeLongestSide.
Arguments:
point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
model. Each point is in (X,Y) in pixels.
point_labels (torch.Tensor or None): A BxN array of labels for the
point prompts. 1 indicates a foreground point and 0 indicates a
background point.
boxes (np.ndarray or None): A Bx4 array given a box prompt to the
model, in XYXY format.
mask_input (np.ndarray): A low resolution mask input to the model, typically
coming from a previous prediction iteration. Has form Bx1xHxW, where
for SAM, H=W=256. Masks returned by a previous iteration of the
predict method do not need further transformation.
multimask_output (bool): If true, the model will return three masks.
For ambiguous input prompts (such as a single click), this will often
produce better masks than a single prediction. If only a single
mask is needed, the model's predicted quality score can be used
to select the best mask. For non-ambiguous prompts, such as multiple
input prompts, multimask_output=False can give better results.
return_logits (bool): If true, returns un-thresholded masks logits
instead of a binary mask.
Returns:
(torch.Tensor): The output masks in BxCxHxW format, where C is the
number of masks, and (H, W) is the original image size.
(torch.Tensor): An array of shape BxC containing the model's
predictions for the quality of each mask.
(torch.Tensor): An array of shape BxCxHxW, where C is the number
of masks and H=W=256. These low res logits can be passed to
a subsequent iteration as mask input.
"""
if not self.is_image_set:
raise RuntimeError("An image must be set with .set_image(...) before mask prediction.")
if point_coords is not None:
points = (point_coords, point_labels)
else:
points = None
# Embed prompts
sparse_embeddings, dense_embeddings = self.model.prompt_encoder(
points=points,
boxes=boxes,
masks=mask_input,
)
# Predict masks
low_res_masks, iou_predictions = self.model.mask_decoder(
image_embeddings=self.features,
image_pe=self.model.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
)
# Upscale the masks to the original image resolution
masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)
if not return_logits:
masks = masks > self.model.mask_threshold
return masks, iou_predictions, low_res_masks
def get_image_embedding(self) -> torch.Tensor:
"""
Returns the image embeddings for the currently set image, with
shape 1xCxHxW, where C is the embedding dimension and (H,W) are
the embedding spatial dimension of SAM (typically C=256, H=W=64).
"""
if not self.is_image_set:
raise RuntimeError(
"An image must be set with .set_image(...) to generate an embedding."
)
assert self.features is not None, "Features must exist if an image has been set."
return self.features
@property
def device(self) -> torch.device:
return self.model.device
def reset_image(self) -> None:
"""Resets the currently set image."""
self.is_image_set = False
self.features = None
self.orig_h = None
self.orig_w = None
self.input_h = None
self.input_w = None
| EXA-1-master | exa/models/segment-anything-main/segment_anything/predictor.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from functools import partial
from .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer
def build_sam_vit_h(checkpoint=None):
return _build_sam(
encoder_embed_dim=1280,
encoder_depth=32,
encoder_num_heads=16,
encoder_global_attn_indexes=[7, 15, 23, 31],
checkpoint=checkpoint,
)
build_sam = build_sam_vit_h
def build_sam_vit_l(checkpoint=None):
return _build_sam(
encoder_embed_dim=1024,
encoder_depth=24,
encoder_num_heads=16,
encoder_global_attn_indexes=[5, 11, 17, 23],
checkpoint=checkpoint,
)
def build_sam_vit_b(checkpoint=None):
return _build_sam(
encoder_embed_dim=768,
encoder_depth=12,
encoder_num_heads=12,
encoder_global_attn_indexes=[2, 5, 8, 11],
checkpoint=checkpoint,
)
sam_model_registry = {
"default": build_sam_vit_h,
"vit_h": build_sam_vit_h,
"vit_l": build_sam_vit_l,
"vit_b": build_sam_vit_b,
}
def _build_sam(
encoder_embed_dim,
encoder_depth,
encoder_num_heads,
encoder_global_attn_indexes,
checkpoint=None,
):
prompt_embed_dim = 256
image_size = 1024
vit_patch_size = 16
image_embedding_size = image_size // vit_patch_size
sam = Sam(
image_encoder=ImageEncoderViT(
depth=encoder_depth,
embed_dim=encoder_embed_dim,
img_size=image_size,
mlp_ratio=4,
norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),
num_heads=encoder_num_heads,
patch_size=vit_patch_size,
qkv_bias=True,
use_rel_pos=True,
global_attn_indexes=encoder_global_attn_indexes,
window_size=14,
out_chans=prompt_embed_dim,
),
prompt_encoder=PromptEncoder(
embed_dim=prompt_embed_dim,
image_embedding_size=(image_embedding_size, image_embedding_size),
input_image_size=(image_size, image_size),
mask_in_chans=16,
),
mask_decoder=MaskDecoder(
num_multimask_outputs=3,
transformer=TwoWayTransformer(
depth=2,
embedding_dim=prompt_embed_dim,
mlp_dim=2048,
num_heads=8,
),
transformer_dim=prompt_embed_dim,
iou_head_depth=3,
iou_head_hidden_dim=256,
),
pixel_mean=[123.675, 116.28, 103.53],
pixel_std=[58.395, 57.12, 57.375],
)
sam.eval()
if checkpoint is not None:
with open(checkpoint, "rb") as f:
state_dict = torch.load(f)
sam.load_state_dict(state_dict)
return sam
| EXA-1-master | exa/models/segment-anything-main/segment_anything/build_sam.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from torchvision.ops.boxes import batched_nms, box_area # type: ignore
from typing import Any, Dict, List, Optional, Tuple
from .modeling import Sam
from .predictor import SamPredictor
from .utils.amg import (
MaskData,
area_from_rle,
batch_iterator,
batched_mask_to_box,
box_xyxy_to_xywh,
build_all_layer_point_grids,
calculate_stability_score,
coco_encode_rle,
generate_crop_boxes,
is_box_near_crop_edge,
mask_to_rle_pytorch,
remove_small_regions,
rle_to_mask,
uncrop_boxes_xyxy,
uncrop_masks,
uncrop_points,
)
class SamAutomaticMaskGenerator:
def __init__(
self,
model: Sam,
points_per_side: Optional[int] = 32,
points_per_batch: int = 64,
pred_iou_thresh: float = 0.88,
stability_score_thresh: float = 0.95,
stability_score_offset: float = 1.0,
box_nms_thresh: float = 0.7,
crop_n_layers: int = 0,
crop_nms_thresh: float = 0.7,
crop_overlap_ratio: float = 512 / 1500,
crop_n_points_downscale_factor: int = 1,
point_grids: Optional[List[np.ndarray]] = None,
min_mask_region_area: int = 0,
output_mode: str = "binary_mask",
) -> None:
"""
Using a SAM model, generates masks for the entire image.
Generates a grid of point prompts over the image, then filters
low quality and duplicate masks. The default settings are chosen
for SAM with a ViT-H backbone.
Arguments:
model (Sam): The SAM model to use for mask prediction.
points_per_side (int or None): The number of points to be sampled
along one side of the image. The total number of points is
points_per_side**2. If None, 'point_grids' must provide explicit
point sampling.
points_per_batch (int): Sets the number of points run simultaneously
by the model. Higher numbers may be faster but use more GPU memory.
pred_iou_thresh (float): A filtering threshold in [0,1], using the
model's predicted mask quality.
stability_score_thresh (float): A filtering threshold in [0,1], using
the stability of the mask under changes to the cutoff used to binarize
the model's mask predictions.
stability_score_offset (float): The amount to shift the cutoff when
calculated the stability score.
box_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks.
crop_n_layers (int): If >0, mask prediction will be run again on
crops of the image. Sets the number of layers to run, where each
layer has 2**i_layer number of image crops.
crop_nms_thresh (float): The box IoU cutoff used by non-maximal
suppression to filter duplicate masks between different crops.
crop_overlap_ratio (float): Sets the degree to which crops overlap.
In the first crop layer, crops will overlap by this fraction of
the image length. Later layers with more crops scale down this overlap.
crop_n_points_downscale_factor (int): The number of points-per-side
sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
point_grids (list(np.ndarray) or None): A list over explicit grids
of points used for sampling, normalized to [0,1]. The nth grid in the
list is used in the nth crop layer. Exclusive with points_per_side.
min_mask_region_area (int): If >0, postprocessing will be applied
to remove disconnected regions and holes in masks with area smaller
than min_mask_region_area. Requires opencv.
output_mode (str): The form masks are returned in. Can be 'binary_mask',
'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.
For large resolutions, 'binary_mask' may consume large amounts of
memory.
"""
assert (points_per_side is None) != (
point_grids is None
), "Exactly one of points_per_side or point_grid must be provided."
if points_per_side is not None:
self.point_grids = build_all_layer_point_grids(
points_per_side,
crop_n_layers,
crop_n_points_downscale_factor,
)
elif point_grids is not None:
self.point_grids = point_grids
else:
raise ValueError("Can't have both points_per_side and point_grid be None.")
assert output_mode in [
"binary_mask",
"uncompressed_rle",
"coco_rle",
], f"Unknown output_mode {output_mode}."
if output_mode == "coco_rle":
from pycocotools import mask as mask_utils # type: ignore # noqa: F401
if min_mask_region_area > 0:
import cv2 # type: ignore # noqa: F401
self.predictor = SamPredictor(model)
self.points_per_batch = points_per_batch
self.pred_iou_thresh = pred_iou_thresh
self.stability_score_thresh = stability_score_thresh
self.stability_score_offset = stability_score_offset
self.box_nms_thresh = box_nms_thresh
self.crop_n_layers = crop_n_layers
self.crop_nms_thresh = crop_nms_thresh
self.crop_overlap_ratio = crop_overlap_ratio
self.crop_n_points_downscale_factor = crop_n_points_downscale_factor
self.min_mask_region_area = min_mask_region_area
self.output_mode = output_mode
@torch.no_grad()
def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:
"""
Generates masks for the given image.
Arguments:
image (np.ndarray): The image to generate masks for, in HWC uint8 format.
Returns:
list(dict(str, any)): A list over records for masks. Each record is
a dict containing the following keys:
segmentation (dict(str, any) or np.ndarray): The mask. If
output_mode='binary_mask', is an array of shape HW. Otherwise,
is a dictionary containing the RLE.
bbox (list(float)): The box around the mask, in XYWH format.
area (int): The area in pixels of the mask.
predicted_iou (float): The model's own prediction of the mask's
quality. This is filtered by the pred_iou_thresh parameter.
point_coords (list(list(float))): The point coordinates input
to the model to generate this mask.
stability_score (float): A measure of the mask's quality. This
is filtered on using the stability_score_thresh parameter.
crop_box (list(float)): The crop of the image used to generate
the mask, given in XYWH format.
"""
# Generate masks
mask_data = self._generate_masks(image)
# Filter small disconnected regions and holes in masks
if self.min_mask_region_area > 0:
mask_data = self.postprocess_small_regions(
mask_data,
self.min_mask_region_area,
max(self.box_nms_thresh, self.crop_nms_thresh),
)
# Encode masks
if self.output_mode == "coco_rle":
mask_data["segmentations"] = [coco_encode_rle(rle) for rle in mask_data["rles"]]
elif self.output_mode == "binary_mask":
mask_data["segmentations"] = [rle_to_mask(rle) for rle in mask_data["rles"]]
else:
mask_data["segmentations"] = mask_data["rles"]
# Write mask records
curr_anns = []
for idx in range(len(mask_data["segmentations"])):
ann = {
"segmentation": mask_data["segmentations"][idx],
"area": area_from_rle(mask_data["rles"][idx]),
"bbox": box_xyxy_to_xywh(mask_data["boxes"][idx]).tolist(),
"predicted_iou": mask_data["iou_preds"][idx].item(),
"point_coords": [mask_data["points"][idx].tolist()],
"stability_score": mask_data["stability_score"][idx].item(),
"crop_box": box_xyxy_to_xywh(mask_data["crop_boxes"][idx]).tolist(),
}
curr_anns.append(ann)
return curr_anns
def _generate_masks(self, image: np.ndarray) -> MaskData:
orig_size = image.shape[:2]
crop_boxes, layer_idxs = generate_crop_boxes(
orig_size, self.crop_n_layers, self.crop_overlap_ratio
)
# Iterate over image crops
data = MaskData()
for crop_box, layer_idx in zip(crop_boxes, layer_idxs):
crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)
data.cat(crop_data)
# Remove duplicate masks between crops
if len(crop_boxes) > 1:
# Prefer masks from smaller crops
scores = 1 / box_area(data["crop_boxes"])
scores = scores.to(data["boxes"].device)
keep_by_nms = batched_nms(
data["boxes"].float(),
scores,
torch.zeros_like(data["boxes"][:, 0]), # categories
iou_threshold=self.crop_nms_thresh,
)
data.filter(keep_by_nms)
data.to_numpy()
return data
def _process_crop(
self,
image: np.ndarray,
crop_box: List[int],
crop_layer_idx: int,
orig_size: Tuple[int, ...],
) -> MaskData:
# Crop the image and calculate embeddings
x0, y0, x1, y1 = crop_box
cropped_im = image[y0:y1, x0:x1, :]
cropped_im_size = cropped_im.shape[:2]
self.predictor.set_image(cropped_im)
# Get points for this crop
points_scale = np.array(cropped_im_size)[None, ::-1]
points_for_image = self.point_grids[crop_layer_idx] * points_scale
# Generate masks for this crop in batches
data = MaskData()
for (points,) in batch_iterator(self.points_per_batch, points_for_image):
batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size)
data.cat(batch_data)
del batch_data
self.predictor.reset_image()
# Remove duplicates within this crop.
keep_by_nms = batched_nms(
data["boxes"].float(),
data["iou_preds"],
torch.zeros_like(data["boxes"][:, 0]), # categories
iou_threshold=self.box_nms_thresh,
)
data.filter(keep_by_nms)
# Return to the original image frame
data["boxes"] = uncrop_boxes_xyxy(data["boxes"], crop_box)
data["points"] = uncrop_points(data["points"], crop_box)
data["crop_boxes"] = torch.tensor([crop_box for _ in range(len(data["rles"]))])
return data
def _process_batch(
self,
points: np.ndarray,
im_size: Tuple[int, ...],
crop_box: List[int],
orig_size: Tuple[int, ...],
) -> MaskData:
orig_h, orig_w = orig_size
# Run model on this batch
transformed_points = self.predictor.transform.apply_coords(points, im_size)
in_points = torch.as_tensor(transformed_points, device=self.predictor.device)
in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device)
masks, iou_preds, _ = self.predictor.predict_torch(
in_points[:, None, :],
in_labels[:, None],
multimask_output=True,
return_logits=True,
)
# Serialize predictions and store in MaskData
data = MaskData(
masks=masks.flatten(0, 1),
iou_preds=iou_preds.flatten(0, 1),
points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)),
)
del masks
# Filter by predicted IoU
if self.pred_iou_thresh > 0.0:
keep_mask = data["iou_preds"] > self.pred_iou_thresh
data.filter(keep_mask)
# Calculate stability score
data["stability_score"] = calculate_stability_score(
data["masks"], self.predictor.model.mask_threshold, self.stability_score_offset
)
if self.stability_score_thresh > 0.0:
keep_mask = data["stability_score"] >= self.stability_score_thresh
data.filter(keep_mask)
# Threshold masks and calculate boxes
data["masks"] = data["masks"] > self.predictor.model.mask_threshold
data["boxes"] = batched_mask_to_box(data["masks"])
# Filter boxes that touch crop boundaries
keep_mask = ~is_box_near_crop_edge(data["boxes"], crop_box, [0, 0, orig_w, orig_h])
if not torch.all(keep_mask):
data.filter(keep_mask)
# Compress to RLE
data["masks"] = uncrop_masks(data["masks"], crop_box, orig_h, orig_w)
data["rles"] = mask_to_rle_pytorch(data["masks"])
del data["masks"]
return data
@staticmethod
def postprocess_small_regions(
mask_data: MaskData, min_area: int, nms_thresh: float
) -> MaskData:
"""
Removes small disconnected regions and holes in masks, then reruns
box NMS to remove any new duplicates.
Edits mask_data in place.
Requires open-cv as a dependency.
"""
if len(mask_data["rles"]) == 0:
return mask_data
# Filter small disconnected regions and holes
new_masks = []
scores = []
for rle in mask_data["rles"]:
mask = rle_to_mask(rle)
mask, changed = remove_small_regions(mask, min_area, mode="holes")
unchanged = not changed
mask, changed = remove_small_regions(mask, min_area, mode="islands")
unchanged = unchanged and not changed
new_masks.append(torch.as_tensor(mask).unsqueeze(0))
# Give score=0 to changed masks and score=1 to unchanged masks
# so NMS will prefer ones that didn't need postprocessing
scores.append(float(unchanged))
# Recalculate boxes and remove any new duplicates
masks = torch.cat(new_masks, dim=0)
boxes = batched_mask_to_box(masks)
keep_by_nms = batched_nms(
boxes.float(),
torch.as_tensor(scores),
torch.zeros_like(boxes[:, 0]), # categories
iou_threshold=nms_thresh,
)
# Only recalculate RLEs for masks that have changed
for i_mask in keep_by_nms:
if scores[i_mask] == 0.0:
mask_torch = masks[i_mask].unsqueeze(0)
mask_data["rles"][i_mask] = mask_to_rle_pytorch(mask_torch)[0]
mask_data["boxes"][i_mask] = boxes[i_mask] # update res directly
mask_data.filter(keep_by_nms)
return mask_data
| EXA-1-master | exa/models/segment-anything-main/segment_anything/automatic_mask_generator.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .build_sam import (
build_sam,
build_sam_vit_h,
build_sam_vit_l,
build_sam_vit_b,
sam_model_registry,
)
from .predictor import SamPredictor
from .automatic_mask_generator import SamAutomaticMaskGenerator
| EXA-1-master | exa/models/segment-anything-main/segment_anything/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, Generator, ItemsView, List, Tuple
class MaskData:
"""
A structure for storing masks and their related data in batched format.
Implements basic filtering and concatenation.
"""
def __init__(self, **kwargs) -> None:
for v in kwargs.values():
assert isinstance(
v, (list, np.ndarray, torch.Tensor)
), "MaskData only supports list, numpy arrays, and torch tensors."
self._stats = dict(**kwargs)
def __setitem__(self, key: str, item: Any) -> None:
assert isinstance(
item, (list, np.ndarray, torch.Tensor)
), "MaskData only supports list, numpy arrays, and torch tensors."
self._stats[key] = item
def __delitem__(self, key: str) -> None:
del self._stats[key]
def __getitem__(self, key: str) -> Any:
return self._stats[key]
def items(self) -> ItemsView[str, Any]:
return self._stats.items()
def filter(self, keep: torch.Tensor) -> None:
for k, v in self._stats.items():
if v is None:
self._stats[k] = None
elif isinstance(v, torch.Tensor):
self._stats[k] = v[torch.as_tensor(keep, device=v.device)]
elif isinstance(v, np.ndarray):
self._stats[k] = v[keep.detach().cpu().numpy()]
elif isinstance(v, list) and keep.dtype == torch.bool:
self._stats[k] = [a for i, a in enumerate(v) if keep[i]]
elif isinstance(v, list):
self._stats[k] = [v[i] for i in keep]
else:
raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
def cat(self, new_stats: "MaskData") -> None:
for k, v in new_stats.items():
if k not in self._stats or self._stats[k] is None:
self._stats[k] = deepcopy(v)
elif isinstance(v, torch.Tensor):
self._stats[k] = torch.cat([self._stats[k], v], dim=0)
elif isinstance(v, np.ndarray):
self._stats[k] = np.concatenate([self._stats[k], v], axis=0)
elif isinstance(v, list):
self._stats[k] = self._stats[k] + deepcopy(v)
else:
raise TypeError(f"MaskData key {k} has an unsupported type {type(v)}.")
def to_numpy(self) -> None:
for k, v in self._stats.items():
if isinstance(v, torch.Tensor):
self._stats[k] = v.detach().cpu().numpy()
def is_box_near_crop_edge(
boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0
) -> torch.Tensor:
"""Filter masks at the edge of a crop, but not at the edge of the original image."""
crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)
boxes = uncrop_boxes_xyxy(boxes, crop_box).float()
near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
return torch.any(near_crop_edge, dim=1)
def box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:
box_xywh = deepcopy(box_xyxy)
box_xywh[2] = box_xywh[2] - box_xywh[0]
box_xywh[3] = box_xywh[3] - box_xywh[1]
return box_xywh
def batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:
assert len(args) > 0 and all(
len(a) == len(args[0]) for a in args
), "Batched iteration must have inputs of all the same size."
n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)
for b in range(n_batches):
yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]
def mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:
"""
Encodes masks to an uncompressed RLE, in the format expected by
pycoco tools.
"""
# Put in fortran order and flatten h,w
b, h, w = tensor.shape
tensor = tensor.permute(0, 2, 1).flatten(1)
# Compute change indices
diff = tensor[:, 1:] ^ tensor[:, :-1]
change_indices = diff.nonzero()
# Encode run length
out = []
for i in range(b):
cur_idxs = change_indices[change_indices[:, 0] == i, 1]
cur_idxs = torch.cat(
[
torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),
cur_idxs + 1,
torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),
]
)
btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
counts = [] if tensor[i, 0] == 0 else [0]
counts.extend(btw_idxs.detach().cpu().tolist())
out.append({"size": [h, w], "counts": counts})
return out
def rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
"""Compute a binary mask from an uncompressed RLE."""
h, w = rle["size"]
mask = np.empty(h * w, dtype=bool)
idx = 0
parity = False
for count in rle["counts"]:
mask[idx : idx + count] = parity
idx += count
parity ^= True
mask = mask.reshape(w, h)
return mask.transpose() # Put in C order
def area_from_rle(rle: Dict[str, Any]) -> int:
return sum(rle["counts"][1::2])
def calculate_stability_score(
masks: torch.Tensor, mask_threshold: float, threshold_offset: float
) -> torch.Tensor:
"""
Computes the stability score for a batch of masks. The stability
score is the IoU between the binary masks obtained by thresholding
the predicted mask logits at high and low values.
"""
# One mask is always contained inside the other.
# Save memory by preventing unnecessary cast to torch.int64
intersections = (
(masks > (mask_threshold + threshold_offset))
.sum(-1, dtype=torch.int16)
.sum(-1, dtype=torch.int32)
)
unions = (
(masks > (mask_threshold - threshold_offset))
.sum(-1, dtype=torch.int16)
.sum(-1, dtype=torch.int32)
)
return intersections / unions
def build_point_grid(n_per_side: int) -> np.ndarray:
"""Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
offset = 1 / (2 * n_per_side)
points_one_side = np.linspace(offset, 1 - offset, n_per_side)
points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
points_y = np.tile(points_one_side[:, None], (1, n_per_side))
points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
return points
def build_all_layer_point_grids(
n_per_side: int, n_layers: int, scale_per_layer: int
) -> List[np.ndarray]:
"""Generates point grids for all crop layers."""
points_by_layer = []
for i in range(n_layers + 1):
n_points = int(n_per_side / (scale_per_layer**i))
points_by_layer.append(build_point_grid(n_points))
return points_by_layer
def generate_crop_boxes(
im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float
) -> Tuple[List[List[int]], List[int]]:
"""
Generates a list of crop boxes of different sizes. Each layer
has (2**i)**2 boxes for the ith layer.
"""
crop_boxes, layer_idxs = [], []
im_h, im_w = im_size
short_side = min(im_h, im_w)
# Original image
crop_boxes.append([0, 0, im_w, im_h])
layer_idxs.append(0)
def crop_len(orig_len, n_crops, overlap):
return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))
for i_layer in range(n_layers):
n_crops_per_side = 2 ** (i_layer + 1)
overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))
crop_w = crop_len(im_w, n_crops_per_side, overlap)
crop_h = crop_len(im_h, n_crops_per_side, overlap)
crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]
crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]
# Crops in XYWH format
for x0, y0 in product(crop_box_x0, crop_box_y0):
box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]
crop_boxes.append(box)
layer_idxs.append(i_layer + 1)
return crop_boxes, layer_idxs
def uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
x0, y0, _, _ = crop_box
offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)
# Check if boxes has a channel dimension
if len(boxes.shape) == 3:
offset = offset.unsqueeze(1)
return boxes + offset
def uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:
x0, y0, _, _ = crop_box
offset = torch.tensor([[x0, y0]], device=points.device)
# Check if points has a channel dimension
if len(points.shape) == 3:
offset = offset.unsqueeze(1)
return points + offset
def uncrop_masks(
masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int
) -> torch.Tensor:
x0, y0, x1, y1 = crop_box
if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:
return masks
# Coordinate transform masks
pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)
pad = (x0, pad_x - x0, y0, pad_y - y0)
return torch.nn.functional.pad(masks, pad, value=0)
def remove_small_regions(
mask: np.ndarray, area_thresh: float, mode: str
) -> Tuple[np.ndarray, bool]:
"""
Removes small disconnected regions and holes in a mask. Returns the
mask and an indicator of if the mask has been modified.
"""
import cv2 # type: ignore
assert mode in ["holes", "islands"]
correct_holes = mode == "holes"
working_mask = (correct_holes ^ mask).astype(np.uint8)
n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)
sizes = stats[:, -1][1:] # Row 0 is background label
small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]
if len(small_regions) == 0:
return mask, False
fill_labels = [0] + small_regions
if not correct_holes:
fill_labels = [i for i in range(n_labels) if i not in fill_labels]
# If every region is below threshold, keep largest
if len(fill_labels) == 0:
fill_labels = [int(np.argmax(sizes)) + 1]
mask = np.isin(regions, fill_labels)
return mask, True
def coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:
from pycocotools import mask as mask_utils # type: ignore
h, w = uncompressed_rle["size"]
rle = mask_utils.frPyObjects(uncompressed_rle, h, w)
rle["counts"] = rle["counts"].decode("utf-8") # Necessary to serialize with json
return rle
def batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:
"""
Calculates boxes in XYXY format around masks. Return [0,0,0,0] for
an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.
"""
# torch.max below raises an error on empty inputs, just skip in this case
if torch.numel(masks) == 0:
return torch.zeros(*masks.shape[:-2], 4, device=masks.device)
# Normalize shape to CxHxW
shape = masks.shape
h, w = shape[-2:]
if len(shape) > 2:
masks = masks.flatten(0, -3)
else:
masks = masks.unsqueeze(0)
# Get top and bottom edges
in_height, _ = torch.max(masks, dim=-1)
in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]
bottom_edges, _ = torch.max(in_height_coords, dim=-1)
in_height_coords = in_height_coords + h * (~in_height)
top_edges, _ = torch.min(in_height_coords, dim=-1)
# Get left and right edges
in_width, _ = torch.max(masks, dim=-2)
in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]
right_edges, _ = torch.max(in_width_coords, dim=-1)
in_width_coords = in_width_coords + w * (~in_width)
left_edges, _ = torch.min(in_width_coords, dim=-1)
# If the mask is empty the right edge will be to the left of the left edge.
# Replace these boxes with [0, 0, 0, 0]
empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
out = out * (~empty_filter).unsqueeze(-1)
# Return to original shape
if len(shape) > 2:
out = out.reshape(*shape[:-2], 4)
else:
out = out[0]
return out
| EXA-1-master | exa/models/segment-anything-main/segment_anything/utils/amg.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from torch.nn import functional as F
from torchvision.transforms.functional import resize, to_pil_image # type: ignore
from copy import deepcopy
from typing import Tuple
class ResizeLongestSide:
"""
Resizes images to the longest side 'target_length', as well as provides
methods for resizing coordinates and boxes. Provides methods for
transforming both numpy array and batched torch tensors.
"""
def __init__(self, target_length: int) -> None:
self.target_length = target_length
def apply_image(self, image: np.ndarray) -> np.ndarray:
"""
Expects a numpy array with shape HxWxC in uint8 format.
"""
target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length)
return np.array(resize(to_pil_image(image), target_size))
def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
"""
Expects a numpy array of length 2 in the final dimension. Requires the
original image size in (H, W) format.
"""
old_h, old_w = original_size
new_h, new_w = self.get_preprocess_shape(
original_size[0], original_size[1], self.target_length
)
coords = deepcopy(coords).astype(float)
coords[..., 0] = coords[..., 0] * (new_w / old_w)
coords[..., 1] = coords[..., 1] * (new_h / old_h)
return coords
def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
"""
Expects a numpy array shape Bx4. Requires the original image size
in (H, W) format.
"""
boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size)
return boxes.reshape(-1, 4)
def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor:
"""
Expects batched images with shape BxCxHxW and float format. This
transformation may not exactly match apply_image. apply_image is
the transformation expected by the model.
"""
# Expects an image in BCHW format. May not exactly match apply_image.
target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length)
return F.interpolate(
image, target_size, mode="bilinear", align_corners=False, antialias=True
)
def apply_coords_torch(
self, coords: torch.Tensor, original_size: Tuple[int, ...]
) -> torch.Tensor:
"""
Expects a torch tensor with length 2 in the last dimension. Requires the
original image size in (H, W) format.
"""
old_h, old_w = original_size
new_h, new_w = self.get_preprocess_shape(
original_size[0], original_size[1], self.target_length
)
coords = deepcopy(coords).to(torch.float)
coords[..., 0] = coords[..., 0] * (new_w / old_w)
coords[..., 1] = coords[..., 1] * (new_h / old_h)
return coords
def apply_boxes_torch(
self, boxes: torch.Tensor, original_size: Tuple[int, ...]
) -> torch.Tensor:
"""
Expects a torch tensor with shape Bx4. Requires the original image
size in (H, W) format.
"""
boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size)
return boxes.reshape(-1, 4)
@staticmethod
def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[int, int]:
"""
Compute the output size given input size and target long side length.
"""
scale = long_side_length * 1.0 / max(oldh, oldw)
newh, neww = oldh * scale, oldw * scale
neww = int(neww + 0.5)
newh = int(newh + 0.5)
return (newh, neww)
| EXA-1-master | exa/models/segment-anything-main/segment_anything/utils/transforms.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from torch.nn import functional as F
from typing import Tuple
from ..modeling import Sam
from .amg import calculate_stability_score
class SamOnnxModel(nn.Module):
"""
This model should not be called directly, but is used in ONNX export.
It combines the prompt encoder, mask decoder, and mask postprocessing of Sam,
with some functions modified to enable model tracing. Also supports extra
options controlling what information. See the ONNX export script for details.
"""
def __init__(
self,
model: Sam,
return_single_mask: bool,
use_stability_score: bool = False,
return_extra_metrics: bool = False,
) -> None:
super().__init__()
self.mask_decoder = model.mask_decoder
self.model = model
self.img_size = model.image_encoder.img_size
self.return_single_mask = return_single_mask
self.use_stability_score = use_stability_score
self.stability_score_offset = 1.0
self.return_extra_metrics = return_extra_metrics
@staticmethod
def resize_longest_image_size(
input_image_size: torch.Tensor, longest_side: int
) -> torch.Tensor:
input_image_size = input_image_size.to(torch.float32)
scale = longest_side / torch.max(input_image_size)
transformed_size = scale * input_image_size
transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64)
return transformed_size
def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor:
point_coords = point_coords + 0.5
point_coords = point_coords / self.img_size
point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords)
point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding)
point_embedding = point_embedding * (point_labels != -1)
point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * (
point_labels == -1
)
for i in range(self.model.prompt_encoder.num_point_embeddings):
point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[
i
].weight * (point_labels == i)
return point_embedding
def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor:
mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask)
mask_embedding = mask_embedding + (
1 - has_mask_input
) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1)
return mask_embedding
def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor:
masks = F.interpolate(
masks,
size=(self.img_size, self.img_size),
mode="bilinear",
align_corners=False,
)
prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size).to(torch.int64)
masks = masks[..., : prepadded_size[0], : prepadded_size[1]] # type: ignore
orig_im_size = orig_im_size.to(torch.int64)
h, w = orig_im_size[0], orig_im_size[1]
masks = F.interpolate(masks, size=(h, w), mode="bilinear", align_corners=False)
return masks
def select_masks(
self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int
) -> Tuple[torch.Tensor, torch.Tensor]:
# Determine if we should return the multiclick mask or not from the number of points.
# The reweighting is used to avoid control flow.
score_reweight = torch.tensor(
[[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)]
).to(iou_preds.device)
score = iou_preds + (num_points - 2.5) * score_reweight
best_idx = torch.argmax(score, dim=1)
masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1)
iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1)
return masks, iou_preds
@torch.no_grad()
def forward(
self,
image_embeddings: torch.Tensor,
point_coords: torch.Tensor,
point_labels: torch.Tensor,
mask_input: torch.Tensor,
has_mask_input: torch.Tensor,
orig_im_size: torch.Tensor,
):
sparse_embedding = self._embed_points(point_coords, point_labels)
dense_embedding = self._embed_masks(mask_input, has_mask_input)
masks, scores = self.model.mask_decoder.predict_masks(
image_embeddings=image_embeddings,
image_pe=self.model.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embedding,
dense_prompt_embeddings=dense_embedding,
)
if self.use_stability_score:
scores = calculate_stability_score(
masks, self.model.mask_threshold, self.stability_score_offset
)
if self.return_single_mask:
masks, scores = self.select_masks(masks, scores, point_coords.shape[1])
upscaled_masks = self.mask_postprocessing(masks, orig_im_size)
if self.return_extra_metrics:
stability_scores = calculate_stability_score(
upscaled_masks, self.model.mask_threshold, self.stability_score_offset
)
areas = (upscaled_masks > self.model.mask_threshold).sum(-1).sum(-1)
return upscaled_masks, scores, stability_scores, areas, masks
return upscaled_masks, scores, masks
| EXA-1-master | exa/models/segment-anything-main/segment_anything/utils/onnx.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
| EXA-1-master | exa/models/segment-anything-main/segment_anything/utils/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .sam import Sam
from .image_encoder import ImageEncoderViT
from .mask_decoder import MaskDecoder
from .prompt_encoder import PromptEncoder
from .transformer import TwoWayTransformer
| EXA-1-master | exa/models/segment-anything-main/segment_anything/modeling/__init__.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
from typing import Type
class MLPBlock(nn.Module):
def __init__(
self,
embedding_dim: int,
mlp_dim: int,
act: Type[nn.Module] = nn.GELU,
) -> None:
super().__init__()
self.lin1 = nn.Linear(embedding_dim, mlp_dim)
self.lin2 = nn.Linear(mlp_dim, embedding_dim)
self.act = act()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.lin2(self.act(self.lin1(x)))
# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa
# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa
class LayerNorm2d(nn.Module):
def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
super().__init__()
self.weight = nn.Parameter(torch.ones(num_channels))
self.bias = nn.Parameter(torch.zeros(num_channels))
self.eps = eps
def forward(self, x: torch.Tensor) -> torch.Tensor:
u = x.mean(1, keepdim=True)
s = (x - u).pow(2).mean(1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.eps)
x = self.weight[:, None, None] * x + self.bias[:, None, None]
return x
| EXA-1-master | exa/models/segment-anything-main/segment_anything/modeling/common.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import Tensor, nn
import math
from typing import Tuple, Type
from .common import MLPBlock
class TwoWayTransformer(nn.Module):
def __init__(
self,
depth: int,
embedding_dim: int,
num_heads: int,
mlp_dim: int,
activation: Type[nn.Module] = nn.ReLU,
attention_downsample_rate: int = 2,
) -> None:
"""
A transformer decoder that attends to an input image using
queries whose positional embedding is supplied.
Args:
depth (int): number of layers in the transformer
embedding_dim (int): the channel dimension for the input embeddings
num_heads (int): the number of heads for multihead attention. Must
divide embedding_dim
mlp_dim (int): the channel dimension internal to the MLP block
activation (nn.Module): the activation to use in the MLP block
"""
super().__init__()
self.depth = depth
self.embedding_dim = embedding_dim
self.num_heads = num_heads
self.mlp_dim = mlp_dim
self.layers = nn.ModuleList()
for i in range(depth):
self.layers.append(
TwoWayAttentionBlock(
embedding_dim=embedding_dim,
num_heads=num_heads,
mlp_dim=mlp_dim,
activation=activation,
attention_downsample_rate=attention_downsample_rate,
skip_first_layer_pe=(i == 0),
)
)
self.final_attn_token_to_image = Attention(
embedding_dim, num_heads, downsample_rate=attention_downsample_rate
)
self.norm_final_attn = nn.LayerNorm(embedding_dim)
def forward(
self,
image_embedding: Tensor,
image_pe: Tensor,
point_embedding: Tensor,
) -> Tuple[Tensor, Tensor]:
"""
Args:
image_embedding (torch.Tensor): image to attend to. Should be shape
B x embedding_dim x h x w for any h and w.
image_pe (torch.Tensor): the positional encoding to add to the image. Must
have the same shape as image_embedding.
point_embedding (torch.Tensor): the embedding to add to the query points.
Must have shape B x N_points x embedding_dim for any N_points.
Returns:
torch.Tensor: the processed point_embedding
torch.Tensor: the processed image_embedding
"""
# BxCxHxW -> BxHWxC == B x N_image_tokens x C
bs, c, h, w = image_embedding.shape
image_embedding = image_embedding.flatten(2).permute(0, 2, 1)
image_pe = image_pe.flatten(2).permute(0, 2, 1)
# Prepare queries
queries = point_embedding
keys = image_embedding
# Apply transformer blocks and final layernorm
for layer in self.layers:
queries, keys = layer(
queries=queries,
keys=keys,
query_pe=point_embedding,
key_pe=image_pe,
)
# Apply the final attention layer from the points to the image
q = queries + point_embedding
k = keys + image_pe
attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
queries = queries + attn_out
queries = self.norm_final_attn(queries)
return queries, keys
class TwoWayAttentionBlock(nn.Module):
def __init__(
self,
embedding_dim: int,
num_heads: int,
mlp_dim: int = 2048,
activation: Type[nn.Module] = nn.ReLU,
attention_downsample_rate: int = 2,
skip_first_layer_pe: bool = False,
) -> None:
"""
A transformer block with four layers: (1) self-attention of sparse
inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
block on sparse inputs, and (4) cross attention of dense inputs to sparse
inputs.
Arguments:
embedding_dim (int): the channel dimension of the embeddings
num_heads (int): the number of heads in the attention layers
mlp_dim (int): the hidden dimension of the mlp block
activation (nn.Module): the activation of the mlp block
skip_first_layer_pe (bool): skip the PE on the first layer
"""
super().__init__()
self.self_attn = Attention(embedding_dim, num_heads)
self.norm1 = nn.LayerNorm(embedding_dim)
self.cross_attn_token_to_image = Attention(
embedding_dim, num_heads, downsample_rate=attention_downsample_rate
)
self.norm2 = nn.LayerNorm(embedding_dim)
self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)
self.norm3 = nn.LayerNorm(embedding_dim)
self.norm4 = nn.LayerNorm(embedding_dim)
self.cross_attn_image_to_token = Attention(
embedding_dim, num_heads, downsample_rate=attention_downsample_rate
)
self.skip_first_layer_pe = skip_first_layer_pe
def forward(
self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor
) -> Tuple[Tensor, Tensor]:
# Self attention block
if self.skip_first_layer_pe:
queries = self.self_attn(q=queries, k=queries, v=queries)
else:
q = queries + query_pe
attn_out = self.self_attn(q=q, k=q, v=queries)
queries = queries + attn_out
queries = self.norm1(queries)
# Cross attention block, tokens attending to image embedding
q = queries + query_pe
k = keys + key_pe
attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
queries = queries + attn_out
queries = self.norm2(queries)
# MLP block
mlp_out = self.mlp(queries)
queries = queries + mlp_out
queries = self.norm3(queries)
# Cross attention block, image embedding attending to tokens
q = queries + query_pe
k = keys + key_pe
attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
keys = keys + attn_out
keys = self.norm4(keys)
return queries, keys
class Attention(nn.Module):
"""
An attention layer that allows for downscaling the size of the embedding
after projection to queries, keys, and values.
"""
def __init__(
self,
embedding_dim: int,
num_heads: int,
downsample_rate: int = 1,
) -> None:
super().__init__()
self.embedding_dim = embedding_dim
self.internal_dim = embedding_dim // downsample_rate
self.num_heads = num_heads
assert self.internal_dim % num_heads == 0, "num_heads must divide embedding_dim."
self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
self.k_proj = nn.Linear(embedding_dim, self.internal_dim)
self.v_proj = nn.Linear(embedding_dim, self.internal_dim)
self.out_proj = nn.Linear(self.internal_dim, embedding_dim)
def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
b, n, c = x.shape
x = x.reshape(b, n, num_heads, c // num_heads)
return x.transpose(1, 2) # B x N_heads x N_tokens x C_per_head
def _recombine_heads(self, x: Tensor) -> Tensor:
b, n_heads, n_tokens, c_per_head = x.shape
x = x.transpose(1, 2)
return x.reshape(b, n_tokens, n_heads * c_per_head) # B x N_tokens x C
def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
# Input projections
q = self.q_proj(q)
k = self.k_proj(k)
v = self.v_proj(v)
# Separate into heads
q = self._separate_heads(q, self.num_heads)
k = self._separate_heads(k, self.num_heads)
v = self._separate_heads(v, self.num_heads)
# Attention
_, _, _, c_per_head = q.shape
attn = q @ k.permute(0, 1, 3, 2) # B x N_heads x N_tokens x N_tokens
attn = attn / math.sqrt(c_per_head)
attn = torch.softmax(attn, dim=-1)
# Get output
out = attn @ v
out = self._recombine_heads(out)
out = self.out_proj(out)
return out
| EXA-1-master | exa/models/segment-anything-main/segment_anything/modeling/transformer.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional, Tuple, Type
from .common import LayerNorm2d, MLPBlock
# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa
class ImageEncoderViT(nn.Module):
def __init__(
self,
img_size: int = 1024,
patch_size: int = 16,
in_chans: int = 3,
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
mlp_ratio: float = 4.0,
out_chans: int = 256,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_abs_pos: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
global_attn_indexes: Tuple[int, ...] = (),
) -> None:
"""
Args:
img_size (int): Input image size.
patch_size (int): Patch size.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
depth (int): Depth of ViT.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_abs_pos (bool): If True, use absolute positional embeddings.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks.
global_attn_indexes (list): Indexes for blocks using global attention.
"""
super().__init__()
self.img_size = img_size
self.patch_embed = PatchEmbed(
kernel_size=(patch_size, patch_size),
stride=(patch_size, patch_size),
in_chans=in_chans,
embed_dim=embed_dim,
)
self.pos_embed: Optional[nn.Parameter] = None
if use_abs_pos:
# Initialize absolute positional embedding with pretrain image size.
self.pos_embed = nn.Parameter(
torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)
)
self.blocks = nn.ModuleList()
for i in range(depth):
block = Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
norm_layer=norm_layer,
act_layer=act_layer,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
window_size=window_size if i not in global_attn_indexes else 0,
input_size=(img_size // patch_size, img_size // patch_size),
)
self.blocks.append(block)
self.neck = nn.Sequential(
nn.Conv2d(
embed_dim,
out_chans,
kernel_size=1,
bias=False,
),
LayerNorm2d(out_chans),
nn.Conv2d(
out_chans,
out_chans,
kernel_size=3,
padding=1,
bias=False,
),
LayerNorm2d(out_chans),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.patch_embed(x)
if self.pos_embed is not None:
x = x + self.pos_embed
for blk in self.blocks:
x = blk(x)
x = self.neck(x.permute(0, 3, 1, 2))
return x
class Block(nn.Module):
"""Transformer blocks with support of window attention and residual propagation blocks"""
def __init__(
self,
dim: int,
num_heads: int,
mlp_ratio: float = 4.0,
qkv_bias: bool = True,
norm_layer: Type[nn.Module] = nn.LayerNorm,
act_layer: Type[nn.Module] = nn.GELU,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
window_size: int = 0,
input_size: Optional[Tuple[int, int]] = None,
) -> None:
"""
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads in each ViT block.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
norm_layer (nn.Module): Normalization layer.
act_layer (nn.Module): Activation layer.
use_rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
window_size (int): Window size for window attention blocks. If it equals 0, then
use global attention.
input_size (tuple(int, int) or None): Input resolution for calculating the relative
positional parameter size.
"""
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
use_rel_pos=use_rel_pos,
rel_pos_zero_init=rel_pos_zero_init,
input_size=input_size if window_size == 0 else (window_size, window_size),
)
self.norm2 = norm_layer(dim)
self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)
self.window_size = window_size
def forward(self, x: torch.Tensor) -> torch.Tensor:
shortcut = x
x = self.norm1(x)
# Window partition
if self.window_size > 0:
H, W = x.shape[1], x.shape[2]
x, pad_hw = window_partition(x, self.window_size)
x = self.attn(x)
# Reverse window partition
if self.window_size > 0:
x = window_unpartition(x, self.window_size, pad_hw, (H, W))
x = shortcut + x
x = x + self.mlp(self.norm2(x))
return x
class Attention(nn.Module):
"""Multi-head Attention block with relative position embeddings."""
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = True,
use_rel_pos: bool = False,
rel_pos_zero_init: bool = True,
input_size: Optional[Tuple[int, int]] = None,
) -> None:
"""
Args:
dim (int): Number of input channels.
num_heads (int): Number of attention heads.
qkv_bias (bool): If True, add a learnable bias to query, key, value.
rel_pos (bool): If True, add relative positional embeddings to the attention map.
rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.
input_size (tuple(int, int) or None): Input resolution for calculating the relative
positional parameter size.
"""
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim**-0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.proj = nn.Linear(dim, dim)
self.use_rel_pos = use_rel_pos
if self.use_rel_pos:
assert (
input_size is not None
), "Input size must be provided if using relative positional encoding."
# initialize relative positional embeddings
self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))
self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, H, W, _ = x.shape
# qkv with shape (3, B, nHead, H * W, C)
qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
# q, k, v with shape (B * nHead, H * W, C)
q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)
attn = (q * self.scale) @ k.transpose(-2, -1)
if self.use_rel_pos:
attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))
attn = attn.softmax(dim=-1)
x = (attn @ v).view(B, self.num_heads, H, W, -1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)
x = self.proj(x)
return x
def window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:
"""
Partition into non-overlapping windows with padding if needed.
Args:
x (tensor): input tokens with [B, H, W, C].
window_size (int): window size.
Returns:
windows: windows after partition with [B * num_windows, window_size, window_size, C].
(Hp, Wp): padded height and width before partition
"""
B, H, W, C = x.shape
pad_h = (window_size - H % window_size) % window_size
pad_w = (window_size - W % window_size) % window_size
if pad_h > 0 or pad_w > 0:
x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))
Hp, Wp = H + pad_h, W + pad_w
x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows, (Hp, Wp)
def window_unpartition(
windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int]
) -> torch.Tensor:
"""
Window unpartition into original sequences and removing padding.
Args:
windows (tensor): input tokens with [B * num_windows, window_size, window_size, C].
window_size (int): window size.
pad_hw (Tuple): padded height and width (Hp, Wp).
hw (Tuple): original height and width (H, W) before padding.
Returns:
x: unpartitioned sequences with [B, H, W, C].
"""
Hp, Wp = pad_hw
H, W = hw
B = windows.shape[0] // (Hp * Wp // window_size // window_size)
x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)
if Hp > H or Wp > W:
x = x[:, :H, :W, :].contiguous()
return x
def get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:
"""
Get relative positional embeddings according to the relative positions of
query and key sizes.
Args:
q_size (int): size of query q.
k_size (int): size of key k.
rel_pos (Tensor): relative position embeddings (L, C).
Returns:
Extracted positional embeddings according to relative positions.
"""
max_rel_dist = int(2 * max(q_size, k_size) - 1)
# Interpolate rel pos if needed.
if rel_pos.shape[0] != max_rel_dist:
# Interpolate rel pos.
rel_pos_resized = F.interpolate(
rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),
size=max_rel_dist,
mode="linear",
)
rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)
else:
rel_pos_resized = rel_pos
# Scale the coords with short length if shapes for q and k are different.
q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)
k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)
relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)
return rel_pos_resized[relative_coords.long()]
def add_decomposed_rel_pos(
attn: torch.Tensor,
q: torch.Tensor,
rel_pos_h: torch.Tensor,
rel_pos_w: torch.Tensor,
q_size: Tuple[int, int],
k_size: Tuple[int, int],
) -> torch.Tensor:
"""
Calculate decomposed Relative Positional Embeddings from :paper:`mvitv2`.
https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py # noqa B950
Args:
attn (Tensor): attention map.
q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).
rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.
rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.
q_size (Tuple): spatial sequence size of query q with (q_h, q_w).
k_size (Tuple): spatial sequence size of key k with (k_h, k_w).
Returns:
attn (Tensor): attention map with added relative positional embeddings.
"""
q_h, q_w = q_size
k_h, k_w = k_size
Rh = get_rel_pos(q_h, k_h, rel_pos_h)
Rw = get_rel_pos(q_w, k_w, rel_pos_w)
B, _, dim = q.shape
r_q = q.reshape(B, q_h, q_w, dim)
rel_h = torch.einsum("bhwc,hkc->bhwk", r_q, Rh)
rel_w = torch.einsum("bhwc,wkc->bhwk", r_q, Rw)
attn = (
attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]
).view(B, q_h * q_w, k_h * k_w)
return attn
class PatchEmbed(nn.Module):
"""
Image to Patch Embedding.
"""
def __init__(
self,
kernel_size: Tuple[int, int] = (16, 16),
stride: Tuple[int, int] = (16, 16),
padding: Tuple[int, int] = (0, 0),
in_chans: int = 3,
embed_dim: int = 768,
) -> None:
"""
Args:
kernel_size (Tuple): kernel size of the projection layer.
stride (Tuple): stride of the projection layer.
padding (Tuple): padding size of the projection layer.
in_chans (int): Number of input image channels.
embed_dim (int): Patch embedding dimension.
"""
super().__init__()
self.proj = nn.Conv2d(
in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.proj(x)
# B C H W -> B H W C
x = x.permute(0, 2, 3, 1)
return x
| EXA-1-master | exa/models/segment-anything-main/segment_anything/modeling/image_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from torch import nn
from typing import Any, Optional, Tuple, Type
from .common import LayerNorm2d
class PromptEncoder(nn.Module):
def __init__(
self,
embed_dim: int,
image_embedding_size: Tuple[int, int],
input_image_size: Tuple[int, int],
mask_in_chans: int,
activation: Type[nn.Module] = nn.GELU,
) -> None:
"""
Encodes prompts for input to SAM's mask decoder.
Arguments:
embed_dim (int): The prompts' embedding dimension
image_embedding_size (tuple(int, int)): The spatial size of the
image embedding, as (H, W).
input_image_size (int): The padded size of the image as input
to the image encoder, as (H, W).
mask_in_chans (int): The number of hidden channels used for
encoding input masks.
activation (nn.Module): The activation to use when encoding
input masks.
"""
super().__init__()
self.embed_dim = embed_dim
self.input_image_size = input_image_size
self.image_embedding_size = image_embedding_size
self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)
self.num_point_embeddings: int = 4 # pos/neg point + 2 box corners
point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]
self.point_embeddings = nn.ModuleList(point_embeddings)
self.not_a_point_embed = nn.Embedding(1, embed_dim)
self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])
self.mask_downscaling = nn.Sequential(
nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
LayerNorm2d(mask_in_chans // 4),
activation(),
nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
LayerNorm2d(mask_in_chans),
activation(),
nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
)
self.no_mask_embed = nn.Embedding(1, embed_dim)
def get_dense_pe(self) -> torch.Tensor:
"""
Returns the positional encoding used to encode point prompts,
applied to a dense set of points the shape of the image encoding.
Returns:
torch.Tensor: Positional encoding with shape
1x(embed_dim)x(embedding_h)x(embedding_w)
"""
return self.pe_layer(self.image_embedding_size).unsqueeze(0)
def _embed_points(
self,
points: torch.Tensor,
labels: torch.Tensor,
pad: bool,
) -> torch.Tensor:
"""Embeds point prompts."""
points = points + 0.5 # Shift to center of pixel
if pad:
padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)
padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)
points = torch.cat([points, padding_point], dim=1)
labels = torch.cat([labels, padding_label], dim=1)
point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)
point_embedding[labels == -1] = 0.0
point_embedding[labels == -1] += self.not_a_point_embed.weight
point_embedding[labels == 0] += self.point_embeddings[0].weight
point_embedding[labels == 1] += self.point_embeddings[1].weight
return point_embedding
def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
"""Embeds box prompts."""
boxes = boxes + 0.5 # Shift to center of pixel
coords = boxes.reshape(-1, 2, 2)
corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)
corner_embedding[:, 0, :] += self.point_embeddings[2].weight
corner_embedding[:, 1, :] += self.point_embeddings[3].weight
return corner_embedding
def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
"""Embeds mask inputs."""
mask_embedding = self.mask_downscaling(masks)
return mask_embedding
def _get_batch_size(
self,
points: Optional[Tuple[torch.Tensor, torch.Tensor]],
boxes: Optional[torch.Tensor],
masks: Optional[torch.Tensor],
) -> int:
"""
Gets the batch size of the output given the batch size of the input prompts.
"""
if points is not None:
return points[0].shape[0]
elif boxes is not None:
return boxes.shape[0]
elif masks is not None:
return masks.shape[0]
else:
return 1
def _get_device(self) -> torch.device:
return self.point_embeddings[0].weight.device
def forward(
self,
points: Optional[Tuple[torch.Tensor, torch.Tensor]],
boxes: Optional[torch.Tensor],
masks: Optional[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Embeds different types of prompts, returning both sparse and dense
embeddings.
Arguments:
points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
and labels to embed.
boxes (torch.Tensor or none): boxes to embed
masks (torch.Tensor or none): masks to embed
Returns:
torch.Tensor: sparse embeddings for the points and boxes, with shape
BxNx(embed_dim), where N is determined by the number of input points
and boxes.
torch.Tensor: dense embeddings for the masks, in the shape
Bx(embed_dim)x(embed_H)x(embed_W)
"""
bs = self._get_batch_size(points, boxes, masks)
sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())
if points is not None:
coords, labels = points
point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))
sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)
if boxes is not None:
box_embeddings = self._embed_boxes(boxes)
sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)
if masks is not None:
dense_embeddings = self._embed_masks(masks)
else:
dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
)
return sparse_embeddings, dense_embeddings
class PositionEmbeddingRandom(nn.Module):
"""
Positional encoding using random spatial frequencies.
"""
def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
super().__init__()
if scale is None or scale <= 0.0:
scale = 1.0
self.register_buffer(
"positional_encoding_gaussian_matrix",
scale * torch.randn((2, num_pos_feats)),
)
def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
"""Positionally encode points that are normalized to [0,1]."""
# assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape
coords = 2 * coords - 1
coords = coords @ self.positional_encoding_gaussian_matrix
coords = 2 * np.pi * coords
# outputs d_1 x ... x d_n x C shape
return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)
def forward(self, size: Tuple[int, int]) -> torch.Tensor:
"""Generate positional encoding for a grid of the specified size."""
h, w = size
device: Any = self.positional_encoding_gaussian_matrix.device
grid = torch.ones((h, w), device=device, dtype=torch.float32)
y_embed = grid.cumsum(dim=0) - 0.5
x_embed = grid.cumsum(dim=1) - 0.5
y_embed = y_embed / h
x_embed = x_embed / w
pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))
return pe.permute(2, 0, 1) # C x H x W
def forward_with_coords(
self, coords_input: torch.Tensor, image_size: Tuple[int, int]
) -> torch.Tensor:
"""Positionally encode points that are not normalized to [0,1]."""
coords = coords_input.clone()
coords[:, :, 0] = coords[:, :, 0] / image_size[1]
coords[:, :, 1] = coords[:, :, 1] / image_size[0]
return self._pe_encoding(coords.to(torch.float)) # B x N x C
| EXA-1-master | exa/models/segment-anything-main/segment_anything/modeling/prompt_encoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.nn import functional as F
from typing import Any, Dict, List, Tuple
from .image_encoder import ImageEncoderViT
from .mask_decoder import MaskDecoder
from .prompt_encoder import PromptEncoder
class Sam(nn.Module):
mask_threshold: float = 0.0
image_format: str = "RGB"
def __init__(
self,
image_encoder: ImageEncoderViT,
prompt_encoder: PromptEncoder,
mask_decoder: MaskDecoder,
pixel_mean: List[float] = [123.675, 116.28, 103.53],
pixel_std: List[float] = [58.395, 57.12, 57.375],
) -> None:
"""
SAM predicts object masks from an image and input prompts.
Arguments:
image_encoder (ImageEncoderViT): The backbone used to encode the
image into image embeddings that allow for efficient mask prediction.
prompt_encoder (PromptEncoder): Encodes various types of input prompts.
mask_decoder (MaskDecoder): Predicts masks from the image embeddings
and encoded prompts.
pixel_mean (list(float)): Mean values for normalizing pixels in the input image.
pixel_std (list(float)): Std values for normalizing pixels in the input image.
"""
super().__init__()
self.image_encoder = image_encoder
self.prompt_encoder = prompt_encoder
self.mask_decoder = mask_decoder
self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
@property
def device(self) -> Any:
return self.pixel_mean.device
@torch.no_grad()
def forward(
self,
batched_input: List[Dict[str, Any]],
multimask_output: bool,
) -> List[Dict[str, torch.Tensor]]:
"""
Predicts masks end-to-end from provided images and prompts.
If prompts are not known in advance, using SamPredictor is
recommended over calling the model directly.
Arguments:
batched_input (list(dict)): A list over input images, each a
dictionary with the following keys. A prompt key can be
excluded if it is not present.
'image': The image as a torch tensor in 3xHxW format,
already transformed for input to the model.
'original_size': (tuple(int, int)) The original size of
the image before transformation, as (H, W).
'point_coords': (torch.Tensor) Batched point prompts for
this image, with shape BxNx2. Already transformed to the
input frame of the model.
'point_labels': (torch.Tensor) Batched labels for point prompts,
with shape BxN.
'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.
Already transformed to the input frame of the model.
'mask_inputs': (torch.Tensor) Batched mask inputs to the model,
in the form Bx1xHxW.
multimask_output (bool): Whether the model should predict multiple
disambiguating masks, or return a single mask.
Returns:
(list(dict)): A list over input images, where each element is
as dictionary with the following keys.
'masks': (torch.Tensor) Batched binary mask predictions,
with shape BxCxHxW, where B is the number of input prompts,
C is determined by multimask_output, and (H, W) is the
original size of the image.
'iou_predictions': (torch.Tensor) The model's predictions
of mask quality, in shape BxC.
'low_res_logits': (torch.Tensor) Low resolution logits with
shape BxCxHxW, where H=W=256. Can be passed as mask input
to subsequent iterations of prediction.
"""
input_images = torch.stack([self.preprocess(x["image"]) for x in batched_input], dim=0)
image_embeddings = self.image_encoder(input_images)
outputs = []
for image_record, curr_embedding in zip(batched_input, image_embeddings):
if "point_coords" in image_record:
points = (image_record["point_coords"], image_record["point_labels"])
else:
points = None
sparse_embeddings, dense_embeddings = self.prompt_encoder(
points=points,
boxes=image_record.get("boxes", None),
masks=image_record.get("mask_inputs", None),
)
low_res_masks, iou_predictions = self.mask_decoder(
image_embeddings=curr_embedding.unsqueeze(0),
image_pe=self.prompt_encoder.get_dense_pe(),
sparse_prompt_embeddings=sparse_embeddings,
dense_prompt_embeddings=dense_embeddings,
multimask_output=multimask_output,
)
masks = self.postprocess_masks(
low_res_masks,
input_size=image_record["image"].shape[-2:],
original_size=image_record["original_size"],
)
masks = masks > self.mask_threshold
outputs.append(
{
"masks": masks,
"iou_predictions": iou_predictions,
"low_res_logits": low_res_masks,
}
)
return outputs
def postprocess_masks(
self,
masks: torch.Tensor,
input_size: Tuple[int, ...],
original_size: Tuple[int, ...],
) -> torch.Tensor:
"""
Remove padding and upscale masks to the original image size.
Arguments:
masks (torch.Tensor): Batched masks from the mask_decoder,
in BxCxHxW format.
input_size (tuple(int, int)): The size of the image input to the
model, in (H, W) format. Used to remove padding.
original_size (tuple(int, int)): The original size of the image
before resizing for input to the model, in (H, W) format.
Returns:
(torch.Tensor): Batched masks in BxCxHxW format, where (H, W)
is given by original_size.
"""
masks = F.interpolate(
masks,
(self.image_encoder.img_size, self.image_encoder.img_size),
mode="bilinear",
align_corners=False,
)
masks = masks[..., : input_size[0], : input_size[1]]
masks = F.interpolate(masks, original_size, mode="bilinear", align_corners=False)
return masks
def preprocess(self, x: torch.Tensor) -> torch.Tensor:
"""Normalize pixel values and pad to a square input."""
# Normalize colors
x = (x - self.pixel_mean) / self.pixel_std
# Pad
h, w = x.shape[-2:]
padh = self.image_encoder.img_size - h
padw = self.image_encoder.img_size - w
x = F.pad(x, (0, padw, 0, padh))
return x
| EXA-1-master | exa/models/segment-anything-main/segment_anything/modeling/sam.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import nn
from torch.nn import functional as F
from typing import List, Tuple, Type
from .common import LayerNorm2d
class MaskDecoder(nn.Module):
def __init__(
self,
*,
transformer_dim: int,
transformer: nn.Module,
num_multimask_outputs: int = 3,
activation: Type[nn.Module] = nn.GELU,
iou_head_depth: int = 3,
iou_head_hidden_dim: int = 256,
) -> None:
"""
Predicts masks given an image and prompt embeddings, using a
transformer architecture.
Arguments:
transformer_dim (int): the channel dimension of the transformer
transformer (nn.Module): the transformer used to predict masks
num_multimask_outputs (int): the number of masks to predict
when disambiguating masks
activation (nn.Module): the type of activation to use when
upscaling masks
iou_head_depth (int): the depth of the MLP used to predict
mask quality
iou_head_hidden_dim (int): the hidden dimension of the MLP
used to predict mask quality
"""
super().__init__()
self.transformer_dim = transformer_dim
self.transformer = transformer
self.num_multimask_outputs = num_multimask_outputs
self.iou_token = nn.Embedding(1, transformer_dim)
self.num_mask_tokens = num_multimask_outputs + 1
self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)
self.output_upscaling = nn.Sequential(
nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
LayerNorm2d(transformer_dim // 4),
activation(),
nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
activation(),
)
self.output_hypernetworks_mlps = nn.ModuleList(
[
MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
for i in range(self.num_mask_tokens)
]
)
self.iou_prediction_head = MLP(
transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth
)
def forward(
self,
image_embeddings: torch.Tensor,
image_pe: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
multimask_output: bool,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Predict masks given image and prompt embeddings.
Arguments:
image_embeddings (torch.Tensor): the embeddings from the image encoder
image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
multimask_output (bool): Whether to return multiple masks or a single
mask.
Returns:
torch.Tensor: batched predicted masks
torch.Tensor: batched predictions of mask quality
"""
masks, iou_pred = self.predict_masks(
image_embeddings=image_embeddings,
image_pe=image_pe,
sparse_prompt_embeddings=sparse_prompt_embeddings,
dense_prompt_embeddings=dense_prompt_embeddings,
)
# Select the correct mask or masks for output
if multimask_output:
mask_slice = slice(1, None)
else:
mask_slice = slice(0, 1)
masks = masks[:, mask_slice, :, :]
iou_pred = iou_pred[:, mask_slice]
# Prepare output
return masks, iou_pred
def predict_masks(
self,
image_embeddings: torch.Tensor,
image_pe: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Predicts masks. See 'forward' for more details."""
# Concatenate output tokens
output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)
output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)
# Expand per-image data in batch direction to be per-mask
src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
src = src + dense_prompt_embeddings
pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
b, c, h, w = src.shape
# Run the transformer
hs, src = self.transformer(src, pos_src, tokens)
iou_token_out = hs[:, 0, :]
mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]
# Upscale mask embeddings and predict masks using the mask tokens
src = src.transpose(1, 2).view(b, c, h, w)
upscaled_embedding = self.output_upscaling(src)
hyper_in_list: List[torch.Tensor] = []
for i in range(self.num_mask_tokens):
hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
hyper_in = torch.stack(hyper_in_list, dim=1)
b, c, h, w = upscaled_embedding.shape
masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)
# Generate mask quality predictions
iou_pred = self.iou_prediction_head(iou_token_out)
return masks, iou_pred
# Lightly adapted from
# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa
class MLP(nn.Module):
def __init__(
self,
input_dim: int,
hidden_dim: int,
output_dim: int,
num_layers: int,
sigmoid_output: bool = False,
) -> None:
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(
nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
)
self.sigmoid_output = sigmoid_output
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
if self.sigmoid_output:
x = F.sigmoid(x)
return x
| EXA-1-master | exa/models/segment-anything-main/segment_anything/modeling/mask_decoder.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import cv2 # type: ignore
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry
import argparse
import json
import os
from typing import Any, Dict, List
parser = argparse.ArgumentParser(
description=(
"Runs automatic mask generation on an input image or directory of images, "
"and outputs masks as either PNGs or COCO-style RLEs. Requires open-cv, "
"as well as pycocotools if saving in RLE format."
)
)
parser.add_argument(
"--input",
type=str,
required=True,
help="Path to either a single input image or folder of images.",
)
parser.add_argument(
"--output",
type=str,
required=True,
help=(
"Path to the directory where masks will be output. Output will be either a folder "
"of PNGs per image or a single json with COCO-style masks."
),
)
parser.add_argument(
"--model-type",
type=str,
required=True,
help="The type of model to load, in ['default', 'vit_h', 'vit_l', 'vit_b']",
)
parser.add_argument(
"--checkpoint",
type=str,
required=True,
help="The path to the SAM checkpoint to use for mask generation.",
)
parser.add_argument("--device", type=str, default="cuda", help="The device to run generation on.")
parser.add_argument(
"--convert-to-rle",
action="store_true",
help=(
"Save masks as COCO RLEs in a single json instead of as a folder of PNGs. "
"Requires pycocotools."
),
)
amg_settings = parser.add_argument_group("AMG Settings")
amg_settings.add_argument(
"--points-per-side",
type=int,
default=None,
help="Generate masks by sampling a grid over the image with this many points to a side.",
)
amg_settings.add_argument(
"--points-per-batch",
type=int,
default=None,
help="How many input points to process simultaneously in one batch.",
)
amg_settings.add_argument(
"--pred-iou-thresh",
type=float,
default=None,
help="Exclude masks with a predicted score from the model that is lower than this threshold.",
)
amg_settings.add_argument(
"--stability-score-thresh",
type=float,
default=None,
help="Exclude masks with a stability score lower than this threshold.",
)
amg_settings.add_argument(
"--stability-score-offset",
type=float,
default=None,
help="Larger values perturb the mask more when measuring stability score.",
)
amg_settings.add_argument(
"--box-nms-thresh",
type=float,
default=None,
help="The overlap threshold for excluding a duplicate mask.",
)
amg_settings.add_argument(
"--crop-n-layers",
type=int,
default=None,
help=(
"If >0, mask generation is run on smaller crops of the image to generate more masks. "
"The value sets how many different scales to crop at."
),
)
amg_settings.add_argument(
"--crop-nms-thresh",
type=float,
default=None,
help="The overlap threshold for excluding duplicate masks across different crops.",
)
amg_settings.add_argument(
"--crop-overlap-ratio",
type=int,
default=None,
help="Larger numbers mean image crops will overlap more.",
)
amg_settings.add_argument(
"--crop-n-points-downscale-factor",
type=int,
default=None,
help="The number of points-per-side in each layer of crop is reduced by this factor.",
)
amg_settings.add_argument(
"--min-mask-region-area",
type=int,
default=None,
help=(
"Disconnected mask regions or holes with area smaller than this value "
"in pixels are removed by postprocessing."
),
)
def write_masks_to_folder(masks: List[Dict[str, Any]], path: str) -> None:
header = "id,area,bbox_x0,bbox_y0,bbox_w,bbox_h,point_input_x,point_input_y,predicted_iou,stability_score,crop_box_x0,crop_box_y0,crop_box_w,crop_box_h" # noqa
metadata = [header]
for i, mask_data in enumerate(masks):
mask = mask_data["segmentation"]
filename = f"{i}.png"
cv2.imwrite(os.path.join(path, filename), mask * 255)
mask_metadata = [
str(i),
str(mask_data["area"]),
*[str(x) for x in mask_data["bbox"]],
*[str(x) for x in mask_data["point_coords"][0]],
str(mask_data["predicted_iou"]),
str(mask_data["stability_score"]),
*[str(x) for x in mask_data["crop_box"]],
]
row = ",".join(mask_metadata)
metadata.append(row)
metadata_path = os.path.join(path, "metadata.csv")
with open(metadata_path, "w") as f:
f.write("\n".join(metadata))
return
def get_amg_kwargs(args):
amg_kwargs = {
"points_per_side": args.points_per_side,
"points_per_batch": args.points_per_batch,
"pred_iou_thresh": args.pred_iou_thresh,
"stability_score_thresh": args.stability_score_thresh,
"stability_score_offset": args.stability_score_offset,
"box_nms_thresh": args.box_nms_thresh,
"crop_n_layers": args.crop_n_layers,
"crop_nms_thresh": args.crop_nms_thresh,
"crop_overlap_ratio": args.crop_overlap_ratio,
"crop_n_points_downscale_factor": args.crop_n_points_downscale_factor,
"min_mask_region_area": args.min_mask_region_area,
}
amg_kwargs = {k: v for k, v in amg_kwargs.items() if v is not None}
return amg_kwargs
def main(args: argparse.Namespace) -> None:
print("Loading model...")
sam = sam_model_registry[args.model_type](checkpoint=args.checkpoint)
_ = sam.to(device=args.device)
output_mode = "coco_rle" if args.convert_to_rle else "binary_mask"
amg_kwargs = get_amg_kwargs(args)
generator = SamAutomaticMaskGenerator(sam, output_mode=output_mode, **amg_kwargs)
if not os.path.isdir(args.input):
targets = [args.input]
else:
targets = [
f for f in os.listdir(args.input) if not os.path.isdir(os.path.join(args.input, f))
]
targets = [os.path.join(args.input, f) for f in targets]
os.makedirs(args.output, exist_ok=True)
for t in targets:
print(f"Processing '{t}'...")
image = cv2.imread(t)
if image is None:
print(f"Could not load '{t}' as an image, skipping...")
continue
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
masks = generator.generate(image)
base = os.path.basename(t)
base = os.path.splitext(base)[0]
save_base = os.path.join(args.output, base)
if output_mode == "binary_mask":
os.makedirs(save_base, exist_ok=False)
write_masks_to_folder(masks, save_base)
else:
save_file = save_base + ".json"
with open(save_file, "w") as f:
json.dump(masks, f)
print("Done!")
if __name__ == "__main__":
args = parser.parse_args()
main(args)
| EXA-1-master | exa/models/segment-anything-main/scripts/amg.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from segment_anything import sam_model_registry
from segment_anything.utils.onnx import SamOnnxModel
import argparse
import warnings
try:
import onnxruntime # type: ignore
onnxruntime_exists = True
except ImportError:
onnxruntime_exists = False
parser = argparse.ArgumentParser(
description="Export the SAM prompt encoder and mask decoder to an ONNX model."
)
parser.add_argument(
"--checkpoint", type=str, required=True, help="The path to the SAM model checkpoint."
)
parser.add_argument(
"--output", type=str, required=True, help="The filename to save the ONNX model to."
)
parser.add_argument(
"--model-type",
type=str,
required=True,
help="In ['default', 'vit_h', 'vit_l', 'vit_b']. Which type of SAM model to export.",
)
parser.add_argument(
"--return-single-mask",
action="store_true",
help=(
"If true, the exported ONNX model will only return the best mask, "
"instead of returning multiple masks. For high resolution images "
"this can improve runtime when upscaling masks is expensive."
),
)
parser.add_argument(
"--opset",
type=int,
default=17,
help="The ONNX opset version to use. Must be >=11",
)
parser.add_argument(
"--quantize-out",
type=str,
default=None,
help=(
"If set, will quantize the model and save it with this name. "
"Quantization is performed with quantize_dynamic from onnxruntime.quantization.quantize."
),
)
parser.add_argument(
"--gelu-approximate",
action="store_true",
help=(
"Replace GELU operations with approximations using tanh. Useful "
"for some runtimes that have slow or unimplemented erf ops, used in GELU."
),
)
parser.add_argument(
"--use-stability-score",
action="store_true",
help=(
"Replaces the model's predicted mask quality score with the stability "
"score calculated on the low resolution masks using an offset of 1.0. "
),
)
parser.add_argument(
"--return-extra-metrics",
action="store_true",
help=(
"The model will return five results: (masks, scores, stability_scores, "
"areas, low_res_logits) instead of the usual three. This can be "
"significantly slower for high resolution outputs."
),
)
def run_export(
model_type: str,
checkpoint: str,
output: str,
opset: int,
return_single_mask: bool,
gelu_approximate: bool = False,
use_stability_score: bool = False,
return_extra_metrics=False,
):
print("Loading model...")
sam = sam_model_registry[model_type](checkpoint=checkpoint)
onnx_model = SamOnnxModel(
model=sam,
return_single_mask=return_single_mask,
use_stability_score=use_stability_score,
return_extra_metrics=return_extra_metrics,
)
if gelu_approximate:
for n, m in onnx_model.named_modules():
if isinstance(m, torch.nn.GELU):
m.approximate = "tanh"
dynamic_axes = {
"point_coords": {1: "num_points"},
"point_labels": {1: "num_points"},
}
embed_dim = sam.prompt_encoder.embed_dim
embed_size = sam.prompt_encoder.image_embedding_size
mask_input_size = [4 * x for x in embed_size]
dummy_inputs = {
"image_embeddings": torch.randn(1, embed_dim, *embed_size, dtype=torch.float),
"point_coords": torch.randint(low=0, high=1024, size=(1, 5, 2), dtype=torch.float),
"point_labels": torch.randint(low=0, high=4, size=(1, 5), dtype=torch.float),
"mask_input": torch.randn(1, 1, *mask_input_size, dtype=torch.float),
"has_mask_input": torch.tensor([1], dtype=torch.float),
"orig_im_size": torch.tensor([1500, 2250], dtype=torch.float),
}
_ = onnx_model(**dummy_inputs)
output_names = ["masks", "iou_predictions", "low_res_masks"]
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=torch.jit.TracerWarning)
warnings.filterwarnings("ignore", category=UserWarning)
with open(output, "wb") as f:
print(f"Exporting onnx model to {output}...")
torch.onnx.export(
onnx_model,
tuple(dummy_inputs.values()),
f,
export_params=True,
verbose=False,
opset_version=opset,
do_constant_folding=True,
input_names=list(dummy_inputs.keys()),
output_names=output_names,
dynamic_axes=dynamic_axes,
)
if onnxruntime_exists:
ort_inputs = {k: to_numpy(v) for k, v in dummy_inputs.items()}
# set cpu provider default
providers = ['CPUExecutionProvider']
ort_session = onnxruntime.InferenceSession(output, providers=providers)
_ = ort_session.run(None, ort_inputs)
print("Model has successfully been run with ONNXRuntime.")
def to_numpy(tensor):
return tensor.cpu().numpy()
if __name__ == "__main__":
args = parser.parse_args()
run_export(
model_type=args.model_type,
checkpoint=args.checkpoint,
output=args.output,
opset=args.opset,
return_single_mask=args.return_single_mask,
gelu_approximate=args.gelu_approximate,
use_stability_score=args.use_stability_score,
return_extra_metrics=args.return_extra_metrics,
)
if args.quantize_out is not None:
assert onnxruntime_exists, "onnxruntime is required to quantize the model."
from onnxruntime.quantization import QuantType # type: ignore
from onnxruntime.quantization.quantize import quantize_dynamic # type: ignore
print(f"Quantizing model and writing to {args.quantize_out}...")
quantize_dynamic(
model_input=args.output,
model_output=args.quantize_out,
optimize_model=True,
per_channel=False,
reduce_range=False,
weight_type=QuantType.QUInt8,
)
print("Done!")
| EXA-1-master | exa/models/segment-anything-main/scripts/export_onnx_model.py |
'''
Adapted from https://github.com/lupantech/ScienceQA
'''
from dataclasses import dataclass
from typing import List, Optional
def get_question_text(problem):
question = problem['question']
return question
def get_context_text(problem, use_caption):
txt_context = problem['hint']
img_context = problem['caption'] if use_caption else ""
context = " ".join([txt_context, img_context]).strip()
if context == "":
context = "N/A"
return context
def get_choice_text(probelm, options):
choices = probelm['choices']
choice_list = []
for i, c in enumerate(choices):
choice_list.append("({}) {}".format(options[i], c))
choice_txt = " ".join(choice_list)
#print(choice_txt)
return choice_txt
def get_origin_answer(problem, options):
return problem['choices'][problem['answer']]
def get_answer(problem, options):
return options[problem['answer']]
def get_lecture_text(problem):
# \\n: GPT-3 can generate the lecture with more tokens.
lecture = problem['lecture'].replace("\n", "\\n")
return lecture
def get_solution_text(problem):
# \\n: GPT-3 can generate the solution with more tokens
solution = problem['solution'].replace("\n", "\\n")
return solution
def create_one_example(format, question, context, choice, answer, lecture, solution, test_example=True, WithOutput = False, curr_le_data=None):
input_format, output_format = format.split("-")
## Inputs
if input_format == "CQM":
input = f"Context: {context}\nQuestion: {question}\nOptions: {choice}\n"
elif input_format == "QCM":
input = f"Question: {question}\nContext: {context}\nOptions: {choice}\n"
elif input_format == "QM":
input = f"Question: {question}\nOptions: {choice}\n"
elif input_format == "QC":
input = f"Question: {question}\nContext: {context}\n"
elif input_format == "QCMG":
if curr_le_data is not None:
input = f"Question: {question}\nContext: {context}\nOptions: {choice}\n{curr_le_data}\n"
else:
input = f"Question: {question}\nContext: {context}\nOptions: {choice}\nSolution: {lecture} {solution}\n"
elif input_format == "CQMG":
if curr_le_data is not None:
input = f"Context: {context}\nQuestion: {question}\nOptions: {choice}\n{curr_le_data}\n"
else:
input = f"Context: {context}\nQuestion: {question}\nOptions: {choice}\nSolution: {lecture} {solution}\n"
# upper bound experiment
elif input_format == "QCML":
input = f"Question: {question}\nContext: {context}\nOptions: {choice}\nBECAUSE: {lecture}\n"
elif input_format == "QCME":
input = f"Question: {question}\nContext: {context}\nOptions: {choice}\nBECAUSE: {solution}\n"
elif input_format == "QCMLE":
input = f"Question: {question}\nContext: {context}\nOptions: {choice}\nBECAUSE: {lecture} {solution}\n"
elif input_format == "QCLM":
input = f"Question: {question}\nContext: {context}\nBECAUSE: {lecture}\nOptions: {choice}\n"
elif input_format == "QCEM":
input = f"Question: {question}\nContext: {context}\nBECAUSE: {solution}\nOptions: {choice}\n"
elif input_format == "QCLEM":
input = f"Question: {question}\nContext: {context}\nBECAUSE: {lecture} {solution}\nOptions: {choice}\n"
elif input_format == "QCMA":
input = f"Question: {question}\nContext: {context}\nOptions: {choice}\nAnswer: The answer is {answer}.\n"
elif input_format == "QCA":
input = f"Question: {question}\nContext: {context}\nAnswer: The answer is {answer}. \nBECAUSE:"
# Outputs
if test_example:
if output_format == 'A':
output = "Answer:"
elif output_format == 'E':
output = "Solution:"
else:
output = "Solution:"
elif output_format == 'A':
output = f"Answer: The answer is {answer}."
elif output_format == 'AL':
output = f"Answer: The answer is {answer}. BECAUSE: {solution}"
elif output_format == 'AE':
output = f"Answer: The answer is {answer}. BECAUSE: {lecture}"
elif output_format == 'ALE':
output = f"Answer: The answer is {answer}. BECAUSE: {lecture} {solution}"
elif output_format == 'AEL':
output = f"Answer: The answer is {answer}. BECAUSE: {solution} {lecture}"
elif output_format == 'LA':
output = f"Answer: {lecture} The answer is {answer}."
elif output_format == 'EA':
output = f"Answer: {solution} The answer is {answer}."
elif output_format == 'LEA':
output = f"Answer: {lecture} {solution} The answer is {answer}."
elif output_format == 'ELA':
output = f"Answer: {solution} {lecture} The answer is {answer}."
elif output_format == 'LE':
output = f"Solution: {lecture} {solution}."
elif output_format == 'E':
output = f"Solution: {solution}"
if WithOutput:
if output.endswith("BECAUSE:"):
output = output.replace("BECAUSE:", "").strip()
if output_format == 'E':
text = input + f'Solution:'
elif output_format == 'A':
text = input + f'Answer:'
else:
text = input + f'Solution:'
text = text.replace(" ", " ").strip()
output = output.replace(" ", " ").strip()
return text, output
text = input + output
text = text.replace(" ", " ").strip()
if text.endswith("BECAUSE:"):
text = text.replace("BECAUSE:", "").strip()
return text
def build_prompt(problems, shot_qids, test_qid, args):
examples = []
# n-shot training examples
for qid in shot_qids:
question = get_question_text(problems[qid])
context = get_context_text(problems[qid], args.use_caption)
choice = get_choice_text(problems[qid], args.options)
answer = get_answer(problems[qid], args.options)
lecture = get_lecture_text(problems[qid])
solution = get_solution_text(problems[qid])
train_example = create_one_example(args.prompt_format,
question,
context,
choice,
answer,
lecture,
solution,
test_example=False)
examples.append(train_example)
# test example
question = get_question_text(problems[test_qid])
context = get_context_text(problems[test_qid], args.use_caption)
choice = get_choice_text(problems[test_qid], args.options)
answer = get_answer(problems[test_qid], args.options)
lecture = get_lecture_text(problems[test_qid])
solution = get_solution_text(problems[test_qid])
test_example = create_one_example(args.prompt_format,
question,
context,
choice,
answer,
lecture,
solution,
test_example=True)
examples.append(test_example)
# create the prompt input
prompt_input = '\n\n'.join(examples)
return prompt_input
def build_train_pair(problems, test_qid, args, curr_le_data=None):
examples = []
# test example
question = get_question_text(problems[test_qid])
context = get_context_text(problems[test_qid], args.use_caption)
choice = get_choice_text(problems[test_qid], args.options)
lecture = get_lecture_text(problems[test_qid])
solution = get_solution_text(problems[test_qid])
# answer_text = get_origin_answer(problems[test_qid], args.options)
answer_option = get_answer(problems[test_qid], args.options)
answer = "(" + answer_option + ")"
test_example, target = create_one_example(args.prompt_format,
question,
context,
choice,
answer,
lecture,
solution,
test_example=False,WithOutput = True, curr_le_data=curr_le_data)
examples.append(test_example)
target = target.replace("Answer:", "").strip()
# create the prompt input
prompt_input = '\n\n'.join(examples)
return prompt_input, target
@dataclass(frozen=True)
class InputFeatures:
"""
A single set of features of data.
Property names are the same names as the corresponding inputs to a model.
"""
input_ids: List[List[int]]
attention_mask: Optional[List[List[int]]]
token_type_ids: Optional[List[List[int]]]
le_input_ids: List[List[int]]
le_attention_mask: Optional[List[List[int]]]
le_token_type_ids: Optional[List[List[int]]]
label: Optional[int] | EXA-1-master | exa/models/mm-cot-main/utils_prompt.py |
'''
Adapted from https://github.com/lupantech/ScienceQA
'''
import re
from rouge import Rouge
from nltk.translate.bleu_score import sentence_bleu
from sentence_transformers import util
########################
## BLEU
########################
def tokenize(text):
tokens = re.split(r'\s|\.', text)
tokens = [t for t in tokens if len(t) > 0]
return tokens
def bleu_score(reference, hypothesis, gram):
reference_tokens = tokenize(reference)
hypothesis_tokens = tokenize(hypothesis)
if gram == 1:
bleu = sentence_bleu([reference_tokens], hypothesis_tokens, (1., )) # BELU-1
elif gram == 2:
bleu = sentence_bleu([reference_tokens], hypothesis_tokens, (1. / 2., 1. / 2.)) # BELU-2
elif gram == 3:
bleu = sentence_bleu([reference_tokens], hypothesis_tokens, (1. / 3., 1. / 3., 1. / 3.)) # BELU-3
elif gram == 4:
bleu = sentence_bleu([reference_tokens], hypothesis_tokens, (1. / 4., 1. / 4., 1. / 4., 1. / 4.)) # BELU-4
return bleu
def caculate_bleu(results, data, gram):
bleus = []
for qid, output in results.items():
prediction = output
target = data[qid]
target = target.strip()
if target == "":
continue
bleu = bleu_score(target, prediction, gram)
bleus.append(bleu)
avg_bleu = sum(bleus) / len(bleus)
return avg_bleu
########################
## Rouge-L
########################
def score_rouge(str1, str2):
rouge = Rouge(metrics=["rouge-l"])
scores = rouge.get_scores(str1, str2, avg=True)
rouge_l = scores['rouge-l']['f']
return rouge_l
def caculate_rouge(results, data):
rouges = []
for qid, output in results.items():
prediction = output
target = data[qid]
target = target.strip()
if prediction == "":
continue
if target == "":
continue
rouge = score_rouge(target, prediction)
rouges.append(rouge)
avg_rouge = sum(rouges) / len(rouges)
return avg_rouge
########################
## Sentence Similarity
########################
def similariry_score(str1, str2, model):
# compute embedding for both lists
embedding_1 = model.encode(str1, convert_to_tensor=True)
embedding_2 = model.encode(str2, convert_to_tensor=True)
score = util.pytorch_cos_sim(embedding_1, embedding_2).item()
return score
def caculate_similariry(results, data, model):
scores = []
for qid, output in results.items():
prediction = output
target = data[qid]
target = target.strip()
score = similariry_score(target, prediction, model)
scores.append(score)
avg_score = sum(scores) / len(scores)
return avg_score
| EXA-1-master | exa/models/mm-cot-main/evaluations.py |
'''
Adapted from https://github.com/huggingface/transformers
'''
from transformers import T5Config, T5ForConditionalGeneration
from transformers.models.t5.modeling_t5 import T5Stack, __HEAD_MASK_WARNING_MSG, T5EncoderModel
import copy
import math
import os
import warnings
from typing import Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.modeling_outputs import (
BaseModelOutput,
Seq2SeqLMOutput,
)
class T5ForMultimodalGeneration(T5ForConditionalGeneration):
_keys_to_ignore_on_load_missing = [
r"encoder.embed_tokens.weight",
r"decoder.embed_tokens.weight",
r"lm_head.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder.block.0.layer.1.EncDecAttention.relative_attention_bias.weight",
]
def __init__(self, config: T5Config, patch_size, padding_idx, save_dir):
super().__init__(config)
self.model_dim = config.d_model
self.padding_idx = padding_idx
self.out = open(os.path.join(save_dir, 'gate.txt'), 'w')
self.shared = nn.Embedding(config.vocab_size, config.d_model)
self.patch_num, self.patch_dim = patch_size
self.image_dense = nn.Linear(self.patch_dim, config.d_model)
self.mha_layer = torch.nn.MultiheadAttention(embed_dim=config.hidden_size, kdim=config.hidden_size, vdim=config.hidden_size, num_heads=1, batch_first=True)
self.gate_dense = nn.Linear(2*config.hidden_size, config.hidden_size)
self.sigmoid = nn.Sigmoid()
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
# Model parallel
self.model_parallel = False
self.device_map = None
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
image_ids=None,
attention_mask: Optional[torch.FloatTensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.BoolTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
decoder_head_mask: Optional[torch.FloatTensor] = None,
cross_attn_head_mask: Optional[torch.Tensor] = None,
encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
if head_mask is not None and decoder_head_mask is None:
if self.config.num_layers == self.config.num_decoder_layers:
warnings.warn(__HEAD_MASK_WARNING_MSG, FutureWarning)
decoder_head_mask = head_mask
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
hidden_states = encoder_outputs[0]
image_embedding = self.image_dense(image_ids)
image_att, _ = self.mha_layer(hidden_states, image_embedding, image_embedding)
merge = torch.cat([hidden_states, image_att], dim=-1)
gate = self.sigmoid(self.gate_dense(merge))
hidden_states = (1 - gate) * hidden_states + gate * image_att
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.decoder.first_device)
hidden_states = hidden_states.to(self.decoder.first_device)
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids.to(self.decoder.first_device)
if attention_mask is not None:
attention_mask = attention_mask.to(self.decoder.first_device)
if decoder_attention_mask is not None:
decoder_attention_mask = decoder_attention_mask.to(self.decoder.first_device)
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = decoder_outputs[0]
# Set device for model parallelism
if self.model_parallel:
torch.cuda.set_device(self.encoder.first_device)
self.lm_head = self.lm_head.to(self.encoder.first_device)
sequence_output = sequence_output.to(self.lm_head.weight.device)
if self.config.tie_word_embeddings:
# Rescale output before projecting on vocab
# See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586
sequence_output = sequence_output * (self.model_dim**-0.5)
lm_logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
# TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
if not return_dict:
output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs
return ((loss,) + output) if loss is not None else output
return Seq2SeqLMOutput(
loss=loss,
logits=lm_logits,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
) | EXA-1-master | exa/models/mm-cot-main/model.py |
import os
from torch.utils.data import Dataset
import os
import json
import numpy as np
import torch
from utils_prompt import *
img_shape = {
"resnet": (512, 2048),
"clip": (49, 2048),
"detr": (100, 256),
}
def load_data_std(args):
problems = json.load(open(os.path.join(args.data_root, 'scienceqa/problems.json')))
pid_splits = json.load(open(os.path.join(args.data_root, 'scienceqa/pid_splits.json')))
captions = json.load(open(args.caption_file))["captions"]
for qid in problems:
problems[qid]['caption'] = captions[qid] if qid in captions else ""
train_qids = pid_splits['%s' % (args.train_split)]
val_qids = pid_splits['%s' % (args.val_split)]
test_qids = pid_splits['%s' % (args.test_split)]
print(f"number of train problems: {len(train_qids)}\n")
print(f"number of val problems: {len(val_qids)}\n")
print(f"number of test problems: {len(test_qids)}\n")
qids = {'train': train_qids, 'val':val_qids,'test':test_qids}
return problems, qids,
def load_data_img(args):
problems = json.load(open(os.path.join(args.data_root, 'scienceqa/problems.json')))
pid_splits = json.load(open(os.path.join(args.data_root, 'scienceqa/pid_splits.json')))
captions = json.load(open(args.caption_file))["captions"]
name_maps = json.load(open('vision_features/name_map.json'))
# check
if args.img_type == "resnet":
image_features = np.load('vision_features/resnet.npy')
image_features = np.expand_dims(image_features, axis=1)
image_features = image_features.repeat(512, axis=1)
elif args.img_type == "clip":
image_features = np.load('vision_features/clip.npy')
elif args.img_type == "detr":
image_features = np.load('vision_features/detr.npy')
else:
image_features = np.load('vision_features/detr.npy')
print("img_features size: ", image_features.shape)
for qid in problems:
problems[qid]['caption'] = captions[qid] if qid in captions else ""
train_qids = pid_splits['%s' % (args.train_split)]
val_qids = pid_splits['%s' % (args.val_split)]
test_qids = pid_splits['%s' % (args.test_split)]
print(f"number of train problems: {len(train_qids)}\n")
print(f"number of val problems: {len(val_qids)}\n")
print(f"number of test problems: {len(test_qids)}\n")
qids = {'train': train_qids, 'val':val_qids,'test':test_qids}
return problems, qids, name_maps, image_features
class ScienceQADatasetStd(Dataset):
"""
Creating a custom dataset for reading the dataset and
loading it into the dataloader to pass it to the
neural network for finetuning the model
"""
def __init__(
self, problems, qids, tokenizer, source_len, target_len, args, test_le=None
):
self.tokenizer = tokenizer
self.data = {qid : problems[qid] for qid in qids}
self.source_len = source_len
self.summ_len = target_len
self.target_text = []
self.source_text = []
if test_le is not None:
test_le_data =json.load(open(test_le))["preds"]
else:
test_le_data = None
idx = 0
for qid in self.data:
if test_le_data is not None:
curr_le_data = test_le_data[idx]
idx += 1
else:
curr_le_data = None
prompt, target = build_train_pair(problems, qid, args, curr_le_data)
self.target_text.append(target)
self.source_text.append(prompt)
def __len__(self):
return len(self.target_text)
def __getitem__(self, index):
source_text = str(self.source_text[index])
target_text = str(self.target_text[index])
# cleaning data so as to ensure data is in string type
source_text = " ".join(source_text.split())
target_text = " ".join(target_text.split())
source = self.tokenizer.batch_encode_plus(
[source_text],
max_length=self.source_len,
pad_to_max_length=True,
truncation=True,
padding="max_length",
return_tensors="pt",
)
target = self.tokenizer.batch_encode_plus(
[target_text],
max_length=self.summ_len,
pad_to_max_length=True,
truncation=True,
padding="max_length",
return_tensors="pt",
)
source_ids = source["input_ids"].squeeze()
source_mask = source["attention_mask"].squeeze()
target_ids = target["input_ids"].squeeze().tolist()
return {
"input_ids": source_ids,
"attention_mask": source_mask,
"labels": target_ids,
}
class ScienceQADatasetImg(Dataset):
"""
Creating a custom dataset for reading the dataset and
loading it into the dataloader to pass it to the
neural network for finetuning the model
"""
def __init__(
self, problems, qids, name_maps, tokenizer, source_len, target_len, args, image_features, test_le=None
):
"""
Initializes a Dataset class
Args:
dataframe (pandas.DataFrame): Input dataframe
tokenizer (transformers.tokenizer): Transformers tokenizer
source_len (int): Max length of source text
target_len (int): Max length of target text
source_text (str): column name of source text
target_text (str): column name of target text
"""
self.tokenizer = tokenizer
self.data = {qid : problems[qid] for qid in qids}
self.source_len = source_len
self.summ_len = target_len
self.target_text = []
self.source_text = []
self.image_ids = []
if test_le is not None:
test_le_data =json.load(open(test_le))["preds"]
else:
test_le_data = None
idx = 0
for qid in self.data:
if test_le_data is not None:
curr_le_data = test_le_data[idx]
idx += 1
else:
curr_le_data = None
prompt, target = build_train_pair(problems, qid, args, curr_le_data)
self.target_text.append(target)
self.source_text.append(prompt)
if str(qid) in name_maps:
i_vectors = image_features[int(name_maps[str(qid)])]
self.image_ids.append(i_vectors)
else:
shape = img_shape[args.img_type]
self.image_ids.append(np.zeros(shape))
def __len__(self):
"""returns the length of dataframe"""
return len(self.target_text)
def __getitem__(self, index):
"""return the input ids, attention masks and target ids"""
source_text = str(self.source_text[index])
target_text = str(self.target_text[index])
image_ids = self.image_ids[index]
# cleaning data so as to ensure data is in string type
source_text = " ".join(source_text.split())
target_text = " ".join(target_text.split())
source = self.tokenizer.batch_encode_plus(
[source_text],
max_length=self.source_len,
pad_to_max_length=True,
truncation=True,
padding="max_length",
return_tensors="pt",
)
target = self.tokenizer.batch_encode_plus(
[target_text],
max_length=self.summ_len,
pad_to_max_length=True,
truncation=True,
padding="max_length",
return_tensors="pt",
)
source_ids = source["input_ids"].squeeze()
source_mask = source["attention_mask"].squeeze()
target_ids = target["input_ids"].squeeze().tolist()
image_ids = torch.tensor(image_ids).squeeze()
return {
"input_ids": source_ids,
"attention_mask": source_mask,
"image_ids": image_ids,
"labels": target_ids,
}
| EXA-1-master | exa/models/mm-cot-main/utils_data.py |
'''
Adapted from https://github.com/lupantech/ScienceQA
'''
import os
import json
import argparse
import warnings
import pandas as pd
from sentence_transformers import SentenceTransformer
from evaluations import caculate_bleu, caculate_rouge, caculate_similariry
warnings.filterwarnings('ignore')
def get_acc_with_contion(res_pd, key, values):
if isinstance(values, list):
total_pd = res_pd[res_pd[key].isin(values)]
else:
total_pd = res_pd[res_pd[key] == values]
correct_pd = total_pd[total_pd['true_false'] == True]
acc = "{:.2f}".format(len(correct_pd) / len(total_pd) * 100)
return acc
def get_scores(result_data, rationale_data, results_reference, data_file):
# read result file
results = result_data
num = len(results)
assert num == 4241
#print("number of questions:", num)
# read data file
sqa_data = json.load(open(data_file))
# construct pandas data
sqa_pd = pd.DataFrame(sqa_data).T
res_pd = sqa_pd[sqa_pd['split'] == 'test'] # test set
# update data
for index, row in res_pd.iterrows():
res_pd.loc[index, 'no_context'] = True if (not row['hint'] and not row['image']) else False
res_pd.loc[index, 'has_text'] = True if row['hint'] else False
res_pd.loc[index, 'has_image'] = True if row['image'] else False
res_pd.loc[index, 'has_text_image'] = True if (row['hint'] and row['image']) else False
label = row['answer']
pred = int(results[index])
res_pd.loc[index, 'pred'] = pred
res_pd.loc[index, 'true_false'] = (label == pred)
# accuracy scores
acc_average = len(res_pd[res_pd['true_false'] == True]) / num * 100
#assert result_file.split('_')[-1] == "{:.3f}.json".format(acc_average)
# rationale quality
## BLEU
bleu1 = caculate_bleu(rationale_data, results_reference, gram=1)
bleu4 = caculate_bleu(rationale_data, results_reference, gram=4)
## Rouge-L
rouge = caculate_rouge(rationale_data, results_reference)
## Similarity
model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2').cuda()
similariry = caculate_similariry(rationale_data, results_reference, model)
scores = {
"answer":{
'acc_natural':
get_acc_with_contion(res_pd, 'subject', 'natural science'),
'acc_social':
get_acc_with_contion(res_pd, 'subject', 'social science'),
'acc_language':
get_acc_with_contion(res_pd, 'subject', 'language science'),
'acc_has_text':
get_acc_with_contion(res_pd, 'has_text', True),
'acc_has_image':
get_acc_with_contion(res_pd, 'has_image', True),
'acc_no_context':
get_acc_with_contion(res_pd, 'no_context', True),
'acc_grade_1_6':
get_acc_with_contion(res_pd, 'grade', ['grade1', 'grade2', 'grade3', 'grade4', 'grade5', 'grade6']),
'acc_grade_7_12':
get_acc_with_contion(res_pd, 'grade', ['grade7', 'grade8', 'grade9', 'grade10', 'grade11', 'grade12']),
'acc_average':
"{:.2f}".format(acc_average),
},
"rationale":{
'bleu1': bleu1 * 100,
'bleu4': bleu4 * 100,
'rouge': rouge * 100,
'similariry': similariry * 100,
}
}
return scores
def print_scores(scores):
latex_output = ""
for key, score in scores.items():
print(f"{key[4:]}: \t{score}")
latex_output += f"& {score} "
latex_output += "\\\\"
print(latex_output)
| EXA-1-master | exa/models/mm-cot-main/utils_evaluate.py |
import os
import numpy as np
import torch
import os
import re
import json
import argparse
import random
from transformers import T5Tokenizer, DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer, T5ForConditionalGeneration
from model import T5ForConditionalGeneration, T5ForMultimodalGeneration
from utils_data import img_shape, load_data_std, load_data_img, ScienceQADatasetStd, ScienceQADatasetImg
from utils_prompt import *
from utils_evaluate import get_scores
from rich.table import Column, Table
from rich import box
from rich.console import Console
console = Console(record=True)
from torch import cuda
import nltk
import evaluate
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data_root', type=str, default='data')
parser.add_argument('--output_dir', type=str, default='experiments')
parser.add_argument('--model', type=str, default='allenai/unifiedqa-t5-base')
parser.add_argument('--options', type=list, default=["A", "B", "C", "D", "E"])
parser.add_argument('--epoch', type=int, default=20)
parser.add_argument('--lr', type=float, default=5e-5)
parser.add_argument('--bs', type=int, default=16)
parser.add_argument('--input_len', type=int, default=512)
parser.add_argument('--output_len', type=int, default=64)
parser.add_argument('--eval_bs', type=int, default=16)
parser.add_argument('--eval_acc', type=int, default=None, help='evaluate accumulation step')
parser.add_argument('--train_split', type=str, default='train', choices=['train', 'trainval', 'minitrain'])
parser.add_argument('--val_split', type=str, default='val', choices=['test', 'val', 'minival'])
parser.add_argument('--test_split', type=str, default='test', choices=['test', 'minitest'])
parser.add_argument('--use_generate', action='store_true', help='only for baseline to improve inference speed')
parser.add_argument('--final_eval', action='store_true', help='only evaluate the model at the final epoch')
parser.add_argument('--user_msg', type=str, default="baseline", help='experiment type in the save_dir')
parser.add_argument('--img_type', type=str, default=None, choices=['detr', 'clip', 'resnet'], help='type of image features')
parser.add_argument('--eval_le', type=str, default=None, help='generated rationale for the dev set')
parser.add_argument('--test_le', type=str, default=None, help='generated rationale for the test set')
parser.add_argument('--evaluate_dir', type=str, default=None, help='the directory of model for evaluation')
parser.add_argument('--caption_file', type=str, default='data/captions.json')
parser.add_argument('--use_caption', action='store_true', help='use image captions or not')
parser.add_argument('--prompt_format', type=str, default='QCM-A', help='prompt format template',
choices=['QCM-A', 'QCM-LE', 'QCMG-A', 'QCM-LEA', 'QCM-ALE'])
parser.add_argument('--seed', type=int, default=42, help='random seed')
args = parser.parse_args()
return args
def T5Trainer(
dataframe, args,
):
torch.manual_seed(args.seed) # pytorch random seed
np.random.seed(args.seed) # numpy random seed
torch.backends.cudnn.deterministic = True
if args.evaluate_dir is not None:
args.model = args.evaluate_dir
tokenizer = T5Tokenizer.from_pretrained(args.model)
console.log(f"""[Model]: Loading {args.model}...\n""")
console.log(f"[Data]: Reading data...\n")
problems = dataframe['problems']
qids = dataframe['qids']
train_qids = qids['train']
test_qids = qids['test']
val_qids = qids['val']
if args.evaluate_dir is not None:
save_dir = args.evaluate_dir
else:
model_name = args.model.replace("/","-")
gpu_count = torch.cuda.device_count()
save_dir = f"{args.output_dir}/{args.user_msg}_{model_name}_{args.img_type}_{args.prompt_format}_lr{args.lr}_bs{args.bs * gpu_count}_op{args.output_len}_ep{args.epoch}"
if not os.path.exists(save_dir):
os.mkdir(save_dir)
padding_idx = tokenizer._convert_token_to_id(tokenizer.pad_token)
if args.img_type is not None:
patch_size = img_shape[args.img_type]
model = T5ForMultimodalGeneration.from_pretrained(args.model, patch_size=patch_size, padding_idx=padding_idx, save_dir=save_dir)
name_maps = dataframe['name_maps']
image_features = dataframe['image_features']
train_set = ScienceQADatasetImg(
problems,
train_qids,
name_maps,
tokenizer,
args.input_len,
args.output_len,
args,
image_features,
)
eval_set = ScienceQADatasetImg(
problems,
val_qids,
name_maps,
tokenizer,
args.input_len,
args.output_len,
args,
image_features,
args.eval_le,
)
test_set = ScienceQADatasetImg(
problems,
test_qids,
name_maps,
tokenizer,
args.input_len,
args.output_len,
args,
image_features,
args.test_le,
)
else:
model = T5ForConditionalGeneration.from_pretrained(args.model)
train_set = ScienceQADatasetStd(
problems,
train_qids,
tokenizer,
args.input_len,
args.output_len,
args,
)
eval_set = ScienceQADatasetStd(
problems,
val_qids,
tokenizer,
args.input_len,
args.output_len,
args,
args.eval_le,
)
test_set = ScienceQADatasetStd(
problems,
test_qids,
tokenizer,
args.input_len,
args.output_len,
args,
args.test_le,
)
datacollator = DataCollatorForSeq2Seq(tokenizer)
print("model parameters: ", model.num_parameters())
def extract_ans(ans):
pattern = re.compile(r'The answer is \(([A-Z])\)')
res = pattern.findall(ans)
if len(res) == 1:
answer = res[0] # 'A', 'B', ...
else:
answer = "FAILED"
return answer
# accuracy for answer inference
def compute_metrics_acc(eval_preds):
if args.use_generate:
preds, targets = eval_preds
if isinstance(preds, tuple):
preds = preds[0]
else:
preds = eval_preds.predictions[0]
targets = eval_preds.label_ids
preds = preds.argmax(axis=2)
preds = tokenizer.batch_decode(preds, skip_special_tokens=True, clean_up_tokenization_spaces=True)
targets = tokenizer.batch_decode(targets, skip_special_tokens=True, clean_up_tokenization_spaces=True)
correct = 0
assert len(preds) == len(targets)
for idx, pred in enumerate(preds):
reference = targets[idx]
reference = extract_ans(reference)
extract_pred = extract_ans(pred)
best_option = extract_pred
if reference == best_option:
correct +=1
return {'accuracy': 1.0*correct/len(targets)}
# rougel for rationale generation
metric = evaluate.load("rouge")
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [label.strip() for label in labels]
preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds]
labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels]
return preds, labels
def compute_metrics_rougel(eval_preds):
if args.use_generate:
preds, targets = eval_preds
if isinstance(preds, tuple):
preds = preds[0]
else:
preds = eval_preds.predictions[0]
targets = eval_preds.label_ids
preds = preds.argmax(axis=2)
preds = tokenizer.batch_decode(preds, skip_special_tokens=True, clean_up_tokenization_spaces=True)
targets = tokenizer.batch_decode(targets, skip_special_tokens=True, clean_up_tokenization_spaces=True)
decoded_preds, decoded_labels = postprocess_text(preds, targets)
result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
result = {k: round(v * 100, 4) for k, v in result.items()}
prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
result["gen_len"] = np.mean(prediction_lens)
return result
# only use the last model for evaluation to save time
if args.final_eval:
training_args = Seq2SeqTrainingArguments(
save_dir,
do_train=True if args.evaluate_dir is None else False,
do_eval=False,
evaluation_strategy="no",
logging_strategy="steps",
save_strategy="epoch",
save_total_limit = 2,
learning_rate= args.lr,
eval_accumulation_steps=args.eval_acc,
per_device_train_batch_size=args.bs,
per_device_eval_batch_size=args.eval_bs,
weight_decay=0.01,
num_train_epochs=args.epoch,
predict_with_generate=args.use_generate,
report_to="none",
)
# evaluate at each epoch
else:
training_args = Seq2SeqTrainingArguments(
save_dir,
do_train=True if args.evaluate_dir is None else False,
do_eval=True,
evaluation_strategy="epoch",
logging_strategy="steps",
save_strategy="epoch",
save_total_limit = 2,
learning_rate= args.lr,
eval_accumulation_steps=args.eval_acc,
per_device_train_batch_size=args.bs,
per_device_eval_batch_size=args.eval_bs,
weight_decay=0.01,
num_train_epochs=args.epoch,
metric_for_best_model="accuracy" if args.prompt_format != "QCM-LE" else "rougeL",
predict_with_generate=args.use_generate,
load_best_model_at_end=True,
report_to="none",
)
trainer = Seq2SeqTrainer(
model=model,
args=training_args,
train_dataset=train_set,
eval_dataset=eval_set,
data_collator=datacollator,
tokenizer=tokenizer,
compute_metrics = compute_metrics_acc if args.prompt_format != "QCM-LE" else compute_metrics_rougel
)
if args.evaluate_dir is None:
trainer.train()
trainer.save_model(save_dir)
metrics = trainer.evaluate(eval_dataset = test_set)
trainer.log_metrics("test", metrics)
trainer.save_metrics("test", metrics)
predict_results = trainer.predict(test_dataset=test_set, max_length=args.output_len)
if trainer.is_world_process_zero():
if args.use_generate:
preds, targets = predict_results.predictions, predict_results.label_ids
else:
preds = predict_results.predictions[0]
targets = predict_results.label_ids
preds = preds.argmax(axis=2)
preds = tokenizer.batch_decode(
preds, skip_special_tokens=True, clean_up_tokenization_spaces=True
)
targets = tokenizer.batch_decode(
targets, skip_special_tokens=True, clean_up_tokenization_spaces=True
)
results_ans = {}
results_rationale = {}
results_reference = {}
num_fail = 0
for idx, qid in enumerate(test_qids):
pred = preds[int(idx)]
ref = targets[int(idx)]
extract_pred = extract_ans(pred)
if extract_pred != "FAILED":
if extract_pred in args.options:
extract_pred = args.options.index(extract_pred)
else:
extract_pred = random.choice(range(0,len(args.options)))
else:
num_fail += 1
extract_pred = random.choice(range(len(args.options))) # random choose one option
results_ans[str(qid)] = extract_pred
results_rationale[str(qid)] = pred
results_reference[str(qid)] = ref
scores = get_scores(results_ans, results_rationale, results_reference, os.path.join(args.data_root, "scienceqa/problems.json"))
preds = [pred.strip() for pred in preds]
output_data = {
"num_fail": num_fail,
"scores": scores,
"preds": preds,
"labels": targets}
output_prediction_file = os.path.join(save_dir,"predictions_ans_test.json")
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(output_data, indent=4))
# generate the rationale for the eval set
if args.prompt_format == "QCM-LE":
torch.cuda.empty_cache()
del predict_results, preds, targets
predict_results = trainer.predict(test_dataset=eval_set, max_length=args.output_len)
if trainer.is_world_process_zero():
if args.use_generate:
preds, targets = predict_results.predictions, predict_results.label_ids
else:
preds = predict_results.predictions[0]
targets = predict_results.label_ids
preds = preds.argmax(axis=2)
preds = tokenizer.batch_decode(
preds, skip_special_tokens=True, clean_up_tokenization_spaces=True
)
targets = tokenizer.batch_decode(
targets, skip_special_tokens=True, clean_up_tokenization_spaces=True
)
preds = [pred.strip() for pred in preds]
output_data = {"preds": preds,
"labels": targets}
output_prediction_file = os.path.join(save_dir,"predictions_ans_eval.json")
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(output_data, indent=4))
if __name__ == '__main__':
# training logger to log training progress
training_logger = Table(
Column("Epoch", justify="center"),
Column("Steps", justify="center"),
Column("Loss", justify="center"),
title="Training Status",
pad_edge=False,
box=box.ASCII,
)
args = parse_args()
print("args",args)
print('====Input Arguments====')
print(json.dumps(vars(args), indent=2, sort_keys=False))
random.seed(args.seed)
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
if args.img_type is not None:
problems, qids, name_maps, image_features = load_data_img(args) # probelms, test question ids, shot example ids
dataframe = {'problems':problems, 'qids':qids, 'name_maps': name_maps, 'image_features': image_features}
else:
problems, qids = load_data_std(args) # probelms, test question ids, shot example ids
dataframe = {'problems':problems, 'qids':qids}
T5Trainer(
dataframe=dataframe,
args = args
)
| EXA-1-master | exa/models/mm-cot-main/main.py |
import os
import copy
import pytorch_lightning as pl
from vlmo.config import ex
from vlmo.modules import VLMo
from vlmo.datamodules.multitask_datamodule import MTDataModule
from pytorch_lightning.plugins import environments as pl_env
from pytorch_lightning.utilities.distributed import rank_zero_info
class OMPIClusterEnvironment(pl_env.ClusterEnvironment):
def __init__(self):
super().__init__()
# def creates_children(self) -> bool:
# # return True if the cluster is managed (you don't launch processes yourself)
# assert (
# "OMPI_COMM_WORLD_LOCAL_RANK" in os.environ
# ) # this cluster is managed
# return True
@property
def creates_processes_externally(self):
return True
def world_size(self) -> int:
return int(os.environ["OMPI_COMM_WORLD_SIZE"])
def set_world_size(self, size: int):
pass
def global_rank(self) -> int:
return int(os.environ["OMPI_COMM_WORLD_RANK"])
def set_global_rank(self, rank: int):
pass
def local_rank(self) -> int:
return int(os.environ["OMPI_COMM_WORLD_LOCAL_RANK"])
def node_rank(self) -> int:
if "NODE_RANK" in os.environ:
return int(os.environ["NODE_RANK"])
else:
return 0
def master_address(self) -> str:
return os.environ["MASTER_ADDR"]
def master_port(self) -> int:
return int(os.environ["MASTER_PORT"])
def get_cluster_plugin(num_gpus=1, num_nodes=1):
if num_nodes > 1 or (
num_nodes == 1 and "OMPI_COMM_WORLD_SIZE" in os.environ
):
rank_zero_info("ClusterPlugin: using OMPI Cluster Environment")
return OMPIClusterEnvironment()
if num_gpus >= 1:
rank_zero_info("ClusterPlugin: using Lightning Cluster Environment")
return pl_env.LightningEnvironment()
return None
@ex.automain
def main(_config):
_config = copy.deepcopy(_config)
pl.seed_everything(_config["seed"])
dm = MTDataModule(_config, dist=True)
model = VLMo(_config)
exp_name = f'{_config["exp_name"]}'
os.makedirs(_config["log_dir"], exist_ok=True)
checkpoint_callback = pl.callbacks.ModelCheckpoint(
save_top_k=-1,
verbose=True,
monitor="val/the_metric",
mode="max",
save_last=True,
)
logger = pl.loggers.TensorBoardLogger(
_config["log_dir"],
name=f'{exp_name}_seed{_config["seed"]}_from_{_config["load_path"].split("/")[-1][:-5]}',
)
lr_callback = pl.callbacks.LearningRateMonitor(logging_interval="step")
callbacks = [checkpoint_callback, lr_callback]
num_gpus = (
_config["num_gpus"]
if isinstance(_config["num_gpus"], int)
else len(_config["num_gpus"])
)
grad_steps = _config["batch_size"] // (
_config["per_gpu_batchsize"] * num_gpus * _config["num_nodes"]
)
rank_zero_info("grad_steps: {}".format(grad_steps))
max_steps = _config["max_steps"] if _config["max_steps"] is not None else None
resume_ckpt = None
if _config["resume_during_training"]:
for index in range(100):
ckpt_path = os.path.join(_config["log_dir"], f'{exp_name}_seed{_config["seed"]}_from_{_config["load_path"].split("/")[-1][:-5]}', "version_{}/checkpoints/last.ckpt".format(index))
if os.path.exists(ckpt_path):
resume_ckpt = ckpt_path
rank_zero_info("resume_ckpt: {}".format(resume_ckpt))
cluster_plugin = get_cluster_plugin(
_config["num_gpus"], _config["num_nodes"]
)
plugin_list = [cluster_plugin]
rank_zero_info("plugin_list: {}".format(plugin_list))
if _config["use_sharded_training"]:
rank_zero_info("Using ddp sharded")
distributed_strategy = "ddp_sharded"
else:
distributed_strategy = "ddp"
trainer = pl.Trainer(
gpus=_config["num_gpus"],
num_nodes=_config["num_nodes"],
precision=_config["precision"],
accelerator="gpu",
strategy=distributed_strategy,
benchmark=True,
deterministic=True,
max_epochs=_config["max_epoch"] if max_steps is None else 1000,
max_steps=max_steps,
callbacks=callbacks,
logger=logger,
# prepare_data_per_node=False,
replace_sampler_ddp=False,
accumulate_grad_batches=grad_steps,
log_every_n_steps=10,
flush_logs_every_n_steps=10,
resume_from_checkpoint=resume_ckpt,
weights_summary="top",
fast_dev_run=_config["fast_dev_run"],
val_check_interval=_config["val_check_interval"],
plugins=plugin_list,
)
if _config["loss_names"]["textmlm"] > 0:
for param in model.parameters():
param.requires_grad = False
for name, param in model.named_parameters():
for key in ["text_embeddings", "token_type_embeddings", "mlp_text", "norm2_text", "mlm_score", "relative_position_bias_table", "transformer.norm"]:
if key in name:
param.requires_grad = True
for name, param in model.named_parameters():
rank_zero_info("{}\t{}".format(name, param.requires_grad))
if not _config["test_only"]:
trainer.fit(model, datamodule=dm)
else:
trainer.test(model, datamodule=dm)
| EXA-1-master | exa/models/unilm-master/vlmo/run.py |
from setuptools import setup, find_packages
setup(
name="vlmo",
packages=find_packages(
exclude=[".dfc", ".vscode", "dataset", "notebooks", "result", "scripts"]
),
version="1.0.0",
license="MIT",
description="VLMo: Unified Vision-Language Pre-Training with Mixture-of-Modality-Experts",
author="Wenhui Wang",
author_email="[email protected]",
url="https://github.com/microsoft/unilm/tree/master/vlmo",
keywords=["vision and language pretraining"],
install_requires=["torch", "pytorch_lightning"],
)
| EXA-1-master | exa/models/unilm-master/vlmo/setup.py |
from sacred import Experiment
ex = Experiment("VLMo")
def _loss_names(d):
ret = {
"itm": 0, # image-text matching loss
"itc": 0, # image-text contrastive loss
"mlm": 0, # masked language modeling loss
"textmlm": 0, # text-only masked language modeling
"vqa": 0,
"nlvr2": 0,
"irtr": 0, # retrieval task ft
}
ret.update(d)
return ret
@ex.config
def config():
exp_name = "vlmo"
seed = 1
datasets = ["coco", "vg", "sbu", "gcc"]
loss_names = _loss_names({"itm": 1, "itc": 1, "mlm": 1})
batch_size = 1024 # this is a desired batch size; pl trainer will accumulate gradients when per step batch is smaller.
# Image setting
train_transform_keys = ["square_transform_randaug"]
val_transform_keys = ["square_transform"]
image_size = 224
draw_false_image = 0
image_only = False
text_only = False
# Text Setting
vqav2_label_size = 3129
max_text_len = 40
max_text_len_of_initckpt = 196
tokenizer = "bert-base-uncased"
vocab_size = 30522
whole_word_masking = False
mlm_prob = 0.15
draw_false_text = 0
# Transformer Setting
model_arch = "vlmo_base_patch16"
drop_path_rate = 0.1
# Optimizer Setting
optim_type = "adamw"
learning_rate = 1e-4
weight_decay = 0.01
decay_power = 1
max_epoch = 100
max_steps = 200000
warmup_steps = 0.1
end_lr = 0
lr_mult = 1 # multiply lr for downstream heads
# Downstream Setting
get_recall_metric = False
get_recall_rerank_metric = False
k_test = 32
# PL Trainer Setting
resume_from = None
fast_dev_run = False
val_check_interval = 1.0
test_only = False
use_sharded_training = False
resume_during_training = False
# below params varies with the environment
data_root = ""
log_dir = "result"
per_gpu_batchsize = 4 # you should define this manually with per_gpu_batch_size=#
num_gpus = 1
num_nodes = 1
load_path = ""
num_workers = 8
precision = 16
# ----------------------- language pretraining config -----------------------
@ex.named_config
def task_textmlm_base():
exp_name = "textmlm_base"
datasets = ["wikibk"]
loss_names = _loss_names({"textmlm": 1})
batch_size = 1024
max_text_len = 196
learning_rate = 2e-4
whole_word_masking = True
train_transform_keys = ["square_transform_randaug"]
val_transform_keys = ["square_transform"]
model_arch = "vlmo_base_patch16"
@ex.named_config
def task_textmlm_base_plus():
exp_name = "textmlm_base_plus"
datasets = ["wikibk"]
loss_names = _loss_names({"textmlm": 1})
batch_size = 1024
max_text_len = 196
learning_rate = 2e-4
whole_word_masking = True
train_transform_keys = ["square_transform_randaug"]
val_transform_keys = ["square_transform"]
model_arch = "vlmo_base_plus_patch16"
# ----------------------- vision-language pretraining config -----------------------
# Named configs for "task" which define datasets, loss_names and desired batch_size, warmup_steps, epochs, and exp_name
@ex.named_config
def task_mlm_itm_itc_base():
exp_name = "mlm_itm_itc_base"
datasets = ["coco", "vg", "sbu", "gcc"]
loss_names = _loss_names({"itm": 1, "mlm": 1, "itc": 1})
batch_size = 1024
whole_word_masking = True
learning_rate = 2e-4
train_transform_keys = ["square_transform_randaug"]
val_transform_keys = ["square_transform"]
model_arch = "vlmo_base_patch16"
@ex.named_config
def task_mlm_itm_itc_base_plus():
exp_name = "mlm_itm_itc_base_plus"
datasets = ["coco", "vg", "sbu", "gcc"]
loss_names = _loss_names({"itm": 1, "mlm": 1, "itc": 1})
batch_size = 1024
whole_word_masking = True
learning_rate = 1e-4
train_transform_keys = ["square_transform_randaug"]
val_transform_keys = ["square_transform"]
model_arch = "vlmo_base_plus_patch16"
@ex.named_config
def task_mlm_itm_itc_large():
exp_name = "mlm_itm_itc_large"
datasets = ["coco", "vg", "sbu", "gcc"]
loss_names = _loss_names({"itm": 1, "mlm": 1, "itc": 1})
batch_size = 1024
whole_word_masking = True
learning_rate = 5e-5
train_transform_keys = ["square_transform_randaug"]
val_transform_keys = ["square_transform"]
model_arch = "vit_large_patch16_224"
# ----------------------- NLVR2 fine-tuning configs -----------------------
@ex.named_config
def task_finetune_nlvr2_base():
exp_name = "finetune_nlvr2_base"
datasets = ["nlvr2"]
train_transform_keys = ["square_transform_randaug"]
loss_names = _loss_names({"nlvr2": 1})
batch_size = 128
max_epoch = 10
max_steps = None
warmup_steps = 0.1
learning_rate = 5e-5
val_transform_keys = ["square_transform"]
use_sharded_training=False
model_arch = "vlmo_base_patch16"
@ex.named_config
def task_finetune_nlvr2_base_plus():
exp_name = "finetune_nlvr2_base_plus"
datasets = ["nlvr2"]
train_transform_keys = ["square_transform_randaug"]
loss_names = _loss_names({"nlvr2": 1})
batch_size = 128
max_epoch = 10
max_steps = None
warmup_steps = 0.1
learning_rate = 3e-5
drop_path_rate = 0.2
val_transform_keys = ["square_transform"]
use_sharded_training=False
model_arch = "vlmo_base_plus_patch16"
@ex.named_config
def task_finetune_nlvr2_base_image384():
exp_name = "finetune_nlvr2_base_image384"
datasets = ["nlvr2"]
train_transform_keys = ["square_transform_randaug"]
loss_names = _loss_names({"nlvr2": 1})
batch_size = 128
max_epoch = 10
max_steps = None
warmup_steps = 0.1
learning_rate = 5e-5
val_transform_keys = ["square_transform"]
image_size = 384
use_sharded_training=False
model_arch = "vlmo_base_patch16"
@ex.named_config
def task_finetune_nlvr2_base_plus_image384():
exp_name = "finetune_nlvr2_base_plus_image384"
datasets = ["nlvr2"]
train_transform_keys = ["square_transform_randaug"]
loss_names = _loss_names({"nlvr2": 1})
batch_size = 128
max_epoch = 10
max_steps = None
warmup_steps = 0.1
learning_rate = 3e-5
drop_path_rate = 0.2
val_transform_keys = ["square_transform"]
image_size = 384
use_sharded_training=False
model_arch = "vlmo_base_plus_patch16"
@ex.named_config
def task_finetune_nlvr2_large():
exp_name = "finetune_nlvr2_large"
datasets = ["nlvr2"]
train_transform_keys = ["square_transform_randaug"]
loss_names = _loss_names({"nlvr2": 1})
batch_size = 128
max_epoch = 10
max_steps = None
warmup_steps = 0.1
learning_rate = 3e-5
drop_path_rate = 0.15
val_transform_keys = ["square_transform"]
use_sharded_training=False
model_arch = "vlmo_large_patch16"
@ex.named_config
def task_finetune_nlvr2_large_image384():
exp_name = "finetune_nlvr2_large_image384"
datasets = ["nlvr2"]
train_transform_keys = ["square_transform_randaug"]
loss_names = _loss_names({"nlvr2": 1})
batch_size = 128
max_epoch = 10
max_steps = None
warmup_steps = 0.1
learning_rate = 3e-5
drop_path_rate = 0.15
val_transform_keys = ["square_transform"]
image_size = 384
use_sharded_training=False
model_arch = "vlmo_large_patch16"
# ----------------------- VQAv2 Fine-tuning configs -----------------------
@ex.named_config
def task_finetune_vqa_base_image480():
exp_name = "finetune_vqa_base_image480"
datasets = ["vqa"]
train_transform_keys = ["square_transform_randaug"]
loss_names = _loss_names({"vqa": 1})
batch_size = 128
max_epoch = 10
max_steps = None
warmup_steps = 0.1
learning_rate = 3e-5
drop_path_rate = 0.15
val_transform_keys = ["square_transform"]
lr_mult = 20
image_size = 480
use_sharded_training=False
model_arch = "vlmo_base_patch16"
@ex.named_config
def task_finetune_vqa_base_plus_image480():
exp_name = "finetune_vqa_base_plus_image480"
datasets = ["vqa"]
train_transform_keys = ["square_transform_randaug"]
loss_names = _loss_names({"vqa": 1})
batch_size = 128
max_epoch = 10
max_steps = None
warmup_steps = 0.1
learning_rate = 3e-5
drop_path_rate = 0.15
val_transform_keys = ["square_transform"]
lr_mult = 20
image_size = 480
use_sharded_training=False
model_arch = "vlmo_base_plus_patch16"
@ex.named_config
def task_finetune_vqa_large_image480():
exp_name = "finetune_vqa_large_image480"
datasets = ["vqa"]
train_transform_keys = ["square_transform_randaug"]
loss_names = _loss_names({"vqa": 1})
batch_size = 128
max_epoch = 10
max_steps = None
warmup_steps = 0.1
learning_rate = 1.5e-5
drop_path_rate = 0.15
val_transform_keys = ["square_transform"]
lr_mult = 20
image_size = 480
use_sharded_training=False
model_arch = "vlmo_large_patch16"
# ----------------------- F30K IR/TR Fine-tuning configs -----------------------
@ex.named_config
def task_finetune_irtr_f30k_base():
exp_name = "finetune_irtr_f30k_base"
datasets = ["f30k"]
train_transform_keys = ["square_transform_randaug"]
val_transform_keys = ["square_transform"]
loss_names = _loss_names({"irtr": 1.0})
batch_size = 3072
max_epoch = 50
max_steps = 1500
warmup_steps = 150
get_recall_metric = True
learning_rate = 3e-5
drop_path_rate = 0.15
use_sharded_training=False
model_arch = "vlmo_base_patch16"
@ex.named_config
def task_finetune_irtr_f30k_base_image384():
exp_name = "finetune_irtr_f30k_base_image384"
datasets = ["f30k"]
train_transform_keys = ["square_transform_randaug"]
val_transform_keys = ["square_transform"]
loss_names = _loss_names({"irtr": 1.0})
batch_size = 3072
max_epoch = 50
max_steps = 1500
warmup_steps = 150
get_recall_metric = True
learning_rate = 3e-5
drop_path_rate = 0.15
image_size = 384
use_sharded_training=False
model_arch = "vlmo_base_patch16"
@ex.named_config
def task_finetune_irtr_f30k_base_plus_image384():
exp_name = "finetune_irtr_f30k_base_plus_image384"
datasets = ["f30k"]
train_transform_keys = ["square_transform_randaug"]
val_transform_keys = ["square_transform"]
loss_names = _loss_names({"irtr": 1.0})
batch_size = 3072
max_epoch = 50
max_steps = 1500
warmup_steps = 150
get_recall_metric = True
learning_rate = 3e-5
drop_path_rate = 0.2
image_size = 384
use_sharded_training=False
model_arch = "vlmo_base_plus_patch16"
@ex.named_config
def task_finetune_irtr_f30k_large_image384():
exp_name = "finetune_irtr_f30k_large_image384"
datasets = ["f30k"]
train_transform_keys = ["square_transform_randaug"]
val_transform_keys = ["square_transform"]
loss_names = _loss_names({"irtr": 1.0})
batch_size = 3072
max_epoch = 50
max_steps = 1500
warmup_steps = 150
get_recall_metric = True
learning_rate = 2e-5
drop_path_rate = 0.2
image_size = 384
use_sharded_training=False
model_arch = "vlmo_large_patch16"
# ----------------------- COCO IR/TR Fine-tuning configs -----------------------
@ex.named_config
def task_finetune_irtr_coco_base_image384():
exp_name = "finetune_irtr_coco_base_image384"
datasets = ["coco"]
train_transform_keys = ["square_transform_randaug"]
val_transform_keys = ["square_transform"]
loss_names = _loss_names({"irtr": 1.0})
batch_size = 3072
max_epoch = 50
max_steps = 3000
warmup_steps = 300
get_recall_metric = True
learning_rate = 3e-5
drop_path_rate = 0.2
image_size = 384
use_sharded_training=False
model_arch = "vlmo_base_patch16"
@ex.named_config
def task_finetune_irtr_coco_base_plus_image384():
exp_name = "finetune_irtr_coco_base_plus_image384"
datasets = ["coco"]
train_transform_keys = ["square_transform_randaug"]
val_transform_keys = ["square_transform"]
loss_names = _loss_names({"irtr": 1.0})
batch_size = 3072
max_epoch = 50
max_steps = 3000
warmup_steps = 300
get_recall_metric = True
learning_rate = 3e-5
drop_path_rate = 0.2
image_size = 384
use_sharded_training=False
model_arch = "vlmo_base_plus_patch16"
@ex.named_config
def task_finetune_irtr_coco_large_image384():
exp_name = "finetune_irtr_coco_large_image384"
datasets = ["coco"]
train_transform_keys = ["square_transform_randaug"]
val_transform_keys = ["square_transform"]
loss_names = _loss_names({"irtr": 1.0})
batch_size = 3072
max_epoch = 50
max_steps = 3000
warmup_steps = 300
get_recall_metric = True
learning_rate = 2e-5
drop_path_rate = 0.2
image_size = 384
use_sharded_training=False
model_arch = "vlmo_large_patch16"
# ----------------------- Other configs -----------------------
# Named configs for "etc" which are orthogonal to "env" and "task", need to be added at the end
@ex.named_config
def step1_5k():
max_epoch = 100
warmup_steps = 150
max_steps = 1500
@ex.named_config
def step3k():
max_epoch = 100
warmup_steps = 300
max_steps = 3000
@ex.named_config
def step200k():
max_epoch = 200
warmup_steps = 2500
max_steps = 200000
@ex.named_config
def step500k():
max_epoch = 500
warmup_steps = 2500
max_steps = 500000 | EXA-1-master | exa/models/unilm-master/vlmo/vlmo/config.py |
EXA-1-master | exa/models/unilm-master/vlmo/vlmo/__init__.py |
|
from .base_dataset import BaseDataset
class F30KCaptionKarpathyDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
if split == "train":
names = ["f30k_caption_karpathy_train"]
elif split == "val":
names = ["f30k_caption_karpathy_val"]
elif split == "test":
names = ["f30k_caption_karpathy_test"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
def __getitem__(self, index):
return self.get_suite(index)
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datasets/f30k_caption_karpathy_dataset.py |
from .base_dataset import BaseDataset
class VisualGenomeCaptionDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
if split == "test":
split = "val"
if split == "train":
names = ["vg"]
elif split == "val":
names = []
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
def __getitem__(self, index):
return self.get_suite(index)
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datasets/vg_caption_dataset.py |
import random
import torch
import io
import pyarrow as pa
import os
from PIL import Image
from vlmo.transforms import keys_to_transforms
class BaseDataset(torch.utils.data.Dataset):
def __init__(
self,
data_dir: str,
transform_keys: list,
image_size: int,
names: list,
text_column_name: str = "",
remove_duplicate=False,
max_text_len=40,
draw_false_image=0,
draw_false_text=0,
image_only=False,
):
"""
data_dir : where dataset file *.arrow lives; existence should be guaranteed via DataModule.prepare_data
transform_keys : keys for generating augmented views of images
text_column_name : pyarrow table column name that has list of strings as elements
"""
assert len(transform_keys) >= 1
super().__init__()
self.transforms = keys_to_transforms(transform_keys, size=image_size)
self.text_column_name = text_column_name
self.names = names
self.max_text_len = max_text_len
self.draw_false_image = draw_false_image
self.draw_false_text = draw_false_text
self.image_only = image_only
self.data_dir = data_dir
if len(names) != 0:
tables = [
pa.ipc.RecordBatchFileReader(
pa.memory_map(f"{data_dir}/{name}.arrow", "r")
).read_all()
for name in names
if os.path.isfile(f"{data_dir}/{name}.arrow")
]
self.table_names = list()
for i, name in enumerate(names):
self.table_names += [name] * len(tables[i])
self.table = pa.concat_tables(tables, promote=True)
if text_column_name != "":
self.text_column_name = text_column_name
self.all_texts = self.table[text_column_name].to_pandas().tolist()
self.all_texts = (
[list(set(texts)) for texts in self.all_texts]
if remove_duplicate
else self.all_texts
)
else:
self.all_texts = list()
else:
self.all_texts = list()
self.index_mapper = dict()
if text_column_name != "" and not self.image_only:
j = 0
for i, texts in enumerate(self.all_texts):
for _j in range(len(texts)):
self.index_mapper[j] = (i, _j)
j += 1
else:
for i in range(len(self.table)):
self.index_mapper[i] = (i, None)
@property
def corpus(self):
return [text for texts in self.all_texts for text in texts]
def __len__(self):
return len(self.index_mapper)
def get_raw_image(self, index, image_key="image"):
index, caption_index = self.index_mapper[index]
image_bytes = io.BytesIO(self.table[image_key][index].as_py())
image_bytes.seek(0)
return Image.open(image_bytes).convert("RGB")
def get_image(self, index, image_key="image"):
image = self.get_raw_image(index, image_key=image_key)
image_tensor = [tr(image) for tr in self.transforms]
return {
"image": image_tensor,
"img_index": self.index_mapper[index][0],
"cap_index": self.index_mapper[index][1],
"raw_index": index,
}
def get_false_image(self, rep, image_key="image"):
random_index = random.randint(0, len(self.index_mapper) - 1)
image = self.get_raw_image(random_index, image_key=image_key)
image_tensor = [tr(image) for tr in self.transforms]
return {f"false_image_{rep}": image_tensor}
def get_text(self, raw_index):
index, caption_index = self.index_mapper[raw_index]
text = self.all_texts[index][caption_index]
encoding = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {
"text": (text, encoding),
"img_index": index,
"cap_index": caption_index,
"raw_index": raw_index,
}
def get_false_text(self, rep):
random_index = random.randint(0, len(self.index_mapper) - 1)
index, caption_index = self.index_mapper[random_index]
text = self.all_texts[index][caption_index]
encoding = self.tokenizer(
text,
truncation=True,
max_length=self.max_text_len,
return_special_tokens_mask=True,
)
return {f"false_text_{rep}": (text, encoding)}
def get_suite(self, index):
result = None
while result is None:
try:
ret = dict()
ret.update(self.get_image(index))
if not self.image_only:
txt = self.get_text(index)
ret.update({"replica": True if txt["cap_index"] > 0 else False})
ret.update(txt)
for i in range(self.draw_false_image):
ret.update(self.get_false_image(i))
for i in range(self.draw_false_text):
ret.update(self.get_false_text(i))
result = True
except Exception as e:
print(f"Error while read file idx {index} in {self.names[0]} -> {e}")
index = random.randint(0, len(self.index_mapper) - 1)
return ret
def get_text_suite(self, index):
result = None
while result is None:
try:
ret = dict()
txt = self.get_text(index)
ret.update({"replica": True if txt["cap_index"] > 0 else False})
ret.update(txt)
result = True
except Exception as e:
print(f"Error while read file idx {index} in {self.names[0]} -> {e}")
index = random.randint(0, len(self.index_mapper) - 1)
return ret
def collate(self, batch, mlm_collator):
batch_size = len(batch)
keys = set([key for b in batch for key in b.keys()])
dict_batch = {k: [dic[k] if k in dic else None for dic in batch] for k in keys}
img_keys = [k for k in list(dict_batch.keys()) if "image" in k]
for img_key in img_keys:
new_imgs = [tmp_img[0] for tmp_img in dict_batch[img_key]]
batch_new_imgs = torch.stack(new_imgs, dim=0)
dict_batch[img_key] = [batch_new_imgs]
txt_keys = [k for k in list(dict_batch.keys()) if "text" in k]
if len(txt_keys) != 0:
texts = [[d[0] for d in dict_batch[txt_key]] for txt_key in txt_keys]
encodings = [[d[1] for d in dict_batch[txt_key]] for txt_key in txt_keys]
draw_text_len = len(encodings)
flatten_encodings = [e for encoding in encodings for e in encoding]
flatten_mlms = mlm_collator(flatten_encodings)
for i, txt_key in enumerate(txt_keys):
texts, encodings = (
[d[0] for d in dict_batch[txt_key]],
[d[1] for d in dict_batch[txt_key]],
)
mlm_ids, mlm_labels = (
flatten_mlms["input_ids"][batch_size * (i) : batch_size * (i + 1)],
flatten_mlms["labels"][batch_size * (i) : batch_size * (i + 1)],
)
input_ids = torch.zeros_like(mlm_ids)
attention_mask = torch.zeros_like(mlm_ids)
for _i, encoding in enumerate(encodings):
_input_ids, _attention_mask = (
torch.tensor(encoding["input_ids"]),
torch.tensor(encoding["attention_mask"]),
)
input_ids[_i, : len(_input_ids)] = _input_ids
attention_mask[_i, : len(_attention_mask)] = _attention_mask
dict_batch[txt_key] = texts
dict_batch[f"{txt_key}_ids"] = input_ids
dict_batch[f"{txt_key}_labels"] = torch.full_like(input_ids, -100)
dict_batch[f"{txt_key}_ids_mlm"] = mlm_ids
dict_batch[f"{txt_key}_labels_mlm"] = mlm_labels
dict_batch[f"{txt_key}_masks"] = attention_mask
return dict_batch
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datasets/base_dataset.py |
from .base_dataset import BaseDataset
class CocoCaptionKarpathyDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if split == "train":
names = ["coco_caption_karpathy_train", "coco_caption_karpathy_restval"]
elif split == "val":
names = ["coco_caption_karpathy_val"]
elif split == "test":
names = ["coco_caption_karpathy_test"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
def __getitem__(self, index):
suite = self.get_suite(index)
if "test" in self.split:
_index, _question_index = self.index_mapper[index]
iid = self.table["image_id"][_index].as_py()
iid = int(iid.split(".")[0].split("_")[-1])
suite.update({"iid": iid})
return suite
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datasets/coco_caption_karpathy_dataset.py |
from glob import glob
from .base_dataset import BaseDataset
class WikibkDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
if split == "test":
split = "val"
if split == "train":
names = [f"wikibk_train_{i}" for i in range(50)]
elif split == "val":
names = ["wikibk_val_0"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
def __getitem__(self, index):
return self.get_text_suite(index)
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datasets/wikibk_dataset.py |
from .vg_caption_dataset import VisualGenomeCaptionDataset
from .coco_caption_karpathy_dataset import CocoCaptionKarpathyDataset
from .f30k_caption_karpathy_dataset import F30KCaptionKarpathyDataset
from .conceptual_caption_dataset import ConceptualCaptionDataset
from .sbu_caption_dataset import SBUCaptionDataset
from .wikibk_dataset import WikibkDataset
from .vqav2_dataset import VQAv2Dataset
from .nlvr2_dataset import NLVR2Dataset
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datasets/__init__.py |
from glob import glob
from .base_dataset import BaseDataset
class SBUCaptionDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
if split == "test":
split = "val"
if split == "train":
names = [f"sbu_{i}" for i in range(9)]
elif split == "val":
names = []
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
def __getitem__(self, index):
return self.get_suite(index)
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datasets/sbu_caption_dataset.py |
from .base_dataset import BaseDataset
import sys
import random
class NLVR2Dataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if split == "train":
names = ["nlvr2_train"]
elif split == "val":
names = ["nlvr2_dev", "nlvr2_test1"]
elif split == "test":
names = ["nlvr2_dev", "nlvr2_test1"]
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="questions",
remove_duplicate=False,
)
def __getitem__(self, index):
result = None
while result is None:
try:
image_tensor_0 = self.get_image(index, image_key="image_0")["image"]
image_tensor_1 = self.get_image(index, image_key="image_1")["image"]
text = self.get_text(index)["text"]
result = True
except:
print(
f"error while read file idx {index} in {self.names[0]}",
file=sys.stderr,
)
index = random.randint(0, len(self.index_mapper) - 1)
index, question_index = self.index_mapper[index]
answers = self.table["answers"][index][question_index].as_py()
answers = answers == "True"
return {
"image_0": image_tensor_0,
"image_1": image_tensor_1,
"text": text,
"answers": answers,
"table_name": self.table_names[index],
}
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datasets/nlvr2_dataset.py |
from .base_dataset import BaseDataset
class VQAv2Dataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
self.split = split
if split == "train":
names = ["vqav2_train", "vqav2_trainable_val"]
elif split == "val":
names = ["vqav2_rest_val"]
elif split == "test":
names = ["vqav2_test"] # vqav2_test-dev for test-dev
super().__init__(
*args,
**kwargs,
names=names,
text_column_name="questions",
remove_duplicate=False,
)
def __getitem__(self, index):
image_tensor = self.get_image(index)["image"]
text = self.get_text(index)["text"]
index, question_index = self.index_mapper[index]
qid = self.table["question_id"][index][question_index].as_py()
if self.split != "test":
answers = self.table["answers"][index][question_index].as_py()
labels = self.table["answer_labels"][index][question_index].as_py()
scores = self.table["answer_scores"][index][question_index].as_py()
else:
answers = list()
labels = list()
scores = list()
return {
"image": image_tensor,
"text": text,
"vqa_answer": answers,
"vqa_labels": labels,
"vqa_scores": scores,
"qid": qid,
}
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datasets/vqav2_dataset.py |
from glob import glob
from .base_dataset import BaseDataset
class ConceptualCaptionDataset(BaseDataset):
def __init__(self, *args, split="", **kwargs):
assert split in ["train", "val", "test"]
if split == "test":
split = "val"
if split == "train":
names = [f"conceptual_caption_train_{i}" for i in range(30)]
elif split == "val":
names = ["conceptual_caption_val_0"]
super().__init__(*args, **kwargs, names=names, text_column_name="caption")
def __getitem__(self, index):
return self.get_suite(index)
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datasets/conceptual_caption_dataset.py |
from vlmo.datasets import NLVR2Dataset
from .datamodule_base import BaseDataModule
class NLVR2DataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return NLVR2Dataset
@property
def dataset_name(self):
return "nlvr2"
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datamodules/nlvr2_datamodule.py |
import functools
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from torch.utils.data.dataset import ConcatDataset
from torch.utils.data.distributed import DistributedSampler
from . import _datamodules
class MTDataModule(LightningDataModule):
def __init__(self, _config, dist=False):
datamodule_keys = _config["datasets"]
assert len(datamodule_keys) > 0
super().__init__()
self.dm_keys = datamodule_keys
self.dm_dicts = {key: _datamodules[key](_config) for key in datamodule_keys}
self.dms = [v for k, v in self.dm_dicts.items()]
self.batch_size = self.dms[0].batch_size
self.vocab_size = self.dms[0].vocab_size
self.num_workers = self.dms[0].num_workers
self.dist = dist
def prepare_data(self):
for dm in self.dms:
dm.prepare_data()
def setup(self, stage):
for dm in self.dms:
dm.setup(stage)
self.train_dataset = ConcatDataset([dm.train_dataset for dm in self.dms])
self.val_dataset = ConcatDataset([dm.val_dataset for dm in self.dms])
self.test_dataset = ConcatDataset([dm.test_dataset for dm in self.dms])
self.tokenizer = self.dms[0].tokenizer
self.collate = functools.partial(
self.dms[0].train_dataset.collate, mlm_collator=self.dms[0].mlm_collator,
)
if self.dist:
self.train_sampler = DistributedSampler(self.train_dataset, shuffle=True)
self.val_sampler = DistributedSampler(self.val_dataset, shuffle=True)
self.test_sampler = DistributedSampler(self.test_dataset, shuffle=False)
else:
self.train_sampler = None
self.val_sampler = None
self.test_sampler = None
def train_dataloader(self):
loader = DataLoader(
self.train_dataset,
batch_size=self.batch_size,
sampler=self.train_sampler,
num_workers=self.num_workers,
collate_fn=self.collate,
)
return loader
def val_dataloader(self, batch_size=None):
loader = DataLoader(
self.val_dataset,
batch_size=batch_size if batch_size is not None else self.batch_size,
sampler=self.val_sampler,
num_workers=self.num_workers,
collate_fn=self.collate,
)
return loader
def test_dataloader(self):
loader = DataLoader(
self.test_dataset,
batch_size=self.batch_size,
sampler=self.test_sampler,
num_workers=self.num_workers,
collate_fn=self.collate,
)
return loader
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datamodules/multitask_datamodule.py |
import torch
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader
from transformers import (
DataCollatorForLanguageModeling,
DataCollatorForWholeWordMask,
BertTokenizer,
)
def get_pretrained_tokenizer(from_pretrained):
if torch.distributed.is_initialized():
if torch.distributed.get_rank() == 0:
BertTokenizer.from_pretrained(
from_pretrained, do_lower_case="uncased" in from_pretrained
)
torch.distributed.barrier()
return BertTokenizer.from_pretrained(
from_pretrained, do_lower_case="uncased" in from_pretrained
)
class BaseDataModule(LightningDataModule):
def __init__(self, _config):
super().__init__()
self.data_dir = _config["data_root"]
self.num_workers = _config["num_workers"]
self.batch_size = _config["per_gpu_batchsize"]
self.eval_batch_size = self.batch_size
self.image_size = _config["image_size"]
self.max_text_len = _config["max_text_len"]
self.draw_false_image = _config["draw_false_image"]
self.draw_false_text = _config["draw_false_text"]
self.image_only = _config["image_only"]
self.text_only = _config["text_only"]
self.train_transform_keys = (
["default_train"]
if len(_config["train_transform_keys"]) == 0
else _config["train_transform_keys"]
)
self.val_transform_keys = (
["default_val"]
if len(_config["val_transform_keys"]) == 0
else _config["val_transform_keys"]
)
tokenizer = _config["tokenizer"]
self.tokenizer = get_pretrained_tokenizer(tokenizer)
self.vocab_size = self.tokenizer.vocab_size
collator = (
DataCollatorForWholeWordMask
if _config["whole_word_masking"]
else DataCollatorForLanguageModeling
)
self.mlm_collator = collator(
tokenizer=self.tokenizer, mlm=True, mlm_probability=_config["mlm_prob"]
)
self.setup_flag = False
@property
def dataset_cls(self):
raise NotImplementedError("return tuple of dataset class")
@property
def dataset_name(self):
raise NotImplementedError("return name of dataset")
def set_train_dataset(self):
self.train_dataset = self.dataset_cls(
self.data_dir,
self.train_transform_keys,
split="train",
image_size=self.image_size,
max_text_len=self.max_text_len,
draw_false_image=self.draw_false_image,
draw_false_text=self.draw_false_text,
image_only=self.image_only,
)
def set_val_dataset(self):
self.val_dataset = self.dataset_cls(
self.data_dir,
self.val_transform_keys,
split="val",
image_size=self.image_size,
max_text_len=self.max_text_len,
draw_false_image=self.draw_false_image,
draw_false_text=self.draw_false_text,
image_only=self.image_only,
)
if hasattr(self, "dataset_cls_no_false"):
self.val_dataset_no_false = self.dataset_cls_no_false(
self.data_dir,
self.val_transform_keys,
split="val",
image_size=self.image_size,
max_text_len=self.max_text_len,
draw_false_image=0,
draw_false_text=0,
image_only=self.image_only,
)
def make_no_false_val_dset(self, image_only=False):
return self.dataset_cls_no_false(
self.data_dir,
self.val_transform_keys,
split="val",
image_size=self.image_size,
max_text_len=self.max_text_len,
draw_false_image=0,
draw_false_text=0,
image_only=image_only,
)
def make_no_false_test_dset(self, image_only=False):
return self.dataset_cls_no_false(
self.data_dir,
self.val_transform_keys,
split="test",
image_size=self.image_size,
max_text_len=self.max_text_len,
draw_false_image=0,
draw_false_text=0,
image_only=image_only,
)
def set_test_dataset(self):
self.test_dataset = self.dataset_cls(
self.data_dir,
self.val_transform_keys,
split="test",
image_size=self.image_size,
max_text_len=self.max_text_len,
draw_false_image=self.draw_false_image,
draw_false_text=self.draw_false_text,
image_only=self.image_only,
)
def setup(self, stage):
if not self.setup_flag:
self.set_train_dataset()
self.set_val_dataset()
self.set_test_dataset()
self.train_dataset.tokenizer = self.tokenizer
self.val_dataset.tokenizer = self.tokenizer
self.test_dataset.tokenizer = self.tokenizer
self.setup_flag = True
def train_dataloader(self):
loader = DataLoader(
self.train_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers,
pin_memory=True,
collate_fn=self.train_dataset.collate,
)
return loader
def val_dataloader(self):
loader = DataLoader(
self.val_dataset,
batch_size=self.eval_batch_size,
shuffle=False,
num_workers=self.num_workers,
pin_memory=True,
collate_fn=self.val_dataset.collate,
)
return loader
def test_dataloader(self):
loader = DataLoader(
self.test_dataset,
batch_size=self.eval_batch_size,
shuffle=False,
num_workers=self.num_workers,
pin_memory=True,
collate_fn=self.test_dataset.collate,
)
return loader
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datamodules/datamodule_base.py |
from vlmo.datasets import ConceptualCaptionDataset
from .datamodule_base import BaseDataModule
class ConceptualCaptionDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return ConceptualCaptionDataset
@property
def dataset_name(self):
return "gcc"
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datamodules/conceptual_caption_datamodule.py |
from vlmo.datasets import SBUCaptionDataset
from .datamodule_base import BaseDataModule
class SBUCaptionDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return SBUCaptionDataset
@property
def dataset_name(self):
return "sbu"
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datamodules/sbu_datamodule.py |
from .vg_caption_datamodule import VisualGenomeCaptionDataModule
from .f30k_caption_karpathy_datamodule import F30KCaptionKarpathyDataModule
from .coco_caption_karpathy_datamodule import CocoCaptionKarpathyDataModule
from .conceptual_caption_datamodule import ConceptualCaptionDataModule
from .sbu_datamodule import SBUCaptionDataModule
from .wikibk_datamodule import WikibkDataModule
from .vqav2_datamodule import VQAv2DataModule
from .nlvr2_datamodule import NLVR2DataModule
_datamodules = {
"vg": VisualGenomeCaptionDataModule,
"f30k": F30KCaptionKarpathyDataModule,
"coco": CocoCaptionKarpathyDataModule,
"gcc": ConceptualCaptionDataModule,
"sbu": SBUCaptionDataModule,
"wikibk": WikibkDataModule,
"vqa": VQAv2DataModule,
"nlvr2": NLVR2DataModule,
}
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datamodules/__init__.py |
from vlmo.datasets import VisualGenomeCaptionDataset
from .datamodule_base import BaseDataModule
class VisualGenomeCaptionDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return VisualGenomeCaptionDataset
@property
def dataset_name(self):
return "vg"
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datamodules/vg_caption_datamodule.py |
from vlmo.datasets import VQAv2Dataset
from .datamodule_base import BaseDataModule
from collections import defaultdict
class VQAv2DataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return VQAv2Dataset
@property
def dataset_name(self):
return "vqa"
def setup(self, stage):
super().setup(stage)
train_answers = self.train_dataset.table["answers"].to_pandas().tolist()
val_answers = self.val_dataset.table["answers"].to_pandas().tolist()
train_labels = self.train_dataset.table["answer_labels"].to_pandas().tolist()
val_labels = self.val_dataset.table["answer_labels"].to_pandas().tolist()
all_answers = [c for c in train_answers + val_answers if c is not None]
all_answers = [l for lll in all_answers for ll in lll for l in ll]
all_labels = [c for c in train_labels + val_labels if c is not None]
all_labels = [l for lll in all_labels for ll in lll for l in ll]
self.answer2id = {k: v for k, v in zip(all_answers, all_labels)}
sorted_a2i = sorted(self.answer2id.items(), key=lambda x: x[1])
self.num_class = max(self.answer2id.values()) + 1
self.id2answer = defaultdict(lambda: "unknown")
for k, v in sorted_a2i:
self.id2answer[v] = k
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datamodules/vqav2_datamodule.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.