python_code   string (length 0 to 992k)
repo_name     string (length 8 to 46)
file_path     string (length 5 to 162)
from .discriminative_reranking_model import DiscriminativeNMTReranker

__all__ = [
    "DiscriminativeNMTReranker",
]
KosmosX-API-main
kosmosX/fairseq/examples/discriminative_reranking_nmt/models/__init__.py
from dataclasses import dataclass, field import os import torch import torch.nn as nn from fairseq import utils from fairseq.dataclass import ChoiceEnum, FairseqDataclass from fairseq.models import ( BaseFairseqModel, register_model, ) from fairseq.models.roberta.model import RobertaClassificationHead from fairseq.modules import ( LayerNorm, TransformerSentenceEncoder, TransformerSentenceEncoderLayer, ) ACTIVATION_FN_CHOICES = ChoiceEnum(utils.get_available_activation_fns()) JOINT_CLASSIFICATION_CHOICES = ChoiceEnum(["none", "sent"]) SENTENCE_REP_CHOICES = ChoiceEnum(["head", "meanpool", "maxpool"]) def update_init_roberta_model_state(state): """ update the state_dict of a Roberta model for initializing weights of the BertRanker """ for k in list(state.keys()): if ".lm_head." in k or "version" in k: del state[k] continue # remove 'encoder/decoder.sentence_encoder.' from the key assert k.startswith("encoder.sentence_encoder.") or k.startswith( "decoder.sentence_encoder." ), f"Cannot recognize parameter name {k}" if "layernorm_embedding" in k: new_k = k.replace(".layernorm_embedding.", ".emb_layer_norm.") state[new_k[25:]] = state[k] else: state[k[25:]] = state[k] del state[k] class BaseRanker(nn.Module): def __init__(self, args, task): super().__init__() self.separator_token = task.dictionary.eos() self.padding_idx = task.dictionary.pad() def forward(self, src_tokens): raise NotImplementedError def get_segment_labels(self, src_tokens): segment_boundary = (src_tokens == self.separator_token).long() segment_labels = ( segment_boundary.cumsum(dim=1) - segment_boundary - (src_tokens == self.padding_idx).long() ) return segment_labels def get_positions(self, src_tokens, segment_labels): segment_positions = ( torch.arange(src_tokens.shape[1]) .to(src_tokens.device) .repeat(src_tokens.shape[0], 1) ) segment_boundary = (src_tokens == self.separator_token).long() _, col_idx = (segment_positions * segment_boundary).nonzero(as_tuple=True) col_idx = torch.cat([torch.zeros(1).type_as(col_idx), col_idx]) offset = torch.cat( [ torch.zeros(1).type_as(segment_boundary), segment_boundary.sum(dim=1).cumsum(dim=0)[:-1], ] ) segment_positions -= col_idx[segment_labels + offset.unsqueeze(1)] * ( segment_labels != 0 ) padding_mask = src_tokens.ne(self.padding_idx) segment_positions = (segment_positions + 1) * padding_mask.type_as( segment_positions ) + self.padding_idx return segment_positions class BertRanker(BaseRanker): def __init__(self, args, task): super(BertRanker, self).__init__(args, task) init_model = getattr(args, "pretrained_model", "") self.joint_layers = nn.ModuleList() if os.path.isfile(init_model): print(f"initialize weight from {init_model}") from fairseq import hub_utils x = hub_utils.from_pretrained( os.path.dirname(init_model), checkpoint_file=os.path.basename(init_model), ) in_state_dict = x["models"][0].state_dict() init_args = x["args"].model num_positional_emb = init_args.max_positions + task.dictionary.pad() + 1 # follow the setup in roberta self.model = TransformerSentenceEncoder( padding_idx=task.dictionary.pad(), vocab_size=len(task.dictionary), num_encoder_layers=getattr( args, "encoder_layers", init_args.encoder_layers ), embedding_dim=init_args.encoder_embed_dim, ffn_embedding_dim=init_args.encoder_ffn_embed_dim, num_attention_heads=init_args.encoder_attention_heads, dropout=init_args.dropout, attention_dropout=init_args.attention_dropout, activation_dropout=init_args.activation_dropout, num_segments=2, # add language embeddings max_seq_len=num_positional_emb, 
offset_positions_by_padding=False, encoder_normalize_before=True, apply_bert_init=True, activation_fn=init_args.activation_fn, freeze_embeddings=args.freeze_embeddings, n_trans_layers_to_freeze=args.n_trans_layers_to_freeze, ) # still need to learn segment embeddings as we added a second language embedding if args.freeze_embeddings: for p in self.model.segment_embeddings.parameters(): p.requires_grad = False update_init_roberta_model_state(in_state_dict) print("loading weights from the pretrained model") self.model.load_state_dict( in_state_dict, strict=False ) # ignore mismatch in language embeddings ffn_embedding_dim = init_args.encoder_ffn_embed_dim num_attention_heads = init_args.encoder_attention_heads dropout = init_args.dropout attention_dropout = init_args.attention_dropout activation_dropout = init_args.activation_dropout activation_fn = init_args.activation_fn classifier_embed_dim = getattr( args, "embed_dim", init_args.encoder_embed_dim ) if classifier_embed_dim != init_args.encoder_embed_dim: self.transform_layer = nn.Linear( init_args.encoder_embed_dim, classifier_embed_dim ) else: self.model = TransformerSentenceEncoder( padding_idx=task.dictionary.pad(), vocab_size=len(task.dictionary), num_encoder_layers=args.encoder_layers, embedding_dim=args.embed_dim, ffn_embedding_dim=args.ffn_embed_dim, num_attention_heads=args.attention_heads, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, max_seq_len=task.max_positions() if task.max_positions() else args.tokens_per_sample, num_segments=2, offset_positions_by_padding=False, encoder_normalize_before=args.encoder_normalize_before, apply_bert_init=args.apply_bert_init, activation_fn=args.activation_fn, ) classifier_embed_dim = args.embed_dim ffn_embedding_dim = args.ffn_embed_dim num_attention_heads = args.attention_heads dropout = args.dropout attention_dropout = args.attention_dropout activation_dropout = args.activation_dropout activation_fn = args.activation_fn self.joint_classification = args.joint_classification if args.joint_classification == "sent": if args.joint_normalize_before: self.joint_layer_norm = LayerNorm(classifier_embed_dim) else: self.joint_layer_norm = None self.joint_layers = nn.ModuleList( [ TransformerSentenceEncoderLayer( embedding_dim=classifier_embed_dim, ffn_embedding_dim=ffn_embedding_dim, num_attention_heads=num_attention_heads, dropout=dropout, attention_dropout=attention_dropout, activation_dropout=activation_dropout, activation_fn=activation_fn, ) for _ in range(args.num_joint_layers) ] ) self.classifier = RobertaClassificationHead( classifier_embed_dim, classifier_embed_dim, 1, # num_classes "tanh", args.classifier_dropout, ) def forward(self, src_tokens, src_lengths): segment_labels = self.get_segment_labels(src_tokens) positions = self.get_positions(src_tokens, segment_labels) inner_states, _ = self.model( tokens=src_tokens, segment_labels=segment_labels, last_state_only=True, positions=positions, ) return inner_states[-1].transpose(0, 1) # T x B x C -> B x T x C def sentence_forward(self, encoder_out, src_tokens=None, sentence_rep="head"): # encoder_out: B x T x C if sentence_rep == "head": x = encoder_out[:, :1, :] else: # 'meanpool', 'maxpool' assert src_tokens is not None, "meanpool requires src_tokens input" segment_labels = self.get_segment_labels(src_tokens) padding_mask = src_tokens.ne(self.padding_idx) encoder_mask = segment_labels * padding_mask.type_as(segment_labels) if sentence_rep == "meanpool": ntokens = torch.sum(encoder_mask, 
dim=1, keepdim=True) x = torch.sum( encoder_out * encoder_mask.unsqueeze(2), dim=1, keepdim=True ) / ntokens.unsqueeze(2).type_as(encoder_out) else: # 'maxpool' encoder_out[ (encoder_mask == 0).unsqueeze(2).repeat(1, 1, encoder_out.shape[-1]) ] = -float("inf") x, _ = torch.max(encoder_out, dim=1, keepdim=True) if hasattr(self, "transform_layer"): x = self.transform_layer(x) return x # B x 1 x C def joint_forward(self, x): # x: T x B x C if self.joint_layer_norm: x = self.joint_layer_norm(x.transpose(0, 1)) x = x.transpose(0, 1) for layer in self.joint_layers: x, _ = layer(x, self_attn_padding_mask=None) return x def classification_forward(self, x): # x: B x T x C return self.classifier(x) @dataclass class DiscriminativeNMTRerankerConfig(FairseqDataclass): pretrained_model: str = field( default="", metadata={"help": "pretrained model to load"} ) sentence_rep: SENTENCE_REP_CHOICES = field( default="head", metadata={ "help": "method to transform the output of the transformer stack to a sentence-level representation" }, ) dropout: float = field(default=0.1, metadata={"help": "dropout probability"}) attention_dropout: float = field( default=0.0, metadata={"help": "dropout probability for attention weights"} ) activation_dropout: float = field( default=0.0, metadata={"help": "dropout probability after activation in FFN"} ) classifier_dropout: float = field( default=0.0, metadata={"help": "classifier dropout probability"} ) embed_dim: int = field(default=768, metadata={"help": "embedding dimension"}) ffn_embed_dim: int = field( default=2048, metadata={"help": "embedding dimension for FFN"} ) encoder_layers: int = field(default=12, metadata={"help": "num encoder layers"}) attention_heads: int = field(default=8, metadata={"help": "num attention heads"}) encoder_normalize_before: bool = field( default=False, metadata={"help": "apply layernorm before each encoder block"} ) apply_bert_init: bool = field( default=False, metadata={"help": "use custom param initialization for BERT"} ) activation_fn: ACTIVATION_FN_CHOICES = field( default="relu", metadata={"help": "activation function to use"} ) freeze_embeddings: bool = field( default=False, metadata={"help": "freeze embeddings in the pretrained model"} ) n_trans_layers_to_freeze: int = field( default=0, metadata={ "help": "number of layers to freeze in the pretrained transformer model" }, ) # joint classfication joint_classification: JOINT_CLASSIFICATION_CHOICES = field( default="none", metadata={"help": "method to compute joint features for classification"}, ) num_joint_layers: int = field( default=1, metadata={"help": "number of joint layers"} ) joint_normalize_before: bool = field( default=False, metadata={"help": "apply layer norm on the input to the joint layer"}, ) @register_model( "discriminative_nmt_reranker", dataclass=DiscriminativeNMTRerankerConfig ) class DiscriminativeNMTReranker(BaseFairseqModel): @classmethod def build_model(cls, args, task): model = BertRanker(args, task) return DiscriminativeNMTReranker(args, model) def __init__(self, args, model): super().__init__() self.model = model self.sentence_rep = args.sentence_rep self.joint_classification = args.joint_classification def forward(self, src_tokens, src_lengths, **kwargs): return self.model(src_tokens, src_lengths) def sentence_forward(self, encoder_out, src_tokens): return self.model.sentence_forward(encoder_out, src_tokens, self.sentence_rep) def joint_forward(self, x): return self.model.joint_forward(x) def classification_forward(self, x): return 
self.model.classification_forward(x)
KosmosX-API-main
kosmosX/fairseq/examples/discriminative_reranking_nmt/models/discriminative_reranking_model.py
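BertRanker consumes the source and the hypothesis as a single concatenated sequence, and BaseRanker.get_segment_labels recovers which tokens belong to which sentence purely from the eos separators and the padding. Below is a minimal standalone sketch of that computation on a toy batch; the eos and pad indices (2 and 1) are assumptions chosen for illustration, not values read from a real fairseq dictionary.

import torch

# Assumed toy indices for illustration: eos/separator = 2, pad = 1.
eos, pad = 2, 1

# One concatenated input: source "5 6 </s>" followed by hypothesis "7 8 </s>" and padding.
src_tokens = torch.tensor([[5, 6, eos, 7, 8, eos, pad, pad]])

segment_boundary = (src_tokens == eos).long()
segment_labels = (
    segment_boundary.cumsum(dim=1)
    - segment_boundary
    - (src_tokens == pad).long()
)
print(segment_labels)
# tensor([[0, 0, 0, 1, 1, 1, 1, 1]])
# source tokens get segment 0, hypothesis tokens get segment 1;
# padding positions also end up with 1 in this two-segment example.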
#!/usr/bin/env python

import argparse
from multiprocessing import Pool
from pathlib import Path

import sacrebleu
import sentencepiece as spm


def read_text_file(filename):
    with open(filename, "r") as f:
        output = [line.strip() for line in f]

    return output


def get_bleu(in_sent, target_sent):
    bleu = sacrebleu.corpus_bleu([in_sent], [[target_sent]])
    out = " ".join(
        map(str, [bleu.score, bleu.sys_len, bleu.ref_len] + bleu.counts + bleu.totals)
    )
    return out


def get_ter(in_sent, target_sent):
    ter = sacrebleu.corpus_ter([in_sent], [[target_sent]])
    out = " ".join(map(str, [ter.score, ter.num_edits, ter.ref_length]))
    return out


def init(sp_model):
    global sp
    sp = spm.SentencePieceProcessor()
    sp.Load(sp_model)


def process(source_sent, target_sent, hypo_sent, metric):
    source_bpe = " ".join(sp.EncodeAsPieces(source_sent))
    hypo_bpe = [" ".join(sp.EncodeAsPieces(h)) for h in hypo_sent]

    if metric == "bleu":
        score_str = [get_bleu(h, target_sent) for h in hypo_sent]
    else:  # ter
        score_str = [get_ter(h, target_sent) for h in hypo_sent]

    return source_bpe, hypo_bpe, score_str


def main(args):
    assert (
        args.split.startswith("train") or args.num_shards == 1
    ), "--num-shards should be set to 1 for valid and test sets"
    assert (
        args.split.startswith("train")
        or args.split.startswith("valid")
        or args.split.startswith("test")
    ), "--split should be set to train[n]/valid[n]/test[n]"

    source_sents = read_text_file(args.input_source)
    target_sents = read_text_file(args.input_target)

    num_sents = len(source_sents)
    assert num_sents == len(
        target_sents
    ), f"{args.input_source} and {args.input_target} should have the same number of sentences."

    hypo_sents = read_text_file(args.input_hypo)
    assert (
        len(hypo_sents) % args.beam == 0
    ), f"Number of hypotheses ({len(hypo_sents)}) cannot be divided by beam size ({args.beam})."

    hypo_sents = [
        hypo_sents[i : i + args.beam] for i in range(0, len(hypo_sents), args.beam)
    ]
    assert num_sents == len(
        hypo_sents
    ), f"{args.input_hypo} should contain {num_sents * args.beam} hypotheses but only has {len(hypo_sents) * args.beam}. (--beam={args.beam})"

    output_dir = args.output_dir / args.metric
    for ns in range(args.num_shards):
        print(f"processing shard {ns+1}/{args.num_shards}")
        shard_output_dir = output_dir / f"split{ns+1}"
        source_output_dir = shard_output_dir / "input_src"
        hypo_output_dir = shard_output_dir / "input_tgt"
        metric_output_dir = shard_output_dir / args.metric

        source_output_dir.mkdir(parents=True, exist_ok=True)
        hypo_output_dir.mkdir(parents=True, exist_ok=True)
        metric_output_dir.mkdir(parents=True, exist_ok=True)

        if args.n_proc > 1:
            with Pool(
                args.n_proc, initializer=init, initargs=(args.sentencepiece_model,)
            ) as p:
                output = p.starmap(
                    process,
                    [
                        (source_sents[i], target_sents[i], hypo_sents[i], args.metric)
                        for i in range(ns, num_sents, args.num_shards)
                    ],
                )
        else:
            init(args.sentencepiece_model)
            output = [
                process(source_sents[i], target_sents[i], hypo_sents[i], args.metric)
                for i in range(ns, num_sents, args.num_shards)
            ]

        with open(source_output_dir / f"{args.split}.bpe", "w") as s_o, open(
            hypo_output_dir / f"{args.split}.bpe", "w"
        ) as h_o, open(metric_output_dir / f"{args.split}.{args.metric}", "w") as m_o:
            for source_bpe, hypo_bpe, score_str in output:
                assert len(hypo_bpe) == len(score_str)
                for h, m in zip(hypo_bpe, score_str):
                    s_o.write(f"{source_bpe}\n")
                    h_o.write(f"{h}\n")
                    m_o.write(f"{m}\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--input-source", type=Path, required=True)
    parser.add_argument("--input-target", type=Path, required=True)
    parser.add_argument("--input-hypo", type=Path, required=True)
    parser.add_argument("--output-dir", type=Path, required=True)
    parser.add_argument("--split", type=str, required=True)
    parser.add_argument("--beam", type=int, required=True)
    parser.add_argument("--sentencepiece-model", type=str, required=True)
    parser.add_argument("--metric", type=str, choices=["bleu", "ter"], default="bleu")
    parser.add_argument("--num-shards", type=int, default=1)
    parser.add_argument("--n-proc", type=int, default=8)

    args = parser.parse_args()

    main(args)
KosmosX-API-main
kosmosX/fairseq/examples/discriminative_reranking_nmt/scripts/prep_data.py
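For every hypothesis, prep_data.py stores not a single score but the serialized sacrebleu sufficient statistics, which the reranking task can later renormalize. A small sketch of that serialization, using made-up example sentences in place of the --input-hypo and --input-target files:

import sacrebleu

# Invented hypothesis/reference pair for illustration only.
hypo = "the cat sat on the mat"
ref = "the cat is on the mat"

bleu = sacrebleu.corpus_bleu([hypo], [[ref]])
line = " ".join(
    map(str, [bleu.score, bleu.sys_len, bleu.ref_len] + bleu.counts + bleu.totals)
)
# Format: "<score> <sys_len> <ref_len> <1..4-gram matches> <1..4-gram totals>"
print(line)

The TER branch works the same way but writes score, num_edits and ref_length instead.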
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from dataclasses import dataclass, field import torch import torch.nn.functional as F from fairseq import metrics, utils from fairseq.criterions import FairseqCriterion, register_criterion from fairseq.dataclass import ChoiceEnum, FairseqDataclass _EPSILON = torch.finfo(torch.float32).eps TARGET_DIST_NORM_CHOICES = ChoiceEnum(["none", "minmax"]) @dataclass class KLDivergenceRerankingCriterionConfig(FairseqDataclass): target_dist_norm: TARGET_DIST_NORM_CHOICES = field( default="none", metadata={"help": "method to normalize the range of target scores"}, ) temperature: float = field( default=1.0, metadata={"help": "temperature in softmax for target distributions"}, ) forward_batch_size: int = field( default=32, metadata={ "help": "number of hypotheses per batch for model forward (set a value smaller than --mt-beam to avoid OOM when training with a large beam size)" }, ) @register_criterion( "kl_divergence_rereanking", dataclass=KLDivergenceRerankingCriterionConfig ) class KLDivergenceRerankingCriterion(FairseqCriterion): def __init__( self, task, target_dist_norm, temperature, forward_batch_size, ): super().__init__(task) self.target_dist_norm = target_dist_norm self.temperature = temperature self.forward_batch_size = forward_batch_size def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ sample_size = sample["id"].numel() assert sample_size % self.task.cfg.mt_beam == 0, ( f"sample_size ({sample_size}) cannot be divided by beam size ({self.task.cfg.mt_beam})." f"Please set --required-batch-size-multiple={self.task.cfg.mt_beam}." 
) # split into smaller batches for model forward batch_out = [] for i in range(0, sample_size, self.forward_batch_size): j = min(i + self.forward_batch_size, sample_size) out = model( src_tokens=sample["net_input"]["src_tokens"][i:j, :], src_lengths=sample["net_input"]["src_lengths"][i:j], ) batch_out.append( model.sentence_forward(out, sample["net_input"]["src_tokens"][i:j, :]) ) batch_out = torch.cat(batch_out, dim=0).view( self.task.cfg.mt_beam, sample_size // self.task.cfg.mt_beam, -1 ) # T x B x C if model.joint_classification == "sent": batch_out = model.joint_forward(batch_out) scores = model.classification_forward(batch_out.view(sample_size, 1, -1)).view( -1, self.task.cfg.mt_beam ) # input: B x T x C loss = self.compute_kl_loss( scores, sample["target"][:, 0].view(-1, self.task.cfg.mt_beam) ) sample_size = sample_size // self.task.cfg.mt_beam logging_output = { "loss": loss.detach(), "ntokens": sample["ntokens"], "nsentences": sample_size * self.task.cfg.mt_beam, "sample_size": sample_size, "scores": scores.detach(), } return loss, sample_size, logging_output def compute_kl_loss(self, logits, target): norm_target = target if self.target_dist_norm == "minmax": min_v = torch.min(target, 1, keepdim=True).values max_v = torch.max(target, 1, keepdim=True).values norm_target = (target - min_v) / (max_v - min_v + _EPSILON) target_dist = F.softmax( norm_target / self.temperature, dim=-1, dtype=torch.float32 ) model_dist = F.log_softmax(logits, dim=-1, dtype=torch.float32) loss = -(target_dist * model_dist - target_dist * target_dist.log()).sum() return loss @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) sample_size = utils.item( sum(log.get("sample_size", 0) for log in logging_outputs) ) loss = loss_sum / sample_size / math.log(2) metrics.log_scalar("loss", loss, sample_size, round=3) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
KosmosX-API-main
kosmosX/fairseq/examples/discriminative_reranking_nmt/criterions/discriminative_reranking_criterion.py
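The criterion treats the mt_beam hypotheses of each source sentence as one distribution: the targets (e.g. sentence BLEU) are optionally min-max normalized, softmaxed with a temperature, and compared against the model's log-softmax scores with an explicit KL term. A standalone sketch of compute_kl_loss on invented numbers for a single sentence with a beam of 4:

import torch
import torch.nn.functional as F

_EPSILON = torch.finfo(torch.float32).eps
temperature = 1.0

logits = torch.tensor([[0.2, 1.5, -0.3, 0.7]])     # reranker scores, B x beam (toy values)
target = torch.tensor([[32.0, 41.5, 28.0, 35.0]])  # e.g. sentence BLEU per hypothesis (toy values)

# "minmax" normalization maps the targets into [0, 1] per sentence
min_v = target.min(1, keepdim=True).values
max_v = target.max(1, keepdim=True).values
norm_target = (target - min_v) / (max_v - min_v + _EPSILON)

target_dist = F.softmax(norm_target / temperature, dim=-1, dtype=torch.float32)
model_dist = F.log_softmax(logits, dim=-1, dtype=torch.float32)

# KL(target || model) summed over the beam, as in the criterion
loss = -(target_dist * model_dist - target_dist * target_dist.log()).sum()
print(loss)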
from .discriminative_reranking_criterion import KLDivergenceRerankingCriterion

__all__ = [
    "KLDivergenceRerankingCriterion",
]
KosmosX-API-main
kosmosX/fairseq/examples/discriminative_reranking_nmt/criterions/__init__.py
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Translate pre-processed data with a trained model. """ import numpy as np import torch from fairseq import checkpoint_utils, options, progress_bar, tasks, utils from fairseq.sequence_generator import EnsembleModel from fairseq.utils import safe_hasattr def get_avg_pool( models, sample, prefix_tokens, src_dict, remove_bpe, has_langtok=False ): model = EnsembleModel(models) # model.forward normally channels prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens" } # compute the encoder output for each beam encoder_outs = model.forward_encoder(encoder_input) np_encoder_outs = encoder_outs[0].encoder_out.cpu().numpy().astype(np.float32) encoder_mask = 1 - encoder_outs[0].encoder_padding_mask.cpu().numpy().astype( np.float32 ) encoder_mask = np.expand_dims(encoder_mask.T, axis=2) if has_langtok: encoder_mask = encoder_mask[1:, :, :] np_encoder_outs = np_encoder_outs[1, :, :] masked_encoder_outs = encoder_mask * np_encoder_outs avg_pool = (masked_encoder_outs / encoder_mask.sum(axis=0)).sum(axis=0) return avg_pool def main(args): assert args.path is not None, "--path required for generation!" assert ( not args.sampling or args.nbest == args.beam ), "--sampling requires --nbest to be equal to --beam" assert ( args.replace_unk is None or args.raw_text ), "--replace-unk requires a raw text dataset (--raw-text)" args.beam = 1 utils.import_user_module(args) if args.max_tokens is None: args.max_tokens = 12000 print(args) use_cuda = torch.cuda.is_available() and not args.cpu # Load dataset splits task = tasks.setup_task(args) task.load_dataset(args.gen_subset) # Set dictionaries try: src_dict = getattr(task, "source_dictionary", None) except NotImplementedError: src_dict = None tgt_dict = task.target_dictionary # Load ensemble print("| loading model(s) from {}".format(args.path)) models, _model_args = checkpoint_utils.load_model_ensemble( args.path.split(":"), arg_overrides=eval(args.model_overrides), task=task, ) # Optimize ensemble for generation for model in models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if args.fp16: model.half() if use_cuda: model.cuda() # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(args.replace_unk) # Load dataset (possibly sharded) itr = task.get_batch_iterator( dataset=task.dataset(args.gen_subset), max_tokens=args.max_tokens, max_positions=utils.resolve_max_positions( task.max_positions(), ), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, num_shards=args.num_shards, shard_id=args.shard_id, num_workers=args.num_workers, ).next_epoch_itr(shuffle=False) num_sentences = 0 source_sentences = [] shard_id = 0 all_avg_pool = None encoder_has_langtok = ( safe_hasattr(task.args, "encoder_langtok") and task.args.encoder_langtok is not None and safe_hasattr(task.args, "lang_tok_replacing_bos_eos") and not task.args.lang_tok_replacing_bos_eos ) with progress_bar.build_progress_bar(args, itr) as t: for sample in t: if sample is None: print("Skipping None") 
continue sample = utils.move_to_cuda(sample) if use_cuda else sample if "net_input" not in sample: continue prefix_tokens = None if args.prefix_size > 0: prefix_tokens = sample["target"][:, : args.prefix_size] with torch.no_grad(): avg_pool = get_avg_pool( models, sample, prefix_tokens, src_dict, args.post_process, has_langtok=encoder_has_langtok, ) if all_avg_pool is not None: all_avg_pool = np.concatenate((all_avg_pool, avg_pool)) else: all_avg_pool = avg_pool if not isinstance(sample["id"], list): sample_ids = sample["id"].tolist() else: sample_ids = sample["id"] for i, sample_id in enumerate(sample_ids): # Remove padding src_tokens = utils.strip_pad( sample["net_input"]["src_tokens"][i, :], tgt_dict.pad() ) # Either retrieve the original sentences or regenerate them from tokens. if align_dict is not None: src_str = task.dataset(args.gen_subset).src.get_original_text( sample_id ) else: if src_dict is not None: src_str = src_dict.string(src_tokens, args.post_process) else: src_str = "" if not args.quiet: if src_dict is not None: print("S-{}\t{}".format(sample_id, src_str)) source_sentences.append(f"{sample_id}\t{src_str}") num_sentences += sample["nsentences"] if all_avg_pool.shape[0] >= 1000000: with open( f"{args.encoder_save_dir}/all_avg_pool.{args.source_lang}.{shard_id}", "w", ) as avg_pool_file: all_avg_pool.tofile(avg_pool_file) with open( f"{args.encoder_save_dir}/sentences.{args.source_lang}.{shard_id}", "w", ) as sentence_file: sentence_file.writelines(f"{line}\n" for line in source_sentences) all_avg_pool = None source_sentences = [] shard_id += 1 if all_avg_pool is not None: with open( f"{args.encoder_save_dir}/all_avg_pool.{args.source_lang}.{shard_id}", "w" ) as avg_pool_file: all_avg_pool.tofile(avg_pool_file) with open( f"{args.encoder_save_dir}/sentences.{args.source_lang}.{shard_id}", "w" ) as sentence_file: sentence_file.writelines(f"{line}\n" for line in source_sentences) return None def cli_main(): parser = options.get_generation_parser() parser.add_argument( "--encoder-save-dir", default="", type=str, metavar="N", help="directory to save encoder outputs", ) args = options.parse_args_and_arch(parser) main(args) if __name__ == "__main__": cli_main()
KosmosX-API-main
kosmosX/fairseq/examples/criss/save_encoder.py
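save_encoder.py reduces each sentence to one vector by averaging the encoder states over non-padding timesteps (get_avg_pool) and appending the result to a shard file. A minimal numpy sketch of just the pooling step, with random stand-in states and an invented padding mask instead of real EnsembleModel outputs:

import numpy as np

# Shapes follow fairseq encoder outputs: T x B x C states and a B x T padding mask.
T, B, C = 5, 2, 4
rng = np.random.default_rng(0)
encoder_out = rng.normal(size=(T, B, C)).astype(np.float32)   # stand-in for encoder_outs[0].encoder_out
padding_mask = np.array([[0, 0, 0, 1, 1],                      # sentence 0 has 3 real tokens
                         [0, 0, 0, 0, 0]], dtype=np.float32)   # sentence 1 has 5

encoder_mask = np.expand_dims((1 - padding_mask).T, axis=2)    # T x B x 1
masked = encoder_mask * encoder_out
avg_pool = (masked / encoder_mask.sum(axis=0)).sum(axis=0)     # B x C mean over non-pad steps
print(avg_pool.shape)  # (2, 4)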
#!/usr/bin/env python3 -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import glob from subprocess import check_call try: import faiss has_faiss = True except ImportError: has_faiss = False import numpy as np GB = 1024 * 1024 * 1024 def call(cmd): print(cmd) check_call(cmd, shell=True) def get_batches(directory, lang, prefix="all_avg_pool"): print(f"Finding in {directory}/{prefix}.{lang}*") files = glob.glob(f"{directory}/{prefix}.{lang}*") emb_files = [] txt_files = [] for emb_fi in files: emb_files.append(emb_fi) txt_fi = emb_fi.replace(prefix, "sentences") txt_files.append(txt_fi) return emb_files, txt_files def load_batch(emb_file, dim): embeddings = np.fromfile(emb_file, dtype=np.float32) num_rows = int(embeddings.shape[0] / dim) embeddings = embeddings.reshape((num_rows, dim)) faiss.normalize_L2(embeddings) return embeddings def knnGPU_sharded(x_batches_f, y_batches_f, dim, k, direction="x2y"): if not has_faiss: raise ImportError("Please install Faiss") sims = [] inds = [] xfrom = 0 xto = 0 for x_batch_f in x_batches_f: yfrom = 0 yto = 0 x_batch = load_batch(x_batch_f, dim) xto = xfrom + x_batch.shape[0] bsims, binds = [], [] for y_batch_f in y_batches_f: y_batch = load_batch(y_batch_f, dim) neighbor_size = min(k, y_batch.shape[0]) yto = yfrom + y_batch.shape[0] print("{}-{} -> {}-{}".format(xfrom, xto, yfrom, yto)) idx = faiss.IndexFlatIP(dim) idx = faiss.index_cpu_to_all_gpus(idx) idx.add(y_batch) bsim, bind = idx.search(x_batch, neighbor_size) bsims.append(bsim) binds.append(bind + yfrom) yfrom += y_batch.shape[0] del idx del y_batch bsims = np.concatenate(bsims, axis=1) binds = np.concatenate(binds, axis=1) aux = np.argsort(-bsims, axis=1) sim_batch = np.zeros((x_batch.shape[0], k), dtype=np.float32) ind_batch = np.zeros((x_batch.shape[0], k), dtype=np.int64) for i in range(x_batch.shape[0]): for j in range(k): sim_batch[i, j] = bsims[i, aux[i, j]] ind_batch[i, j] = binds[i, aux[i, j]] sims.append(sim_batch) inds.append(ind_batch) xfrom += x_batch.shape[0] del x_batch sim = np.concatenate(sims, axis=0) ind = np.concatenate(inds, axis=0) return sim, ind def score(sim, fwd_mean, bwd_mean, margin): return margin(sim, (fwd_mean + bwd_mean) / 2) def score_candidates( sim_mat, candidate_inds, fwd_mean, bwd_mean, margin, verbose=False ): print(" - scoring {:d} candidates".format(sim_mat.shape[0])) scores = np.zeros(candidate_inds.shape) for i in range(scores.shape[0]): for j in range(scores.shape[1]): k = int(candidate_inds[i, j]) scores[i, j] = score(sim_mat[i, j], fwd_mean[i], bwd_mean[k], margin) return scores def load_text(files): all_sentences = [] for fi in files: with open(fi) as sentence_fi: for line in sentence_fi: all_sentences.append(line.strip()) print(f"Read {len(all_sentences)} sentences") return all_sentences if __name__ == "__main__": parser = argparse.ArgumentParser(description="Mine bitext") parser.add_argument("--src-lang", help="Source language") parser.add_argument("--tgt-lang", help="Target language") parser.add_argument( "--dict-path", help="Path to dictionary file", default="dict.txt" ) parser.add_argument( "--spm-path", help="Path to SPM model file", default="sentence.bpe.model" ) parser.add_argument("--dim", type=int, default=1024, help="Embedding dimension") parser.add_argument("--mem", type=int, default=5, help="Memory in GB") parser.add_argument("--src-dir", help="Source directory") 
parser.add_argument("--tgt-dir", help="Target directory") parser.add_argument("--output", help="Output path") parser.add_argument( "--neighborhood", type=int, default=4, help="Embedding dimension" ) parser.add_argument( "--threshold", type=float, default=1.06, help="Threshold on mined bitext" ) parser.add_argument( "--valid-size", type=int, default=2000, help="Number of sentences used for validation set", ) parser.add_argument( "--min-count", type=int, default=50000, help="Min num sentences used for each language", ) args = parser.parse_args() x_batches_f, x_sents_f = get_batches(args.src_dir, args.src_lang) y_batches_f, y_sents_f = get_batches(args.tgt_dir, args.tgt_lang) def margin(a, b): return a / b y2x_sim, y2x_ind = knnGPU_sharded( y_batches_f, x_batches_f, args.dim, args.neighborhood, direction="y2x" ) x2y_sim, x2y_ind = knnGPU_sharded( x_batches_f, y_batches_f, args.dim, args.neighborhood, direction="x2y" ) x2y_mean = x2y_sim.mean(axis=1) y2x_mean = y2x_sim.mean(axis=1) fwd_scores = score_candidates(x2y_sim, x2y_ind, x2y_mean, y2x_mean, margin) bwd_scores = score_candidates(y2x_sim, y2x_ind, y2x_mean, x2y_mean, margin) fwd_best = x2y_ind[np.arange(x2y_sim.shape[0]), fwd_scores.argmax(axis=1)] bwd_best = y2x_ind[np.arange(y2x_sim.shape[0]), bwd_scores.argmax(axis=1)] indices = np.stack( ( np.concatenate((np.arange(x2y_ind.shape[0]), bwd_best)), np.concatenate((fwd_best, np.arange(y2x_ind.shape[0]))), ), axis=1, ) scores = np.concatenate((fwd_scores.max(axis=1), bwd_scores.max(axis=1))) x_sentences = load_text(x_sents_f) y_sentences = load_text(y_sents_f) threshold = args.threshold min_count = args.min_count seen_src, seen_trg = set(), set() directory = args.output call(f"mkdir -p {directory}") src_out = open( f"{directory}/all.{args.src_lang}", mode="w", encoding="utf-8", errors="surrogateescape", ) tgt_out = open( f"{directory}/all.{args.tgt_lang}", mode="w", encoding="utf-8", errors="surrogateescape", ) scores_out = open( f"{directory}/all.scores", mode="w", encoding="utf-8", errors="surrogateescape" ) count = 0 for i in np.argsort(-scores): src_ind, trg_ind = indices[i] if src_ind not in seen_src and trg_ind not in seen_trg: seen_src.add(src_ind) seen_trg.add(trg_ind) if scores[i] > threshold or count < min_count: if x_sentences[src_ind]: print(scores[i], file=scores_out) print(x_sentences[src_ind], file=src_out) print(y_sentences[trg_ind], file=tgt_out) count += 1 else: print(f"Ignoring sentence: {x_sentences[src_ind]}") src_out.close() tgt_out.close() scores_out.close() print(f"Found {count} pairs for threshold={threshold}") with open(f"{directory}/all.{args.src_lang}") as all_s, open( f"{directory}/all.{args.tgt_lang}" ) as all_t, open(f"{directory}/valid.{args.src_lang}", "w") as valid_s, open( f"{directory}/valid.{args.tgt_lang}", "w" ) as valid_t, open( f"{directory}/train.{args.src_lang}", "w" ) as train_s, open( f"{directory}/train.{args.tgt_lang}", "w" ) as train_t: count = 0 for s_line, t_line in zip(all_s, all_t): s_line = s_line.split("\t")[1] t_line = t_line.split("\t")[1] if count >= args.valid_size: train_s.write(s_line) train_t.write(t_line) else: valid_s.write(s_line) valid_t.write(t_line) count += 1
KosmosX-API-main
kosmosX/fairseq/examples/criss/mining/mine.py
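mine.py scores a candidate pair with a ratio margin: the cosine similarity of the pair divided by the average of the mean similarities of each side's k nearest neighbours, keeping pairs above --threshold (default 1.06). A hedged sketch of that scoring on invented similarity numbers for one source sentence (the real script gets these from the sharded FAISS searches):

import numpy as np

def margin(a, b):
    return a / b

def score(sim, fwd_mean, bwd_mean):
    return margin(sim, (fwd_mean + bwd_mean) / 2)

x2y_sim = np.array([[0.82, 0.55, 0.41, 0.30]])  # source sentence 0 vs its 4 target neighbours (toy)
y2x_mean = np.array([0.60, 0.45, 0.40, 0.35])   # mean neighbour similarity of each target (toy)

fwd_mean = x2y_sim.mean(axis=1)                 # per-source mean over its neighbourhood
scores = np.array([
    score(x2y_sim[0, j], fwd_mean[0], y2x_mean[j]) for j in range(x2y_sim.shape[1])
])
print(scores)  # candidates above the threshold would be written to all.<lang>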
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import glob

import numpy as np


DIM = 1024


def compute_dist(source_embs, target_embs, k=5, return_sim_mat=False):
    target_ids = [tid for tid in target_embs]
    source_mat = np.stack(source_embs.values(), axis=0)
    normalized_source_mat = source_mat / np.linalg.norm(
        source_mat, axis=1, keepdims=True
    )
    target_mat = np.stack(target_embs.values(), axis=0)
    normalized_target_mat = target_mat / np.linalg.norm(
        target_mat, axis=1, keepdims=True
    )
    sim_mat = normalized_source_mat.dot(normalized_target_mat.T)
    if return_sim_mat:
        return sim_mat
    neighbors_map = {}
    for i, sentence_id in enumerate(source_embs):
        idx = np.argsort(sim_mat[i, :])[::-1][:k]
        neighbors_map[sentence_id] = [target_ids[tid] for tid in idx]
    return neighbors_map


def load_embeddings(directory, LANGS):
    sentence_embeddings = {}
    sentence_texts = {}
    for lang in LANGS:
        sentence_embeddings[lang] = {}
        sentence_texts[lang] = {}
        lang_dir = f"{directory}/{lang}"
        embedding_files = glob.glob(f"{lang_dir}/all_avg_pool.{lang}.*")
        for embed_file in embedding_files:
            shard_id = embed_file.split(".")[-1]
            embeddings = np.fromfile(embed_file, dtype=np.float32)
            num_rows = embeddings.shape[0] // DIM
            embeddings = embeddings.reshape((num_rows, DIM))

            with open(f"{lang_dir}/sentences.{lang}.{shard_id}") as sentence_file:
                for idx, line in enumerate(sentence_file):
                    sentence_id, sentence = line.strip().split("\t")
                    sentence_texts[lang][sentence_id] = sentence
                    sentence_embeddings[lang][sentence_id] = embeddings[idx, :]

    return sentence_embeddings, sentence_texts


def compute_accuracy(directory, LANGS):
    sentence_embeddings, sentence_texts = load_embeddings(directory, LANGS)

    top_1_accuracy = {}

    top1_str = " ".join(LANGS) + "\n"
    for source_lang in LANGS:
        top_1_accuracy[source_lang] = {}
        top1_str += f"{source_lang} "
        for target_lang in LANGS:
            top1 = 0
            top5 = 0
            neighbors_map = compute_dist(
                sentence_embeddings[source_lang], sentence_embeddings[target_lang]
            )
            for sentence_id, neighbors in neighbors_map.items():
                if sentence_id == neighbors[0]:
                    top1 += 1
                if sentence_id in neighbors[:5]:
                    top5 += 1
            n = len(sentence_embeddings[target_lang])
            top1_str += f"{top1/n} "
        top1_str += "\n"

    print(top1_str)
    print(top1_str, file=open(f"{directory}/accuracy", "w"))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Analyze encoder outputs")
    parser.add_argument("directory", help="Source language corpus")
    parser.add_argument("--langs", help="List of langs")
    args = parser.parse_args()
    langs = args.langs.split(",")
    compute_accuracy(args.directory, langs)
KosmosX-API-main
kosmosX/fairseq/examples/criss/sentence_retrieval/encoder_analysis.py
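A toy end-to-end use of the same retrieval logic as compute_dist, with random stand-in embeddings instead of the all_avg_pool shards: normalize, build the cosine similarity matrix, retrieve neighbours, and measure top-1 accuracy the way compute_accuracy does.

import numpy as np

rng = np.random.default_rng(0)
source_embs = {f"sent{i}": rng.normal(size=8) for i in range(3)}  # invented embeddings
target_embs = {f"sent{i}": rng.normal(size=8) for i in range(3)}  # invented embeddings

src = np.stack(list(source_embs.values()))
tgt = np.stack(list(target_embs.values()))
src /= np.linalg.norm(src, axis=1, keepdims=True)
tgt /= np.linalg.norm(tgt, axis=1, keepdims=True)

sim = src.dot(tgt.T)
target_ids = list(target_embs)
neighbors = {sid: [target_ids[j] for j in np.argsort(sim[i])[::-1][:2]]
             for i, sid in enumerate(source_embs)}
top1 = sum(sid == nbrs[0] for sid, nbrs in neighbors.items()) / len(neighbors)
print(neighbors, top1)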
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
from fairseq.search import Search


class NoisyChannelBeamSearch(Search):

    def __init__(self, tgt_dict):
        super().__init__(tgt_dict)
        self.fw_scores_buf = None
        self.lm_scores_buf = None

    def _init_buffers(self, t):
        # super()._init_buffers(t)
        if self.fw_scores_buf is None:
            self.scores_buf = t.new()
            self.indices_buf = torch.LongTensor().to(device=t.device)
            self.beams_buf = torch.LongTensor().to(device=t.device)
            self.fw_scores_buf = t.new()
            self.lm_scores_buf = t.new()

    def combine_fw_bw(self, combine_method, fw_cum, bw, step):
        if combine_method == "noisy_channel":
            fw_norm = fw_cum.div(step + 1)
            lprobs = bw + fw_norm
        elif combine_method == "lm_only":
            lprobs = bw + fw_cum

        return lprobs

    def step(self, step, fw_lprobs, scores, bw_lprobs, lm_lprobs, combine_method):
        self._init_buffers(fw_lprobs)
        bsz, beam_size, vocab_size = fw_lprobs.size()

        if step == 0:
            # at the first step all hypotheses are equally likely, so use
            # only the first beam
            fw_lprobs = fw_lprobs[:, ::beam_size, :].contiguous()
            bw_lprobs = bw_lprobs[:, ::beam_size, :].contiguous()
            # nothing to add since we are at the first step
            fw_lprobs_cum = fw_lprobs

        else:
            # make probs contain cumulative scores for each hypothesis
            raw_scores = (scores[:, :, step - 1].unsqueeze(-1))
            fw_lprobs_cum = (fw_lprobs.add(raw_scores))

        combined_lprobs = self.combine_fw_bw(combine_method, fw_lprobs_cum, bw_lprobs, step)

        # choose the top k according to the combined noisy channel model score
        torch.topk(
            combined_lprobs.view(bsz, -1),
            k=min(
                # Take the best 2 x beam_size predictions. We'll choose the first
                # beam_size of these which don't predict eos to continue with.
                beam_size * 2,
                combined_lprobs.view(bsz, -1).size(1) - 1,  # -1 so we never select pad
            ),
            out=(self.scores_buf, self.indices_buf),
        )

        # save corresponding fw and lm scores
        self.fw_scores_buf = torch.gather(fw_lprobs_cum.view(bsz, -1), 1, self.indices_buf)
        self.lm_scores_buf = torch.gather(lm_lprobs.view(bsz, -1), 1, self.indices_buf)

        # Project back into relative indices and beams
        self.beams_buf = self.indices_buf // vocab_size
        self.indices_buf.fmod_(vocab_size)
        return self.scores_buf, self.fw_scores_buf, self.lm_scores_buf, self.indices_buf, self.beams_buf
KosmosX-API-main
kosmosX/fairseq/examples/fast_noisy_channel/noisy_channel_beam_search.py
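The "noisy_channel" combination in combine_fw_bw length-normalizes the cumulative direct-model score before adding the combined channel and LM score, so the two terms stay on comparable scales as hypotheses grow. A tiny sketch with toy log-probabilities:

import torch

step = 3                               # zero-based decoding step (toy value)
fw_cum = torch.tensor([[-2.0, -4.8]])  # cumulative log P(T|S) for two candidates (toy values)
bw = torch.tensor([[-3.1, -2.2]])      # combined log P(S|T) + log P(T) (toy values)

combined = bw + fw_cum.div(step + 1)   # "noisy_channel" combination
print(combined)                        # tensor([[-3.6000, -3.4000]])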
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import noisy_channel_translation  # noqa
from . import noisy_channel_sequence_generator  # noqa
from . import noisy_channel_beam_search  # noqa
KosmosX-API-main
kosmosX/fairseq/examples/fast_noisy_channel/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from typing import Dict, List, Optional import math import numpy as np import torch import torch.nn.functional as F from torch import Tensor from .noisy_channel_beam_search import NoisyChannelBeamSearch from fairseq.sequence_generator import EnsembleModel class NoisyChannelSequenceGenerator(object): def __init__( self, combine_method, tgt_dict, src_dict=None, beam_size=1, max_len_a=0, max_len_b=200, min_len=1, len_penalty=1.0, unk_penalty=0.0, retain_dropout=False, temperature=1.0, match_source_len=False, no_repeat_ngram_size=0, normalize_scores=True, channel_models=None, k2=10, ch_weight=1.0, channel_scoring_type='log_norm', top_k_vocab=0, lm_models=None, lm_dict=None, lm_weight=1.0, normalize_lm_scores_by_tgt_len=False, ): """Generates translations of a given source sentence, using beam search with noisy channel decoding. Args: combine_method (string, optional): Method to combine direct, LM and channel model scores (default: None) tgt_dict (~fairseq.data.Dictionary): target dictionary src_dict (~fairseq.data.Dictionary): source dictionary beam_size (int, optional): beam width (default: 1) max_len_a/b (int, optional): generate sequences of maximum length ax + b, where x is the source length min_len (int, optional): the minimum length of the generated output (not including end-of-sentence) len_penalty (float, optional): length penalty, where <1.0 favors shorter, >1.0 favors longer sentences (default: 1.0) unk_penalty (float, optional): unknown word penalty, where <0 produces more unks, >0 produces fewer (default: 0.0) retain_dropout (bool, optional): use dropout when generating (default: False) temperature (float, optional): temperature, where values >1.0 produce more uniform samples and values <1.0 produce sharper samples (default: 1.0) match_source_len (bool, optional): outputs should match the source length (default: False) no_repeat_ngram_size (int, optional): Size of n-grams that we avoid repeating in the generation (default: 0) normalize_scores (bool, optional): normalize scores by the length of the output (default: True) channel_models (List[~fairseq.models.FairseqModel]): ensemble of models translating from the target to the source k2 (int, optional): Top K2 candidates to score per beam at each step (default:10) ch_weight (int, optional): Weight associated with the channel model score assuming that the direct model score has weight 1.0 (default: 1.0) channel_scoring_type (str, optional): String specifying how to score the channel model (default: 'log_norm') top_k_vocab (int, optional): If `channel_scoring_type` is `'src_vocab'` or `'src_vocab_batched'`, then this parameter specifies the number of most frequent tokens to include in the channel model output vocabulary, in addition to the source tokens in the input batch (default: 0) lm_models (List[~fairseq.models.FairseqModel]): ensemble of models generating text in the target language lm_dict (~fairseq.data.Dictionary): LM Model dictionary lm_weight (int, optional): Weight associated with the LM model score assuming that the direct model score has weight 1.0 (default: 1.0) normalize_lm_scores_by_tgt_len (bool, optional): Should we normalize LM scores by the target length? 
By default, we normalize the combination of LM and channel model scores by the source length """ self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() self.vocab_size = len(tgt_dict) self.beam_size = beam_size # the max beam size is the dictionary size - 1, since we never select pad self.beam_size = min(beam_size, self.vocab_size - 1) self.max_len_a = max_len_a self.max_len_b = max_len_b self.min_len = min_len self.normalize_scores = normalize_scores self.len_penalty = len_penalty self.unk_penalty = unk_penalty self.retain_dropout = retain_dropout self.temperature = temperature self.match_source_len = match_source_len self.no_repeat_ngram_size = no_repeat_ngram_size self.channel_models = channel_models self.src_dict = src_dict self.tgt_dict = tgt_dict self.combine_method = combine_method self.k2 = k2 self.ch_weight = ch_weight self.channel_scoring_type = channel_scoring_type self.top_k_vocab = top_k_vocab self.lm_models = lm_models self.lm_dict = lm_dict self.lm_weight = lm_weight self.log_softmax_fn = torch.nn.LogSoftmax(dim=1) self.normalize_lm_scores_by_tgt_len = normalize_lm_scores_by_tgt_len self.share_tgt_dict = (self.lm_dict == self.tgt_dict) self.tgt_to_lm = make_dict2dict(tgt_dict, lm_dict) self.ch_scoring_bsz = 3072 assert temperature > 0, '--temperature must be greater than 0' self.search = NoisyChannelBeamSearch(tgt_dict) @torch.no_grad() def generate( self, models, sample, prefix_tokens=None, bos_token=None, **kwargs ): """Generate a batch of translations. Args: models (List[~fairseq.models.FairseqModel]): ensemble of models sample (dict): batch prefix_tokens (torch.LongTensor, optional): force decoder to begin with these tokens """ model = EnsembleModel(models) incremental_states = torch.jit.annotate( List[Dict[str, Dict[str, Optional[Tensor]]]], [ torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}) for i in range(model.models_size) ], ) if not self.retain_dropout: model.eval() # model.forward normally channels prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in sample['net_input'].items() if k != 'prev_output_tokens' } src_tokens = encoder_input['src_tokens'] src_lengths_no_eos = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1) input_size = src_tokens.size() # batch dimension goes first followed by source lengths bsz = input_size[0] src_len = input_size[1] beam_size = self.beam_size if self.match_source_len: max_len = src_lengths_no_eos.max().item() else: max_len = min( int(self.max_len_a * src_len + self.max_len_b), # exclude the EOS marker model.max_decoder_positions() - 1, ) # compute the encoder output for each beam encoder_outs = model.forward_encoder(encoder_input) new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1) new_order = new_order.to(src_tokens.device).long() encoder_outs = model.reorder_encoder_out(encoder_outs, new_order) src_lengths = encoder_input['src_lengths'] # initialize buffers scores = src_tokens.new(bsz * beam_size, max_len + 1).float().fill_(0) lm_prefix_scores = src_tokens.new(bsz * beam_size).float().fill_(0) scores_buf = scores.clone() tokens = src_tokens.new(bsz * beam_size, max_len + 2).long().fill_(self.pad) tokens_buf = tokens.clone() tokens[:, 0] = self.eos if bos_token is None else bos_token # reorder source tokens so they may be used as a reference in generating P(S|T) src_tokens = reorder_all_tokens(src_tokens, src_lengths, self.src_dict.eos_index) src_tokens = src_tokens.repeat(1, 
beam_size).view(-1, src_len) src_lengths = src_lengths.view(bsz, -1).repeat(1, beam_size).view(bsz*beam_size, -1) attn, attn_buf = None, None nonpad_idxs = None # The cands_to_ignore indicates candidates that should be ignored. # For example, suppose we're sampling and have already finalized 2/5 # samples. Then the cands_to_ignore would mark 2 positions as being ignored, # so that we only finalize the remaining 3 samples. cands_to_ignore = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask # list of completed sentences finalized = [[] for i in range(bsz)] finished = [False for i in range(bsz)] num_remaining_sent = bsz # number of candidate hypos per step cand_size = 2 * beam_size # 2 x beam size in case half are EOS # offset arrays for converting between different indexing schemes bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens) cand_offsets = torch.arange(0, cand_size).type_as(tokens) # helper function for allocating buffers on the fly buffers = {} def buffer(name, type_of=tokens): # noqa if name not in buffers: buffers[name] = type_of.new() return buffers[name] def is_finished(sent, step, unfin_idx): """ Check whether we've finished generation for a given sentence, by comparing the worst score among finalized hypotheses to the best possible score among unfinalized hypotheses. """ assert len(finalized[sent]) <= beam_size if len(finalized[sent]) == beam_size: return True return False def finalize_hypos(step, bbsz_idx, eos_scores, combined_noisy_channel_eos_scores): """ Finalize the given hypotheses at this step, while keeping the total number of finalized hypotheses per sentence <= beam_size. Note: the input must be in the desired finalization order, so that hypotheses that appear earlier in the input are preferred to those that appear later. 
Args: step: current time step bbsz_idx: A vector of indices in the range [0, bsz*beam_size), indicating which hypotheses to finalize eos_scores: A vector of the same size as bbsz_idx containing fw scores for each hypothesis combined_noisy_channel_eos_scores: A vector of the same size as bbsz_idx containing combined noisy channel scores for each hypothesis """ assert bbsz_idx.numel() == eos_scores.numel() # clone relevant token and attention tensors tokens_clone = tokens.index_select(0, bbsz_idx) tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS assert not tokens_clone.eq(self.eos).any() tokens_clone[:, step] = self.eos attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None # compute scores per token position pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1] pos_scores[:, step] = eos_scores # convert from cumulative to per-position scores pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1] # normalize sentence-level scores if self.normalize_scores: combined_noisy_channel_eos_scores /= (step + 1) ** self.len_penalty cum_unfin = [] prev = 0 for f in finished: if f: prev += 1 else: cum_unfin.append(prev) sents_seen = set() for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), combined_noisy_channel_eos_scores.tolist())): unfin_idx = idx // beam_size sent = unfin_idx + cum_unfin[unfin_idx] sents_seen.add((sent, unfin_idx)) if self.match_source_len and step > src_lengths_no_eos[unfin_idx]: score = -math.inf def get_hypo(): if attn_clone is not None: # remove padding tokens from attn scores hypo_attn = attn_clone[i][nonpad_idxs[sent]] _, alignment = hypo_attn.max(dim=0) else: hypo_attn = None alignment = None return { 'tokens': tokens_clone[i], 'score': score, 'attention': hypo_attn, # src_len x tgt_len 'alignment': alignment, 'positional_scores': pos_scores[i], } if len(finalized[sent]) < beam_size: finalized[sent].append(get_hypo()) newly_finished = [] for sent, unfin_idx in sents_seen: # check termination conditions for this sentence if not finished[sent] and is_finished(sent, step, unfin_idx): finished[sent] = True newly_finished.append(unfin_idx) return newly_finished def noisy_channel_rescoring(lprobs, beam_size, bsz, src_tokens, tokens, k): """Rescore the top k hypothesis from each beam using noisy channel modeling Returns: new_fw_lprobs: the direct model probabilities after pruning the top k new_ch_lm_lprobs: the combined channel and language model probabilities new_lm_lprobs: the language model probabilities after pruning the top k """ with torch.no_grad(): lprobs_size = lprobs.size() if prefix_tokens is not None and step < prefix_tokens.size(1): probs_slice = lprobs.view(bsz, -1, lprobs.size(-1))[:, 0, :] cand_scores = torch.gather( probs_slice, dim=1, index=prefix_tokens[:, step].view(-1, 1).data ).expand(-1, beam_size).contiguous().view(bsz*beam_size, 1) cand_indices = prefix_tokens[:, step].view(-1, 1).expand(bsz, beam_size).data.contiguous().view(bsz*beam_size, 1) # need to calculate and save fw and lm probs for prefix tokens fw_top_k = cand_scores fw_top_k_idx = cand_indices k = 1 else: # take the top k best words for every sentence in batch*beam fw_top_k, fw_top_k_idx = torch.topk(lprobs.view(beam_size*bsz, -1), k=k) eos_idx = torch.nonzero(fw_top_k_idx.view(bsz*beam_size*k, -1) == self.eos)[:, 0] ch_scores = fw_top_k.new_full((beam_size*bsz*k, ), 0) src_size = torch.sum(src_tokens[:, :] != self.src_dict.pad_index, dim=1, keepdim=True, dtype=fw_top_k.dtype) if self.combine_method != "lm_only": 
temp_src_tokens_full = src_tokens[:, :].repeat(1, k).view(bsz*beam_size*k, -1) not_padding = temp_src_tokens_full[:, 1:] != self.src_dict.pad_index cur_tgt_size = step+2 # add eos to all candidate sentences except those that already end in eos eos_tokens = tokens[:, 0].repeat(1, k).view(-1, 1) eos_tokens[eos_idx] = self.tgt_dict.pad_index if step == 0: channel_input = torch.cat((fw_top_k_idx.view(-1, 1), eos_tokens), 1) else: # move eos from beginning to end of target sentence channel_input = torch.cat((tokens[:, 1:step + 1].repeat(1, k).view(-1, step), fw_top_k_idx.view(-1, 1), eos_tokens), 1) ch_input_lengths = torch.tensor(np.full(channel_input.size(0), cur_tgt_size)) ch_input_lengths[eos_idx] = cur_tgt_size-1 if self.channel_scoring_type == "unnormalized": ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths) ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True) del ch_encoder_output ch_intermed_scores = channel_model.decoder.unnormalized_scores_given_target(ch_decoder_output, target_ids=temp_src_tokens_full[:, 1:]) ch_intermed_scores = ch_intermed_scores.float() ch_intermed_scores *= not_padding.float() ch_scores = torch.sum(ch_intermed_scores, dim=1) elif self.channel_scoring_type == "k2_separate": for k_idx in range(k): k_eos_tokens = eos_tokens[k_idx::k, :] if step == 0: k_ch_input = torch.cat((fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1) else: # move eos from beginning to end of target sentence k_ch_input = torch.cat((tokens[:, 1:step + 1], fw_top_k_idx[:, k_idx:k_idx+1], k_eos_tokens), 1) k_ch_input_lengths = ch_input_lengths[k_idx::k] k_ch_output = channel_model(k_ch_input, k_ch_input_lengths, src_tokens) k_ch_lprobs = channel_model.get_normalized_probs(k_ch_output, log_probs=True) k_ch_intermed_scores = torch.gather(k_ch_lprobs[:, :-1, :], 2, src_tokens[:, 1:].unsqueeze(2)).squeeze(2) k_ch_intermed_scores *= not_padding.float() ch_scores[k_idx::k] = torch.sum(k_ch_intermed_scores, dim=1) elif self.channel_scoring_type == "src_vocab": ch_encoder_output = channel_model.encoder(channel_input, src_lengths=ch_input_lengths) ch_decoder_output, _ = channel_model.decoder(temp_src_tokens_full, encoder_out=ch_encoder_output, features_only=True) del ch_encoder_output ch_lprobs = normalized_scores_with_batch_vocab( channel_model.decoder, ch_decoder_output, src_tokens, k, bsz, beam_size, self.src_dict.pad_index, top_k=self.top_k_vocab) ch_scores = torch.sum(ch_lprobs, dim=1) elif self.channel_scoring_type == "src_vocab_batched": ch_bsz_size = temp_src_tokens_full.shape[0] ch_lprobs_list = [None] * len(range(0, ch_bsz_size, self.ch_scoring_bsz)) for i, start_idx in enumerate(range(0, ch_bsz_size, self.ch_scoring_bsz)): end_idx = min(start_idx + self.ch_scoring_bsz, ch_bsz_size) temp_src_tokens_full_batch = temp_src_tokens_full[start_idx:end_idx, :] channel_input_batch = channel_input[start_idx:end_idx, :] ch_input_lengths_batch = ch_input_lengths[start_idx:end_idx] ch_encoder_output_batch = channel_model.encoder(channel_input_batch, src_lengths=ch_input_lengths_batch) ch_decoder_output_batch, _ = channel_model.decoder(temp_src_tokens_full_batch, encoder_out=ch_encoder_output_batch, features_only=True) ch_lprobs_list[i] = normalized_scores_with_batch_vocab( channel_model.decoder, ch_decoder_output_batch, src_tokens, k, bsz, beam_size, self.src_dict.pad_index, top_k=self.top_k_vocab, start_idx=start_idx, end_idx=end_idx) ch_lprobs = torch.cat(ch_lprobs_list, dim=0) ch_scores = 
torch.sum(ch_lprobs, dim=1) else: ch_output = channel_model(channel_input, ch_input_lengths, temp_src_tokens_full) ch_lprobs = channel_model.get_normalized_probs(ch_output, log_probs=True) ch_intermed_scores = torch.gather(ch_lprobs[:, :-1, :], 2, temp_src_tokens_full[:, 1:].unsqueeze(2)).squeeze().view(bsz*beam_size*k, -1) ch_intermed_scores *= not_padding.float() ch_scores = torch.sum(ch_intermed_scores, dim=1) else: cur_tgt_size = 0 ch_scores = ch_scores.view(bsz*beam_size, k) expanded_lm_prefix_scores = lm_prefix_scores.unsqueeze(1).expand(-1, k).flatten() if self.share_tgt_dict: lm_scores = get_lm_scores(lm, tokens[:, :step + 1].view(-1, step+1), lm_incremental_states, fw_top_k_idx.view(-1, 1), torch.tensor(np.full(tokens.size(0), step+1)), k) else: new_lm_input = dict2dict(tokens[:, :step + 1].view(-1, step+1), self.tgt_to_lm) new_cands = dict2dict(fw_top_k_idx.view(-1, 1), self.tgt_to_lm) lm_scores = get_lm_scores(lm, new_lm_input, lm_incremental_states, new_cands, torch.tensor(np.full(tokens.size(0), step+1)), k) lm_scores.add_(expanded_lm_prefix_scores) ch_lm_scores = combine_ch_lm(self.combine_method, ch_scores, lm_scores, src_size, cur_tgt_size) # initialize all as min value new_fw_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) new_ch_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) new_lm_lprobs = ch_scores.new(lprobs_size).fill_(-1e17).view(bsz*beam_size, -1) new_fw_lprobs[:, self.pad] = -math.inf new_ch_lm_lprobs[:, self.pad] = -math.inf new_lm_lprobs[:, self.pad] = -math.inf new_fw_lprobs.scatter_(1, fw_top_k_idx, fw_top_k) new_ch_lm_lprobs.scatter_(1, fw_top_k_idx, ch_lm_scores) new_lm_lprobs.scatter_(1, fw_top_k_idx, lm_scores.view(-1, k)) return new_fw_lprobs, new_ch_lm_lprobs, new_lm_lprobs def combine_ch_lm(combine_type, ch_scores, lm_scores1, src_size, tgt_size): if self.channel_scoring_type == "unnormalized": ch_scores = self.log_softmax_fn( ch_scores.view(-1, self.beam_size * self.k2) ).view(ch_scores.shape) ch_scores = ch_scores * self.ch_weight lm_scores1 = lm_scores1 * self.lm_weight if combine_type == "lm_only": # log P(T|S) + log P(T) ch_scores = lm_scores1.view(ch_scores.size()) elif combine_type == "noisy_channel": # 1/t log P(T|S) + 1/s log P(S|T) + 1/t log P(T) if self.normalize_lm_scores_by_tgt_len: ch_scores.div_(src_size) lm_scores_norm = lm_scores1.view(ch_scores.size()).div(tgt_size) ch_scores.add_(lm_scores_norm) # 1/t log P(T|S) + 1/s log P(S|T) + 1/s log P(T) else: ch_scores.add_(lm_scores1.view(ch_scores.size())) ch_scores.div_(src_size) return ch_scores if self.channel_models is not None: channel_model = self.channel_models[0] # assume only one channel_model model else: channel_model = None lm = EnsembleModel(self.lm_models) lm_incremental_states = torch.jit.annotate( List[Dict[str, Dict[str, Optional[Tensor]]]], [ torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {}) for i in range(lm.models_size) ], ) reorder_state = None batch_idxs = None for step in range(max_len + 1): # one extra step for EOS marker # reorder decoder internal states based on the prev choice of beams if reorder_state is not None: if batch_idxs is not None: # update beam indices to take into account removed sentences corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs) reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size) model.reorder_incremental_state(incremental_states, reorder_state) encoder_outs = model.reorder_encoder_out(encoder_outs, reorder_state) 
lm.reorder_incremental_state(lm_incremental_states, reorder_state) fw_lprobs, avg_attn_scores = model.forward_decoder( tokens[:, :step + 1], encoder_outs, incremental_states, temperature=self.temperature, ) fw_lprobs[:, self.pad] = -math.inf # never select pad fw_lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty fw_lprobs, ch_lm_lprobs, lm_lprobs = noisy_channel_rescoring(fw_lprobs, beam_size, bsz, src_tokens, tokens, self.k2) # handle min and max length constraints if step >= max_len: fw_lprobs[:, :self.eos] = -math.inf fw_lprobs[:, self.eos + 1:] = -math.inf elif step < self.min_len: fw_lprobs[:, self.eos] = -math.inf # handle prefix tokens (possibly with different lengths) if prefix_tokens is not None and step < prefix_tokens.size(1): prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1) prefix_mask = prefix_toks.ne(self.pad) prefix_fw_lprobs = fw_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) fw_lprobs[prefix_mask] = -math.inf fw_lprobs[prefix_mask] = fw_lprobs[prefix_mask].scatter_( -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_fw_lprobs ) prefix_ch_lm_lprobs = ch_lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) ch_lm_lprobs[prefix_mask] = -math.inf ch_lm_lprobs[prefix_mask] = ch_lm_lprobs[prefix_mask].scatter_( -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_ch_lm_lprobs ) prefix_lm_lprobs = lm_lprobs.gather(-1, prefix_toks.unsqueeze(-1)) lm_lprobs[prefix_mask] = -math.inf lm_lprobs[prefix_mask] = lm_lprobs[prefix_mask].scatter_( -1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lm_lprobs ) # if prefix includes eos, then we should make sure tokens and # scores are the same across all beams eos_mask = prefix_toks.eq(self.eos) if eos_mask.any(): # validate that the first beam matches the prefix first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1] eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0] target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step] assert (first_beam == target_prefix).all() def replicate_first_beam(tensor, mask): tensor = tensor.view(-1, beam_size, tensor.size(-1)) tensor[mask] = tensor[mask][:, :1, :] return tensor.view(-1, tensor.size(-1)) # copy tokens, scores and lprobs from the first beam to all beams tokens = replicate_first_beam(tokens, eos_mask_batch_dim) scores = replicate_first_beam(scores, eos_mask_batch_dim) fw_lprobs = replicate_first_beam(fw_lprobs, eos_mask_batch_dim) ch_lm_lprobs = replicate_first_beam(ch_lm_lprobs, eos_mask_batch_dim) lm_lprobs = replicate_first_beam(lm_lprobs, eos_mask_batch_dim) if self.no_repeat_ngram_size > 0: # for each beam and batch sentence, generate a list of previous ngrams gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)] for bbsz_idx in range(bsz * beam_size): gen_tokens = tokens[bbsz_idx].tolist() for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]): gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \ gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]] # Record attention scores if avg_attn_scores is not None: if attn is None: attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2) attn_buf = attn.clone() nonpad_idxs = src_tokens.ne(self.pad) attn[:, :, step + 1].copy_(avg_attn_scores) scores = scores.type_as(fw_lprobs) scores_buf = scores_buf.type_as(fw_lprobs) self.search.set_src_lengths(src_lengths_no_eos) if self.no_repeat_ngram_size > 0: def calculate_banned_tokens(bbsz_idx): # before decoding the next token, prevent decoding of ngrams that have already appeared ngram_index = 
tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist()) return gen_ngrams[bbsz_idx].get(ngram_index, []) if step + 2 - self.no_repeat_ngram_size >= 0: # no banned tokens if we haven't generated no_repeat_ngram_size tokens yet banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)] else: banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)] for bbsz_idx in range(bsz * beam_size): fw_lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf combined_noisy_channel_scores, fw_lprobs_top_k, lm_lprobs_top_k, cand_indices, cand_beams = self.search.step( step, fw_lprobs.view(bsz, -1, self.vocab_size), scores.view(bsz, beam_size, -1)[:, :, :step], ch_lm_lprobs.view(bsz, -1, self.vocab_size), lm_lprobs.view(bsz, -1, self.vocab_size), self.combine_method ) # cand_bbsz_idx contains beam indices for the top candidate # hypotheses, with a range of values: [0, bsz*beam_size), # and dimensions: [bsz, cand_size] cand_bbsz_idx = cand_beams.add(bbsz_offsets) # finalize hypotheses that end in eos (except for candidates to be ignored) eos_mask = cand_indices.eq(self.eos) eos_mask[:, :beam_size] &= ~cands_to_ignore # only consider eos when it's among the top beam_size indices eos_bbsz_idx = torch.masked_select( cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size] ) finalized_sents = set() if eos_bbsz_idx.numel() > 0: eos_scores = torch.masked_select( fw_lprobs_top_k[:, :beam_size], mask=eos_mask[:, :beam_size] ) combined_noisy_channel_eos_scores = torch.masked_select( combined_noisy_channel_scores[:, :beam_size], mask=eos_mask[:, :beam_size], ) # finalize hypo using channel model score finalized_sents = finalize_hypos( step, eos_bbsz_idx, eos_scores, combined_noisy_channel_eos_scores) num_remaining_sent -= len(finalized_sents) assert num_remaining_sent >= 0 if num_remaining_sent == 0: break if len(finalized_sents) > 0: new_bsz = bsz - len(finalized_sents) # construct batch_idxs which holds indices of batches to keep for the next pass batch_mask = cand_indices.new_ones(bsz) batch_mask[cand_indices.new(finalized_sents)] = 0 batch_idxs = torch.nonzero(batch_mask).squeeze(-1) eos_mask = eos_mask[batch_idxs] cand_beams = cand_beams[batch_idxs] bbsz_offsets.resize_(new_bsz, 1) cand_bbsz_idx = cand_beams.add(bbsz_offsets) lm_lprobs_top_k = lm_lprobs_top_k[batch_idxs] fw_lprobs_top_k = fw_lprobs_top_k[batch_idxs] cand_indices = cand_indices[batch_idxs] if prefix_tokens is not None: prefix_tokens = prefix_tokens[batch_idxs] src_lengths_no_eos = src_lengths_no_eos[batch_idxs] cands_to_ignore = cands_to_ignore[batch_idxs] scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) scores_buf.resize_as_(scores) tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) tokens_buf.resize_as_(tokens) src_tokens = src_tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) src_lengths = src_lengths.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) lm_prefix_scores = lm_prefix_scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1).squeeze() if attn is not None: attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1) attn_buf.resize_as_(attn) bsz = new_bsz else: batch_idxs = None # Set active_mask so that values > cand_size indicate eos or # ignored hypos and values < cand_size indicate candidate # active hypos. After this, the min values per row are the top # candidate active hypos. 
eos_mask[:, :beam_size] |= cands_to_ignore active_mask = torch.add( eos_mask.type_as(cand_offsets) * cand_size, cand_offsets[: eos_mask.size(1)], ) # get the top beam_size active hypotheses, which are just the hypos # with the smallest values in active_mask active_hypos, new_cands_to_ignore = buffer('active_hypos'), buffer('new_cands_to_ignore') torch.topk( active_mask, k=beam_size, dim=1, largest=False, out=(new_cands_to_ignore, active_hypos) ) # update cands_to_ignore to ignore any finalized hypos cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size] assert (~cands_to_ignore).any(dim=1).all() active_bbsz_idx = buffer('active_bbsz_idx') torch.gather( cand_bbsz_idx, dim=1, index=active_hypos, out=active_bbsz_idx, ) active_scores = torch.gather( fw_lprobs_top_k, dim=1, index=active_hypos, out=scores[:, step].view(bsz, beam_size), ) active_bbsz_idx = active_bbsz_idx.view(-1) active_scores = active_scores.view(-1) # copy tokens and scores for active hypotheses torch.index_select( tokens[:, :step + 1], dim=0, index=active_bbsz_idx, out=tokens_buf[:, :step + 1], ) torch.gather( cand_indices, dim=1, index=active_hypos, out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1], ) if step > 0: torch.index_select( scores[:, :step], dim=0, index=active_bbsz_idx, out=scores_buf[:, :step], ) torch.gather( fw_lprobs_top_k, dim=1, index=active_hypos, out=scores_buf.view(bsz, beam_size, -1)[:, :, step], ) torch.gather( lm_lprobs_top_k, dim=1, index=active_hypos, out=lm_prefix_scores.view(bsz, beam_size) ) # copy attention for active hypotheses if attn is not None: torch.index_select( attn[:, :, :step + 2], dim=0, index=active_bbsz_idx, out=attn_buf[:, :, :step + 2], ) # swap buffers tokens, tokens_buf = tokens_buf, tokens scores, scores_buf = scores_buf, scores if attn is not None: attn, attn_buf = attn_buf, attn # reorder incremental state in decoder reorder_state = active_bbsz_idx # sort by score descending for sent in range(len(finalized)): finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True) return finalized def get_lm_scores(model, input_tokens, incremental_states, cand_tokens, input_len, k): with torch.no_grad(): lm_lprobs, avg_attn_scores = model.forward_decoder( input_tokens, encoder_outs=None, incremental_states=incremental_states, ) lm_lprobs_size = lm_lprobs.size(0) probs_next_wrd = torch.gather(lm_lprobs.repeat(1, k).view(lm_lprobs_size*k, -1), 1, cand_tokens).squeeze().view(-1) return probs_next_wrd def make_dict2dict(old_dict, new_dict): dict2dict_map = {} for sym in old_dict.symbols: dict2dict_map[old_dict.index(sym)] = new_dict.index(sym) return dict2dict_map def dict2dict(tokens, dict2dict_map): if tokens.device == torch.device('cpu'): tokens_tmp = tokens else: tokens_tmp = tokens.cpu() return tokens_tmp.map_( tokens_tmp, lambda _, val, dict2dict_map=dict2dict_map : dict2dict_map[float(val)] ).to(tokens.device) def reorder_tokens(tokens, lengths, eos): # reorder source tokens so they may be used as reference for P(S|T) return torch.cat((tokens.new([eos]), tokens[-lengths:-1], tokens[:-lengths]), 0) def reorder_all_tokens(tokens, lengths, eos): # used to reorder src tokens from [<pad> <w1> <w2> .. 
<eos>] to [<eos> <w1> <w2>...<pad>] # so source tokens can be used to predict P(S|T) return torch.stack([reorder_tokens(token, length, eos) for token, length in zip(tokens, lengths)]) def normalized_scores_with_batch_vocab( model_decoder, features, target_ids, k, bsz, beam_size, pad_idx, top_k=0, vocab_size_meter=None, start_idx=None, end_idx=None, **kwargs): """ Get normalized probabilities (or log probs) from a net's output w.r.t. vocab consisting of target IDs in the batch """ if model_decoder.adaptive_softmax is None: weight = model_decoder.output_projection.weight vocab_ids = torch.unique( torch.cat( (torch.unique(target_ids), torch.arange(top_k, device=target_ids.device)) ) ) id_map = dict(zip(vocab_ids.tolist(), range(len(vocab_ids)))) mapped_target_ids = target_ids.cpu().apply_( lambda x, id_map=id_map: id_map[x] ).to(target_ids.device) expanded_target_ids = mapped_target_ids[:, :].repeat(1, k).view(bsz*beam_size*k, -1) if start_idx is not None and end_idx is not None: expanded_target_ids = expanded_target_ids[start_idx:end_idx, :] logits = F.linear(features, weight[vocab_ids, :]) log_softmax = F.log_softmax(logits, dim=-1, dtype=torch.float32) intermed_scores = torch.gather( log_softmax[:, :-1, :], 2, expanded_target_ids[:, 1:].unsqueeze(2), ).squeeze() not_padding = expanded_target_ids[:, 1:] != pad_idx intermed_scores *= not_padding.float() return intermed_scores else: raise ValueError("adaptive softmax doesn't work with " + "`normalized_scores_with_batch_vocab()`")
KosmosX-API-main
kosmosX/fairseq/examples/fast_noisy_channel/noisy_channel_sequence_generator.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.tasks.translation import TranslationTask
from fairseq.tasks.language_modeling import LanguageModelingTask
from fairseq import checkpoint_utils
import argparse
from fairseq.tasks import register_task
import torch


@register_task("noisy_channel_translation")
class NoisyChannelTranslation(TranslationTask):
    """
    Rescore the top k candidates from each beam using noisy channel modeling
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        TranslationTask.add_args(parser)
        # fmt: off
        parser.add_argument('--channel-model', metavar='FILE',
                            help='path to P(S|T) model. P(S|T) and P(T|S) must share source and target dictionaries.')
        parser.add_argument('--combine-method', default='lm_only',
                            choices=['lm_only', 'noisy_channel'],
                            help="""method for combining direct and channel model scores.
                                    lm_only: decode with P(T|S)P(T)
                                    noisy_channel: decode with 1/t P(T|S) + 1/s(P(S|T)P(T))""")
        parser.add_argument('--normalize-lm-scores-by-tgt-len', action='store_true', default=False,
                            help='normalize lm score by target length instead of source length')
        parser.add_argument('--channel-scoring-type', default='log_norm',
                            choices=['unnormalized', 'log_norm', 'k2_separate', 'src_vocab', 'src_vocab_batched'],
                            help="Normalize bw scores with log softmax or return bw scores without log softmax")
        parser.add_argument('--top-k-vocab', default=0, type=int,
                            help='top k vocab IDs to use with `src_vocab` in channel model scoring')
        parser.add_argument('--k2', default=50, type=int,
                            help='the top k2 candidates to rescore with the noisy channel model for each beam')
        parser.add_argument('--ch-wt', default=1, type=float,
                            help='weight for the channel model')
        parser.add_argument('--lm-model', metavar='FILE',
                            help='path to lm model file, to model P(T). P(T) must share the same vocab as the direct model on the target side')
        parser.add_argument('--lm-data', metavar='FILE',
                            help='path to lm model training data for target language, used to properly load LM with correct dictionary')
        parser.add_argument('--lm-wt', default=1, type=float,
                            help='the weight of the lm in joint decoding')
        # fmt: on

    def build_generator(
        self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None
    ):
        if getattr(args, "score_reference", False):
            raise NotImplementedError()
        else:
            from .noisy_channel_sequence_generator import NoisyChannelSequenceGenerator
            use_cuda = torch.cuda.is_available() and not self.args.cpu
            assert self.args.lm_model is not None, '--lm-model required for noisy channel generation!'
            assert self.args.lm_data is not None, '--lm-data required for noisy channel generation to map between LM and bitext vocabs'

            if self.args.channel_model is not None:
                import copy
                ch_args_task = copy.deepcopy(self.args)
                tmp = ch_args_task.source_lang
                ch_args_task.source_lang = ch_args_task.target_lang
                ch_args_task.target_lang = tmp
                ch_args_task._name = 'translation'
                channel_task = TranslationTask.setup_task(ch_args_task)

            arg_dict = {}
            arg_dict['task'] = 'language_modeling'
            arg_dict['sample_break_mode'] = 'eos'
            arg_dict['data'] = self.args.lm_data
            arg_dict['output_dictionary_size'] = -1
            lm_args = argparse.Namespace(**arg_dict)
            lm_task = LanguageModelingTask.setup_task(lm_args)
            lm_dict = lm_task.output_dictionary

            if self.args.channel_model is not None:
                channel_models, _ = checkpoint_utils.load_model_ensemble(self.args.channel_model.split(':'), task=channel_task)

                for model in channel_models:
                    model.make_generation_fast_(
                        beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
                        need_attn=args.print_alignment,
                    )
                    if self.args.fp16:
                        model.half()
                    if use_cuda:
                        model.cuda()
            else:
                channel_models = None

            lm_models, _ = checkpoint_utils.load_model_ensemble(self.args.lm_model.split(':'), task=lm_task)

            for model in lm_models:
                model.make_generation_fast_(
                    beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
                    need_attn=args.print_alignment,
                )
                if self.args.fp16:
                    model.half()
                if use_cuda:
                    model.cuda()

            return NoisyChannelSequenceGenerator(
                combine_method=self.args.combine_method,
                tgt_dict=self.target_dictionary,
                src_dict=self.source_dictionary,
                beam_size=getattr(args, 'beam', 5),
                max_len_a=getattr(args, 'max_len_a', 0),
                max_len_b=getattr(args, 'max_len_b', 200),
                min_len=getattr(args, 'min_len', 1),
                len_penalty=getattr(args, 'lenpen', 1),
                unk_penalty=getattr(args, 'unkpen', 0),
                temperature=getattr(args, 'temperature', 1.),
                match_source_len=getattr(args, 'match_source_len', False),
                no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0),
                normalize_scores=(not getattr(args, 'unnormalized', False)),
                channel_models=channel_models,
                k2=getattr(self.args, 'k2', 50),
                ch_weight=getattr(self.args, 'ch_wt', 1),
                channel_scoring_type=self.args.channel_scoring_type,
                top_k_vocab=self.args.top_k_vocab,
                lm_models=lm_models,
                lm_dict=lm_dict,
                lm_weight=getattr(self.args, 'lm_wt', 1),
                normalize_lm_scores_by_tgt_len=getattr(self.args, 'normalize_lm_scores_by_tgt_len', False),
            )
KosmosX-API-main
kosmosX/fairseq/examples/fast_noisy_channel/noisy_channel_translation.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import os import os.path as op from collections import namedtuple from multiprocessing import cpu_count from typing import List, Optional import sentencepiece as sp from fairseq.data.encoders.byte_bpe import ByteBPE from fairseq.data.encoders.byte_utils import byte_encode from fairseq.data.encoders.bytes import Bytes from fairseq.data.encoders.characters import Characters from fairseq.data.encoders.moses_tokenizer import MosesTokenizer from fairseq.data.encoders.sentencepiece_bpe import SentencepieceBPE SPLITS = ["train", "valid", "test"] def _convert_xml(in_path: str, out_path: str): with open(in_path) as f, open(out_path, "w") as f_o: for s in f: ss = s.strip() if not ss.startswith("<seg"): continue ss = ss.replace("</seg>", "").split('">') assert len(ss) == 2 f_o.write(ss[1].strip() + "\n") def _convert_train(in_path: str, out_path: str): with open(in_path) as f, open(out_path, "w") as f_o: for s in f: ss = s.strip() if ss.startswith("<"): continue f_o.write(ss.strip() + "\n") def _get_bytes(in_path: str, out_path: str): with open(in_path) as f, open(out_path, "w") as f_o: for s in f: f_o.write(Bytes.encode(s.strip()) + "\n") def _get_chars(in_path: str, out_path: str): with open(in_path) as f, open(out_path, "w") as f_o: for s in f: f_o.write(Characters.encode(s.strip()) + "\n") def pretokenize(in_path: str, out_path: str, src: str, tgt: str): Args = namedtuple( "Args", [ "moses_source_lang", "moses_target_lang", "moses_no_dash_splits", "moses_no_escape", ], ) args = Args( moses_source_lang=src, moses_target_lang=tgt, moses_no_dash_splits=False, moses_no_escape=False, ) pretokenizer = MosesTokenizer(args) with open(in_path) as f, open(out_path, "w") as f_o: for s in f: f_o.write(pretokenizer.encode(s.strip()) + "\n") def _convert_to_bchar(in_path_prefix: str, src: str, tgt: str, out_path: str): with open(out_path, "w") as f_o: for lang in [src, tgt]: with open(f"{in_path_prefix}.{lang}") as f: for s in f: f_o.write(byte_encode(s.strip()) + "\n") def _get_bpe(in_path: str, model_prefix: str, vocab_size: int): arguments = [ f"--input={in_path}", f"--model_prefix={model_prefix}", "--model_type=bpe", f"--vocab_size={vocab_size}", "--character_coverage=1.0", "--normalization_rule_name=identity", f"--num_threads={cpu_count()}", ] sp.SentencePieceTrainer.Train(" ".join(arguments)) def _apply_bbpe(model_path: str, in_path: str, out_path: str): Args = namedtuple("Args", ["sentencepiece_model_path"]) args = Args(sentencepiece_model_path=model_path) tokenizer = ByteBPE(args) with open(in_path) as f, open(out_path, "w") as f_o: for s in f: f_o.write(tokenizer.encode(s.strip()) + "\n") def _apply_bpe(model_path: str, in_path: str, out_path: str): Args = namedtuple("Args", ["sentencepiece_model"]) args = Args(sentencepiece_model=model_path) tokenizer = SentencepieceBPE(args) with open(in_path) as f, open(out_path, "w") as f_o: for s in f: f_o.write(tokenizer.encode(s.strip()) + "\n") def _concat_files(in_paths: List[str], out_path: str): with open(out_path, "w") as f_o: for p in in_paths: with open(p) as f: for r in f: f_o.write(r) def preprocess_iwslt17( root: str, src: str, tgt: str, bpe_size: Optional[int], need_chars: bool, bbpe_size: Optional[int], need_bytes: bool, ): # extract bitext in_root = op.join(root, f"{src}-{tgt}") for lang in [src, tgt]: _convert_train( op.join(in_root, 
f"train.tags.{src}-{tgt}.{lang}"), op.join(root, f"train.{lang}"), ) _convert_xml( op.join(in_root, f"IWSLT17.TED.dev2010.{src}-{tgt}.{lang}.xml"), op.join(root, f"valid.{lang}"), ) _convert_xml( op.join(in_root, f"IWSLT17.TED.tst2015.{src}-{tgt}.{lang}.xml"), op.join(root, f"test.{lang}"), ) # pre-tokenize for lang in [src, tgt]: for split in SPLITS: pretokenize( op.join(root, f"{split}.{lang}"), op.join(root, f"{split}.moses.{lang}"), src, tgt, ) # tokenize with BPE vocabulary if bpe_size is not None: # learn vocabulary concated_train_path = op.join(root, "train.all") _concat_files( [op.join(root, "train.moses.fr"), op.join(root, "train.moses.en")], concated_train_path, ) bpe_model_prefix = op.join(root, f"spm_bpe{bpe_size}") _get_bpe(concated_train_path, bpe_model_prefix, bpe_size) os.remove(concated_train_path) # apply for lang in [src, tgt]: for split in SPLITS: _apply_bpe( bpe_model_prefix + ".model", op.join(root, f"{split}.moses.{lang}"), op.join(root, f"{split}.moses.bpe{bpe_size}.{lang}"), ) # tokenize with bytes vocabulary if need_bytes: for lang in [src, tgt]: for split in SPLITS: _get_bytes( op.join(root, f"{split}.moses.{lang}"), op.join(root, f"{split}.moses.bytes.{lang}"), ) # tokenize with characters vocabulary if need_chars: for lang in [src, tgt]: for split in SPLITS: _get_chars( op.join(root, f"{split}.moses.{lang}"), op.join(root, f"{split}.moses.chars.{lang}"), ) # tokenize with byte-level BPE vocabulary if bbpe_size is not None: # learn vocabulary bchar_path = op.join(root, "train.bchar") _convert_to_bchar(op.join(root, "train.moses"), src, tgt, bchar_path) bbpe_model_prefix = op.join(root, f"spm_bbpe{bbpe_size}") _get_bpe(bchar_path, bbpe_model_prefix, bbpe_size) os.remove(bchar_path) # apply for lang in [src, tgt]: for split in SPLITS: _apply_bbpe( bbpe_model_prefix + ".model", op.join(root, f"{split}.moses.{lang}"), op.join(root, f"{split}.moses.bbpe{bbpe_size}.{lang}"), ) def main(): parser = argparse.ArgumentParser() parser.add_argument("--root", type=str, default="data") parser.add_argument( "--bpe-vocab", default=None, type=int, help="Generate tokenized bitext with BPE of size K." "Default to None (disabled).", ) parser.add_argument( "--bbpe-vocab", default=None, type=int, help="Generate tokenized bitext with BBPE of size K." "Default to None (disabled).", ) parser.add_argument( "--byte-vocab", action="store_true", help="Generate tokenized bitext with bytes vocabulary", ) parser.add_argument( "--char-vocab", action="store_true", help="Generate tokenized bitext with chars vocabulary", ) args = parser.parse_args() preprocess_iwslt17( args.root, "fr", "en", args.bpe_vocab, args.char_vocab, args.bbpe_vocab, args.byte_vocab, ) if __name__ == "__main__": main()
KosmosX-API-main
kosmosX/fairseq/examples/byte_level_bpe/get_bitext.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch.nn as nn
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import TransformerEncoder, TransformerModel


@register_model("gru_transformer")
class GRUTransformerModel(TransformerModel):
    @classmethod
    def build_encoder(cls, args, src_dict, embed_tokens):
        return GRUTransformerEncoder(args, src_dict, embed_tokens)


class GRUTransformerEncoder(TransformerEncoder):
    def __init__(self, args, dictionary, embed_tokens):
        super().__init__(args, dictionary, embed_tokens)
        self.emb_ctx = nn.GRU(
            input_size=embed_tokens.embedding_dim,
            hidden_size=embed_tokens.embedding_dim // 2,
            num_layers=1,
            bidirectional=True,
        )

    def forward_embedding(self, src_tokens):
        # embed tokens and positions
        x = embed = self.embed_scale * self.embed_tokens(src_tokens)
        if self.embed_positions is not None:
            x = embed + self.embed_positions(src_tokens)

        # contextualize embeddings
        x = x.transpose(0, 1)
        x = self.dropout_module(x)
        x, _ = self.emb_ctx.forward(x)
        x = x.transpose(0, 1)

        if self.layernorm_embedding is not None:
            x = self.layernorm_embedding(x)
        x = self.dropout_module(x)
        return x, embed


@register_model_architecture("gru_transformer", "gru_transformer")
def gru_transformer_base_architecture(args):
    args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
    args.encoder_layers = getattr(args, "encoder_layers", 6)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
    args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
    args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
    args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
    args.decoder_ffn_embed_dim = getattr(
        args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
    )
    args.decoder_layers = getattr(args, "decoder_layers", 6)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
    args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
    args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
    args.attention_dropout = getattr(args, "attention_dropout", 0.0)
    args.activation_dropout = getattr(args, "activation_dropout", 0.0)
    args.activation_fn = getattr(args, "activation_fn", "relu")
    args.dropout = getattr(args, "dropout", 0.1)
    args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
    args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
    args.share_decoder_input_output_embed = getattr(
        args, "share_decoder_input_output_embed", False
    )
    args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
    args.no_token_positional_embeddings = getattr(
        args, "no_token_positional_embeddings", False
    )
    args.adaptive_input = getattr(args, "adaptive_input", False)
    args.no_cross_attention = getattr(args, "no_cross_attention", False)
    args.cross_self_attention = getattr(args, "cross_self_attention", False)
    args.layer_wise_attention = getattr(args, "layer_wise_attention", False)
    args.decoder_output_dim = getattr(
        args, "decoder_output_dim", args.decoder_embed_dim
    )
    args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
    args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
    args.layernorm_embedding = getattr(args, "layernorm_embedding", False)


@register_model_architecture("gru_transformer", "gru_transformer_big")
def gru_transformer_big(args):
    args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
    args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
    args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
    args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
    args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024)
    args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096)
    args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
    args.dropout = getattr(args, "dropout", 0.3)
    gru_transformer_base_architecture(args)
KosmosX-API-main
kosmosX/fairseq/examples/byte_level_bpe/gru_transformer.py
#!/usr/bin/env python
"""Helper script to compare two argparse.Namespace objects."""

from argparse import Namespace  # noqa


def main():

    ns1 = eval(input("Namespace 1: "))
    ns2 = eval(input("Namespace 2: "))

    def keys(ns):
        ks = set()
        for k in dir(ns):
            if not k.startswith("_"):
                ks.add(k)
        return ks

    k1 = keys(ns1)
    k2 = keys(ns2)

    def print_keys(ks, ns1, ns2=None):
        for k in ks:
            if ns2 is None:
                print("{}\t{}".format(k, getattr(ns1, k, None)))
            else:
                print(
                    "{}\t{}\t{}".format(k, getattr(ns1, k, None), getattr(ns2, k, None))
                )

    print("Keys unique to namespace 1:")
    print_keys(k1 - k2, ns1)
    print()

    print("Keys unique to namespace 2:")
    print_keys(k2 - k1, ns2)
    print()

    print("Overlapping keys with different values:")
    ks = [k for k in k1 & k2 if getattr(ns1, k, "None") != getattr(ns2, k, "None")]
    print_keys(ks, ns1, ns2)
    print()


if __name__ == "__main__":
    main()
KosmosX-API-main
kosmosX/fairseq/scripts/compare_namespaces.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Split a large file into a train and valid set while respecting document
boundaries. Documents should be separated by a single empty line.
"""

import argparse
import random
import sys


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("input")
    parser.add_argument("sample_output", help="train output file")
    parser.add_argument("remainder_output", help="valid output file")
    parser.add_argument("-k", type=int, help="remainder size")
    parser.add_argument(
        "--lines", action="store_true", help="split lines instead of docs"
    )
    args = parser.parse_args()
    assert args.k is not None

    sample = []
    remainder = []
    num_docs = [0]

    def update_sample(doc):
        if len(sample) < args.k:
            sample.append(doc.copy())
        else:
            i = num_docs[0]
            j = random.randrange(i + 1)
            if j < args.k:
                remainder.append(sample[j])
                sample[j] = doc.copy()
            else:
                remainder.append(doc.copy())
        num_docs[0] += 1
        doc.clear()

    with open(args.input, "r", encoding="utf-8") as h:
        doc = []
        for i, line in enumerate(h):
            if line.strip() == "":  # empty line indicates new document
                update_sample(doc)
            else:
                doc.append(line)
            if args.lines:
                update_sample(doc)
            if i % 1000000 == 0:
                print(i, file=sys.stderr, end="", flush=True)
            elif i % 100000 == 0:
                print(".", file=sys.stderr, end="", flush=True)
        if len(doc) > 0:
            update_sample(doc)
        print(file=sys.stderr, flush=True)

    assert len(sample) == args.k

    with open(args.sample_output, "w", encoding="utf-8") as out:
        first = True
        for doc in sample:
            if not first and not args.lines:
                out.write("\n")
            first = False
            for line in doc:
                out.write(line)

    with open(args.remainder_output, "w", encoding="utf-8") as out:
        first = True
        for doc in remainder:
            if not first and not args.lines:
                out.write("\n")
            first = False
            for line in doc:
                out.write(line)


if __name__ == "__main__":
    main()
KosmosX-API-main
kosmosX/fairseq/scripts/split_train_valid_docs.py
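A hedged usage sketch for the reservoir-sampling split script above (not part of the original repo; the corpus and output file names are illustrative assumptions). As written, exactly `-k` documents end up in the first output file and everything else goes to the second:

python scripts/split_train_valid_docs.py corpus.txt sampled.docs.txt remainder.docs.txt -k 3000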
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Use this script in order to build symmetric alignments for your translation
dataset.
This script depends on fast_align and mosesdecoder tools. You will need to
build those before running the script.
fast_align:
    github: http://github.com/clab/fast_align
    instructions: follow the instructions in README.md
mosesdecoder:
    github: http://github.com/moses-smt/mosesdecoder
    instructions: http://www.statmt.org/moses/?n=Development.GetStarted
The script produces the following files under --output_dir:
    text.joined - concatenation of lines from the source_file and the
    target_file.
    align.forward - forward pass of fast_align.
    align.backward - backward pass of fast_align.
    aligned.sym_heuristic - symmetrized alignment.
"""

import argparse
import os
from itertools import zip_longest


def main():
    parser = argparse.ArgumentParser(description="symmetric alignment builder")
    # fmt: off
    parser.add_argument('--fast_align_dir',
                        help='path to fast_align build directory')
    parser.add_argument('--mosesdecoder_dir',
                        help='path to mosesdecoder root directory')
    parser.add_argument('--sym_heuristic',
                        help='heuristic to use for symmetrization',
                        default='grow-diag-final-and')
    parser.add_argument('--source_file',
                        help='path to a file with sentences '
                             'in the source language')
    parser.add_argument('--target_file',
                        help='path to a file with sentences '
                             'in the target language')
    parser.add_argument('--output_dir',
                        help='output directory')
    # fmt: on
    args = parser.parse_args()

    fast_align_bin = os.path.join(args.fast_align_dir, "fast_align")
    symal_bin = os.path.join(args.mosesdecoder_dir, "bin", "symal")
    sym_fast_align_bin = os.path.join(
        args.mosesdecoder_dir, "scripts", "ems", "support", "symmetrize-fast-align.perl"
    )

    # create joined file
    joined_file = os.path.join(args.output_dir, "text.joined")
    with open(args.source_file, "r", encoding="utf-8") as src, open(
        args.target_file, "r", encoding="utf-8"
    ) as tgt:
        with open(joined_file, "w", encoding="utf-8") as joined:
            for s, t in zip_longest(src, tgt):
                print("{} ||| {}".format(s.strip(), t.strip()), file=joined)

    bwd_align_file = os.path.join(args.output_dir, "align.backward")

    # run forward alignment
    fwd_align_file = os.path.join(args.output_dir, "align.forward")
    fwd_fast_align_cmd = "{FASTALIGN} -i {JOINED} -d -o -v > {FWD}".format(
        FASTALIGN=fast_align_bin, JOINED=joined_file, FWD=fwd_align_file
    )
    assert os.system(fwd_fast_align_cmd) == 0

    # run backward alignment
    bwd_align_file = os.path.join(args.output_dir, "align.backward")
    bwd_fast_align_cmd = "{FASTALIGN} -i {JOINED} -d -o -v -r > {BWD}".format(
        FASTALIGN=fast_align_bin, JOINED=joined_file, BWD=bwd_align_file
    )
    assert os.system(bwd_fast_align_cmd) == 0

    # run symmetrization
    sym_out_file = os.path.join(args.output_dir, "aligned")
    sym_cmd = "{SYMFASTALIGN} {FWD} {BWD} {SRC} {TGT} {OUT} {HEURISTIC} {SYMAL}".format(
        SYMFASTALIGN=sym_fast_align_bin,
        FWD=fwd_align_file,
        BWD=bwd_align_file,
        SRC=args.source_file,
        TGT=args.target_file,
        OUT=sym_out_file,
        HEURISTIC=args.sym_heuristic,
        SYMAL=symal_bin,
    )
    assert os.system(sym_cmd) == 0


if __name__ == "__main__":
    main()
KosmosX-API-main
kosmosX/fairseq/scripts/build_sym_alignment.py
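A hedged invocation sketch for the alignment builder above (not from the original repo); all paths are illustrative assumptions and must point at locally built fast_align and mosesdecoder checkouts:

python scripts/build_sym_alignment.py \
    --fast_align_dir ~/tools/fast_align/build \
    --mosesdecoder_dir ~/tools/mosesdecoder \
    --source_file corpus.de \
    --target_file corpus.en \
    --output_dir alignments/ \
    --sym_heuristic grow-diag-final-and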
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import absolute_import, division, print_function, unicode_literals

import argparse

import sentencepiece as spm


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model", required=True, help="sentencepiece model to use for decoding"
    )
    parser.add_argument("--input", required=True, help="input file to decode")
    parser.add_argument("--input_format", choices=["piece", "id"], default="piece")
    args = parser.parse_args()

    sp = spm.SentencePieceProcessor()
    sp.Load(args.model)

    if args.input_format == "piece":

        def decode(input):
            return "".join(sp.DecodePieces(input))

    elif args.input_format == "id":

        def decode(input):
            return "".join(sp.DecodeIds(input))

    else:
        raise NotImplementedError

    def tok2int(tok):
        # remap reference-side <unk> (represented as <<unk>>) to 0
        return int(tok) if tok != "<<unk>>" else 0

    with open(args.input, "r", encoding="utf-8") as h:
        for line in h:
            if args.input_format == "id":
                print(decode(list(map(tok2int, line.rstrip().split()))))
            elif args.input_format == "piece":
                print(decode(line.rstrip().split()))


if __name__ == "__main__":
    main()
KosmosX-API-main
kosmosX/fairseq/scripts/spm_decode.py
KosmosX-API-main
kosmosX/fairseq/scripts/__init__.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import os import re import shutil import sys pt_regexp = re.compile(r"checkpoint(\d+|_\d+_\d+|_[a-z]+)\.pt") pt_regexp_epoch_based = re.compile(r"checkpoint(\d+)\.pt") pt_regexp_update_based = re.compile(r"checkpoint_\d+_(\d+)\.pt") def parse_checkpoints(files): entries = [] for f in files: m = pt_regexp_epoch_based.fullmatch(f) if m is not None: entries.append((int(m.group(1)), m.group(0))) else: m = pt_regexp_update_based.fullmatch(f) if m is not None: entries.append((int(m.group(1)), m.group(0))) return entries def last_n_checkpoints(files, n): entries = parse_checkpoints(files) return [x[1] for x in sorted(entries, reverse=True)[:n]] def every_n_checkpoints(files, n): entries = parse_checkpoints(files) return [x[1] for x in sorted(sorted(entries)[::-n])] def main(): parser = argparse.ArgumentParser( description=( "Recursively delete checkpoint files from `root_dir`, " "but preserve checkpoint_best.pt and checkpoint_last.pt" ) ) parser.add_argument("root_dirs", nargs="*") parser.add_argument( "--save-last", type=int, default=0, help="number of last checkpoints to save" ) parser.add_argument( "--save-every", type=int, default=0, help="interval of checkpoints to save" ) parser.add_argument( "--preserve-test", action="store_true", help="preserve checkpoints in dirs that start with test_ prefix (default: delete them)", ) parser.add_argument( "--delete-best", action="store_true", help="delete checkpoint_best.pt" ) parser.add_argument( "--delete-last", action="store_true", help="delete checkpoint_last.pt" ) parser.add_argument( "--no-dereference", action="store_true", help="don't dereference symlinks" ) args = parser.parse_args() files_to_desymlink = [] files_to_preserve = [] files_to_delete = [] for root_dir in args.root_dirs: for root, _subdirs, files in os.walk(root_dir): if args.save_last > 0: to_save = last_n_checkpoints(files, args.save_last) else: to_save = [] if args.save_every > 0: to_save += every_n_checkpoints(files, args.save_every) for file in files: if not pt_regexp.fullmatch(file): continue full_path = os.path.join(root, file) if ( not os.path.basename(root).startswith("test_") or args.preserve_test ) and ( (file == "checkpoint_last.pt" and not args.delete_last) or (file == "checkpoint_best.pt" and not args.delete_best) or file in to_save ): if os.path.islink(full_path) and not args.no_dereference: files_to_desymlink.append(full_path) else: files_to_preserve.append(full_path) else: files_to_delete.append(full_path) if len(files_to_desymlink) == 0 and len(files_to_delete) == 0: print("Nothing to do.") sys.exit(0) files_to_desymlink = sorted(files_to_desymlink) files_to_preserve = sorted(files_to_preserve) files_to_delete = sorted(files_to_delete) print("Operations to perform (in order):") if len(files_to_desymlink) > 0: for file in files_to_desymlink: print(" - preserve (and dereference symlink): " + file) if len(files_to_preserve) > 0: for file in files_to_preserve: print(" - preserve: " + file) if len(files_to_delete) > 0: for file in files_to_delete: print(" - delete: " + file) while True: resp = input("Continue? 
(Y/N): ") if resp.strip().lower() == "y": break elif resp.strip().lower() == "n": sys.exit(0) print("Executing...") if len(files_to_desymlink) > 0: for file in files_to_desymlink: realpath = os.path.realpath(file) print("rm " + file) os.remove(file) print("cp {} {}".format(realpath, file)) shutil.copyfile(realpath, file) if len(files_to_delete) > 0: for file in files_to_delete: print("rm " + file) os.remove(file) if __name__ == "__main__": main()
KosmosX-API-main
kosmosX/fairseq/scripts/rm_pt.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Count the number of documents and average number of lines and tokens per
document in a large file. Documents should be separated by a single empty line.
"""

import argparse
import gzip
import sys

import numpy as np


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("input")
    parser.add_argument("--gzip", action="store_true")
    args = parser.parse_args()

    def gopen():
        if args.gzip:
            return gzip.open(args.input, "r")
        else:
            return open(args.input, "r", encoding="utf-8")

    num_lines = []
    num_toks = []
    with gopen() as h:
        num_docs = 1
        num_lines_in_doc = 0
        num_toks_in_doc = 0
        for i, line in enumerate(h):
            if len(line.strip()) == 0:  # empty line indicates new document
                num_docs += 1
                num_lines.append(num_lines_in_doc)
                num_toks.append(num_toks_in_doc)
                num_lines_in_doc = 0
                num_toks_in_doc = 0
            else:
                num_lines_in_doc += 1
                num_toks_in_doc += len(line.rstrip().split())
            if i % 1000000 == 0:
                print(i, file=sys.stderr, end="", flush=True)
            elif i % 100000 == 0:
                print(".", file=sys.stderr, end="", flush=True)

    print(file=sys.stderr, flush=True)

    print("found {} docs".format(num_docs))
    print("average num lines per doc: {}".format(np.mean(num_lines)))
    print("average num toks per doc: {}".format(np.mean(num_toks)))


if __name__ == "__main__":
    main()
KosmosX-API-main
kosmosX/fairseq/scripts/count_docs.py
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import absolute_import, division, print_function, unicode_literals

import argparse
import contextlib
import sys

import sentencepiece as spm


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model", required=True, help="sentencepiece model to use for encoding"
    )
    parser.add_argument(
        "--inputs", nargs="+", default=["-"], help="input files to filter/encode"
    )
    parser.add_argument(
        "--outputs", nargs="+", default=["-"], help="path to save encoded outputs"
    )
    parser.add_argument("--output_format", choices=["piece", "id"], default="piece")
    parser.add_argument(
        "--min-len",
        type=int,
        metavar="N",
        help="filter sentence pairs with fewer than N tokens",
    )
    parser.add_argument(
        "--max-len",
        type=int,
        metavar="N",
        help="filter sentence pairs with more than N tokens",
    )
    args = parser.parse_args()

    assert len(args.inputs) == len(
        args.outputs
    ), "number of input and output paths should match"

    sp = spm.SentencePieceProcessor()
    sp.Load(args.model)

    if args.output_format == "piece":

        def encode(input):
            return sp.EncodeAsPieces(input)

    elif args.output_format == "id":

        def encode(input):
            return list(map(str, sp.EncodeAsIds(input)))

    else:
        raise NotImplementedError

    if args.min_len is not None or args.max_len is not None:

        def valid(line):
            return (args.min_len is None or len(line) >= args.min_len) and (
                args.max_len is None or len(line) <= args.max_len
            )

    else:

        def valid(lines):
            return True

    with contextlib.ExitStack() as stack:
        inputs = [
            stack.enter_context(open(input, "r", encoding="utf-8"))
            if input != "-"
            else sys.stdin
            for input in args.inputs
        ]
        outputs = [
            stack.enter_context(open(output, "w", encoding="utf-8"))
            if output != "-"
            else sys.stdout
            for output in args.outputs
        ]

        stats = {
            "num_empty": 0,
            "num_filtered": 0,
        }

        def encode_line(line):
            line = line.strip()
            if len(line) > 0:
                line = encode(line)
                if valid(line):
                    return line
                else:
                    stats["num_filtered"] += 1
            else:
                stats["num_empty"] += 1
            return None

        for i, lines in enumerate(zip(*inputs), start=1):
            enc_lines = list(map(encode_line, lines))
            if not any(enc_line is None for enc_line in enc_lines):
                for enc_line, output_h in zip(enc_lines, outputs):
                    print(" ".join(enc_line), file=output_h)
            if i % 10000 == 0:
                print("processed {} lines".format(i), file=sys.stderr)

        print("skipped {} empty lines".format(stats["num_empty"]), file=sys.stderr)
        print("filtered {} lines".format(stats["num_filtered"]), file=sys.stderr)


if __name__ == "__main__":
    main()
KosmosX-API-main
kosmosX/fairseq/scripts/spm_encode.py
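A hedged usage sketch for the encoder script above (not from the original repo; file and model names are illustrative assumptions). It encodes parallel files in lockstep and drops pairs that fall outside the length bounds:

python scripts/spm_encode.py --model sentencepiece.bpe.model --output_format piece \
    --inputs train.de train.en --outputs train.bpe.de train.bpe.en \
    --min-len 1 --max-len 250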
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Split a large file into shards while respecting document boundaries. Documents
should be separated by a single empty line.
"""

import argparse
import contextlib


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("input")
    parser.add_argument("--num-shards", type=int)
    args = parser.parse_args()

    assert args.num_shards is not None and args.num_shards > 1

    with open(args.input, "r", encoding="utf-8") as h:
        with contextlib.ExitStack() as stack:
            outputs = [
                stack.enter_context(
                    open(args.input + ".shard" + str(i), "w", encoding="utf-8")
                )
                for i in range(args.num_shards)
            ]

            doc = []
            first_doc = [True] * args.num_shards

            def output_doc(i):
                if not first_doc[i]:
                    outputs[i].write("\n")
                first_doc[i] = False
                for line in doc:
                    outputs[i].write(line)
                doc.clear()

            num_docs = 0
            for line in h:
                if line.strip() == "":  # empty line indicates new document
                    output_doc(num_docs % args.num_shards)
                    num_docs += 1
                else:
                    doc.append(line)
            output_doc(num_docs % args.num_shards)


if __name__ == "__main__":
    main()
KosmosX-API-main
kosmosX/fairseq/scripts/shard_docs.py
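A hedged usage sketch for the sharding script above (not from the original repo; the corpus name is an illustrative assumption). Shards are written next to the input as corpus.txt.shard0 through corpus.txt.shard7:

python scripts/shard_docs.py corpus.txt --num-shards 8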
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

from __future__ import absolute_import, division, print_function, unicode_literals

import sys

import sentencepiece as spm


if __name__ == "__main__":
    spm.SentencePieceTrainer.Train(" ".join(sys.argv[1:]))
KosmosX-API-main
kosmosX/fairseq/scripts/spm_train.py
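A hedged usage sketch for the thin SentencePiece training wrapper above (not from the original repo): it simply forwards its arguments to spm.SentencePieceTrainer.Train, so standard SentencePiece training flags apply. The file names and vocabulary size are illustrative assumptions:

python scripts/spm_train.py --input=train.de,train.en --model_prefix=sentencepiece.bpe \
    --vocab_size=32000 --model_type=bpe --character_coverage=1.0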
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import collections import os import re import torch from fairseq.file_io import PathManager def average_checkpoints(inputs): """Loads checkpoints from inputs and returns a model with averaged weights. Args: inputs: An iterable of string paths of checkpoints to load from. Returns: A dict of string keys mapping to various values. The 'model' key from the returned dict should correspond to an OrderedDict mapping string parameter names to torch Tensors. """ params_dict = collections.OrderedDict() params_keys = None new_state = None num_models = len(inputs) for fpath in inputs: with PathManager.open(fpath, "rb") as f: state = torch.load( f, map_location=( lambda s, _: torch.serialization.default_restore_location(s, "cpu") ), ) # Copies over the settings from the first checkpoint if new_state is None: new_state = state model_params = state["model"] model_params_keys = list(model_params.keys()) if params_keys is None: params_keys = model_params_keys elif params_keys != model_params_keys: raise KeyError( "For checkpoint {}, expected list of params: {}, " "but found: {}".format(f, params_keys, model_params_keys) ) for k in params_keys: p = model_params[k] if isinstance(p, torch.HalfTensor): p = p.float() if k not in params_dict: params_dict[k] = p.clone() # NOTE: clone() is needed in case of p is a shared parameter else: params_dict[k] += p averaged_params = collections.OrderedDict() for k, v in params_dict.items(): averaged_params[k] = v if averaged_params[k].is_floating_point(): averaged_params[k].div_(num_models) else: averaged_params[k] //= num_models new_state["model"] = averaged_params return new_state def last_n_checkpoints(paths, n, update_based, upper_bound=None): assert len(paths) == 1 path = paths[0] if update_based: pt_regexp = re.compile(r"checkpoint_\d+_(\d+)\.pt") else: pt_regexp = re.compile(r"checkpoint(\d+)\.pt") files = PathManager.ls(path) entries = [] for f in files: m = pt_regexp.fullmatch(f) if m is not None: sort_key = int(m.group(1)) if upper_bound is None or sort_key <= upper_bound: entries.append((sort_key, m.group(0))) if len(entries) < n: raise Exception( "Found {} checkpoint files but need at least {}", len(entries), n ) return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]] def main(): parser = argparse.ArgumentParser( description="Tool to average the params of input checkpoints to " "produce a new checkpoint", ) # fmt: off parser.add_argument('--inputs', required=True, nargs='+', help='Input checkpoint file paths.') parser.add_argument('--output', required=True, metavar='FILE', help='Write the new checkpoint containing the averaged weights to this path.') num_group = parser.add_mutually_exclusive_group() num_group.add_argument('--num-epoch-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_xx.pt in the ' 'path specified by input, and average last this many of them.') num_group.add_argument('--num-update-checkpoints', type=int, help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by' ' input, and average last this many of them.') parser.add_argument('--checkpoint-upper-bound', type=int, help='when using --num-epoch-checkpoints, this will set an upper bound on which epoch to use, ' 'when using --num-update-checkpoints, this will set an upper bound on which 
update to use' 'e.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be' ' averaged.' 'e.g., with --num-update-checkpoints=10 --checkpoint-upper-bound=50000, checkpoints 40500-50000 would' ' be averaged assuming --save-interval-updates 500' ) # fmt: on args = parser.parse_args() print(args) num = None is_update_based = False if args.num_update_checkpoints is not None: num = args.num_update_checkpoints is_update_based = True elif args.num_epoch_checkpoints is not None: num = args.num_epoch_checkpoints assert args.checkpoint_upper_bound is None or ( args.num_epoch_checkpoints is not None or args.num_update_checkpoints is not None ), "--checkpoint-upper-bound requires --num-epoch-checkpoints or --num-update-checkpoints" assert ( args.num_epoch_checkpoints is None or args.num_update_checkpoints is None ), "Cannot combine --num-epoch-checkpoints and --num-update-checkpoints" if num is not None: args.inputs = last_n_checkpoints( args.inputs, num, is_update_based, upper_bound=args.checkpoint_upper_bound, ) print("averaging checkpoints: ", args.inputs) new_state = average_checkpoints(args.inputs) with PathManager.open(args.output, "wb") as f: torch.save(new_state, f) print("Finished writing averaged checkpoint to {}".format(args.output)) if __name__ == "__main__": main()
KosmosX-API-main
kosmosX/fairseq/scripts/average_checkpoints.py
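A hedged usage sketch for the checkpoint-averaging tool above, using the epoch-based mode it defines (not from the original repo; the directory, counts, and output name are illustrative assumptions):

python scripts/average_checkpoints.py --inputs checkpoints/ \
    --num-epoch-checkpoints 10 --checkpoint-upper-bound 50 \
    --output checkpoints/averaged.pt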
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse

from fairseq.data import Dictionary, data_utils, indexed_dataset


def get_parser():
    parser = argparse.ArgumentParser(
        description="writes text from binarized file to stdout"
    )
    # fmt: off
    parser.add_argument('--dataset-impl', help='dataset implementation',
                        choices=indexed_dataset.get_available_dataset_impl())
    parser.add_argument('--dict', metavar='FP', help='dictionary containing known words', default=None)
    parser.add_argument('--input', metavar='FP', required=True, help='binarized file to read')
    # fmt: on

    return parser


def main():
    parser = get_parser()
    args = parser.parse_args()

    dictionary = Dictionary.load(args.dict) if args.dict is not None else None
    dataset = data_utils.load_indexed_dataset(
        args.input,
        dictionary,
        dataset_impl=args.dataset_impl,
        default="lazy",
    )

    for tensor_line in dataset:
        if dictionary is None:
            line = " ".join([str(int(x)) for x in tensor_line])
        else:
            line = dictionary.string(tensor_line)

        print(line)


if __name__ == "__main__":
    main()
KosmosX-API-main
kosmosX/fairseq/scripts/read_binarized.py
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import sys


"""Reads in a fairseq output file, and verifies that the constraints
(C- lines) are present in the output (the first H- line). Assumes that
constraints are listed prior to the first hypothesis.
"""

constraints = []
found = 0
total = 0
for line in sys.stdin:
    if line.startswith("C-"):
        constraints.append(line.rstrip().split("\t")[1])
    elif line.startswith("H-"):
        text = line.split("\t")[2]

        for constraint in constraints:
            total += 1
            if constraint in text:
                found += 1
            else:
                print(f"No {constraint} in {text}", file=sys.stderr)

        constraints = []

print(f"Found {found} / {total} = {100 * found / total:.1f}%")
KosmosX-API-main
kosmosX/fairseq/scripts/constraints/validate.py
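A hedged usage sketch for the constraint checker above (not from the original repo): it reads a fairseq generation log from stdin and compares each C- constraint line against the first following H- hypothesis; the log file name is an illustrative assumption:

python scripts/constraints/validate.py < generate.out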
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""Extracts random constraints from reference files."""

import argparse
import random
import sys


def get_phrase(words, index, length):
    assert index < len(words) - length + 1
    phr = " ".join(words[index : index + length])
    for i in range(index, index + length):
        words.pop(index)
    return phr


def main(args):

    if args.seed:
        random.seed(args.seed)

    for line in sys.stdin:
        constraints = []

        def add_constraint(constraint):
            constraints.append(constraint)

        source = line.rstrip()
        if "\t" in line:
            source, target = line.split("\t")
            if args.add_sos:
                target = f"<s> {target}"
            if args.add_eos:
                target = f"{target} </s>"

            if len(target.split()) >= args.len:
                words = [target]

                num = args.number

                choices = {}
                for i in range(num):
                    if len(words) == 0:
                        break
                    segmentno = random.choice(range(len(words)))
                    segment = words.pop(segmentno)
                    tokens = segment.split()
                    phrase_index = random.choice(range(len(tokens)))
                    choice = " ".join(
                        tokens[phrase_index : min(len(tokens), phrase_index + args.len)]
                    )
                    for j in range(
                        phrase_index, min(len(tokens), phrase_index + args.len)
                    ):
                        tokens.pop(phrase_index)
                    if phrase_index > 0:
                        words.append(" ".join(tokens[0:phrase_index]))
                    if phrase_index + 1 < len(tokens):
                        words.append(" ".join(tokens[phrase_index:]))
                    choices[target.find(choice)] = choice

                    # mask out with spaces
                    target = target.replace(choice, " " * len(choice), 1)

                for key in sorted(choices.keys()):
                    add_constraint(choices[key])

        print(source, *constraints, sep="\t")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--number", "-n", type=int, default=1, help="number of phrases")
    parser.add_argument("--len", "-l", type=int, default=1, help="phrase length")
    parser.add_argument(
        "--add-sos", default=False, action="store_true", help="add <s> token"
    )
    parser.add_argument(
        "--add-eos", default=False, action="store_true", help="add </s> token"
    )
    parser.add_argument("--seed", "-s", default=0, type=int)
    args = parser.parse_args()

    main(args)
KosmosX-API-main
kosmosX/fairseq/scripts/constraints/extract.py
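A hedged usage sketch for the constraint extractor above (not from the original repo): it expects tab-separated source/target lines on stdin and appends the sampled target phrases as extra tab-separated fields; the file names and flag values are illustrative assumptions:

paste test.de test.en | python scripts/constraints/extract.py --number 2 --len 3 --seed 1 > test.constraints.de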
""" Setup """ from setuptools import setup, find_packages from codecs import open from os import path here = path.abspath(path.dirname(__file__)) # Get the long description from the README file with open(path.join(here, 'README.md'), encoding='utf-8') as f: long_description = f.read() exec(open('src/open_clip/version.py').read()) setup( name='open_clip_torch', version=__version__, description='OpenCLIP', long_description=long_description, long_description_content_type='text/markdown', url='https://github.com/mlfoundations/open_clip', author='', author_email='', classifiers=[ # How mature is this project? Common values are # 3 - Alpha # 4 - Beta # 5 - Production/Stable 'Development Status :: 3 - Alpha', 'Intended Audience :: Education', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'Topic :: Software Development', 'Topic :: Software Development :: Libraries', 'Topic :: Software Development :: Libraries :: Python Modules', ], # Note that this is a string of words separated by whitespace, not a list. keywords='CLIP pretrained', package_dir={'': 'src'}, packages=find_packages(where='src', exclude=['training']), include_package_data=True, install_requires=[ 'torch >= 1.9', 'torchvision', 'ftfy', 'regex', 'tqdm', ], python_requires='>=3.7', )
KosmosX-API-main
kosmosX/open_clip/setup.py
import argparse def get_default_params(model_name): # Params from paper (https://arxiv.org/pdf/2103.00020.pdf) model_name = model_name.lower() if "vit" in model_name: return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.98, "eps": 1.0e-6} else: return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.999, "eps": 1.0e-8} def parse_args(): parser = argparse.ArgumentParser() parser.add_argument( "--train-data", type=str, default=None, help="Path to csv filewith training data", ) parser.add_argument( "--val-data", type=str, default=None, help="Path to csv file with validation data", ) parser.add_argument( "--train-num-samples", type=int, default=None, help="Number of samples in dataset. Required for webdataset if not available in info file.", ) parser.add_argument( "--val-num-samples", type=int, default=None, help="Number of samples in dataset. Useful for webdataset if not available in info file.", ) parser.add_argument( "--dataset-type", choices=["webdataset", "csv", "auto"], default="auto", help="Which type of dataset to process." ) parser.add_argument( "--dataset-resampled", default=False, action="store_true", help="Whether to use sampling with replacement for webdataset shard selection." ) parser.add_argument( "--csv-separator", type=str, default="\t", help="For csv-like datasets, which separator to use." ) parser.add_argument( "--csv-img-key", type=str, default="filepath", help="For csv-like datasets, the name of the key for the image paths." ) parser.add_argument( "--csv-caption-key", type=str, default="title", help="For csv-like datasets, the name of the key for the captions." ) parser.add_argument( "--imagenet-val", type=str, default=None, help="Path to imagenet val set for conducting zero shot evaluation.", ) parser.add_argument( "--imagenet-v2", type=str, default=None, help="Path to imagenet v2 for conducting zero shot evaluation.", ) parser.add_argument( "--logs", type=str, default="./logs/", help="Where to store tensorboard logs. Use None to avoid storing logs.", ) parser.add_argument( "--log-local", action="store_true", default=False, help="log files on local master, otherwise global master only.", ) parser.add_argument( "--name", type=str, default=None, help="Optional identifier for the experiment when storing logs. Otherwise use current time.", ) parser.add_argument( "--workers", type=int, default=1, help="Number of dataloader workers per GPU." ) parser.add_argument( "--batch-size", type=int, default=64, help="Batch size per GPU." ) parser.add_argument( "--epochs", type=int, default=32, help="Number of epochs to train for." ) parser.add_argument("--lr", type=float, default=None, help="Learning rate.") parser.add_argument("--beta1", type=float, default=None, help="Adam beta 1.") parser.add_argument("--beta2", type=float, default=None, help="Adam beta 2.") parser.add_argument("--eps", type=float, default=None, help="Adam epsilon.") parser.add_argument("--wd", type=float, default=0.2, help="Weight decay.") parser.add_argument( "--warmup", type=int, default=10000, help="Number of steps to warmup for." ) parser.add_argument( "--use-bn-sync", default=False, action="store_true", help="Whether to use batch norm sync.") parser.add_argument( "--skip-scheduler", action="store_true", default=False, help="Use this flag to skip the learning rate decay.", ) parser.add_argument( "--save-frequency", type=int, default=1, help="How often to save checkpoints." 
) parser.add_argument( "--save-most-recent", action="store_true", default=False, help="Always save the most recent model trained to epoch_latest.pt.", ) parser.add_argument( "--zeroshot-frequency", type=int, default=2, help="How often to run zero shot." ) parser.add_argument( "--val-frequency", type=int, default=1, help="How often to run evaluation with val data." ) parser.add_argument( "--resume", default=None, type=str, help="path to latest checkpoint (default: none)", ) parser.add_argument( "--precision", choices=["amp", "fp16", "fp32"], default="amp", help="Floating point precision." ) parser.add_argument( "--model", type=str, default="RN50", help="Name of the vision backbone to use.", ) parser.add_argument( "--pretrained", default='', type=str, help="Use a pretrained CLIP model weights with the specified tag or file path.", ) parser.add_argument( "--pretrained-image", default=False, action='store_true', help="Load imagenet pretrained weights for image tower backbone if available.", ) parser.add_argument( "--lock-image", default=False, action='store_true', help="Lock full image tower by disabling gradients.", ) parser.add_argument( "--lock-image-unlocked-groups", type=int, default=0, help="Leave last n image tower layer groups unlocked.", ) parser.add_argument( "--lock-image-freeze-bn-stats", default=False, action='store_true', help="Freeze BatchNorm running stats in image tower for any locked layers.", ) parser.add_argument( "--grad-checkpointing", default=False, action='store_true', help="Enable gradient checkpointing.", ) parser.add_argument( "--local-loss", default=False, action="store_true", help="calculate loss w/ local features @ global (instead of realizing full global @ global matrix)" ) parser.add_argument( "--gather-with-grad", default=False, action="store_true", help="enable full distributed gradient for feature gather" ) parser.add_argument( "--force-quick-gelu", default=False, action='store_true', help="Force use of QuickGELU activation for non-OpenAI transformer models.", ) parser.add_argument( "--torchscript", default=False, action='store_true', help="torch.jit.script the model, also uses jit version of OpenAI models if pretrained=='openai'", ) parser.add_argument( "--trace", default=False, action='store_true', help="torch.jit.trace the model for inference / eval only", ) # arguments for distributed training parser.add_argument( "--dist-url", default="env://", type=str, help="url used to set up distributed training", ) parser.add_argument( "--dist-backend", default="nccl", type=str, help="distributed backend" ) parser.add_argument( "--report-to", default='', type=str, help="Options are ['wandb', 'tensorboard', 'wandb,tensorboard']" ) parser.add_argument( "--wandb-notes", default='', type=str, help="Notes if logging with wandb" ) parser.add_argument( "--debug", default=False, action="store_true", help="If true, more information is logged." ) parser.add_argument( "--copy-codebase", default=False, action="store_true", help="If true, we copy the entire base on the log diretory, and execute from there." ) parser.add_argument( "--horovod", default=False, action="store_true", help="Use horovod for distributed training." ) parser.add_argument( "--ddp-static-graph", default=False, action='store_true', help="Enable static graph optimization for DDP in PyTorch >= 1.11.", ) parser.add_argument( "--no-set-device-rank", default=False, action="store_true", help="Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc)." 
) parser.add_argument( "--seed", type=int, default=0, help="Default random seed." ) args = parser.parse_args() # If some params are not passed, we use the default values based on model name. default_params = get_default_params(args.model) for name, val in default_params.items(): if getattr(args, name) is None: setattr(args, name, val) return args
KosmosX-API-main
kosmosX/open_clip/src/training/params.py
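parse_args() above leaves lr, beta1, beta2 and eps at None and then fills any unset value from get_default_params(), so ViT and ResNet backbones get different Adam settings out of the box. The sketch below reproduces just that fallback on a trimmed parser; the command-line values are illustrative.

# Sketch of the default-hyperparameter fallback implemented in params.py above:
# any of lr / beta1 / beta2 / eps left at None is replaced by the model-family
# default ("vit" in the lowercased model name selects the ViT settings).
import argparse

def get_default_params(model_name):
    model_name = model_name.lower()
    if "vit" in model_name:
        return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.98, "eps": 1.0e-6}
    return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.999, "eps": 1.0e-8}

parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, default="RN50")
parser.add_argument("--lr", type=float, default=None)
parser.add_argument("--beta1", type=float, default=None)
parser.add_argument("--beta2", type=float, default=None)
parser.add_argument("--eps", type=float, default=None)

# example invocation: user overrides lr, everything else falls back to defaults
args = parser.parse_args(["--model", "ViT-B-32", "--lr", "1e-4"])
for name, val in get_default_params(args.model).items():
    if getattr(args, name) is None:          # only fill values the user did not set
        setattr(args, name, val)

print(args.lr, args.beta1, args.beta2, args.eps)   # 0.0001 0.9 0.98 1e-06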
KosmosX-API-main
kosmosX/open_clip/src/training/__init__.py
import logging def setup_logging(log_file, level, include_host=False): if include_host: import socket hostname = socket.gethostname() formatter = logging.Formatter( f'%(asctime)s | {hostname} | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S') else: formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S') logging.root.setLevel(level) loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict] for logger in loggers: logger.setLevel(level) stream_handler = logging.StreamHandler() stream_handler.setFormatter(formatter) logging.root.addHandler(stream_handler) if log_file: file_handler = logging.FileHandler(filename=log_file) file_handler.setFormatter(formatter) logging.root.addHandler(file_handler)
KosmosX-API-main
kosmosX/open_clip/src/training/logger.py
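setup_logging() above configures the root logger and any already-created loggers, and optionally adds a file handler. A minimal usage sketch follows; it assumes the training package is importable (e.g. run from open_clip/src), and the log file name is an arbitrary example.

# Usage sketch for setup_logging() defined above.
# Assumes open_clip/src is on the Python path; "example.log" is illustrative.
import logging
from training.logger import setup_logging

setup_logging(log_file="example.log", level=logging.INFO, include_host=False)
logging.info("training started")        # goes to stderr and example.log
logging.debug("hidden at INFO level")   # filtered out by the chosen level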
import os import torch try: import horovod.torch as hvd except ImportError: hvd = None def is_global_master(args): return args.rank == 0 def is_local_master(args): return args.local_rank == 0 def is_master(args, local=False): return is_local_master(args) if local else is_global_master(args) def is_using_horovod(): # NOTE w/ horovod run, OMPI vars should be set, but w/ SLURM PMI vars will be set # Differentiating between horovod and DDP use via SLURM may not be possible, so horovod arg still required... ompi_vars = ["OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"] pmi_vars = ["PMI_RANK", "PMI_SIZE"] if all([var in os.environ for var in ompi_vars]) or all([var in os.environ for var in pmi_vars]): return True else: return False def is_using_distributed(): if 'WORLD_SIZE' in os.environ: return int(os.environ['WORLD_SIZE']) > 1 if 'SLURM_NTASKS' in os.environ: return int(os.environ['SLURM_NTASKS']) > 1 return False def world_info_from_env(): local_rank = 0 for v in ('LOCAL_RANK', 'MPI_LOCALRANKID', 'SLURM_LOCALID', 'OMPI_COMM_WORLD_LOCAL_RANK'): if v in os.environ: local_rank = int(os.environ[v]) break global_rank = 0 for v in ('RANK', 'PMI_RANK', 'SLURM_PROCID', 'OMPI_COMM_WORLD_RANK'): if v in os.environ: global_rank = int(os.environ[v]) break world_size = 1 for v in ('WORLD_SIZE', 'PMI_SIZE', 'SLURM_NTASKS', 'OMPI_COMM_WORLD_SIZE'): if v in os.environ: world_size = int(os.environ[v]) break return local_rank, global_rank, world_size def init_distributed_device(args): # Distributed training = training on more than one GPU. # Works in both single and multi-node scenarios. args.distributed = False args.world_size = 1 args.rank = 0 # global rank args.local_rank = 0 if args.horovod: assert hvd is not None, "Horovod is not installed" hvd.init() args.local_rank = int(hvd.local_rank()) args.rank = hvd.rank() args.world_size = hvd.size() args.distributed = True os.environ['LOCAL_RANK'] = str(args.local_rank) os.environ['RANK'] = str(args.rank) os.environ['WORLD_SIZE'] = str(args.world_size) elif is_using_distributed(): if 'SLURM_PROCID' in os.environ: # DDP via SLURM args.local_rank, args.rank, args.world_size = world_info_from_env() # SLURM var -> torch.distributed vars in case needed os.environ['LOCAL_RANK'] = str(args.local_rank) os.environ['RANK'] = str(args.rank) os.environ['WORLD_SIZE'] = str(args.world_size) torch.distributed.init_process_group( backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank, ) else: # DDP via torchrun, torch.distributed.launch args.local_rank, _, _ = world_info_from_env() torch.distributed.init_process_group( backend=args.dist_backend, init_method=args.dist_url) args.world_size = torch.distributed.get_world_size() args.rank = torch.distributed.get_rank() args.distributed = True if torch.cuda.is_available(): if args.distributed and not args.no_set_device_rank: device = 'cuda:%d' % args.local_rank else: device = 'cuda:0' torch.cuda.set_device(device) else: device = 'cpu' args.device = device device = torch.device(device) return device
KosmosX-API-main
kosmosX/open_clip/src/training/distributed.py
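world_info_from_env() above resolves (local_rank, global_rank, world_size) from whichever launcher-specific variables are present, falling back to single-process defaults. The sketch below copies that function so it runs without the package on the path; the environment values are fabricated to mimic a two-process torchrun job.

# Sketch of launcher-environment resolution as implemented in distributed.py above.
# The env values below are fabricated for illustration (rank 1 of a world of 2).
import os

os.environ.update({"LOCAL_RANK": "1", "RANK": "1", "WORLD_SIZE": "2"})

def world_info_from_env():
    # same lookup order as distributed.py above
    local_rank = 0
    for v in ('LOCAL_RANK', 'MPI_LOCALRANKID', 'SLURM_LOCALID', 'OMPI_COMM_WORLD_LOCAL_RANK'):
        if v in os.environ:
            local_rank = int(os.environ[v])
            break
    global_rank = 0
    for v in ('RANK', 'PMI_RANK', 'SLURM_PROCID', 'OMPI_COMM_WORLD_RANK'):
        if v in os.environ:
            global_rank = int(os.environ[v])
            break
    world_size = 1
    for v in ('WORLD_SIZE', 'PMI_SIZE', 'SLURM_NTASKS', 'OMPI_COMM_WORLD_SIZE'):
        if v in os.environ:
            world_size = int(os.environ[v])
            break
    return local_rank, global_rank, world_size

print(world_info_from_env())   # -> (1, 1, 2)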
import json import logging import math import os import time from contextlib import suppress import numpy as np import torch import torch.nn.functional as F try: import wandb except ImportError: wandb = None from open_clip import ClipLoss from .distributed import is_master from .zero_shot import zero_shot_eval class AverageMeter(object): """Computes and stores the average and current value""" def __init__(self): self.reset() def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 def update(self, val, n=1): self.val = val self.sum += val * n self.count += n self.avg = self.sum / self.count def unwrap_model(model): if hasattr(model, 'module'): return model.module else: return model def train_one_epoch(model, data, epoch, optimizer, scaler, scheduler, args, tb_writer=None): device = torch.device(args.device) autocast = torch.cuda.amp.autocast if args.precision == 'amp' else suppress model.train() loss = ClipLoss( local_loss=args.local_loss, gather_with_grad=args.gather_with_grad, cache_labels=True, rank=args.rank, world_size=args.world_size, use_horovod=args.horovod) data['train'].set_epoch(epoch) # set epoch in process safe manner via sampler or shared_epoch dataloader = data['train'].dataloader num_batches_per_epoch = dataloader.num_batches sample_digits = math.ceil(math.log(dataloader.num_samples + 1, 10)) loss_m = AverageMeter() batch_time_m = AverageMeter() data_time_m = AverageMeter() end = time.time() for i, batch in enumerate(dataloader): step = num_batches_per_epoch * epoch + i scheduler(step) images, texts = batch images = images.to(device=device, non_blocking=True) texts = texts.to(device=device, non_blocking=True) data_time_m.update(time.time() - end) optimizer.zero_grad() with autocast(): image_features, text_features, logit_scale = model(images, texts) total_loss = loss(image_features, text_features, logit_scale) if scaler is not None: scaler.scale(total_loss).backward() if args.horovod: optimizer.synchronize() scaler.unscale_(optimizer) with optimizer.skip_synchronize(): scaler.step(optimizer) else: scaler.step(optimizer) scaler.update() else: total_loss.backward() optimizer.step() # Note: we clamp to 4.6052 = ln(100), as in the original paper. with torch.no_grad(): unwrap_model(model).logit_scale.clamp_(0, math.log(100)) batch_time_m.update(time.time() - end) end = time.time() batch_count = i + 1 if is_master(args) and (i % 100 == 0 or batch_count == num_batches_per_epoch): batch_size = len(images) num_samples = batch_count * batch_size * args.world_size samples_per_epoch = dataloader.num_samples percent_complete = 100.0 * batch_count / num_batches_per_epoch # NOTE loss is coarsely sampled, just master node and per log update loss_m.update(total_loss.item(), batch_size) logit_scale_scalar = logit_scale.item() logging.info( f"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] " f"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) " f"Data (t): {data_time_m.avg:.3f} " f"Batch (t): {batch_time_m.avg:.3f}, {args.batch_size*args.world_size / batch_time_m.val:#g}/s " f"LR: {optimizer.param_groups[0]['lr']:5f} " f"Logit Scale: {logit_scale_scalar:.3f}" ) # Save train loss / etc. 
Using non avg meter values as loggers have their own smoothing log_data = { "loss": loss_m.val, "data_time": data_time_m.val, "batch_time": batch_time_m.val, "samples_per_second": args.batch_size*args.world_size / batch_time_m.val, "scale": logit_scale_scalar, "lr": optimizer.param_groups[0]["lr"] } for name, val in log_data.items(): name = "train/" + name if tb_writer is not None: tb_writer.add_scalar(name, val, step) if args.wandb: assert wandb is not None, 'Please install wandb.' wandb.log({name: val, 'step': step}) # resetting batch / data time meters per log window batch_time_m.reset() data_time_m.reset() # end for def evaluate(model, data, epoch, args, tb_writer=None): metrics = {} if not is_master(args): return metrics device = torch.device(args.device) model.eval() zero_shot_metrics = zero_shot_eval(model, data, epoch, args) metrics.update(zero_shot_metrics) autocast = torch.cuda.amp.autocast if args.precision == 'amp' else suppress if 'val' in data and (args.val_frequency and ((epoch % args.val_frequency) == 0 or epoch == args.epochs)): dataloader = data['val'].dataloader num_samples = 0 samples_per_val = dataloader.num_samples # FIXME this does not scale past small eval datasets # all_image_features @ all_text_features will blow up memory and compute very quickly cumulative_loss = 0.0 all_image_features, all_text_features = [], [] with torch.no_grad(): for i, batch in enumerate(dataloader): images, texts = batch images = images.to(device=device, non_blocking=True) texts = texts.to(device=device, non_blocking=True) with autocast(): image_features, text_features, logit_scale = model(images, texts) # features are accumulated in CPU tensors, otherwise GPU memory exhausted quickly # however, system RAM is easily exceeded and compute time becomes problematic all_image_features.append(image_features.cpu()) all_text_features.append(text_features.cpu()) logit_scale = logit_scale.mean() logits_per_image = logit_scale * image_features @ text_features.t() logits_per_text = logits_per_image.t() batch_size = images.shape[0] labels = torch.arange(batch_size, device=device).long() total_loss = ( F.cross_entropy(logits_per_image, labels) + F.cross_entropy(logits_per_text, labels) ) / 2 cumulative_loss += total_loss * batch_size num_samples += batch_size if is_master(args) and (i % 100) == 0: logging.info( f"Eval Epoch: {epoch} [{num_samples} / {samples_per_val}]\t" f"Loss: {cumulative_loss / num_samples:.6f}\t") val_metrics = get_metrics( image_features=torch.cat(all_image_features), text_features=torch.cat(all_text_features), logit_scale=logit_scale.cpu(), ) loss = cumulative_loss / num_samples metrics.update( {**val_metrics, "val_loss": loss.item(), "epoch": epoch, "num_samples": num_samples} ) if not metrics: return metrics logging.info( f"Eval Epoch: {epoch} " + "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in metrics.items()]) ) if args.save_logs: for name, val in metrics.items(): if tb_writer is not None: tb_writer.add_scalar(f"val/{name}", val, epoch) with open(os.path.join(args.checkpoint_path, "results.jsonl"), "a+") as f: f.write(json.dumps(metrics)) f.write("\n") if args.wandb: assert wandb is not None, 'Please install wandb.' 
for name, val in metrics.items(): wandb.log({f"val/{name}": val, 'epoch': epoch}) return metrics def get_metrics(image_features, text_features, logit_scale): metrics = {} logits_per_image = (logit_scale * image_features @ text_features.t()).detach().cpu() logits_per_text = logits_per_image.t().detach().cpu() logits = {"image_to_text": logits_per_image, "text_to_image": logits_per_text} ground_truth = torch.arange(len(text_features)).view(-1, 1) for name, logit in logits.items(): ranking = torch.argsort(logit, descending=True) preds = torch.where(ranking == ground_truth)[1] preds = preds.detach().cpu().numpy() metrics[f"{name}_mean_rank"] = preds.mean() + 1 metrics[f"{name}_median_rank"] = np.floor(np.median(preds)) + 1 for k in [1, 5, 10]: metrics[f"{name}_R@{k}"] = np.mean(preds < k) return metrics
KosmosX-API-main
kosmosX/open_clip/src/training/train.py
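get_metrics() above ranks every caption against every image (and vice versa) and reports mean rank, median rank and R@k. The toy sketch below restates that computation and feeds it random unit-normalized features in place of real CLIP embeddings, so the scores land near chance; the batch size and feature dimension are arbitrary.

# Toy invocation of the retrieval metrics computed by get_metrics() in train.py above.
# Random normalized features stand in for CLIP embeddings; near-chance R@k is expected.
import numpy as np
import torch
import torch.nn.functional as F

def get_metrics(image_features, text_features, logit_scale):
    # same computation as train.py above
    metrics = {}
    logits_per_image = (logit_scale * image_features @ text_features.t()).detach().cpu()
    logits_per_text = logits_per_image.t()
    logits = {"image_to_text": logits_per_image, "text_to_image": logits_per_text}
    ground_truth = torch.arange(len(text_features)).view(-1, 1)
    for name, logit in logits.items():
        ranking = torch.argsort(logit, descending=True)
        preds = torch.where(ranking == ground_truth)[1].numpy()   # rank of the true match
        metrics[f"{name}_mean_rank"] = preds.mean() + 1
        metrics[f"{name}_median_rank"] = np.floor(np.median(preds)) + 1
        for k in [1, 5, 10]:
            metrics[f"{name}_R@{k}"] = np.mean(preds < k)
    return metrics

imgs = F.normalize(torch.randn(32, 512), dim=-1)
txts = F.normalize(torch.randn(32, 512), dim=-1)
print(get_metrics(imgs, txts, logit_scale=torch.tensor(100.0)))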
import logging from contextlib import suppress import torch import torch.nn.functional as F from tqdm import tqdm from open_clip import tokenize from .imagenet_zeroshot_data import imagenet_classnames, openai_imagenet_template def zero_shot_classifier(model, classnames, templates, args): with torch.no_grad(): zeroshot_weights = [] for classname in tqdm(classnames): texts = [template(classname) for template in templates] # format with class texts = tokenize(texts).to(args.device) # tokenize if args.distributed and not args.horovod: class_embeddings = model.module.encode_text(texts) else: class_embeddings = model.encode_text(texts) class_embedding = F.normalize(class_embeddings, dim=-1).mean(dim=0) class_embedding /= class_embedding.norm() zeroshot_weights.append(class_embedding) zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(args.device) return zeroshot_weights def accuracy(output, target, topk=(1,)): pred = output.topk(max(topk), 1, True, True)[1].t() correct = pred.eq(target.view(1, -1).expand_as(pred)) return [float(correct[:k].reshape(-1).float().sum(0, keepdim=True).cpu().numpy()) for k in topk] def run(model, classifier, dataloader, args): autocast = torch.cuda.amp.autocast if args.precision == 'amp' else suppress with torch.no_grad(): top1, top5, n = 0., 0., 0. for images, target in tqdm(dataloader, unit_scale=args.batch_size): images = images.to(args.device) target = target.to(args.device) with autocast(): # predict if args.distributed and not args.horovod: image_features = model.module.encode_image(images) else: image_features = model.encode_image(images) image_features = F.normalize(image_features, dim=-1) logits = 100. * image_features @ classifier # measure accuracy acc1, acc5 = accuracy(logits, target, topk=(1, 5)) top1 += acc1 top5 += acc5 n += images.size(0) top1 = (top1 / n) top5 = (top5 / n) return top1, top5 def zero_shot_eval(model, data, epoch, args): if 'imagenet-val' not in data and 'imagenet-v2' not in data: return {} if args.zeroshot_frequency == 0: return {} if (epoch % args.zeroshot_frequency) != 0 and epoch != args.epochs: return {} logging.info('Starting zero-shot imagenet.') logging.info('Building zero-shot classifier') classifier = zero_shot_classifier(model, imagenet_classnames, openai_imagenet_template, args) logging.info('Using classifier') results = {} if 'imagenet-val' in data: top1, top5 = run(model, classifier, data['imagenet-val'].dataloader, args) results['imagenet-zeroshot-val-top1'] = top1 results['imagenet-zeroshot-val-top5'] = top5 if 'imagenet-v2' in data: top1, top5 = run(model, classifier, data['imagenet-v2'].dataloader, args) results['imagenetv2-zeroshot-val-top1'] = top1 results['imagenetv2-zeroshot-val-top5'] = top5 logging.info('Finished zero-shot imagenet.') return results
KosmosX-API-main
kosmosX/open_clip/src/training/zero_shot.py
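accuracy() above counts how many targets appear in the top-k logits of each row. The sketch below reuses that logic on a tiny hand-made batch whose logits have no ties, so the top-1 and top-5 counts are easy to verify by eye.

# Sketch of the top-k accuracy helper used in zero_shot.py above,
# exercised on a two-sample, six-class batch chosen for easy checking.
import torch

def accuracy(output, target, topk=(1,)):
    # identical logic to the helper above
    pred = output.topk(max(topk), 1, True, True)[1].t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    return [float(correct[:k].reshape(-1).float().sum(0, keepdim=True).cpu().numpy()) for k in topk]

logits = torch.tensor([
    [0.10, 0.90, 0.05, 0.04, 0.03, 0.02],   # true class 1 is ranked 1st
    [0.80, 0.10, 0.05, 0.04, 0.03, 0.02],   # true class 2 is ranked 3rd
])
target = torch.tensor([1, 2])
top1, top5 = accuracy(logits, target, topk=(1, 5))
print(top1, top5)   # 1.0 correct at top-1, 2.0 correct within top-5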
import numpy as np def assign_learning_rate(optimizer, new_lr): for param_group in optimizer.param_groups: param_group["lr"] = new_lr def _warmup_lr(base_lr, warmup_length, step): return base_lr * (step + 1) / warmup_length def cosine_lr(optimizer, base_lr, warmup_length, steps): def _lr_adjuster(step): if step < warmup_length: lr = _warmup_lr(base_lr, warmup_length, step) else: e = step - warmup_length es = steps - warmup_length lr = 0.5 * (1 + np.cos(np.pi * e / es)) * base_lr assign_learning_rate(optimizer, lr) return lr return _lr_adjuster
KosmosX-API-main
kosmosX/open_clip/src/training/scheduler.py
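cosine_lr() above returns a closure that is called with the global step and mutates optimizer.param_groups in place: a linear warmup followed by a cosine decay toward zero. The sketch below drives it with a single-parameter AdamW so the schedule is visible; the warmup length and step counts are arbitrary.

# Usage sketch for the cosine_lr scheduler defined above, on a toy optimizer.
import numpy as np
import torch

def assign_learning_rate(optimizer, new_lr):
    for param_group in optimizer.param_groups:
        param_group["lr"] = new_lr

def _warmup_lr(base_lr, warmup_length, step):
    return base_lr * (step + 1) / warmup_length

def cosine_lr(optimizer, base_lr, warmup_length, steps):
    # same closure as scheduler.py above
    def _lr_adjuster(step):
        if step < warmup_length:
            lr = _warmup_lr(base_lr, warmup_length, step)
        else:
            e = step - warmup_length
            es = steps - warmup_length
            lr = 0.5 * (1 + np.cos(np.pi * e / es)) * base_lr
        assign_learning_rate(optimizer, lr)
        return lr
    return _lr_adjuster

param = torch.nn.Parameter(torch.zeros(1))
opt = torch.optim.AdamW([param], lr=0.0)
scheduler = cosine_lr(opt, base_lr=5e-4, warmup_length=10, steps=100)
for step in (0, 9, 10, 55, 99):
    print(step, f"{scheduler(step):.2e}")   # ramps up to 5e-4, then decays toward 0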
import logging import os import random from datetime import datetime import numpy as np import torch from torch import optim from torch.cuda.amp import GradScaler try: import wandb except ImportError: wandb = None try: import torch.utils.tensorboard as tensorboard except ImportError: tensorboard = None try: import horovod.torch as hvd except ImportError: hvd = None from open_clip import create_model_and_transforms, trace_model from training.data import get_data from training.distributed import is_master, init_distributed_device, world_info_from_env from training.logger import setup_logging from training.params import parse_args from training.scheduler import cosine_lr from training.train import train_one_epoch, evaluate def random_seed(seed=42, rank=0): torch.manual_seed(seed + rank) np.random.seed(seed + rank) random.seed(seed + rank) def main(): args = parse_args() # sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule? args.model = args.model.replace('/', '-') # get the name of the experiments if args.name is None: args.name = '-'.join([ datetime.now().strftime("%Y_%m_%d-%H_%M_%S"), f"model_{args.model}", f"lr_{args.lr}", f"b_{args.batch_size}", f"j_{args.workers}", f"p_{args.precision}", ]) # discover initial world args early so we can log properly args.distributed = False args.local_rank, args.rank, args.world_size = world_info_from_env() args.log_path = None if is_master(args, local=args.log_local): log_base_path = os.path.join(args.logs, args.name) os.makedirs(log_base_path, exist_ok=True) log_filename = f'out-{args.rank}' if args.log_local else 'out.log' args.log_path = os.path.join(log_base_path, log_filename) if os.path.exists(args.log_path): print( "Error. Experiment already exists. Use --name {} to specify a new experiment." ) return -1 # Set logger args.log_level = logging.DEBUG if args.debug else logging.INFO setup_logging(args.log_path, args.log_level) # fully initialize distributed device environment torch.backends.cudnn.benchmark = True torch.backends.cudnn.deterministic = False device = init_distributed_device(args) args.wandb = 'wandb' in args.report_to or 'all' in args.report_to args.tensorboard = 'tensorboard' in args.report_to or 'all' in args.report_to if is_master(args): args.tensorboard_path = os.path.join(args.logs, args.name, "tensorboard") if args.tensorboard else '' args.checkpoint_path = os.path.join(args.logs, args.name, "checkpoints") for dirname in [args.tensorboard_path, args.checkpoint_path]: if dirname: os.makedirs(dirname, exist_ok=True) else: args.tensorboard_path = '' args.checkpoint_path = '' if args.copy_codebase: copy_codebase(args) assert args.precision in ['amp', 'fp16', 'fp32'] if args.precision == 'fp16': logging.warning( 'It is recommended to use AMP mixed-precision instead of FP16. ' 'FP16 support needs further verification and tuning, especially for train.') if args.horovod: logging.info( f'Running in horovod mode with multiple processes / nodes. Device: {args.device}.' f'Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}.') elif args.distributed: logging.info( f'Running in distributed mode with multiple processes. Device: {args.device}.' f'Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}.') else: logging.info(f'Running with a single process. 
Device {args.device}.') random_seed(args.seed, 0) model, preprocess_train, preprocess_val = create_model_and_transforms( args.model, args.pretrained, precision=args.precision, device=device, jit=args.torchscript, force_quick_gelu=args.force_quick_gelu, pretrained_image=args.pretrained_image, ) random_seed(args.seed, args.rank) if args.trace: model = trace_model(model, batch_size=args.batch_size, device=device) if args.lock_image: # lock image tower as per LiT - https://arxiv.org/abs/2111.07991 model.lock_image_tower( unlocked_groups=args.lock_image_unlocked_groups, freeze_bn_stats=args.lock_image_freeze_bn_stats) if args.grad_checkpointing: model.set_grad_checkpointing() if is_master(args): logging.info("Model:") logging.info(f"{str(model)}") logging.info("Params:") params_file = os.path.join(args.logs, args.name, "params.txt") with open(params_file, "w") as f: for name in sorted(vars(args)): val = getattr(args, name) logging.info(f" {name}: {val}") f.write(f"{name}: {val}\n") if args.distributed and not args.horovod: if args.use_bn_sync: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) ddp_args = {} if args.ddp_static_graph: # this doesn't exist in older PyTorch, arg only added if enabled ddp_args['static_graph'] = True model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device], **ddp_args) # create optimizer and scaler optimizer = None scaler = None if args.train_data: assert not args.trace, 'Cannot train with traced model' def exclude(n, p): return p.ndim < 2 or 'bn' in n or 'ln' in n or 'bias' in n or 'logit_scale' in n def include(n, p): return not exclude(n, p) named_parameters = list(model.named_parameters()) gain_or_bias_params = [p for n, p in named_parameters if exclude(n, p) and p.requires_grad] rest_params = [p for n, p in named_parameters if include(n, p) and p.requires_grad] optimizer = optim.AdamW( [ {"params": gain_or_bias_params, "weight_decay": 0.}, {"params": rest_params, "weight_decay": args.wd}, ], lr=args.lr, betas=(args.beta1, args.beta2), eps=args.eps, ) if args.horovod: optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters()) hvd.broadcast_parameters(model.state_dict(), root_rank=0) hvd.broadcast_optimizer_state(optimizer, root_rank=0) scaler = GradScaler() if args.precision == "amp" else None # optionally resume from a checkpoint start_epoch = 0 if args.resume is not None: if os.path.isfile(args.resume): checkpoint = torch.load(args.resume, map_location=device) if 'epoch' in checkpoint: # resuming a train checkpoint w/ epoch and optimizer state start_epoch = checkpoint["epoch"] sd = checkpoint["state_dict"] if not args.distributed and next(iter(sd.items()))[0].startswith('module'): sd = {k[len('module.'):]: v for k, v in sd.items()} model.load_state_dict(sd) if optimizer is not None: optimizer.load_state_dict(checkpoint["optimizer"]) if scaler is not None and 'scaler' in checkpoint: scaler.load_state_dict(checkpoint['scaler']) logging.info(f"=> resuming checkpoint '{args.resume}' (epoch {start_epoch})") else: # loading a bare (model only) checkpoint for fine-tune or evaluation model.load_state_dict(checkpoint) logging.info(f"=> loaded checkpoint '{args.resume}' (epoch {start_epoch})") else: logging.info("=> no checkpoint found at '{}'".format(args.resume)) # initialize datasets data = get_data(args, (preprocess_train, preprocess_val), epoch=start_epoch) assert len(data), 'At least one train or eval dataset must be specified.' 
# create scheduler if train scheduler = None if 'train' in data and optimizer is not None: total_steps = data["train"].dataloader.num_batches * args.epochs scheduler = cosine_lr(optimizer, args.lr, args.warmup, total_steps) # determine if this worker should save logs and checkpoints. only do so if it is rank == 0 args.save_logs = args.logs and args.logs.lower() != 'none' and is_master(args) writer = None if args.save_logs and args.tensorboard: assert tensorboard is not None, "Please install tensorboard." writer = tensorboard.SummaryWriter(args.tensorboard_path) if args.wandb and is_master(args): assert wandb is not None, 'Please install wandb.' logging.debug('Starting wandb.') args.train_sz = data["train"].dataloader.num_samples if args.val_data is not None: args.val_sz = data["val"].dataloader.num_samples # you will have to configure this for your project! wandb.init( project="open-clip", notes=args.wandb_notes, tags=[], config=vars(args), ) if args.debug: wandb.watch(model, log='all') wandb.save(params_file) logging.debug('Finished loading wandb.') if 'train' not in data: evaluate(model, data, start_epoch, args, writer) return for epoch in range(start_epoch, args.epochs): if is_master(args): logging.info(f'Start epoch {epoch}') train_one_epoch(model, data, epoch, optimizer, scaler, scheduler, args, writer) completed_epoch = epoch + 1 if any(v in data for v in ('val', 'imagenet-val', 'imagenet-v2')): evaluate(model, data, completed_epoch, args, writer) # Saving checkpoints. if args.save_logs: checkpoint_dict = { "epoch": completed_epoch, "name": args.name, "state_dict": model.state_dict(), "optimizer": optimizer.state_dict(), } if scaler is not None: checkpoint_dict["scaler"] = scaler.state_dict() if completed_epoch == args.epochs or ( args.save_frequency > 0 and (completed_epoch % args.save_frequency) == 0 ): torch.save( checkpoint_dict, os.path.join(args.checkpoint_path, f"epoch_{completed_epoch}.pt"), ) if args.save_most_recent: torch.save( checkpoint_dict, os.path.join(args.checkpoint_path, "epoch_latest.pt"), ) if args.wandb and is_master(args): wandb.finish() def copy_codebase(args): from shutil import copytree, ignore_patterns new_code_path = os.path.join(args.logs, args.name, "code") if os.path.exists(new_code_path): print( f"Error. Experiment already exists at {new_code_path}. Use --name to specify a new experiment." ) return -1 print(f"Copying codebase to {new_code_path}") current_code_path = os.path.realpath(__file__) for _ in range(3): current_code_path = os.path.dirname(current_code_path) copytree(current_code_path, new_code_path, ignore=ignore_patterns('log', 'logs', 'wandb')) print("Done copying code.") return 1 if __name__ == "__main__": main()
KosmosX-API-main
kosmosX/open_clip/src/training/main.py
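main() above builds AdamW with two parameter groups: 1-D tensors and anything named bn/ln/bias/logit_scale get zero weight decay, everything else uses args.wd. The standalone sketch below applies the same exclude/include split to a toy model; the model, learning rate and wd value are illustrative only, not the real CLIP tower.

# Standalone sketch of the weight-decay grouping used in main.py above.
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8), nn.Linear(8, 2))  # toy stand-in
wd = 0.2

def exclude(n, p):
    return p.ndim < 2 or 'bn' in n or 'ln' in n or 'bias' in n or 'logit_scale' in n

def include(n, p):
    return not exclude(n, p)

named_parameters = list(model.named_parameters())
gain_or_bias_params = [p for n, p in named_parameters if exclude(n, p) and p.requires_grad]
rest_params = [p for n, p in named_parameters if include(n, p) and p.requires_grad]

optimizer = torch.optim.AdamW(
    [
        {"params": gain_or_bias_params, "weight_decay": 0.},   # biases, norm gains
        {"params": rest_params, "weight_decay": wd},           # weight matrices
    ],
    lr=5.0e-4, betas=(0.9, 0.98), eps=1.0e-6,
)
print(len(gain_or_bias_params), "params without decay,", len(rest_params), "with decay")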
imagenet_classnames = ["tench", "goldfish", "great white shark", "tiger shark", "hammerhead shark", "electric ray", "stingray", "rooster", "hen", "ostrich", "brambling", "goldfinch", "house finch", "junco", "indigo bunting", "American robin", "bulbul", "jay", "magpie", "chickadee", "American dipper", "kite (bird of prey)", "bald eagle", "vulture", "great grey owl", "fire salamander", "smooth newt", "newt", "spotted salamander", "axolotl", "American bullfrog", "tree frog", "tailed frog", "loggerhead sea turtle", "leatherback sea turtle", "mud turtle", "terrapin", "box turtle", "banded gecko", "green iguana", "Carolina anole", "desert grassland whiptail lizard", "agama", "frilled-necked lizard", "alligator lizard", "Gila monster", "European green lizard", "chameleon", "Komodo dragon", "Nile crocodile", "American alligator", "triceratops", "worm snake", "ring-necked snake", "eastern hog-nosed snake", "smooth green snake", "kingsnake", "garter snake", "water snake", "vine snake", "night snake", "boa constrictor", "African rock python", "Indian cobra", "green mamba", "sea snake", "Saharan horned viper", "eastern diamondback rattlesnake", "sidewinder rattlesnake", "trilobite", "harvestman", "scorpion", "yellow garden spider", "barn spider", "European garden spider", "southern black widow", "tarantula", "wolf spider", "tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse", "prairie grouse", "peafowl", "quail", "partridge", "african grey parrot", "macaw", "sulphur-crested cockatoo", "lorikeet", "coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "duck", "red-breasted merganser", "goose", "black swan", "tusker", "echidna", "platypus", "wallaby", "koala", "wombat", "jellyfish", "sea anemone", "brain coral", "flatworm", "nematode", "conch", "snail", "slug", "sea slug", "chiton", "chambered nautilus", "Dungeness crab", "rock crab", "fiddler crab", "red king crab", "American lobster", "spiny lobster", "crayfish", "hermit crab", "isopod", "white stork", "black stork", "spoonbill", "flamingo", "little blue heron", "great egret", "bittern bird", "crane bird", "limpkin", "common gallinule", "American coot", "bustard", "ruddy turnstone", "dunlin", "common redshank", "dowitcher", "oystercatcher", "pelican", "king penguin", "albatross", "grey whale", "killer whale", "dugong", "sea lion", "Chihuahua", "Japanese Chin", "Maltese", "Pekingese", "Shih Tzu", "King Charles Spaniel", "Papillon", "toy terrier", "Rhodesian Ridgeback", "Afghan Hound", "Basset Hound", "Beagle", "Bloodhound", "Bluetick Coonhound", "Black and Tan Coonhound", "Treeing Walker Coonhound", "English foxhound", "Redbone Coonhound", "borzoi", "Irish Wolfhound", "Italian Greyhound", "Whippet", "Ibizan Hound", "Norwegian Elkhound", "Otterhound", "Saluki", "Scottish Deerhound", "Weimaraner", "Staffordshire Bull Terrier", "American Staffordshire Terrier", "Bedlington Terrier", "Border Terrier", "Kerry Blue Terrier", "Irish Terrier", "Norfolk Terrier", "Norwich Terrier", "Yorkshire Terrier", "Wire Fox Terrier", "Lakeland Terrier", "Sealyham Terrier", "Airedale Terrier", "Cairn Terrier", "Australian Terrier", "Dandie Dinmont Terrier", "Boston Terrier", "Miniature Schnauzer", "Giant Schnauzer", "Standard Schnauzer", "Scottish Terrier", "Tibetan Terrier", "Australian Silky Terrier", "Soft-coated Wheaten Terrier", "West Highland White Terrier", "Lhasa Apso", "Flat-Coated Retriever", "Curly-coated Retriever", "Golden Retriever", "Labrador Retriever", "Chesapeake Bay Retriever", "German Shorthaired Pointer", "Vizsla", "English 
Setter", "Irish Setter", "Gordon Setter", "Brittany dog", "Clumber Spaniel", "English Springer Spaniel", "Welsh Springer Spaniel", "Cocker Spaniel", "Sussex Spaniel", "Irish Water Spaniel", "Kuvasz", "Schipperke", "Groenendael dog", "Malinois", "Briard", "Australian Kelpie", "Komondor", "Old English Sheepdog", "Shetland Sheepdog", "collie", "Border Collie", "Bouvier des Flandres dog", "Rottweiler", "German Shepherd Dog", "Dobermann", "Miniature Pinscher", "Greater Swiss Mountain Dog", "Bernese Mountain Dog", "Appenzeller Sennenhund", "Entlebucher Sennenhund", "Boxer", "Bullmastiff", "Tibetan Mastiff", "French Bulldog", "Great Dane", "St. Bernard", "husky", "Alaskan Malamute", "Siberian Husky", "Dalmatian", "Affenpinscher", "Basenji", "pug", "Leonberger", "Newfoundland dog", "Great Pyrenees dog", "Samoyed", "Pomeranian", "Chow Chow", "Keeshond", "brussels griffon", "Pembroke Welsh Corgi", "Cardigan Welsh Corgi", "Toy Poodle", "Miniature Poodle", "Standard Poodle", "Mexican hairless dog (xoloitzcuintli)", "grey wolf", "Alaskan tundra wolf", "red wolf or maned wolf", "coyote", "dingo", "dhole", "African wild dog", "hyena", "red fox", "kit fox", "Arctic fox", "grey fox", "tabby cat", "tiger cat", "Persian cat", "Siamese cat", "Egyptian Mau", "cougar", "lynx", "leopard", "snow leopard", "jaguar", "lion", "tiger", "cheetah", "brown bear", "American black bear", "polar bear", "sloth bear", "mongoose", "meerkat", "tiger beetle", "ladybug", "ground beetle", "longhorn beetle", "leaf beetle", "dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant", "grasshopper", "cricket insect", "stick insect", "cockroach", "praying mantis", "cicada", "leafhopper", "lacewing", "dragonfly", "damselfly", "red admiral butterfly", "ringlet butterfly", "monarch butterfly", "small white butterfly", "sulphur butterfly", "gossamer-winged butterfly", "starfish", "sea urchin", "sea cucumber", "cottontail rabbit", "hare", "Angora rabbit", "hamster", "porcupine", "fox squirrel", "marmot", "beaver", "guinea pig", "common sorrel horse", "zebra", "pig", "wild boar", "warthog", "hippopotamus", "ox", "water buffalo", "bison", "ram (adult male sheep)", "bighorn sheep", "Alpine ibex", "hartebeest", "impala (antelope)", "gazelle", "arabian camel", "llama", "weasel", "mink", "European polecat", "black-footed ferret", "otter", "skunk", "badger", "armadillo", "three-toed sloth", "orangutan", "gorilla", "chimpanzee", "gibbon", "siamang", "guenon", "patas monkey", "baboon", "macaque", "langur", "black-and-white colobus", "proboscis monkey", "marmoset", "white-headed capuchin", "howler monkey", "titi monkey", "Geoffroy's spider monkey", "common squirrel monkey", "ring-tailed lemur", "indri", "Asian elephant", "African bush elephant", "red panda", "giant panda", "snoek fish", "eel", "silver salmon", "rock beauty fish", "clownfish", "sturgeon", "gar fish", "lionfish", "pufferfish", "abacus", "abaya", "academic gown", "accordion", "acoustic guitar", "aircraft carrier", "airliner", "airship", "altar", "ambulance", "amphibious vehicle", "analog clock", "apiary", "apron", "trash can", "assault rifle", "backpack", "bakery", "balance beam", "balloon", "ballpoint pen", "Band-Aid", "banjo", "baluster / handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel", "wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "swimming cap", "bath towel", "bathtub", "station wagon", "lighthouse", "beaker", "military hat (bearskin or shako)", "beer bottle", "beer glass", "bell tower", "baby bib", "tandem bicycle", 
"bikini", "ring binder", "binoculars", "birdhouse", "boathouse", "bobsleigh", "bolo tie", "poke bonnet", "bookcase", "bookstore", "bottle cap", "hunting bow", "bow tie", "brass memorial plaque", "bra", "breakwater", "breastplate", "broom", "bucket", "buckle", "bulletproof vest", "high-speed train", "butcher shop", "taxicab", "cauldron", "candle", "cannon", "canoe", "can opener", "cardigan", "car mirror", "carousel", "tool kit", "cardboard box / carton", "car wheel", "automated teller machine", "cassette", "cassette player", "castle", "catamaran", "CD player", "cello", "mobile phone", "chain", "chain-link fence", "chain mail", "chainsaw", "storage chest", "chiffonier", "bell or wind chime", "china cabinet", "Christmas stocking", "church", "movie theater", "cleaver", "cliff dwelling", "cloak", "clogs", "cocktail shaker", "coffee mug", "coffeemaker", "spiral or coil", "combination lock", "computer keyboard", "candy store", "container ship", "convertible", "corkscrew", "cornet", "cowboy boot", "cowboy hat", "cradle", "construction crane", "crash helmet", "crate", "infant bed", "Crock Pot", "croquet ball", "crutch", "cuirass", "dam", "desk", "desktop computer", "rotary dial telephone", "diaper", "digital clock", "digital watch", "dining table", "dishcloth", "dishwasher", "disc brake", "dock", "dog sled", "dome", "doormat", "drilling rig", "drum", "drumstick", "dumbbell", "Dutch oven", "electric fan", "electric guitar", "electric locomotive", "entertainment center", "envelope", "espresso machine", "face powder", "feather boa", "filing cabinet", "fireboat", "fire truck", "fire screen", "flagpole", "flute", "folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster bed", "freight car", "French horn", "frying pan", "fur coat", "garbage truck", "gas mask or respirator", "gas pump", "goblet", "go-kart", "golf ball", "golf cart", "gondola", "gong", "gown", "grand piano", "greenhouse", "radiator grille", "grocery store", "guillotine", "hair clip", "hair spray", "half-track", "hammer", "hamper", "hair dryer", "hand-held computer", "handkerchief", "hard disk drive", "harmonica", "harp", "combine harvester", "hatchet", "holster", "home theater", "honeycomb", "hook", "hoop skirt", "gymnastic horizontal bar", "horse-drawn vehicle", "hourglass", "iPod", "clothes iron", "carved pumpkin", "jeans", "jeep", "T-shirt", "jigsaw puzzle", "rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat", "ladle", "lampshade", "laptop computer", "lawn mower", "lens cap", "letter opener", "library", "lifeboat", "lighter", "limousine", "ocean liner", "lipstick", "slip-on shoe", "lotion", "music speaker", "loupe magnifying glass", "sawmill", "magnetic compass", "messenger bag", "mailbox", "tights", "one-piece bathing suit", "manhole cover", "maraca", "marimba", "mask", "matchstick", "maypole", "maze", "measuring cup", "medicine cabinet", "megalith", "microphone", "microwave oven", "military uniform", "milk can", "minibus", "miniskirt", "minivan", "missile", "mitten", "mixing bowl", "mobile home", "ford model t", "modem", "monastery", "monitor", "moped", "mortar and pestle", "graduation cap", "mosque", "mosquito net", "vespa", "mountain bike", "tent", "computer mouse", "mousetrap", "moving van", "muzzle", "metal nail", "neck brace", "necklace", "baby pacifier", "notebook computer", "obelisk", "oboe", "ocarina", "odometer", "oil filter", "pipe organ", "oscilloscope", "overskirt", "bullock cart", "oxygen mask", "product packet / packaging", "paddle", "paddle wheel", "padlock", "paintbrush", 
"pajamas", "palace", "pan flute", "paper towel", "parachute", "parallel bars", "park bench", "parking meter", "railroad car", "patio", "payphone", "pedestal", "pencil case", "pencil sharpener", "perfume", "Petri dish", "photocopier", "plectrum", "Pickelhaube", "picket fence", "pickup truck", "pier", "piggy bank", "pill bottle", "pillow", "ping-pong ball", "pinwheel", "pirate ship", "drink pitcher", "block plane", "planetarium", "plastic bag", "plate rack", "farm plow", "plunger", "Polaroid camera", "pole", "police van", "poncho", "pool table", "soda bottle", "plant pot", "potter's wheel", "power drill", "prayer rug", "printer", "prison", "missile", "projector", "hockey puck", "punching bag", "purse", "quill", "quilt", "race car", "racket", "radiator", "radio", "radio telescope", "rain barrel", "recreational vehicle", "fishing casting reel", "reflex camera", "refrigerator", "remote control", "restaurant", "revolver", "rifle", "rocking chair", "rotisserie", "eraser", "rugby ball", "ruler measuring stick", "sneaker", "safe", "safety pin", "salt shaker", "sandal", "sarong", "saxophone", "scabbard", "weighing scale", "school bus", "schooner", "scoreboard", "CRT monitor", "screw", "screwdriver", "seat belt", "sewing machine", "shield", "shoe store", "shoji screen / room divider", "shopping basket", "shopping cart", "shovel", "shower cap", "shower curtain", "ski", "balaclava ski mask", "sleeping bag", "slide rule", "sliding door", "slot machine", "snorkel", "snowmobile", "snowplow", "soap dispenser", "soccer ball", "sock", "solar thermal collector", "sombrero", "soup bowl", "keyboard space bar", "space heater", "space shuttle", "spatula", "motorboat", "spider web", "spindle", "sports car", "spotlight", "stage", "steam locomotive", "through arch bridge", "steel drum", "stethoscope", "scarf", "stone wall", "stopwatch", "stove", "strainer", "tram", "stretcher", "couch", "stupa", "submarine", "suit", "sundial", "sunglasses", "sunglasses", "sunscreen", "suspension bridge", "mop", "sweatshirt", "swim trunks / shorts", "swing", "electrical switch", "syringe", "table lamp", "tank", "tape player", "teapot", "teddy bear", "television", "tennis ball", "thatched roof", "front curtain", "thimble", "threshing machine", "throne", "tile roof", "toaster", "tobacco shop", "toilet seat", "torch", "totem pole", "tow truck", "toy store", "tractor", "semi-trailer truck", "tray", "trench coat", "tricycle", "trimaran", "tripod", "triumphal arch", "trolleybus", "trombone", "hot tub", "turnstile", "typewriter keyboard", "umbrella", "unicycle", "upright piano", "vacuum cleaner", "vase", "vaulted or arched ceiling", "velvet fabric", "vending machine", "vestment", "viaduct", "violin", "volleyball", "waffle iron", "wall clock", "wallet", "wardrobe", "military aircraft", "sink", "washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle", "hair wig", "window screen", "window shade", "Windsor tie", "wine bottle", "airplane wing", "wok", "wooden spoon", "wool", "split-rail fence", "shipwreck", "sailboat", "yurt", "website", "comic book", "crossword", "traffic or street sign", "traffic light", "dust jacket", "menu", "plate", "guacamole", "consomme", "hot pot", "trifle", "ice cream", "popsicle", "baguette", "bagel", "pretzel", "cheeseburger", "hot dog", "mashed potatoes", "cabbage", "broccoli", "cauliflower", "zucchini", "spaghetti squash", "acorn squash", "butternut squash", "cucumber", "artichoke", "bell pepper", "cardoon", "mushroom", "Granny Smith apple", "strawberry", "orange", "lemon", "fig", 
"pineapple", "banana", "jackfruit", "cherimoya (custard apple)", "pomegranate", "hay", "carbonara", "chocolate syrup", "dough", "meatloaf", "pizza", "pot pie", "burrito", "red wine", "espresso", "tea cup", "eggnog", "mountain", "bubble", "cliff", "coral reef", "geyser", "lakeshore", "promontory", "sandbar", "beach", "valley", "volcano", "baseball player", "bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper", "corn", "acorn", "rose hip", "horse chestnut seed", "coral fungus", "agaric", "gyromitra", "stinkhorn mushroom", "earth star fungus", "hen of the woods mushroom", "bolete", "corn cob", "toilet paper"] openai_imagenet_template = [ lambda c: f'a bad photo of a {c}.', lambda c: f'a photo of many {c}.', lambda c: f'a sculpture of a {c}.', lambda c: f'a photo of the hard to see {c}.', lambda c: f'a low resolution photo of the {c}.', lambda c: f'a rendering of a {c}.', lambda c: f'graffiti of a {c}.', lambda c: f'a bad photo of the {c}.', lambda c: f'a cropped photo of the {c}.', lambda c: f'a tattoo of a {c}.', lambda c: f'the embroidered {c}.', lambda c: f'a photo of a hard to see {c}.', lambda c: f'a bright photo of a {c}.', lambda c: f'a photo of a clean {c}.', lambda c: f'a photo of a dirty {c}.', lambda c: f'a dark photo of the {c}.', lambda c: f'a drawing of a {c}.', lambda c: f'a photo of my {c}.', lambda c: f'the plastic {c}.', lambda c: f'a photo of the cool {c}.', lambda c: f'a close-up photo of a {c}.', lambda c: f'a black and white photo of the {c}.', lambda c: f'a painting of the {c}.', lambda c: f'a painting of a {c}.', lambda c: f'a pixelated photo of the {c}.', lambda c: f'a sculpture of the {c}.', lambda c: f'a bright photo of the {c}.', lambda c: f'a cropped photo of a {c}.', lambda c: f'a plastic {c}.', lambda c: f'a photo of the dirty {c}.', lambda c: f'a jpeg corrupted photo of a {c}.', lambda c: f'a blurry photo of the {c}.', lambda c: f'a photo of the {c}.', lambda c: f'a good photo of the {c}.', lambda c: f'a rendering of the {c}.', lambda c: f'a {c} in a video game.', lambda c: f'a photo of one {c}.', lambda c: f'a doodle of a {c}.', lambda c: f'a close-up photo of the {c}.', lambda c: f'a photo of a {c}.', lambda c: f'the origami {c}.', lambda c: f'the {c} in a video game.', lambda c: f'a sketch of a {c}.', lambda c: f'a doodle of the {c}.', lambda c: f'a origami {c}.', lambda c: f'a low resolution photo of a {c}.', lambda c: f'the toy {c}.', lambda c: f'a rendition of the {c}.', lambda c: f'a photo of the clean {c}.', lambda c: f'a photo of a large {c}.', lambda c: f'a rendition of a {c}.', lambda c: f'a photo of a nice {c}.', lambda c: f'a photo of a weird {c}.', lambda c: f'a blurry photo of a {c}.', lambda c: f'a cartoon {c}.', lambda c: f'art of a {c}.', lambda c: f'a sketch of the {c}.', lambda c: f'a embroidered {c}.', lambda c: f'a pixelated photo of a {c}.', lambda c: f'itap of the {c}.', lambda c: f'a jpeg corrupted photo of the {c}.', lambda c: f'a good photo of a {c}.', lambda c: f'a plushie {c}.', lambda c: f'a photo of the nice {c}.', lambda c: f'a photo of the small {c}.', lambda c: f'a photo of the weird {c}.', lambda c: f'the cartoon {c}.', lambda c: f'art of the {c}.', lambda c: f'a drawing of the {c}.', lambda c: f'a photo of the large {c}.', lambda c: f'a black and white photo of a {c}.', lambda c: f'the plushie {c}.', lambda c: f'a dark photo of a {c}.', lambda c: f'itap of a {c}.', lambda c: f'graffiti of the {c}.', lambda c: f'a toy {c}.', lambda c: f'itap of my {c}.', lambda c: f'a photo of a cool {c}.', lambda c: f'a 
photo of a small {c}.', lambda c: f'a tattoo of the {c}.', ]
KosmosX-API-main
kosmosX/open_clip/src/training/imagenet_zeroshot_data.py
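Each entry of openai_imagenet_template above is a callable that turns one class name into one prompt; zero_shot_classifier() expands every class through all templates and averages the resulting text embeddings. A tiny sketch of that expansion step is below, with tokenization and encoding omitted and only a few templates reproduced so it stays short.

# Sketch of prompt expansion with the template list defined above; only three
# templates are reproduced here, and the class name is the second ImageNet class.
templates = [
    lambda c: f'a bad photo of a {c}.',
    lambda c: f'a photo of many {c}.',
    lambda c: f'a sculpture of a {c}.',
]

classname = "goldfish"
prompts = [template(classname) for template in templates]   # same loop as zero_shot_classifier()
print(len(prompts), prompts[0])   # 3 'a bad photo of a goldfish.'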
import ast import json import logging import math import os import random import sys from dataclasses import dataclass from multiprocessing import Value import braceexpand import numpy as np import pandas as pd import torch import torchvision.datasets as datasets import webdataset as wds from PIL import Image from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler, IterableDataset, get_worker_info from torch.utils.data.distributed import DistributedSampler from webdataset.filters import _shuffle from webdataset.tariterators import base_plus_ext, url_opener, tar_file_expander, valid_sample try: import horovod.torch as hvd except ImportError: hvd = None from open_clip import tokenize class CsvDataset(Dataset): def __init__(self, input_filename, transforms, img_key, caption_key, sep="\t"): logging.debug(f'Loading csv data from {input_filename}.') df = pd.read_csv(input_filename, sep=sep) self.images = df[img_key].tolist() self.captions = df[caption_key].tolist() self.transforms = transforms logging.debug('Done loading data.') def __len__(self): return len(self.captions) def __getitem__(self, idx): images = self.transforms(Image.open(str(self.images[idx]))) texts = tokenize([str(self.captions[idx])])[0] return images, texts class SharedEpoch: def __init__(self, epoch: int = 0): self.shared_epoch = Value('i', epoch) def set_value(self, epoch): self.shared_epoch.value = epoch def get_value(self): return self.shared_epoch.value @dataclass class DataInfo: dataloader: DataLoader sampler: DistributedSampler = None shared_epoch: SharedEpoch = None def set_epoch(self, epoch): if self.shared_epoch is not None: self.shared_epoch.set_value(epoch) if self.sampler is not None and isinstance(self.sampler, DistributedSampler): self.sampler.set_epoch(epoch) def preprocess_txt(text): return tokenize([str(text)])[0] def get_dataset_size(shards): shards_list = list(braceexpand.braceexpand(shards)) dir_path = os.path.dirname(shards) sizes_filename = os.path.join(dir_path, 'sizes.json') len_filename = os.path.join(dir_path, '__len__') if os.path.exists(sizes_filename): sizes = json.load(open(sizes_filename, 'r')) total_size = sum([int(sizes[os.path.basename(shard)]) for shard in shards_list]) elif os.path.exists(len_filename): # FIXME this used to be eval(open(...)) but that seemed rather unsafe total_size = ast.literal_eval(open(len_filename, 'r').read()) else: total_size = None # num samples undefined # some common dataset sizes (at time of authors last download) # CC3M (train): 2905954 # CC12M: 10968539 # LAION-400M: 407332084 # LAION-2B (english): 2170337258 num_shards = len(shards_list) return total_size, num_shards def get_imagenet(args, preprocess_fns, split): assert split in ["train", "val", "v2"] is_train = split == "train" preprocess_train, preprocess_val = preprocess_fns if split == "v2": from imagenetv2_pytorch import ImageNetV2Dataset dataset = ImageNetV2Dataset(location=args.imagenet_v2, transform=preprocess_val) else: if is_train: data_path = args.imagenet_train preprocess_fn = preprocess_train else: data_path = args.imagenet_val preprocess_fn = preprocess_val assert data_path dataset = datasets.ImageFolder(data_path, transform=preprocess_fn) if is_train: idxs = np.zeros(len(dataset.targets)) target_array = np.array(dataset.targets) k = 50 for c in range(1000): m = target_array == c n = len(idxs[m]) arr = np.zeros(n) arr[:k] = 1 np.random.shuffle(arr) idxs[m] = arr idxs = idxs.astype('int') sampler = SubsetRandomSampler(np.where(idxs)[0]) else: sampler = None dataloader = 
torch.utils.data.DataLoader( dataset, batch_size=args.batch_size, num_workers=args.workers, sampler=sampler, ) return DataInfo(dataloader=dataloader, sampler=sampler) def count_samples(dataloader): os.environ["WDS_EPOCH"] = "0" n_elements, n_batches = 0, 0 for images, texts in dataloader: n_batches += 1 n_elements += len(images) assert len(images) == len(texts) return n_elements, n_batches def filter_no_caption(sample): return 'txt' in sample def log_and_continue(exn): """Call in an exception handler to ignore any exception, isssue a warning, and continue.""" logging.warning(f'Handling webdataset error ({repr(exn)}). Ignoring.') return True def group_by_keys_nothrow(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=None): """Return function over iterator that groups key, value pairs into samples. :param keys: function that splits the key into key and extension (base_plus_ext) :param lcase: convert suffixes to lower case (Default value = True) """ current_sample = None for filesample in data: assert isinstance(filesample, dict) fname, value = filesample["fname"], filesample["data"] prefix, suffix = keys(fname) if prefix is None: continue if lcase: suffix = suffix.lower() # FIXME webdataset version throws if suffix in current_sample, but we have a potential for # this happening in the current LAION400m dataset if a tar ends with same prefix as the next # begins, rare, but can happen since prefix aren't unique across tar files in that dataset if current_sample is None or prefix != current_sample["__key__"] or suffix in current_sample: if valid_sample(current_sample): yield current_sample current_sample = dict(__key__=prefix, __url__=filesample["__url__"]) if suffixes is None or suffix in suffixes: current_sample[suffix] = value if valid_sample(current_sample): yield current_sample def tarfile_to_samples_nothrow(src, handler=log_and_continue): # NOTE this is a re-impl of the webdataset impl with group_by_keys that doesn't throw streams = url_opener(src, handler=handler) files = tar_file_expander(streams, handler=handler) samples = group_by_keys_nothrow(files, handler=handler) return samples def pytorch_worker_seed(): """get dataloader worker seed from pytorch""" worker_info = get_worker_info() if worker_info is not None: # favour the seed already created for pytorch dataloader workers if it exists return worker_info.seed # fallback to wds rank based seed return wds.utils.pytorch_worker_seed() _SHARD_SHUFFLE_SIZE = 2000 _SHARD_SHUFFLE_INITIAL = 500 _SAMPLE_SHUFFLE_SIZE = 5000 _SAMPLE_SHUFFLE_INITIAL = 1000 class detshuffle2(wds.PipelineStage): def __init__( self, bufsize=1000, initial=100, seed=0, epoch=-1, ): self.bufsize = bufsize self.initial = initial self.seed = seed self.epoch = epoch def run(self, src): if isinstance(self.epoch, SharedEpoch): epoch = self.epoch.get_value() else: # NOTE: this is epoch tracking is problematic in a multiprocess (dataloader workers or train) # situation as different workers may wrap at different times (or not at all). self.epoch += 1 epoch = self.epoch rng = random.Random() if self.seed < 0: seed = pytorch_worker_seed() + epoch else: seed = self.seed + epoch rng.seed(seed) return _shuffle(src, self.bufsize, self.initial, rng) class ResampledShards2(IterableDataset): """An iterable dataset yielding a list of urls.""" def __init__( self, urls, nshards=sys.maxsize, worker_seed=None, deterministic=False, epoch=-1, ): """Sample shards from the shard list with replacement. 
:param urls: a list of URLs as a Python list or brace notation string """ super().__init__() urls = wds.shardlists.expand_urls(urls) self.urls = urls assert isinstance(self.urls[0], str) self.nshards = nshards self.rng = random.Random() self.worker_seed = pytorch_worker_seed if worker_seed is None else worker_seed self.deterministic = deterministic self.epoch = epoch def __iter__(self): """Return an iterator over the shards.""" if isinstance(self.epoch, SharedEpoch): epoch = self.epoch.get_value() else: # NOTE: this is epoch tracking is problematic in a multiprocess (dataloader workers or train) # situation as different workers may wrap at different times (or not at all). self.epoch += 1 epoch = self.epoch if self.deterministic: # reset seed w/ epoch if deterministic, worker seed should be deterministic due to arg.seed self.rng.seed(self.worker_seed() + epoch) for _ in range(self.nshards): yield dict(url=self.rng.choice(self.urls)) def get_wds_dataset(args, preprocess_img, is_train, epoch=0, floor=False): input_shards = args.train_data if is_train else args.val_data assert input_shards is not None resampled = getattr(args, 'dataset_resampled', False) and is_train num_samples, num_shards = get_dataset_size(input_shards) if not num_samples: if is_train: num_samples = args.train_num_samples if not num_samples: raise RuntimeError( 'Currently, number of dataset samples must be specified for training dataset. ' 'Please specify via `--train-num-samples` if no dataset length info present.') else: num_samples = args.val_num_samples or 0 # eval will just exhaust the iterator if not specified shared_epoch = SharedEpoch(epoch=epoch) # create a shared epoch store to sync epoch to dataloader worker proc if resampled: pipeline = [ResampledShards2(input_shards, deterministic=True, epoch=shared_epoch)] else: pipeline = [wds.SimpleShardList(input_shards)] # at this point we have an iterator over all the shards if is_train: if not resampled: pipeline.extend([ detshuffle2( bufsize=_SHARD_SHUFFLE_SIZE, initial=_SHARD_SHUFFLE_INITIAL, seed=args.seed, epoch=shared_epoch, ), wds.split_by_node, wds.split_by_worker, ]) pipeline.extend([ # at this point, we have an iterator over the shards assigned to each worker at each node tarfile_to_samples_nothrow, # wds.tarfile_to_samples(handler=log_and_continue), wds.shuffle( bufsize=_SAMPLE_SHUFFLE_SIZE, initial=_SAMPLE_SHUFFLE_INITIAL, ), ]) else: pipeline.extend([ wds.split_by_worker, # at this point, we have an iterator over the shards assigned to each worker wds.tarfile_to_samples(handler=log_and_continue), ]) pipeline.extend([ wds.select(filter_no_caption), wds.decode("pilrgb", handler=log_and_continue), wds.rename(image="jpg;png", text="txt"), wds.map_dict(image=preprocess_img, text=preprocess_txt), wds.to_tuple("image", "text"), wds.batched(args.batch_size, partial=not is_train), ]) dataset = wds.DataPipeline(*pipeline) if is_train: if not resampled: assert num_shards >= args.workers * args.world_size, 'number of shards must be >= total workers' # roll over and repeat a few samples to get same number of full batches on each node round_fn = math.floor if floor else math.ceil global_batch_size = args.batch_size * args.world_size num_batches = round_fn(num_samples / global_batch_size) num_workers = max(1, args.workers) num_worker_batches = round_fn(num_batches / num_workers) # per dataloader worker num_batches = num_worker_batches * num_workers num_samples = num_batches * global_batch_size dataset = dataset.with_epoch(num_worker_batches) # each worker is iterating over 
this else: # last batches are partial, eval is done on single (master) node num_batches = math.ceil(num_samples / args.batch_size) dataloader = wds.WebLoader( dataset, batch_size=None, shuffle=False, num_workers=args.workers, persistent_workers=True, ) # FIXME not clear which approach is better, with_epoch before vs after dataloader? # hoping to resolve via https://github.com/webdataset/webdataset/issues/169 # if is_train: # # roll over and repeat a few samples to get same number of full batches on each node # global_batch_size = args.batch_size * args.world_size # num_batches = math.ceil(num_samples / global_batch_size) # num_workers = max(1, args.workers) # num_batches = math.ceil(num_batches / num_workers) * num_workers # num_samples = num_batches * global_batch_size # dataloader = dataloader.with_epoch(num_batches) # else: # # last batches are partial, eval is done on single (master) node # num_batches = math.ceil(num_samples / args.batch_size) # add meta-data to dataloader instance for convenience dataloader.num_batches = num_batches dataloader.num_samples = num_samples return DataInfo(dataloader=dataloader, shared_epoch=shared_epoch) def get_csv_dataset(args, preprocess_fn, is_train, epoch=0): input_filename = args.train_data if is_train else args.val_data assert input_filename dataset = CsvDataset( input_filename, preprocess_fn, img_key=args.csv_img_key, caption_key=args.csv_caption_key, sep=args.csv_separator) num_samples = len(dataset) sampler = DistributedSampler(dataset) if args.distributed and is_train else None shuffle = is_train and sampler is None dataloader = DataLoader( dataset, batch_size=args.batch_size, shuffle=shuffle, num_workers=args.workers, pin_memory=True, sampler=sampler, drop_last=is_train, ) dataloader.num_samples = num_samples dataloader.num_batches = len(dataloader) return DataInfo(dataloader, sampler) def get_dataset_fn(data_path, dataset_type): if dataset_type == "webdataset": return get_wds_dataset elif dataset_type == "csv": return get_csv_dataset elif dataset_type == "auto": ext = data_path.split('.')[-1] if ext in ['csv', 'tsv']: return get_csv_dataset elif ext in ['tar']: return get_wds_dataset else: raise ValueError( f"Tried to figure out dataset type, but failed for extension {ext}.") else: raise ValueError(f"Unsupported dataset type: {dataset_type}") def get_data(args, preprocess_fns, epoch=0): preprocess_train, preprocess_val = preprocess_fns data = {} if args.train_data: data["train"] = get_dataset_fn(args.train_data, args.dataset_type)( args, preprocess_train, is_train=True, epoch=epoch) if args.val_data: data["val"] = get_dataset_fn(args.val_data, args.dataset_type)( args, preprocess_val, is_train=False) if args.imagenet_val is not None: data["imagenet-val"] = get_imagenet(args, preprocess_fns, "val") if args.imagenet_v2 is not None: data["imagenet-v2"] = get_imagenet(args, preprocess_fns, "v2") return data
KosmosX-API-main
kosmosX/open_clip/src/training/data.py
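The webdataset pipeline above resolves its shard list either by deterministic per-epoch resampling (ResampledShards2) or by shuffling a fixed list (detshuffle2). A minimal, self-contained sketch of the resampling idea, using only the standard library; the function and shard names below are illustrative, not part of the module:

import random

def resample_shards(urls, nshards, epoch, seed=0):
    # mirrors ResampledShards2: reseed per epoch, then sample shards with replacement
    rng = random.Random(seed + epoch)
    return [rng.choice(urls) for _ in range(nshards)]

urls = [f"shard-{i:05d}.tar" for i in range(4)]   # hypothetical shard names
print(resample_shards(urls, nshards=6, epoch=0))  # same epoch + seed -> same shard sequence
print(resample_shards(urls, nshards=6, epoch=1))  # new epoch -> different, but still deterministic, sample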
import hashlib import os import urllib import warnings from tqdm import tqdm _RN50 = dict( openai="https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt", cc12m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt" ) _RN50_quickgelu = dict( openai="https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt", yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt", cc12m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt" ) _RN101 = dict( openai="https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt", yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt" ) _RN101_quickgelu = dict( openai="https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt", yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt" ) _RN50x4 = dict( openai="https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt", ) _RN50x16 = dict( openai="https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt", ) _RN50x64 = dict( openai="https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt", ) _VITB32 = dict( openai="https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", laion2b_e16="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-laion2b_e16-af8dbd0c.pth", laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt", laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt", ) _VITB32_quickgelu = dict( openai="https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt", laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt", ) _VITB16 = dict( openai="https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt", laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e31-00efa78f.pt", laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e32-55e67d44.pt", ) _VITB16_PLUS_240 = dict( laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e31-8fb26589.pt", laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e32-699c4b84.pt", ) _VITL14 = dict( 
openai="https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", laion400m_e31='https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e31-69988bb6.pt', laion400m_e32='https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e32-3d133497.pt', ) _VITL14_336 = dict( openai="https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt" ) _PRETRAINED = { "RN50": _RN50, "RN50-quickgelu": _RN50_quickgelu, "RN101": _RN101, "RN101-quickgelu": _RN101_quickgelu, "RN50x4": _RN50x4, "RN50x16": _RN50x16, "RN50x64": _RN50x64, "ViT-B-32": _VITB32, "ViT-B-32-quickgelu": _VITB32_quickgelu, "ViT-B-16": _VITB16, "ViT-B-16-plus-240": _VITB16_PLUS_240, "ViT-L-14": _VITL14, "ViT-L-14-336": _VITL14_336, } def list_pretrained(as_str: bool = False): """ returns list of pretrained models Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True """ return [':'.join([k, t]) if as_str else (k, t) for k in _PRETRAINED.keys() for t in _PRETRAINED[k].keys()] def list_pretrained_tag_models(tag: str): """ return all models having the specified pretrain tag """ models = [] for k in _PRETRAINED.keys(): if tag in _PRETRAINED[k]: models.append(k) return models def list_pretrained_model_tags(model: str): """ return all pretrain tags for the specified model architecture """ tags = [] if model in _PRETRAINED: tags.extend(_PRETRAINED[model].keys()) return tags def get_pretrained_url(model: str, tag: str): if model not in _PRETRAINED: return '' model_pretrained = _PRETRAINED[model] tag = tag.lower() if tag not in model_pretrained: return '' return model_pretrained[tag] def download_pretrained(url: str, root: str = os.path.expanduser("~/.cache/clip")): os.makedirs(root, exist_ok=True) filename = os.path.basename(url) if 'openaipublic' in url: expected_sha256 = url.split("/")[-2] else: expected_sha256 = '' download_target = os.path.join(root, filename) if os.path.exists(download_target) and not os.path.isfile(download_target): raise RuntimeError(f"{download_target} exists and is not a regular file") if os.path.isfile(download_target): if expected_sha256: if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256: return download_target else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") else: return download_target with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop: while True: buffer = source.read(8192) if not buffer: break output.write(buffer) loop.update(len(buffer)) if expected_sha256 and hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256: raise RuntimeError("Model has been downloaded but the SHA256 checksum does not not match") return download_target
KosmosX-API-main
kosmosX/open_clip/src/open_clip/pretrained.py
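A short usage sketch for the registry helpers in pretrained.py, assuming the package is importable as open_clip; the printed values depend on the registry shipped with this copy of the code:

from open_clip.pretrained import list_pretrained, list_pretrained_model_tags, get_pretrained_url

print(list_pretrained(as_str=True)[:5])        # e.g. ['RN50:openai', 'RN50:yfcc15m', ...]
print(list_pretrained_model_tags('ViT-B-32'))  # tags with weights for one architecture
print(get_pretrained_url('ViT-B-32', 'openai'))
print(get_pretrained_url('ViT-B-32', 'no-such-tag'))  # unknown combinations return ''
# download_pretrained(url) would then fetch the checkpoint into ~/.cache/clip and,
# for OpenAI URLs, verify the SHA256 embedded in the URL path.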
__version__ = '1.3.0'
KosmosX-API-main
kosmosX/open_clip/src/open_clip/version.py
KosmosX-API-main
kosmosX/open_clip/src/open_clip/__init__.py
import json import logging import os import re from copy import deepcopy from pathlib import Path from typing import Optional, Tuple import torch from .model import CLIP, convert_weights_to_fp16, resize_pos_embed from .openai import load_openai_model from .pretrained import get_pretrained_url, download_pretrained from .transform import image_transform _MODEL_CONFIG_PATHS = [Path(__file__).parent / "model_configs/"] _MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs def _natural_key(string_): return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())] def _rescan_model_configs(): global _MODEL_CONFIGS config_ext = ('.json',) config_files = [] for config_path in _MODEL_CONFIG_PATHS: if config_path.is_file() and config_path.suffix in config_ext: config_files.append(config_path) elif config_path.is_dir(): for ext in config_ext: config_files.extend(config_path.glob(f'*{ext}')) for cf in config_files: with open(cf, 'r') as f: model_cfg = json.load(f) if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')): _MODEL_CONFIGS[cf.stem] = model_cfg _MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))} _rescan_model_configs() # initial populate of model config registry def load_state_dict(checkpoint_path: str, map_location='cpu'): checkpoint = torch.load(checkpoint_path, map_location=map_location) if isinstance(checkpoint, dict) and 'state_dict' in checkpoint: state_dict = checkpoint['state_dict'] else: state_dict = checkpoint if next(iter(state_dict.items()))[0].startswith('module'): state_dict = {k[7:]: v for k, v in state_dict.items()} return state_dict def load_checkpoint(model, checkpoint_path, strict=True): state_dict = load_state_dict(checkpoint_path) resize_pos_embed(state_dict, model) incompatible_keys = model.load_state_dict(state_dict, strict=strict) return incompatible_keys def create_model( model_name: str, pretrained: str = '', precision: str = 'fp32', device: torch.device = torch.device('cpu'), jit: bool = False, force_quick_gelu: bool = False, pretrained_image: bool = False, ): model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names if pretrained.lower() == 'openai': logging.info(f'Loading pretrained {model_name} from OpenAI.') model = load_openai_model(model_name, device=device, jit=jit) # See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372 if precision == "amp" or precision == "fp32": model = model.float() else: if model_name in _MODEL_CONFIGS: logging.info(f'Loading {model_name} model config.') model_cfg = deepcopy(_MODEL_CONFIGS[model_name]) else: logging.error(f'Model config for {model_name} not found; available models {list_models()}.') raise RuntimeError(f'Model config for {model_name} not found.') if force_quick_gelu: # override for use of QuickGELU on non-OpenAI transformer models model_cfg["quick_gelu"] = True if pretrained_image: if 'timm_model_name' in model_cfg.get('vision_cfg', {}): # pretrained weight loading for timm models set via vision_cfg model_cfg['vision_cfg']['timm_model_pretrained'] = True else: assert False, 'pretrained image towers currently only supported for timm models' model = CLIP(**model_cfg) if pretrained: checkpoint_path = '' url = get_pretrained_url(model_name, pretrained) if url: checkpoint_path = download_pretrained(url) elif os.path.exists(pretrained): checkpoint_path = pretrained if checkpoint_path: logging.info(f'Loading pretrained {model_name} weights 
({pretrained}).') load_checkpoint(model, checkpoint_path) else: logging.warning(f'Pretrained weights ({pretrained}) not found for model {model_name}.') raise RuntimeError(f'Pretrained weights ({pretrained}) not found for model {model_name}.') model.to(device=device) if precision == "fp16": assert device.type != 'cpu' convert_weights_to_fp16(model) if jit: model = torch.jit.script(model) return model def create_model_and_transforms( model_name: str, pretrained: str = '', precision: str = 'fp32', device: torch.device = torch.device('cpu'), jit: bool = False, force_quick_gelu: bool = False, pretrained_image: bool = False, mean: Optional[Tuple[float, ...]] = None, std: Optional[Tuple[float, ...]] = None, ): model = create_model( model_name, pretrained, precision, device, jit, force_quick_gelu=force_quick_gelu, pretrained_image=pretrained_image) preprocess_train = image_transform(model.visual.image_size, is_train=True, mean=mean, std=std) preprocess_val = image_transform(model.visual.image_size, is_train=False, mean=mean, std=std) return model, preprocess_train, preprocess_val def list_models(): """ enumerate available model architectures based on config files """ return list(_MODEL_CONFIGS.keys()) def add_model_config(path): """ add model config path or file and update registry """ if not isinstance(path, Path): path = Path(path) _MODEL_CONFIG_PATHS.append(path) _rescan_model_configs()
KosmosX-API-main
kosmosX/open_clip/src/open_clip/factory.py
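One detail of factory.load_state_dict that is easy to miss is the stripping of the 'module.' prefix that torch.nn.parallel.DistributedDataParallel adds when a checkpoint is saved from the wrapped model. A self-contained illustration of that step (the tiny Linear module is just a stand-in):

import torch.nn as nn

net = nn.Linear(4, 2)
ddp_style = {'module.' + k: v for k, v in net.state_dict().items()}  # simulate a checkpoint saved from DDP

# the same check and slice used in load_state_dict above
if next(iter(ddp_style.items()))[0].startswith('module'):
    cleaned = {k[7:]: v for k, v in ddp_style.items()}

print(list(cleaned.keys()))   # ['weight', 'bias']
net.load_state_dict(cleaned)  # loads cleanly again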
""" CLIP Model Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ from collections import OrderedDict from dataclasses import dataclass import logging import math from typing import Tuple, Union, Callable, Optional import numpy as np import torch import torch.nn.functional as F from torch import nn from torch.utils.checkpoint import checkpoint from .timm_model import TimmModel from .utils import freeze_batch_norm_2d, to_2tuple from argparse import Namespace from torchscale.component.multihead_attention import MultiheadAttention class Bottleneck(nn.Module): expansion = 4 def __init__(self, inplanes, planes, stride=1): super().__init__() # all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1 self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False) self.bn1 = nn.BatchNorm2d(planes) self.relu1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(planes) self.relu2 = nn.ReLU(inplace=True) self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity() self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False) self.bn3 = nn.BatchNorm2d(planes * self.expansion) self.relu3 = nn.ReLU(inplace=True) self.downsample = None self.stride = stride if stride > 1 or inplanes != planes * Bottleneck.expansion: # downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1 self.downsample = nn.Sequential(OrderedDict([ ("-1", nn.AvgPool2d(stride)), ("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)), ("1", nn.BatchNorm2d(planes * self.expansion)) ])) def forward(self, x: torch.Tensor): identity = x out = self.relu1(self.bn1(self.conv1(x))) out = self.relu2(self.bn2(self.conv2(out))) out = self.avgpool(out) out = self.bn3(self.conv3(out)) if self.downsample is not None: identity = self.downsample(x) out += identity out = self.relu3(out) return out class AttentionPool2d(nn.Module): def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None): super().__init__() self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5) self.k_proj = nn.Linear(embed_dim, embed_dim) self.q_proj = nn.Linear(embed_dim, embed_dim) self.v_proj = nn.Linear(embed_dim, embed_dim) self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim) self.num_heads = num_heads def forward(self, x): x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC x, _ = F.multi_head_attention_forward( query=x, key=x, value=x, embed_dim_to_check=x.shape[-1], num_heads=self.num_heads, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, in_proj_weight=None, in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]), bias_k=None, bias_v=None, add_zero_attn=False, dropout_p=0, out_proj_weight=self.c_proj.weight, out_proj_bias=self.c_proj.bias, use_separate_proj_weight=True, training=self.training, need_weights=False ) return x[0] class ModifiedResNet(nn.Module): """ A ResNet class that is similar to torchvision's but contains the following changes: - There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool. 
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1 - The final pooling layer is a QKV attention instead of an average pool """ def __init__(self, layers, output_dim, heads, image_size=224, width=64): super().__init__() self.output_dim = output_dim self.image_size = image_size # the 3-layer stem self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False) self.bn1 = nn.BatchNorm2d(width // 2) self.relu1 = nn.ReLU(inplace=True) self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False) self.bn2 = nn.BatchNorm2d(width // 2) self.relu2 = nn.ReLU(inplace=True) self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False) self.bn3 = nn.BatchNorm2d(width) self.relu3 = nn.ReLU(inplace=True) self.avgpool = nn.AvgPool2d(2) # residual layers self._inplanes = width # this is a *mutable* variable used during construction self.layer1 = self._make_layer(width, layers[0]) self.layer2 = self._make_layer(width * 2, layers[1], stride=2) self.layer3 = self._make_layer(width * 4, layers[2], stride=2) self.layer4 = self._make_layer(width * 8, layers[3], stride=2) embed_dim = width * 32 # the ResNet feature dimension self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim) self.init_parameters() def _make_layer(self, planes, blocks, stride=1): layers = [Bottleneck(self._inplanes, planes, stride)] self._inplanes = planes * Bottleneck.expansion for _ in range(1, blocks): layers.append(Bottleneck(self._inplanes, planes)) return nn.Sequential(*layers) def init_parameters(self): if self.attnpool is not None: std = self.attnpool.c_proj.in_features ** -0.5 nn.init.normal_(self.attnpool.q_proj.weight, std=std) nn.init.normal_(self.attnpool.k_proj.weight, std=std) nn.init.normal_(self.attnpool.v_proj.weight, std=std) nn.init.normal_(self.attnpool.c_proj.weight, std=std) for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]: for name, param in resnet_block.named_parameters(): if name.endswith("bn3.weight"): nn.init.zeros_(param) def lock(self, unlocked_groups=0, freeze_bn_stats=False): assert unlocked_groups == 0, 'partial locking not currently supported for this model' for param in self.parameters(): param.requires_grad = False if freeze_bn_stats: freeze_batch_norm_2d(self) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): # FIXME support for non-transformer pass def stem(self, x): x = self.relu1(self.bn1(self.conv1(x))) x = self.relu2(self.bn2(self.conv2(x))) x = self.relu3(self.bn3(self.conv3(x))) x = self.avgpool(x) return x def forward(self, x): x = self.stem(x) x = self.layer1(x) x = self.layer2(x) x = self.layer3(x) x = self.layer4(x) x = self.attnpool(x) return x class LayerNorm(nn.LayerNorm): """Subclass torch's LayerNorm to handle fp16.""" def forward(self, x: torch.Tensor): orig_type = x.dtype x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) return x.to(orig_type) class QuickGELU(nn.Module): # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory def forward(self, x: torch.Tensor): return x * torch.sigmoid(1.702 * x) class ResidualAttentionBlock(nn.Module): def __init__(self, d_model: int, n_head: int, mlp_ratio: float = 4.0, act_layer: Callable = nn.GELU): super().__init__() self.attn = nn.MultiheadAttention(d_model, n_head) args = Namespace(**{'scale_length': 0, 'multiway': False, 'flash_attention': True}) self.ts_attn = MultiheadAttention(args, d_model, n_head, self_attention=True) 
self.ln_1 = LayerNorm(d_model) mlp_width = int(d_model * mlp_ratio) self.mlp = nn.Sequential(OrderedDict([ ("c_fc", nn.Linear(d_model, mlp_width)), ("gelu", act_layer()), ("c_proj", nn.Linear(mlp_width, d_model)) ])) self.ln_2 = LayerNorm(d_model) def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None): # orginal_val = self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0] ts_val = self.ts_attn(x, x, x, attn_mask=attn_mask)[0] return ts_val def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None): x = x + self.attention(self.ln_1(x), attn_mask=attn_mask) x = x + self.mlp(self.ln_2(x)) return x class Transformer(nn.Module): def __init__(self, width: int, layers: int, heads: int, mlp_ratio: float = 4.0, act_layer: Callable = nn.GELU): super().__init__() self.width = width self.layers = layers self.grad_checkpointing = False self.resblocks = nn.ModuleList([ ResidualAttentionBlock(width, heads, mlp_ratio, act_layer=act_layer) for _ in range(layers) ]) def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None): for r in self.resblocks: if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint(r, x, attn_mask) else: x = r(x, attn_mask=attn_mask) return x class VisualTransformer(nn.Module): def __init__( self, image_size: int, patch_size: int, width: int, layers: int, heads: int, mlp_ratio: float, output_dim: int, act_layer: Callable = nn.GELU): super().__init__() self.image_size = to_2tuple(image_size) self.patch_size = to_2tuple(patch_size) self.grid_size = (self.image_size[0] // self.patch_size[0], self.image_size[1] // self.patch_size[1]) self.output_dim = output_dim self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False) scale = width ** -0.5 self.class_embedding = nn.Parameter(scale * torch.randn(width)) self.positional_embedding = nn.Parameter(scale * torch.randn(self.grid_size[0] * self.grid_size[1] + 1, width)) self.ln_pre = LayerNorm(width) self.transformer = Transformer(width, layers, heads, mlp_ratio, act_layer=act_layer) self.ln_post = LayerNorm(width) self.proj = nn.Parameter(scale * torch.randn(width, output_dim)) def lock(self, unlocked_groups=0, freeze_bn_stats=False): assert unlocked_groups == 0, 'partial locking not currently supported for this model' for param in self.parameters(): param.requires_grad = False @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.transformer.grad_checkpointing = enable def forward(self, x: torch.Tensor): x = self.conv1(x) # shape = [*, width, grid, grid] x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2] x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width] x = torch.cat( [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1) # shape = [*, grid ** 2 + 1, width] x = x + self.positional_embedding.to(x.dtype) x = self.ln_pre(x) x = x.permute(1, 0, 2) # NLD -> LND x = self.transformer(x) x = x.permute(1, 0, 2) # LND -> NLD x = self.ln_post(x[:, 0, :]) if self.proj is not None: x = x @ self.proj return x @dataclass class CLIPVisionCfg: layers: Union[Tuple[int, int, int, int], int] = 12 width: int = 768 head_width: int = 64 mlp_ratio: float = 4.0 patch_size: int = 16 image_size: Union[Tuple[int, int], int] = 224 timm_model_name: str = None # a valid model name overrides layers, width, patch_size timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model timm_pool: str = 'avg' # 
feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '') timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '') @dataclass class CLIPTextCfg: context_length: int = 77 vocab_size: int = 49408 width: int = 512 heads: int = 8 layers: int = 12 class CLIP(nn.Module): def __init__( self, embed_dim: int, vision_cfg: CLIPVisionCfg, text_cfg: CLIPTextCfg, quick_gelu: bool = False, ): super().__init__() if isinstance(vision_cfg, dict): vision_cfg = CLIPVisionCfg(**vision_cfg) if isinstance(text_cfg, dict): text_cfg = CLIPTextCfg(**text_cfg) self.context_length = text_cfg.context_length # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more # memory efficient in recent PyTorch releases (>= 1.10). # NOTE: timm models always use native GELU regardless of quick_gelu flag. act_layer = QuickGELU if quick_gelu else nn.GELU if vision_cfg.timm_model_name: self.visual = TimmModel( vision_cfg.timm_model_name, pretrained=vision_cfg.timm_model_pretrained, pool=vision_cfg.timm_pool, proj=vision_cfg.timm_proj, embed_dim=embed_dim, image_size=vision_cfg.image_size ) act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models elif isinstance(vision_cfg.layers, (tuple, list)): vision_heads = vision_cfg.width * 32 // vision_cfg.head_width self.visual = ModifiedResNet( layers=vision_cfg.layers, output_dim=embed_dim, heads=vision_heads, image_size=vision_cfg.image_size, width=vision_cfg.width ) else: vision_heads = vision_cfg.width // vision_cfg.head_width self.visual = VisualTransformer( image_size=vision_cfg.image_size, patch_size=vision_cfg.patch_size, width=vision_cfg.width, layers=vision_cfg.layers, heads=vision_heads, mlp_ratio=vision_cfg.mlp_ratio, output_dim=embed_dim, act_layer=act_layer, ) self.transformer = Transformer( width=text_cfg.width, layers=text_cfg.layers, heads=text_cfg.heads, act_layer=act_layer, ) self.vocab_size = text_cfg.vocab_size self.token_embedding = nn.Embedding(text_cfg.vocab_size, text_cfg.width) self.positional_embedding = nn.Parameter(torch.empty(self.context_length, text_cfg.width)) self.ln_final = LayerNorm(text_cfg.width) self.text_projection = nn.Parameter(torch.empty(text_cfg.width, embed_dim)) self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07)) self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False) self.init_parameters() def init_parameters(self): nn.init.normal_(self.token_embedding.weight, std=0.02) nn.init.normal_(self.positional_embedding, std=0.01) nn.init.constant_(self.logit_scale, np.log(1 / 0.07)) if hasattr(self.visual, 'init_parameters'): self.visual.init_parameters() proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5) attn_std = self.transformer.width ** -0.5 fc_std = (2 * self.transformer.width) ** -0.5 for block in self.transformer.resblocks: nn.init.normal_(block.attn.in_proj_weight, std=attn_std) nn.init.normal_(block.attn.out_proj.weight, std=proj_std) nn.init.normal_(block.mlp.c_fc.weight, std=fc_std) nn.init.normal_(block.mlp.c_proj.weight, std=proj_std) if self.text_projection is not None: nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5) def build_attention_mask(self): # lazily create causal attention mask, with full attention between the vision tokens # pytorch uses additive attention mask; fill with -inf mask = torch.empty(self.context_length, self.context_length) mask.fill_(float("-inf")) mask.triu_(1) # zero out the lower diagonal return mask def 
lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False): # lock image tower as per LiT - https://arxiv.org/abs/2111.07991 self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats) @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.visual.set_grad_checkpointing(enable) self.transformer.grad_checkpointing = enable def encode_image(self, image): return self.visual(image) def encode_text(self, text): x = self.token_embedding(text) # [batch_size, n_ctx, d_model] x = x + self.positional_embedding x = x.permute(1, 0, 2) # NLD -> LND x = self.transformer(x, attn_mask=self.attn_mask) x = x.permute(1, 0, 2) # LND -> NLD x = self.ln_final(x) # x.shape = [batch_size, n_ctx, transformer.width] # take features from the eot embedding (eot_token is the highest number in each sequence) x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection return x def forward(self, image, text): if image is None: return self.encode_text(text) elif text is None: return self.encode_image(image) image_features = self.encode_image(image) image_features = F.normalize(image_features, dim=-1) text_features = self.encode_text(text) text_features = F.normalize(text_features, dim=-1) return image_features, text_features, self.logit_scale.exp() def convert_weights_to_fp16(model: nn.Module): """Convert applicable model parameters to fp16""" def _convert_weights_to_fp16(l): if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): l.weight.data = l.weight.data.half() if l.bias is not None: l.bias.data = l.bias.data.half() if isinstance(l, nn.MultiheadAttention): for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]: tensor = getattr(l, attr) if tensor is not None: tensor.data = tensor.data.half() for name in ["text_projection", "proj"]: if hasattr(l, name): attr = getattr(l, name) if attr is not None: attr.data = attr.data.half() model.apply(_convert_weights_to_fp16) def build_model_from_openai_state_dict(state_dict: dict): vit = "visual.proj" in state_dict if vit: vision_width = state_dict["visual.conv1.weight"].shape[0] vision_layers = len( [k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")]) vision_patch_size = state_dict["visual.conv1.weight"].shape[-1] grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5) image_size = vision_patch_size * grid_size else: counts: list = [ len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]] vision_layers = tuple(counts) vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0] output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5) vision_patch_size = None assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0] image_size = output_width * 32 embed_dim = state_dict["text_projection"].shape[1] context_length = state_dict["positional_embedding"].shape[0] vocab_size = state_dict["token_embedding.weight"].shape[0] transformer_width = state_dict["ln_final.weight"].shape[0] transformer_heads = transformer_width // 64 transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith("transformer.resblocks"))) vision_cfg = CLIPVisionCfg( layers=vision_layers, width=vision_width, patch_size=vision_patch_size, image_size=image_size, ) text_cfg = CLIPTextCfg( context_length=context_length, vocab_size=vocab_size, width=transformer_width, heads=transformer_heads, 
layers=transformer_layers ) model = CLIP( embed_dim, vision_cfg=vision_cfg, text_cfg=text_cfg, quick_gelu=True, # OpenAI models were trained with QuickGELU ) for key in ["input_resolution", "context_length", "vocab_size"]: state_dict.pop(key, None) convert_weights_to_fp16(model) model.load_state_dict(state_dict) return model.eval() def trace_model(model, batch_size=256, device=torch.device('cpu')): model.eval() image_size = model.visual.image_size example_images = torch.ones((batch_size, 3, image_size, image_size), device=device) example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device) model = torch.jit.trace_module( model, inputs=dict( forward=(example_images, example_text), encode_text=(example_text,), encode_image=(example_images,) )) model.visual.image_size = image_size return model def resize_pos_embed(state_dict, model, interpolation: str = 'bicubic', seq_dim=1): # Rescale the grid of position embeddings when loading from state_dict old_pos_embed = state_dict.get('visual.positional_embedding', None) if old_pos_embed is None or not hasattr(model.visual, 'grid_size'): return grid_size = to_2tuple(model.visual.grid_size) extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more) new_seq_len = grid_size[0] * grid_size[1] + extra_tokens if new_seq_len == old_pos_embed.shape[0]: return if extra_tokens: pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:] else: pos_emb_tok, pos_emb_img = None, old_pos_embed old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img)))) logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size) pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2) pos_emb_img = F.interpolate( pos_emb_img, size=grid_size, mode=interpolation, align_corners=True, ) pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0] if pos_emb_tok is not None: new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0) else: new_pos_embed = pos_emb_img state_dict['visual.positional_embedding'] = new_pos_embed
KosmosX-API-main
kosmosX/open_clip/src/open_clip/model.py
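The quick_gelu flag that runs through model.py and factory.py exists because the OpenAI checkpoints were trained with a sigmoid approximation of GELU. A self-contained comparison of the two activations (no open_clip import needed):

import torch
import torch.nn as nn

def quick_gelu(x):
    # same expression as the QuickGELU module above
    return x * torch.sigmoid(1.702 * x)

x = torch.linspace(-3, 3, 7)
print(quick_gelu(x))
print(nn.GELU()(x))
# the two curves differ slightly; that is why build_model_from_openai_state_dict builds the
# model with quick_gelu=True, while newly trained models default to nn.GELU.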
""" CLIP tokenizer Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ import gzip import html import os from functools import lru_cache from typing import Union, List import ftfy import regex as re import torch @lru_cache() def default_bpe(): return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") @lru_cache() def bytes_to_unicode(): """ Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a signficant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on. """ bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1)) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8+n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) def get_pairs(word): """Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs def basic_clean(text): text = ftfy.fix_text(text) text = html.unescape(html.unescape(text)) return text.strip() def whitespace_clean(text): text = re.sub(r'\s+', ' ', text) text = text.strip() return text class SimpleTokenizer(object): def __init__(self, bpe_path: str = default_bpe(), special_tokens=None): self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} merges = gzip.open(bpe_path).read().decode("utf-8").split('\n') merges = merges[1:49152-256-2+1] merges = [tuple(merge.split()) for merge in merges] vocab = list(bytes_to_unicode().values()) vocab = vocab + [v+'</w>' for v in vocab] for merge in merges: vocab.append(''.join(merge)) if not special_tokens: special_tokens = ['<start_of_text>', '<end_of_text>'] else: special_tokens = ['<start_of_text>', '<end_of_text>'] + special_tokens vocab.extend(special_tokens) self.encoder = dict(zip(vocab, range(len(vocab)))) self.decoder = {v: k for k, v in self.encoder.items()} self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {t:t for t in special_tokens} special = "|".join(special_tokens) self.pat = re.compile(special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE) self.vocab_size = len(self.encoder) self.all_special_ids = [self.encoder[t] for t in special_tokens] def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token[:-1]) + ( token[-1] + '</w>',) pairs = get_pairs(word) if not pairs: return token+'</w>' while True: bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf'))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) new_word.extend(word[i:j]) i = j except: new_word.extend(word[i:]) break if word[i] == first and i < len(word)-1 and word[i+1] == second: new_word.append(first+second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = ' '.join(word) 
self.cache[token] = word return word def encode(self, text): bpe_tokens = [] text = whitespace_clean(basic_clean(text)).lower() for token in re.findall(self.pat, text): token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8')) bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' ')) return bpe_tokens def decode(self, tokens): text = ''.join([self.decoder[token] for token in tokens]) text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ') return text _tokenizer = SimpleTokenizer() def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor: """ Returns the tokenized representation of given input string(s) Parameters ---------- texts : Union[str, List[str]] An input string or a list of input strings to tokenize context_length : int The context length to use; all CLIP models use 77 as the context length Returns ------- A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length] """ if isinstance(texts, str): texts = [texts] sot_token = _tokenizer.encoder["<start_of_text>"] eot_token = _tokenizer.encoder["<end_of_text>"] all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts] result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) for i, tokens in enumerate(all_tokens): if len(tokens) > context_length: tokens = tokens[:context_length] # Truncate tokens[-1] = eot_token result[i, :len(tokens)] = torch.tensor(tokens) return result
KosmosX-API-main
kosmosX/open_clip/src/open_clip/tokenizer.py
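A usage sketch for the tokenizer, assuming ftfy and regex are installed (both are imported at module load):

from open_clip.tokenizer import tokenize, _tokenizer

ids = tokenize(["a photo of a cat", "a diagram of a transformer"], context_length=77)
print(ids.shape, ids.dtype)   # torch.Size([2, 77]) torch.int64, zero-padded on the right
n = int((ids[0] != 0).sum())  # real tokens: <start_of_text> + BPE tokens + <end_of_text>
print(_tokenizer.decode(ids[0, 1:n - 1].tolist()))  # round-trips to 'a photo of a cat '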
import torch import torch.nn as nn from torch.nn import functional as F try: import torch.distributed.nn from torch import distributed as dist has_distributed = True except ImportError: has_distributed = False try: import horovod.torch as hvd except ImportError: hvd = None def gather_features( image_features, text_features, local_loss=False, gather_with_grad=False, rank=0, world_size=1, use_horovod=False ): assert has_distributed, 'torch.distributed did not import correctly, please use a PyTorch version with support.' if use_horovod: assert hvd is not None, 'Please install horovod' if gather_with_grad: all_image_features = hvd.allgather(image_features) all_text_features = hvd.allgather(text_features) else: with torch.no_grad(): all_image_features = hvd.allgather(image_features) all_text_features = hvd.allgather(text_features) if not local_loss: # ensure grads for local rank when all_* features don't have a gradient gathered_image_features = list(all_image_features.chunk(world_size, dim=0)) gathered_text_features = list(all_text_features.chunk(world_size, dim=0)) gathered_image_features[rank] = image_features gathered_text_features[rank] = text_features all_image_features = torch.cat(gathered_image_features, dim=0) all_text_features = torch.cat(gathered_text_features, dim=0) else: # We gather tensors from all gpus if gather_with_grad: all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features), dim=0) all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features), dim=0) else: gathered_image_features = [torch.zeros_like(image_features) for _ in range(world_size)] gathered_text_features = [torch.zeros_like(text_features) for _ in range(world_size)] dist.all_gather(gathered_image_features, image_features) dist.all_gather(gathered_text_features, text_features) if not local_loss: # ensure grads for local rank when all_* features don't have a gradient gathered_image_features[rank] = image_features gathered_text_features[rank] = text_features all_image_features = torch.cat(gathered_image_features, dim=0) all_text_features = torch.cat(gathered_text_features, dim=0) return all_image_features, all_text_features class ClipLoss(nn.Module): def __init__( self, local_loss=False, gather_with_grad=False, cache_labels=False, rank=0, world_size=1, use_horovod=False, ): super().__init__() self.local_loss = local_loss self.gather_with_grad = gather_with_grad self.cache_labels = cache_labels self.rank = rank self.world_size = world_size self.use_horovod = use_horovod # cache state self.prev_num_logits = 0 self.labels = {} def forward(self, image_features, text_features, logit_scale): device = image_features.device if self.world_size > 1: all_image_features, all_text_features = gather_features( image_features, text_features, self.local_loss, self.gather_with_grad, self.rank, self.world_size, self.use_horovod) if self.local_loss: logits_per_image = logit_scale * image_features @ all_text_features.T logits_per_text = logit_scale * text_features @ all_image_features.T else: logits_per_image = logit_scale * all_image_features @ all_text_features.T logits_per_text = logits_per_image.T else: logits_per_image = logit_scale * image_features @ text_features.T logits_per_text = logit_scale * text_features @ image_features.T # calculated ground-truth and cache if enabled num_logits = logits_per_image.shape[0] if self.prev_num_logits != num_logits or device not in self.labels: labels = torch.arange(num_logits, device=device, dtype=torch.long) if self.world_size > 1 and self.local_loss: 
labels = labels + num_logits * self.rank if self.cache_labels: self.labels[device] = labels self.prev_num_logits = num_logits else: labels = self.labels[device] total_loss = ( F.cross_entropy(logits_per_image, labels) + F.cross_entropy(logits_per_text, labels) ) / 2 return total_loss
KosmosX-API-main
kosmosX/open_clip/src/open_clip/loss.py
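ClipLoss is usable outside distributed training: with the default world_size=1 it reduces to the symmetric InfoNCE loss over a single batch. A small sketch with random, already-normalized features:

import torch
import torch.nn.functional as F
from open_clip.loss import ClipLoss

loss_fn = ClipLoss()  # defaults: single process, no feature gathering
image_features = F.normalize(torch.randn(8, 512), dim=-1)
text_features = F.normalize(torch.randn(8, 512), dim=-1)
loss = loss_fn(image_features, text_features, logit_scale=1.0)
print(loss)  # close to log(8) ~ 2.08 here, since random features give a near-uniform 8x8 similarity matrix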
""" OpenAI pretrained model functions Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ import os import warnings from typing import Union, List import torch from .model import build_model_from_openai_state_dict from .pretrained import get_pretrained_url, list_pretrained_tag_models, download_pretrained __all__ = ["list_openai_models", "load_openai_model"] def list_openai_models() -> List[str]: """Returns the names of available CLIP models""" return list_pretrained_tag_models('openai') def load_openai_model( name: str, device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu", jit=True, ): """Load a CLIP model Parameters ---------- name : str A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict device : Union[str, torch.device] The device to put the loaded model jit : bool Whether to load the optimized JIT model (default) or more hackable non-JIT model. Returns ------- model : torch.nn.Module The CLIP model preprocess : Callable[[PIL.Image], torch.Tensor] A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input """ if get_pretrained_url(name, 'openai'): model_path = download_pretrained(get_pretrained_url(name, 'openai')) elif os.path.isfile(name): model_path = name else: raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}") try: # loading JIT archive model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval() state_dict = None except RuntimeError: # loading saved state dict if jit: warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead") jit = False state_dict = torch.load(model_path, map_location="cpu") if not jit: try: model = build_model_from_openai_state_dict(state_dict or model.state_dict()).to(device) except KeyError: sd = {k[7:]: v for k, v in state_dict["state_dict"].items()} model = build_model_from_openai_state_dict(sd).to(device) if str(device) == "cpu": model.float() return model # patch the device names device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[]) device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1] def patch_device(module): try: graphs = [module.graph] if hasattr(module, "graph") else [] except RuntimeError: graphs = [] if hasattr(module, "forward1"): graphs.append(module.forward1.graph) for graph in graphs: for node in graph.findAllNodes("prim::Constant"): if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"): node.copyAttributes(device_node) model.apply(patch_device) patch_device(model.encode_image) patch_device(model.encode_text) # patch dtype to float32 on CPU if str(device) == "cpu": float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[]) float_input = list(float_holder.graph.findNode("aten::to").inputs())[1] float_node = float_input.node() def patch_float(module): try: graphs = [module.graph] if hasattr(module, "graph") else [] except RuntimeError: graphs = [] if hasattr(module, "forward1"): graphs.append(module.forward1.graph) for graph in graphs: for node in graph.findAllNodes("aten::to"): inputs = list(node.inputs()) for i in [1, 2]: # dtype can be the second or third argument to aten::to() if inputs[i].node()["value"] == 5: inputs[i].node().copyAttributes(float_node) model.apply(patch_float) patch_float(model.encode_image) 
patch_float(model.encode_text) model.float() # ensure image_size attr available at consistent location for both jit and non-jit model.visual.image_size = model.input_resolution.item() return model
KosmosX-API-main
kosmosX/open_clip/src/open_clip/openai.py
from itertools import repeat import collections.abc from torch import nn as nn from torchvision.ops.misc import FrozenBatchNorm2d def freeze_batch_norm_2d(module, module_match={}, name=''): """ Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and returned. Otherwise, the module is walked recursively and submodules are converted in place. Args: module (torch.nn.Module): Any PyTorch module. module_match (dict): Dictionary of full module names to freeze (all if empty) name (str): Full module name (prefix) Returns: torch.nn.Module: Resulting module Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762 """ res = module is_match = True if module_match: is_match = name in module_match if is_match and isinstance(module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)): res = FrozenBatchNorm2d(module.num_features) res.num_features = module.num_features res.affine = module.affine if module.affine: res.weight.data = module.weight.data.clone().detach() res.bias.data = module.bias.data.clone().detach() res.running_mean.data = module.running_mean.data res.running_var.data = module.running_var.data res.eps = module.eps else: for child_name, child in module.named_children(): full_child_name = '.'.join([name, child_name]) if name else child_name new_child = freeze_batch_norm_2d(child, module_match, full_child_name) if new_child is not child: res.add_module(child_name, new_child) return res # From PyTorch internals def _ntuple(n): def parse(x): if isinstance(x, collections.abc.Iterable): return x return tuple(repeat(x, n)) return parse to_1tuple = _ntuple(1) to_2tuple = _ntuple(2) to_3tuple = _ntuple(3) to_4tuple = _ntuple(4) def to_ntuple(n, x): return _ntuple(n)(x)
KosmosX-API-main
kosmosX/open_clip/src/open_clip/utils.py
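A short sketch of the two helpers above, assuming torchvision is installed (FrozenBatchNorm2d comes from torchvision.ops):

import torch.nn as nn
from open_clip.utils import freeze_batch_norm_2d, to_2tuple

print(to_2tuple(224))         # (224, 224)
print(to_2tuple((224, 196)))  # iterables pass through unchanged

net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
frozen = freeze_batch_norm_2d(net)  # converts BatchNorm2d submodules in place
print(type(frozen[1]).__name__)     # FrozenBatchNorm2d: stats and affine params no longer update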
from typing import Optional, Tuple import torch import torch.nn as nn import torchvision.transforms.functional as F from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \ CenterCrop class ResizeMaxSize(nn.Module): def __init__(self, max_size, interpolation=InterpolationMode.BICUBIC, fn='max', fill=0): super().__init__() if not isinstance(max_size, int): raise TypeError(f"Size should be int. Got {type(max_size)}") self.max_size = max_size self.interpolation = interpolation self.fn = min if fn == 'min' else max self.fill = fill def forward(self, img): if isinstance(img, torch.Tensor): height, width = img.shape[:2] else: width, height = img.size scale = self.max_size / float(max(height, width)) if scale != 1.0: new_size = tuple(round(dim * scale) for dim in (height, width)) img = F.resize(img, new_size, self.interpolation) pad_h = self.max_size - new_size[0] pad_w = self.max_size - new_size[1] img = F.pad(img, padding=[pad_w//2, pad_h//2, pad_w - pad_w//2, pad_h - pad_h//2], fill=self.fill) return img def _convert_to_rgb(image): return image.convert('RGB') def image_transform( image_size: int, is_train: bool, mean: Optional[Tuple[float, ...]] = None, std: Optional[Tuple[float, ...]] = None, resize_longest_max: bool = False, fill_color: int = 0, ): mean = mean or (0.48145466, 0.4578275, 0.40821073) # OpenAI dataset mean std = std or (0.26862954, 0.26130258, 0.27577711) # OpenAI dataset std if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]: # for square size, pass size as int so that Resize() uses aspect preserving shortest edge image_size = image_size[0] normalize = Normalize(mean=mean, std=std) if is_train: return Compose([ RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC), _convert_to_rgb, ToTensor(), normalize, ]) else: if resize_longest_max: transforms = [ ResizeMaxSize(image_size, fill=fill_color) ] else: transforms = [ Resize(image_size, interpolation=InterpolationMode.BICUBIC), CenterCrop(image_size), ] transforms.extend([ _convert_to_rgb, ToTensor(), normalize, ]) return Compose(transforms)
KosmosX-API-main
kosmosX/open_clip/src/open_clip/transform.py
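A usage sketch for image_transform, using a synthetic PIL image in place of a real file; the defaults normalize with the OpenAI dataset mean/std noted above:

from PIL import Image
from open_clip.transform import image_transform

preprocess = image_transform(224, is_train=False)        # Resize -> CenterCrop -> RGB -> ToTensor -> Normalize
img = Image.new('RGB', (640, 480), color=(128, 64, 32))  # stand-in for a dataset image
tensor = preprocess(img)
print(tensor.shape, tensor.dtype)  # torch.Size([3, 224, 224]) torch.float32
# is_train=True instead returns a RandomResizedCrop pipeline for augmentation during training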
""" timm model adapter Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model. """ from collections import OrderedDict import torch.nn as nn try: import timm from timm.models.layers import Mlp, to_2tuple from timm.models.layers.attention_pool2d import RotAttentionPool2d from timm.models.layers.attention_pool2d import AttentionPool2d as AbsAttentionPool2d except ImportError: timm = None from .utils import freeze_batch_norm_2d class TimmModel(nn.Module): """ timm model adapter # FIXME this adapter is a work in progress, may change in ways that break weight compat """ def __init__( self, model_name, embed_dim, image_size=224, pool='avg', proj='linear', drop=0., pretrained=False): super().__init__() if timm is None: raise RuntimeError("Please `pip install timm` to use timm models.") self.image_size = to_2tuple(image_size) self.trunk = timm.create_model(model_name, pretrained=pretrained) feat_size = self.trunk.default_cfg.get('pool_size', None) feature_ndim = 1 if not feat_size else 2 if pool in ('abs_attn', 'rot_attn'): assert feature_ndim == 2 # if attn pooling used, remove both classifier and default pool self.trunk.reset_classifier(0, global_pool='') else: # reset global pool if pool config set, otherwise leave as network default reset_kwargs = dict(global_pool=pool) if pool else {} self.trunk.reset_classifier(0, **reset_kwargs) prev_chs = self.trunk.num_features head_layers = OrderedDict() if pool == 'abs_attn': head_layers['pool'] = AbsAttentionPool2d(prev_chs, feat_size=feat_size, out_features=embed_dim) prev_chs = embed_dim elif pool == 'rot_attn': head_layers['pool'] = RotAttentionPool2d(prev_chs, out_features=embed_dim) prev_chs = embed_dim else: assert proj, 'projection layer needed if non-attention pooling is used.' # NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used if proj == 'linear': head_layers['drop'] = nn.Dropout(drop) head_layers['proj'] = nn.Linear(prev_chs, embed_dim) elif proj == 'mlp': head_layers['mlp'] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop) self.head = nn.Sequential(head_layers) def lock(self, unlocked_groups=0, freeze_bn_stats=False): """ lock modules Args: unlocked_groups (int): leave last n layer groups unlocked (default: 0) """ if not unlocked_groups: # lock full model for param in self.trunk.parameters(): param.requires_grad = False if freeze_bn_stats: freeze_batch_norm_2d(self.trunk) else: # NOTE: partial freeze requires latest timm (master) branch and is subject to change try: # FIXME import here until API stable and in an official release from timm.models.helpers import group_parameters, group_modules except ImportError: raise RuntimeError( 'Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`') matcher = self.trunk.group_matcher() gparams = group_parameters(self.trunk, matcher) max_layer_id = max(gparams.keys()) max_layer_id = max_layer_id - unlocked_groups for group_idx in range(max_layer_id + 1): group = gparams[group_idx] for param in group: self.trunk.get_parameter(param).requires_grad = False if freeze_bn_stats: gmodules = group_modules(self.trunk, matcher, reverse=True) gmodules = {k for k, v in gmodules.items() if v <= max_layer_id} freeze_batch_norm_2d(self.trunk, gmodules) def forward(self, x): x = self.trunk(x) x = self.head(x) return x
KosmosX-API-main
kosmosX/open_clip/src/open_clip/timm_model.py
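A usage sketch for the timm adapter, assuming a timm version compatible with the imports above is installed; 'resnet18' is an arbitrary small backbone chosen for illustration:

import torch
from open_clip.timm_model import TimmModel

tower = TimmModel('resnet18', embed_dim=512, image_size=224, pool='avg', proj='linear', pretrained=False)
x = torch.randn(1, 3, 224, 224)
print(tower(x).shape)  # torch.Size([1, 512]): pooled trunk features projected to the CLIP embed dim
tower.lock()           # freeze the whole trunk (unlocked_groups=0), as in locked image tower fine-tuning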
import requests import os import multiprocessing as mp from io import BytesIO import numpy as np import PIL from PIL import Image import sys def grab(line): """ Download a single image from the TSV. """ uid, split, line = line try: caption, url = line.split("\t")[:2] except: print("Parse error") return if os.path.exists(ROOT+"/%s/%d/%d.jpg"%(split,uid%1000,uid)): print("Finished", uid) return uid, caption, url # Let's not crash if anythign weird happens try: dat = requests.get(url, timeout=20) if dat.status_code != 200: print("404 file", url) return # Try to parse this as an Image file, we'll fail out if not im = Image.open(BytesIO(dat.content)) im.thumbnail((512, 512), PIL.Image.BICUBIC) if min(*im.size) < max(*im.size)/3: print("Too small", url) return im.save(ROOT+"/%s/%d/%d.jpg"%(split,uid%1000,uid)) # Another try/catch just because sometimes saving and re-loading # the image is different than loading it once. try: o = Image.open(ROOT+"/%s/%d/%d.jpg"%(split,uid%1000,uid)) o = np.array(o) print("Success", o.shape, uid, url) return uid, caption, url except: print("Failed", uid, url) except Exception as e: print("Unknown error", e) pass if __name__ == "__main__": ROOT = "cc_data" if not os.path.exists(ROOT): os.mkdir(ROOT) os.mkdir(os.path.join(ROOT,"train")) os.mkdir(os.path.join(ROOT,"val")) for i in range(1000): os.mkdir(os.path.join(ROOT,"train", str(i))) os.mkdir(os.path.join(ROOT,"val", str(i))) p = mp.Pool(300) for tsv in sys.argv[1:]: print("Processing file", tsv) assert 'val' in tsv.lower() or 'train' in tsv.lower() split = 'val' if 'val' in tsv.lower() else 'train' results = p.map(grab, [(i,split,x) for i,x in enumerate(open(tsv).read().split("\n"))]) out = open(tsv.replace(".tsv","_output.csv"),"w") out.write("title\tfilepath\n") for row in results: if row is None: continue id, caption, url = row fp = os.path.join(ROOT, split, str(id % 1000), str(id) + ".jpg") if os.path.exists(fp): out.write("%s\t%s\n"%(caption,fp)) else: print("Drop", id) out.close() p.close()
KosmosX-API-main
kosmosX/open_clip/src/data/gather_cc.py
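gather_cc.py expects Conceptual Captions style TSVs, one 'caption<TAB>url' pair per line, in files whose names contain 'train' or 'val'. A tiny self-contained sketch of the parsing and the on-disk layout it produces (the rows below are made up):

rows = [
    "a dog catching a frisbee\thttp://example.com/1.jpg",
    "city skyline at night\thttp://example.com/2.jpg",
]
for uid, line in enumerate(rows):
    caption, url = line.split("\t")[:2]
    # downloaded images are sharded into <ROOT>/<split>/<uid % 1000>/<uid>.jpg,
    # and the resulting "<caption>\t<filepath>" CSV is what CsvDataset consumes
    print(uid, caption, url, f"cc_data/train/{uid % 1000}/{uid}.jpg")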
import torch import unittest from rt2.model import RT2 class TestRT2(unittest.TestCase): def setUp(self): self.rt2 = RT2() self.video = torch.rand((1, 3, 10, 224, 224)) self.texts = ["This is a test"] def test_forward(self): output = self.rt2(self.video, self.texts) self.assertEqual(output.shape, (1, 10, 11, 256)) def test_forward_no_texts(self): output = self.rt2(self.video) self.assertEqual(output.shape, (1, 10, 11, 256)) def test_forward_different_video_shape(self): video = torch.rand((2, 3, 5, 224, 224)) output = self.rt2(video, self.texts) self.assertEqual(output.shape, (2, 5, 11, 256)) def test_forward_different_num_actions(self): self.rt2.num_actions = 5 output = self.rt2(self.video, self.texts) self.assertEqual(output.shape, (1, 10, 5, 256)) def test_forward_different_action_bins(self): self.rt2.action_bins = 128 output = self.rt2(self.video, self.texts) self.assertEqual(output.shape, (1, 10, 11, 128)) if __name__ == '__main__': unittest.main()
RT-2-main
test.py
from setuptools import setup, find_packages setup( name='rt2', packages=find_packages(exclude=[]), version='0.0.3', license='MIT', description='rt-2 - PyTorch', author='Kye Gomez', author_email='[email protected]', long_description_content_type='text/markdown', url='https://github.com/kyegomez/rt-2', keywords=[ 'artificial intelligence', 'deep learning', 'optimizers', 'Prompt Engineering' ], install_requires=[ 'transformers', 'torch', 'einops', 'beartype', 'palme', 'palm-rlhf-pytorch', 'tokenizers', 'wandb', 'classifier-free-guidance-pytorch' ], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.6', ], )
RT-2-main
setup.py
import torch from rt2.model import RT2 model = RT2() video = torch.randn(2, 3, 6, 224, 224) instructions = [ 'bring me that apple sitting on the table', 'please pass the butter' ] # compute the train logits train_logits = model.train(video, instructions) # set the model to evaluation mode model.model.eval() # compute the eval logits with a conditional scale of 3 eval_logits = model.eval(video, instructions, cond_scale=3.)
RT-2-main
example.py
from rt2.model import RT2
RT-2-main
rt2/__init__.py
import torch from rt2.transformer import ( AutoregressiveWrapper, Decoder, Encoder, Transformer, ViTransformerWrapper, ) class PalmE(torch.nn.Module): def __init__(self, image_size=256, patch_size=32, encoder_dim=512, encoder_depth=6, encoder_heads=8, num_tokens=20000, max_seq_len=1024, decoder_dim=512, decoder_depth=6, decoder_heads=8, alibi_num_heads=4, attn_kv_heads = 2, use_abs_pos_emb=False, cross_attend=True, alibi_pos_bias=True, rotary_xpos=True, attn_flash=True, qk_norm=True): super(PalmE, self).__init__() self.encoder = ViTransformerWrapper( image_size=image_size, patch_size=patch_size, attn_layers=Encoder( dim=encoder_dim, depth=encoder_depth, heads=encoder_heads ) ) self.decoder = Transformer( num_tokens=num_tokens, max_seq_len=max_seq_len, use_abs_pos_emb=use_abs_pos_emb, attn_layers=Decoder( dim=decoder_dim, depth=decoder_depth, heads=decoder_heads, cross_attend=cross_attend, alibi_pos_bias=alibi_pos_bias, alibi_num_heads=alibi_num_heads, rotary_xpos=rotary_xpos, attn_kv_heads=attn_kv_heads, attn_flash=attn_flash, qk_norm=qk_norm, ) ) self.decoder = AutoregressiveWrapper(self.decoder) def forward(self, img, text): try: encoded = self.encoder(img, return_embeddings=True) return self.decoder(text, context=encoded) except Exception as error: print(f"Failed in forward method: {error}") raise
RT-2-main
rt2/palme.py
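PalmE wires the ViT encoder into a cross-attending autoregressive decoder. The sketch below is a minimal usage example, assuming the AutoregressiveWrapper in rt2/transformer.py follows the usual convention of taking integer token ids of shape (batch, seq_len) and returning a training loss; the tensor shapes and token range are illustrative, not taken from the repository.

import torch
from rt2.palme import PalmE

model = PalmE(image_size=256, patch_size=32, num_tokens=20000, max_seq_len=1024)

img = torch.randn(1, 3, 256, 256)           # one RGB image at the configured resolution
text = torch.randint(0, 20000, (1, 64))     # hypothetical token ids for the paired instruction

# assumed behaviour: the wrapped decoder returns an autoregressive loss over `text`,
# cross-attending to the patch embeddings produced by the ViT encoder
loss = model(img, text)
loss.backward()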
import torch import torch.nn.functional as F from torch import nn, einsum from typing import List, Optional, Callable, Tuple from beartype import beartype from einops import pack, unpack, repeat, reduce, rearrange from einops.layers.torch import Rearrange, Reduce from functools import partial from classifier_free_guidance_pytorch import TextConditioner, AttentionTextConditioner, classifier_free_guidance # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def cast_tuple(val, length = 1): return val if isinstance(val, tuple) else ((val,) * length) def pack_one(x, pattern): return pack([x], pattern) def unpack_one(x, ps, pattern): return unpack(x, ps, pattern)[0] # sinusoidal positions def posemb_sincos_1d(seq, dim, temperature = 10000, device = None, dtype = torch.float32): n = torch.arange(seq, device = device) omega = torch.arange(dim // 2, device = device) / (dim // 2 - 1) omega = 1. / (temperature ** omega) n = n[:, None] * omega[None, :] pos_emb = torch.cat((n.sin(), n.cos()), dim = 1) return pos_emb.type(dtype) # helper classes class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x): return self.fn(x) + x class LayerNorm(nn.Module): def __init__(self, dim): super().__init__() self.gamma = nn.Parameter(torch.ones(dim)) self.register_buffer("beta", torch.zeros(dim)) def forward(self, x): return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta) class FeedForward(nn.Module): def __init__(self, dim, mult = 4, dropout = 0.): super().__init__() inner_dim = int(dim * mult) self.norm = LayerNorm(dim) self.net = nn.Sequential( nn.Linear(dim, inner_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x, cond_fn = None): x = self.norm(x) if exists(cond_fn): # adaptive layernorm x = cond_fn(x) return self.net(x) # MBConv class SqueezeExcitation(nn.Module): def __init__(self, dim, shrinkage_rate = 0.25): super().__init__() hidden_dim = int(dim * shrinkage_rate) self.gate = nn.Sequential( Reduce('b c h w -> b c', 'mean'), nn.Linear(dim, hidden_dim, bias = False), nn.SiLU(), nn.Linear(hidden_dim, dim, bias = False), nn.Sigmoid(), Rearrange('b c -> b c 1 1') ) def forward(self, x): return x * self.gate(x) class MBConvResidual(nn.Module): def __init__(self, fn, dropout = 0.): super().__init__() self.fn = fn self.dropsample = Dropsample(dropout) def forward(self, x): out = self.fn(x) out = self.dropsample(out) return out + x class Dropsample(nn.Module): def __init__(self, prob = 0): super().__init__() self.prob = prob def forward(self, x): device = x.device if self.prob == 0. or (not self.training): return x keep_mask = torch.FloatTensor((x.shape[0], 1, 1, 1), device = device).uniform_() > self.prob return x * keep_mask / (1 - self.prob) def MBConv( dim_in, dim_out, *, downsample, expansion_rate = 4, shrinkage_rate = 0.25, dropout = 0. 
): hidden_dim = int(expansion_rate * dim_out) stride = 2 if downsample else 1 net = nn.Sequential( nn.Conv2d(dim_in, hidden_dim, 1), nn.BatchNorm2d(hidden_dim), nn.GELU(), nn.Conv2d(hidden_dim, hidden_dim, 3, stride = stride, padding = 1, groups = hidden_dim), nn.BatchNorm2d(hidden_dim), nn.GELU(), SqueezeExcitation(hidden_dim, shrinkage_rate = shrinkage_rate), nn.Conv2d(hidden_dim, dim_out, 1), nn.BatchNorm2d(dim_out) ) if dim_in == dim_out and not downsample: net = MBConvResidual(net, dropout = dropout) return net # attention related classes class Attention(nn.Module): def __init__( self, dim, dim_head = 32, dropout = 0., window_size = 7 ): super().__init__() assert (dim % dim_head) == 0, 'dimension should be divisible by dimension per head' self.norm = LayerNorm(dim) self.heads = dim // dim_head self.scale = dim_head ** -0.5 self.to_qkv = nn.Linear(dim, dim * 3, bias = False) self.attend = nn.Sequential( nn.Softmax(dim = -1), nn.Dropout(dropout) ) self.to_out = nn.Sequential( nn.Linear(dim, dim, bias = False), nn.Dropout(dropout) ) # relative positional bias self.rel_pos_bias = nn.Embedding((2 * window_size - 1) ** 2, self.heads) pos = torch.arange(window_size) grid = torch.stack(torch.meshgrid(pos, pos, indexing = 'ij')) grid = rearrange(grid, 'c i j -> (i j) c') rel_pos = rearrange(grid, 'i ... -> i 1 ...') - rearrange(grid, 'j ... -> 1 j ...') rel_pos += window_size - 1 rel_pos_indices = (rel_pos * torch.tensor([2 * window_size - 1, 1])).sum(dim = -1) self.register_buffer('rel_pos_indices', rel_pos_indices, persistent = False) def forward(self, x): batch, height, width, window_height, window_width, _, device, h = *x.shape, x.device, self.heads x = self.norm(x) # flatten x = rearrange(x, 'b x y w1 w2 d -> (b x y) (w1 w2) d') # project for queries, keys, values q, k, v = self.to_qkv(x).chunk(3, dim = -1) # split heads q, k, v = map(lambda t: rearrange(t, 'b n (h d ) -> b h n d', h = h), (q, k, v)) # scale q = q * self.scale # sim sim = einsum('b h i d, b h j d -> b h i j', q, k) # add positional bias bias = self.rel_pos_bias(self.rel_pos_indices) sim = sim + rearrange(bias, 'i j h -> h i j') # attention attn = self.attend(sim) # aggregate out = einsum('b h i j, b h j d -> b h i d', attn, v) # merge heads out = rearrange(out, 'b h (w1 w2) d -> b w1 w2 (h d)', w1 = window_height, w2 = window_width) # combine heads out out = self.to_out(out) return rearrange(out, '(b x y) ... 
-> b x y ...', x = height, y = width) class MaxViT(nn.Module): def __init__( self, *, num_classes, dim, depth, dim_head = 32, dim_conv_stem = None, window_size = 7, mbconv_expansion_rate = 4, mbconv_shrinkage_rate = 0.25, dropout = 0.1, channels = 3 ): super().__init__() assert isinstance(depth, tuple), 'depth needs to be tuple if integers indicating number of transformer blocks at that stage' # convolutional stem dim_conv_stem = default(dim_conv_stem, dim) self.conv_stem = nn.Sequential( nn.Conv2d(channels, dim_conv_stem, 3, stride = 2, padding = 1), nn.Conv2d(dim_conv_stem, dim_conv_stem, 3, padding = 1) ) # variables num_stages = len(depth) dims = tuple(map(lambda i: (2 ** i) * dim, range(num_stages))) dims = (dim_conv_stem, *dims) dim_pairs = tuple(zip(dims[:-1], dims[1:])) self.layers = nn.ModuleList([]) # shorthand for window size for efficient block - grid like attention w = window_size # iterate through stages cond_hidden_dims = [] for ind, ((layer_dim_in, layer_dim), layer_depth) in enumerate(zip(dim_pairs, depth)): for stage_ind in range(layer_depth): is_first = stage_ind == 0 stage_dim_in = layer_dim_in if is_first else layer_dim cond_hidden_dims.append(stage_dim_in) block = nn.Sequential( MBConv( stage_dim_in, layer_dim, downsample = is_first, expansion_rate = mbconv_expansion_rate, shrinkage_rate = mbconv_shrinkage_rate ), Rearrange('b d (x w1) (y w2) -> b x y w1 w2 d', w1 = w, w2 = w), # block-like attention Residual(Attention(dim = layer_dim, dim_head = dim_head, dropout = dropout, window_size = w)), Residual(FeedForward(dim = layer_dim, dropout = dropout)), Rearrange('b x y w1 w2 d -> b d (x w1) (y w2)'), Rearrange('b d (w1 x) (w2 y) -> b x y w1 w2 d', w1 = w, w2 = w), # grid-like attention Residual(Attention(dim = layer_dim, dim_head = dim_head, dropout = dropout, window_size = w)), Residual(FeedForward(dim = layer_dim, dropout = dropout)), Rearrange('b x y w1 w2 d -> b d (w1 x) (w2 y)'), ) self.layers.append(block) embed_dim = dims[-1] self.embed_dim = dims[-1] self.cond_hidden_dims = cond_hidden_dims # mlp head out self.mlp_head = nn.Sequential( Reduce('b d h w -> b d', 'mean'), LayerNorm(embed_dim), nn.Linear(embed_dim, num_classes) ) @beartype def forward( self, x, texts: Optional[List[str]] = None, cond_fns: Optional[Tuple[Callable, ...]] = None, cond_drop_prob = 0., return_embeddings = False ): x = self.conv_stem(x) if not exists(cond_fns): cond_fns = (None,) * len(self.layers) for stage, cond_fn in zip(self.layers, cond_fns): if exists(cond_fn): x = cond_fn(x) x = stage(x) if return_embeddings: return x return self.mlp_head(x) # attention class TransformerAttention(nn.Module): def __init__( self, dim, causal = False, dim_head = 64, dim_context = None, heads = 8, norm_context = False, dropout = 0.1 ): super().__init__() self.heads = heads self.scale = dim_head ** -0.5 self.causal = causal inner_dim = dim_head * heads dim_context = default(dim_context, dim) self.norm = LayerNorm(dim) self.context_norm = LayerNorm(dim_context) if norm_context else nn.Identity() self.attn_dropout = nn.Dropout(dropout) self.to_q = nn.Linear(dim, inner_dim, bias = False) self.to_kv = nn.Linear(dim_context, dim_head * 2, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim, bias = False), nn.Dropout(dropout) ) def forward( self, x, context = None, mask = None, attn_bias = None, attn_mask = None, cond_fn: Optional[Callable] = None ): b = x.shape[0] if exists(context): context = self.context_norm(context) kv_input = default(context, x) x = self.norm(x) if exists(cond_fn): # 
adaptive layer-norm x = cond_fn(x) q, k, v = self.to_q(x), *self.to_kv(kv_input).chunk(2, dim = -1) q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads) q = q * self.scale sim = einsum('b h i d, b j d -> b h i j', q, k) if exists(attn_bias): sim = sim + attn_bias if exists(attn_mask): sim = sim.masked_fill(~attn_mask, -torch.finfo(sim.dtype).max) if exists(mask): mask = rearrange(mask, 'b j -> b 1 1 j') sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max) if self.causal: i, j = sim.shape[-2:] causal_mask = torch.ones((i, j), dtype = torch.bool, device = x.device).triu(j - i + 1) sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max) attn = sim.softmax(dim = -1) attn = self.attn_dropout(attn) out = einsum('b h i j, b j d -> b h i d', attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) @beartype class Transformer(nn.Module): def __init__( self, dim, dim_head = 64, heads = 8, depth = 6, attn_dropout = 0., ff_dropout = 0. ): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ TransformerAttention(dim = dim, heads = heads, dropout = attn_dropout), FeedForward(dim = dim, dropout = ff_dropout) ])) def forward( self, x, cond_fns: Optional[Tuple[Callable, ...]] = None, attn_mask = None ): if not exists(cond_fns): cond_fns = (None,) * len(self.layers * 2) cond_fns = iter(cond_fns) for attn, ff in self.layers: x = attn(x, attn_mask = attn_mask, cond_fn = next(cond_fns)) + x x = ff(x, cond_fn = next(cond_fns)) + x return x # token learner module class TokenLearner(nn.Module): """ https://arxiv.org/abs/2106.11297 using the 1.1 version with the MLP (2 dense layers with gelu) for generating attention map """ def __init__( self, *, dim, ff_mult = 2, num_output_tokens = 8, num_layers = 2 ): super().__init__() inner_dim = dim * ff_mult * num_output_tokens self.num_output_tokens = num_output_tokens self.net = nn.Sequential( nn.Conv2d(dim * num_output_tokens, inner_dim, 1, groups = num_output_tokens), nn.GELU(), nn.Conv2d(inner_dim, num_output_tokens, 1, groups = num_output_tokens), ) def forward(self, x): x, ps = pack_one(x, '* c h w') x = repeat(x, 'b c h w -> b (g c) h w', g = self.num_output_tokens) attn = self.net(x) attn = rearrange(attn, 'b g h w -> b 1 g h w') x = rearrange(x, 'b (g c) h w -> b c g h w', g = self.num_output_tokens) x = reduce(x * attn, 'b c g h w -> b c g', 'mean') x = unpack_one(x, ps, '* c n') return x # Robotic Transformer @beartype class RT1(nn.Module): def __init__( self, *, vit: MaxViT, num_actions = 11, action_bins = 256, depth = 6, heads = 8, dim_head = 64, token_learner_ff_mult = 2, token_learner_num_layers = 2, token_learner_num_output_tokens = 8, cond_drop_prob = 0.2, use_attn_conditioner = False, conditioner_kwargs: dict = dict() ): super().__init__() self.vit = vit self.num_vit_stages = len(vit.cond_hidden_dims) conditioner_klass = AttentionTextConditioner if use_attn_conditioner else TextConditioner self.conditioner = conditioner_klass( hidden_dims = (*tuple(vit.cond_hidden_dims), *((vit.embed_dim,) * depth * 2)), hiddens_channel_first = (*((True,) * self.num_vit_stages), *((False,) * depth * 2)), cond_drop_prob = cond_drop_prob, **conditioner_kwargs ) self.token_learner = TokenLearner( dim = vit.embed_dim, ff_mult = token_learner_ff_mult, num_output_tokens = token_learner_num_output_tokens, num_layers = token_learner_num_layers ) self.num_learned_tokens = token_learner_num_output_tokens self.transformer_depth = depth self.transformer = Transformer( dim = vit.embed_dim, 
dim_head = dim_head, heads = heads, depth = depth ) self.cond_drop_prob = cond_drop_prob self.to_logits = nn.Sequential( LayerNorm(vit.embed_dim), nn.Linear(vit.embed_dim, num_actions * action_bins), Rearrange('... (a b) -> ... a b', b = action_bins) ) @classifier_free_guidance def forward( self, video, texts: Optional[List[str]] = None, cond_drop_prob = 0. ): depth = self.transformer_depth cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob) frames, device = video.shape[2], video.device cond_fns = self.conditioner( texts, cond_drop_prob = cond_drop_prob, repeat_batch = (*((frames,) * self.num_vit_stages), *((1,) * self.transformer_depth * 2)) ) vit_cond_fns, transformer_cond_fns = cond_fns[:-(depth * 2)], cond_fns[-(depth * 2):] video = rearrange(video, 'b c f h w -> b f c h w') images, packed_shape = pack_one(video, '* c h w') tokens = self.vit( images, texts = texts, cond_fns = vit_cond_fns, cond_drop_prob = cond_drop_prob, return_embeddings = True ) tokens = unpack_one(tokens, packed_shape, '* c h w') learned_tokens = self.token_learner(tokens) learned_tokens = rearrange(learned_tokens, 'b f c n -> b (f n) c') # causal attention mask attn_mask = torch.ones((frames, frames), dtype = torch.bool, device = device).triu(1) attn_mask = repeat(attn_mask, 'i j -> (i r1) (j r2)', r1 = self.num_learned_tokens, r2 = self.num_learned_tokens) # sinusoidal positional embedding pos_emb = posemb_sincos_1d(frames, learned_tokens.shape[-1], dtype = learned_tokens.dtype, device = learned_tokens.device) learned_tokens = learned_tokens + repeat(pos_emb, 'n d -> (n r) d', r = self.num_learned_tokens) # attention attended_tokens = self.transformer(learned_tokens, cond_fns = transformer_cond_fns, attn_mask = ~attn_mask) pooled = reduce(attended_tokens, 'b (f n) d -> b f d', 'mean', f = frames) logits = self.to_logits(pooled) return logits class RT2: """ A class for real-time video processing using Vision Transformers (ViT) and Reinforcement Learning (RT1) models. ... Attributes ---------- vit : MaxViT a Vision Transformer model model : RT1 a reinforcement learning model Methods ------- train(video, instructions): Computes the logits for the given video and instructions using the RT1 model in training mode. eval(video, instructions, cond_scale=1.0): Computes the logits for the given video and instructions using the RT1 model in evaluation mode. """ def __init__(self, num_classes = 1000, dim = 96, dim_conv_stem = 64, dim_head_vit = 32, depth_vit = (2, 2, 5, 2), window_size = 7, mbconv_expansion_rate = 4, mbconv_shrinkage_rate = 0.25, dropout_vit = 0.1, num_actions = 11, depth_rt1 = 6, heads = 8, dim_head_rt1 = 64, cond_drop_prob = 0.2 ): """ Constructs all the necessary attributes for the RT2 object. 
Parameters ---------- num_classes : int number of classes for the ViT model dim : int dimension of the ViT model dim_conv_stem : int dimension of the convolutional stem for the ViT model dim_head_vit : int dimension of the head for the ViT model depth_vit : tuple depth of the ViT model window_size : int window size for the ViT model mbconv_expansion_rate : float expansion rate for the mbconv layer in the ViT model mbconv_shrinkage_rate : float shrinkage rate for the mbconv layer in the ViT model dropout_vit : float dropout rate for the ViT model num_actions : int number of actions for the RT1 model depth_rt1 : int depth of the RT1 model heads : int number of heads for the RT1 model dim_head_rt1 : int dimension of the head for the RT1 model cond_drop_prob : float conditional drop probability for the RT1 model """ self.vit = MaxViT( num_classes=num_classes, dim=dim, dim_conv_stem=dim_conv_stem, dim_head=dim_head_vit, depth=depth_vit, window_size=window_size, mbconv_expansion_rate=mbconv_expansion_rate, mbconv_shrinkage_rate=mbconv_shrinkage_rate, dropout=dropout_vit ) self.model = RT1( vit=self.vit, num_actions=num_actions, depth=depth_rt1, heads=heads, dim_head=dim_head_rt1, cond_drop_prob=cond_drop_prob ) def train(self, video, instructions): """ Computes the logits for the given video and instructions using the RT1 model in training mode. Parameters ---------- video : torch.Tensor a tensor containing the video data instructions : torch.Tensor a tensor containing the instructions Returns ------- torch.Tensor a tensor containing the computed logits """ try: train_logits = self.model(video, instructions) return train_logits except Exception as e: raise RuntimeError("Error in training: {}".format(e)) def eval(self, video, instructions, cond_scale=1.0): """ Computes the logits for the given video and instructions using the RT1 model in evaluation mode. Parameters ---------- video : torch.Tensor a tensor containing the video data instructions : torch.Tensor a tensor containing the instructions cond_scale : float, optional a scale factor for the conditional scaling (default is 1.0) Returns ------- torch.Tensor a tensor containing the computed logits """ try: self.model.eval() eval_logits = self.model(video, instructions, cond_scale=cond_scale) return eval_logits except Exception as e: raise RuntimeError("Error in evaluation: {}".format(e))
RT-2-main
rt2/model.py
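RT2.train returns per-frame logits of shape (batch, frames, num_actions, action_bins). Below is a minimal sketch of pairing those logits with discretized action targets; the target layout and the use of cross-entropy are assumptions about downstream training, not something the class itself defines.

import torch
import torch.nn.functional as F

from rt2.model import RT2

model = RT2()

video = torch.randn(1, 3, 6, 224, 224)       # (batch, channels, frames, height, width)
instructions = ["pick up the sponge"]

logits = model.train(video, instructions)    # (1, 6, 11, 256) with the default config

# hypothetical targets: one bin index in [0, 256) per frame and action dimension
targets = torch.randint(0, 256, (1, 6, 11))

# cross_entropy expects (N, C, ...) with C = action_bins, so move the bin axis forward
loss = F.cross_entropy(logits.permute(0, 3, 1, 2), targets)
loss.backward()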
from functools import partial from typing import Optional import torch from torch import nn, einsum, Tensor import torch.nn.functional as F from collections import namedtuple from functools import wraps from packaging import version from dataclasses import dataclass from einops import rearrange, repeat # constants EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient']) @dataclass class Intermediates: qk_similarities: Optional[Tensor] = None pre_softmax_attn: Optional[Tensor] = None post_softmax_attn: Optional[Tensor] = None def to_tuple(self): return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn) # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def compact(arr): return [*filter(exists, arr)] def once(fn): called = False @wraps(fn) def inner(x): nonlocal called if called: return called = True return fn(x) return inner print_once = once(print) # functions for creating causal mask # need a special one for onnx cpu (no support for .triu) def create_causal_mask(i, j, device): return torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1) def onnx_create_causal_mask(i, j, device): r = torch.arange(i, device = device) causal_mask = rearrange(r, 'i -> i 1') < rearrange(r, 'j -> 1 j') causal_mask = F.pad(causal_mask, (j - i, 0), value = False) return causal_mask # main class class Attend(nn.Module): def __init__( self, *, dropout = 0., causal = False, heads = None, talking_heads = False, sparse_topk = None, scale = None, qk_norm = False, flash = False, add_zero_kv = False, onnxable = False ): super().__init__() self.scale = scale self.qk_norm = qk_norm self.causal = causal self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax self.dropout = dropout self.attn_dropout = nn.Dropout(dropout) # talking heads assert not (flash and talking_heads), 'talking heads not compatible with flash attention' self.talking_heads = talking_heads if talking_heads: self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False) self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False) # sparse topk assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention' self.sparse_topk = sparse_topk # add a key / value token composed of zeros # in case this helps controlling outliers, proposed by https://www.evanmiller.org/attention-is-off-by-one.html self.add_zero_kv = add_zero_kv # flash attention self.flash = flash assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above' # determine efficient attention configs for cuda and cpu self.cpu_config = EfficientAttentionConfig(True, True, True) self.cuda_config = None if not torch.cuda.is_available() or not flash: return device_properties = torch.cuda.get_device_properties(torch.device('cuda')) if device_properties.major == 8 and device_properties.minor == 0: print_once('A100 GPU detected, using flash attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(True, False, False) else: print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(False, True, True) def flash_attn( self, q, k, v, mask = None, attn_bias = None ): batch, heads, q_len, _, k_len, 
is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device # Recommended for multi-query single-key-value attention by Tri Dao # kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64]) if k.ndim == 3: k = rearrange(k, 'b ... -> b 1 ...').expand_as(q) if v.ndim == 3: v = rearrange(v, 'b ... -> b 1 ...').expand_as(q) # handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention if self.qk_norm: default_scale = q.shape[-1] ** -0.5 q = q * (default_scale / self.scale) # Check if mask exists and expand to compatible shape # The mask is B L, so it would have to be expanded to B H N L causal = self.causal if exists(mask): assert mask.ndim == 4 mask = mask.expand(batch, heads, q_len, k_len) # manually handle causal mask, if another mask was given if causal: causal_mask = self.create_causal_mask(q_len, k_len, device = device) mask = mask & ~causal_mask causal = False # handle alibi positional bias # convert from bool to float if exists(attn_bias): attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1) # if mask given, the mask would already contain the causal mask from above logic # otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number mask_value = -torch.finfo(q.dtype).max if exists(mask): attn_bias = attn_bias.masked_fill(~mask, mask_value // 2) elif causal: causal_mask = self.create_causal_mask(q_len, k_len, device = device) attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2) causal = False # scaled_dot_product_attention handles attn_mask either as bool or additive bias # make it an additive bias here mask = attn_bias # Check if there is a compatible device for flash attention config = self.cuda_config if is_cuda else self.cpu_config # pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale with torch.backends.cuda.sdp_kernel(**config._asdict()): out = F.scaled_dot_product_attention( q, k, v, attn_mask = mask, dropout_p = self.dropout if self.training else 0., is_causal = causal ) return out, Intermediates() def forward( self, q, k, v, mask = None, attn_bias = None, prev_attn = None ): """ einstein notation b - batch h - heads n, i, j - sequence length (base sequence length, source, target) d - feature dimension """ n, heads, kv_heads, device = q.shape[-2], q.shape[1], k.shape[1], q.device scale = default(self.scale, q.shape[-1] ** -0.5) # handle grouped multi-query attention if kv_heads == 1: k, v = map(lambda t: rearrange(t, 'b 1 n d -> b n d'), (k, v)) elif kv_heads < heads: k, v = map(lambda t: repeat(t, 'b kvh n d -> b (r kvh) n d', r = heads // kv_heads), (k, v)) # handle zero kv, as means for allowing network to attend to nothing if self.add_zero_kv: k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value = 0.), (k, v)) if exists(mask): mask = F.pad(mask, (1, 0), value = True) if exists(attn_bias): attn_bias = F.pad(attn_bias, (1, 0), value = 0.) 
if self.flash: assert not exists(prev_attn), 'residual attention not compatible with flash attention' return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias) kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d' dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale if exists(prev_attn): dots = dots + prev_attn qk_similarities = dots.clone() if self.talking_heads: dots = self.pre_softmax_talking_heads(dots) if exists(attn_bias): dots = dots + attn_bias i, j, dtype = *dots.shape[-2:], dots.dtype mask_value = -torch.finfo(dots.dtype).max if exists(self.sparse_topk) and self.sparse_topk < j: top_values, _ = dots.topk(self.sparse_topk, dim = -1) sparse_topk_mask = dots < top_values[..., -1:] mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask if exists(mask): dots = dots.masked_fill(~mask, mask_value) if self.causal: causal_mask = self.create_causal_mask(i, j, device = device) dots = dots.masked_fill(causal_mask, mask_value) pre_softmax_attn = dots.clone() attn = self.attn_fn(dots, dim = -1) attn = attn.type(dtype) post_softmax_attn = attn.clone() attn = self.attn_dropout(attn) if self.talking_heads: attn = self.post_softmax_talking_heads(attn) out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v) intermediates = Intermediates( qk_similarities = qk_similarities, pre_softmax_attn = pre_softmax_attn, post_softmax_attn = post_softmax_attn ) return out, intermediates # cascading heads logic def to_single_heads(t, dim = 1): heads = t.unbind(dim = dim) return tuple(head.unsqueeze(dim) for head in heads) class CascadingHeads(nn.Module): def __init__(self, attend: Attend): super().__init__() self.attend = attend def forward( self, q, k, v, mask = None, attn_bias = None, prev_attn = None ): assert q.shape[-1] == v.shape[-1], 'cascading heads can only be done if query / key and value head dimensions are the same' # split inputs into per-head inputs heads = q.shape[1] queries = to_single_heads(q) keys = to_single_heads(k) if k.ndim == 4 else ((k,) * heads) values = to_single_heads(v) if v.ndim == 4 else ((v,) * heads) mask = (mask,) * heads attn_bias = to_single_heads(attn_bias, dim = 0) if exists(attn_bias) else ((None,) * heads) prev_attn = to_single_heads(prev_attn) if exists(prev_attn) else ((None,) * heads) # now loop through each head, without output of previous head summed with the next head # thus cascading all_outs = [] all_intermediates = [] prev_head_out = None for h_q, h_k, h_v, h_mask, h_attn_bias, h_prev_attn in zip(queries, keys, values, mask, attn_bias, prev_attn): if exists(prev_head_out): h_q = h_q + prev_head_out out, intermediates = self.attend( h_q, h_k, h_v, mask = h_mask, attn_bias = h_attn_bias, prev_attn = h_prev_attn ) prev_head_out = out all_outs.append(out) all_intermediates.append(intermediates) # cat all output heads all_outs = torch.cat(all_outs, dim = 1) # cat all intermediates, if they exist qk_similarities, pre_softmax_attn, post_softmax_attn = zip(*map(lambda i: i.to_tuple(), all_intermediates)) qk_similarities, pre_softmax_attn, post_softmax_attn = map(compact, (qk_similarities, pre_softmax_attn, post_softmax_attn)) aggregated_intermediates = Intermediates( qk_similarities = torch.cat(qk_similarities, dim = 1) if len(qk_similarities) > 0 else None, pre_softmax_attn = torch.cat(pre_softmax_attn, dim = 1) if len(pre_softmax_attn) > 0 else None, post_softmax_attn = torch.cat(post_softmax_attn, dim = 1) if len(post_softmax_attn) > 0 else None ) return all_outs, aggregated_intermediates
RT-2-main
rt2/attend.py
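Attend is self-contained and can be exercised directly on explicit (batch, heads, seq, dim_head) tensors, which makes its masking and softmax path easy to inspect. The shapes below are illustrative only.

import torch
from rt2.attend import Attend

attend = Attend(causal=True, dropout=0.1, flash=False)

b, h, n, d = 2, 8, 128, 64
q = torch.randn(b, h, n, d)
k = torch.randn(b, h, n, d)
v = torch.randn(b, h, n, d)

# returns the attended values plus an Intermediates record of the attention maps
out, intermediates = attend(q, k, v)
print(out.shape, intermediates.post_softmax_attn.shape)   # (2, 8, 128, 64) (2, 8, 128, 128)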
import math from random import random import torch from torch import nn, einsum, Tensor import torch.nn.functional as F from functools import partial, wraps from inspect import isfunction from collections import namedtuple from dataclasses import dataclass from typing import List, Callable, Optional from math import ceil from einops import pack, rearrange, repeat, reduce, unpack from einops.layers.torch import Rearrange from rt2.attend import Attend, Intermediates, CascadingHeads # constants DEFAULT_DIM_HEAD = 64 def eval_decorator(fn): def inner(self, *args, **kwargs): was_training = self.training self.eval() out = fn(self, *args, **kwargs) self.train(was_training) return out return inner # nucleus def top_p(logits, thres = 0.9): sorted_logits, sorted_indices = torch.sort(logits, descending=True) cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) sorted_indices_to_remove = cum_probs > (1 - thres) sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone() sorted_indices_to_remove[:, 0] = 0 sorted_logits[sorted_indices_to_remove] = float('-inf') return sorted_logits.scatter(1, sorted_indices, sorted_logits) # topk def top_k(logits, thres = 0.9): k = ceil((1 - thres) * logits.shape[-1]) val, ind = torch.topk(logits, k) probs = torch.full_like(logits, float('-inf')) probs.scatter_(1, ind, val) return probs # top_a def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02): probs = F.softmax(logits, dim=-1) limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio logits[probs < limit] = float('-inf') logits[probs >= limit] = 1 return logits # autoregressive wrapper class @dataclass class LayerIntermediates: hiddens: Optional[List[Tensor]] = None attn_intermediates: Optional[List[Intermediates]] = None layer_hiddens: Optional[List[Tensor]] = None attn_z_loss: Optional[Tensor] = None # helpers def exists(val): return val is not None def default(val, d): if exists(val): return val return d() if isfunction(d) else d def cast_tuple(val, depth): return val if isinstance(val, tuple) else (val,) * depth def divisible_by(num, den): return (num % den) == 0 def maybe(fn): @wraps(fn) def inner(x, *args, **kwargs): if not exists(x): return x return fn(x, *args, **kwargs) return inner class always(): def __init__(self, val): self.val = val def __call__(self, *args, **kwargs): return self.val class not_equals(): def __init__(self, val): self.val = val def __call__(self, x, *args, **kwargs): return x != self.val class equals(): def __init__(self, val): self.val = val def __call__(self, x, *args, **kwargs): return x == self.val def Sequential(*modules): return nn.Sequential(*filter(exists, modules)) # tensor helpers def max_neg_value(tensor): return -torch.finfo(tensor.dtype).max def l2norm(t, groups = 1): t = rearrange(t, '... (g d) -> ... g d', g = groups) t = F.normalize(t, p = 2, dim = -1) return rearrange(t, '... g d -> ... (g d)') def pad_at_dim(t, pad, dim = -1, value = 0.): dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1) zeros = ((0, 0) * dims_from_right) return F.pad(t, (*zeros, *pad), value = value) def or_reduce(masks): head, *body = masks for rest in body: head = head | rest return head # auxiliary loss helpers def calc_z_loss( pre_softmax_attns: List[Tensor], mask = None, weight = 1. ): # the same loss applied to the mixture of experts router logits in https://arxiv.org/abs/2202.08906 # in the paper, in a tiny footnote, they mention using it on attention logits with stabilizing effects # also used in PaLM as one of the measures lse = 0. 
for attn in pre_softmax_attns: lse = lse + attn.logsumexp(dim = -1) loss = torch.square(lse) loss = reduce(loss, 'b h n -> b n', 'sum') if not exists(mask): return loss.mean() * weight loss = loss[mask].sum() / mask.sum().clamp(min = 1e-5) return loss * weight # init helpers def init_zero_(layer): nn.init.constant_(layer.weight, 0.) if exists(layer.bias): nn.init.constant_(layer.bias, 0.) # keyword argument helpers def pick_and_pop(keys, d): values = list(map(lambda key: d.pop(key), keys)) return dict(zip(keys, values)) def group_dict_by_key(cond, d): return_val = [dict(),dict()] for key in d.keys(): match = bool(cond(key)) ind = int(not match) return_val[ind][key] = d[key] return (*return_val,) def string_begins_with(prefix, str): return str.startswith(prefix) def group_by_key_prefix(prefix, d): return group_dict_by_key(partial(string_begins_with, prefix), d) def groupby_prefix_and_trim(prefix, d): kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) return kwargs_without_prefix, kwargs # initializations def deepnorm_init( transformer, beta, module_name_match_list = ['.ff.', '.to_v', '.to_out'] ): for name, module in transformer.named_modules(): if type(module) != nn.Linear: continue needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list)) gain = beta if needs_beta_gain else 1 nn.init.xavier_normal_(module.weight.data, gain = gain) if exists(module.bias): nn.init.constant_(module.bias.data, 0) # structured dropout, more effective than traditional attention dropouts def dropout_seq(seq, mask, dropout): b, n, *_, device = *seq.shape, seq.device logits = torch.randn(b, n, device = device) if exists(mask): mask_value = max_neg_value(logits) logits = logits.masked_fill(~mask, mask_value) keep_prob = 1. - dropout num_keep = max(1, int(keep_prob * n)) keep_indices = logits.topk(num_keep, dim = 1).indices batch_indices = torch.arange(b, device = device) batch_indices = rearrange(batch_indices, 'b -> b 1') seq = seq[batch_indices, keep_indices] if exists(mask): seq_counts = mask.sum(dim = -1) seq_keep_counts = torch.ceil(seq_counts * keep_prob).int() keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1') mask = mask[batch_indices, keep_indices] & keep_mask return seq, mask # activations class ReluSquared(nn.Module): def forward(self, x): return F.relu(x) ** 2 # embedding class TokenEmbedding(nn.Module): def __init__(self, dim, num_tokens, l2norm_embed = False): super().__init__() self.l2norm_embed = l2norm_embed self.emb = nn.Embedding(num_tokens, dim) def forward(self, x): token_emb = self.emb(x) return l2norm(token_emb) if self.l2norm_embed else token_emb # positional embeddings class AbsolutePositionalEmbedding(nn.Module): def __init__(self, dim, max_seq_len, l2norm_embed = False): super().__init__() self.scale = dim ** -0.5 if not l2norm_embed else 1. 
self.max_seq_len = max_seq_len self.l2norm_embed = l2norm_embed self.emb = nn.Embedding(max_seq_len, dim) def forward(self, x, pos = None): seq_len, device = x.shape[1], x.device assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}' if not exists(pos): pos = torch.arange(seq_len, device = device) pos_emb = self.emb(pos) pos_emb = pos_emb * self.scale return l2norm(pos_emb) if self.l2norm_embed else pos_emb class ScaledSinusoidalEmbedding(nn.Module): def __init__(self, dim, theta = 10000): super().__init__() assert divisible_by(dim, 2) self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5) half_dim = dim // 2 freq_seq = torch.arange(half_dim).float() / half_dim inv_freq = theta ** -freq_seq self.register_buffer('inv_freq', inv_freq, persistent = False) def forward(self, x, pos = None): seq_len, device = x.shape[1], x.device if not exists(pos): pos = torch.arange(seq_len, device = device) emb = einsum('i, j -> i j', pos, self.inv_freq) emb = torch.cat((emb.sin(), emb.cos()), dim = -1) return emb * self.scale class RelativePositionBias(nn.Module): def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8): super().__init__() self.scale = scale self.causal = causal self.num_buckets = num_buckets self.max_distance = max_distance self.relative_attention_bias = nn.Embedding(num_buckets, heads) @staticmethod def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128): ret = 0 n = -relative_position if not causal: num_buckets //= 2 ret += (n < 0).long() * num_buckets n = torch.abs(n) else: n = torch.max(n, torch.zeros_like(n)) max_exact = num_buckets // 2 is_small = n < max_exact val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).long() val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret @property def device(self): return next(self.parameters()).device def forward(self, i, j): device = self.device q_pos = torch.arange(j - i, j, dtype = torch.long, device = device) k_pos = torch.arange(j, dtype = torch.long, device = device) rel_pos = k_pos[None, :] - q_pos[:, None] rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance) values = self.relative_attention_bias(rp_bucket) bias = rearrange(values, 'i j h -> h i j') return bias * self.scale class DynamicPositionBias(nn.Module): def __init__(self, dim, *, heads, depth, log_distance = False, norm = False): super().__init__() assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1' self.log_distance = log_distance self.mlp = nn.ModuleList([]) self.mlp.append(Sequential( nn.Linear(1, dim), nn.LayerNorm(dim) if norm else None, nn.SiLU() )) for _ in range(depth - 1): self.mlp.append(Sequential( nn.Linear(dim, dim), nn.LayerNorm(dim) if norm else None, nn.SiLU() )) self.mlp.append(nn.Linear(dim, heads)) @property def device(self): return next(self.parameters()).device def forward(self, i, j): assert i == j n, device = j, self.device # get the (n x n) matrix of distances seq_arange = torch.arange(n, device = device) context_arange = torch.arange(n, device = device) indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j') indices += (n - 1) # input to continuous 
positions MLP pos = torch.arange(-n + 1, n, device = device).float() pos = rearrange(pos, '... -> ... 1') if self.log_distance: pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1) for layer in self.mlp: pos = layer(pos) # get position biases bias = pos[indices] bias = rearrange(bias, 'i j h -> h i j') return bias class AlibiPositionalBias(nn.Module): def __init__(self, heads, total_heads, **kwargs): super().__init__() self.heads = heads self.total_heads = total_heads slopes = Tensor(self._get_slopes(heads)) slopes = rearrange(slopes, 'h -> h 1 1') self.register_buffer('slopes', slopes, persistent = False) self.register_buffer('bias', None, persistent = False) def get_bias(self, i, j, device): i_arange = torch.arange(j - i, j, device = device) j_arange = torch.arange(j, device = device) bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1')) return bias @staticmethod def _get_slopes(heads): def get_slopes_power_of_2(n): start = (2**(-2**-(math.log2(n)-3))) ratio = start return [start*ratio**i for i in range(n)] if math.log2(heads).is_integer(): return get_slopes_power_of_2(heads) closest_power_of_2 = 2 ** math.floor(math.log2(heads)) return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2] @property def device(self): return next(self.buffers()).device def forward(self, i, j): h, device = self.total_heads, self.device if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i: return self.bias[..., :i, :j] bias = self.get_bias(i, j, device) bias = bias * self.slopes num_heads_unalibied = h - bias.shape[0] bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0) self.register_buffer('bias', bias, persistent = False) return self.bias class RotaryEmbedding(nn.Module): def __init__( self, dim, use_xpos = False, scale_base = 512, interpolation_factor = 1., base = 10000, base_rescale_factor = 1. ): super().__init__() # proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning # has some connection to NTK literature # https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/ base *= base_rescale_factor ** (dim / (dim - 2)) inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim)) self.register_buffer('inv_freq', inv_freq) assert interpolation_factor >= 1. self.interpolation_factor = interpolation_factor if not use_xpos: self.register_buffer('scale', None) return scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim) self.scale_base = scale_base self.register_buffer('scale', scale) def forward(self, seq_len, device): t = torch.arange(seq_len, device = device).type_as(self.inv_freq) t = t / self.interpolation_factor freqs = torch.einsum('i , j -> i j', t, self.inv_freq) freqs = torch.cat((freqs, freqs), dim = -1) if not exists(self.scale): return freqs, 1. power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base scale = self.scale ** rearrange(power, 'n -> n 1') scale = torch.cat((scale, scale), dim = -1) return freqs, scale def rotate_half(x): x = rearrange(x, '... (j d) -> ... 
j d', j = 2) x1, x2 = x.unbind(dim = -2) return torch.cat((-x2, x1), dim = -1) def apply_rotary_pos_emb(t, freqs, scale = 1): seq_len = t.shape[-2] freqs = freqs[-seq_len:, :] return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale) # norms class Scale(nn.Module): def __init__(self, value, fn): super().__init__() self.value = value self.fn = fn def forward(self, x, **kwargs): out = self.fn(x, **kwargs) scale_fn = lambda t: t * self.value if not isinstance(out, tuple): return scale_fn(out) return (scale_fn(out[0]), *out[1:]) class ScaleNorm(nn.Module): def __init__(self, dim, eps = 1e-5): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5)) def forward(self, x): norm = torch.norm(x, dim = -1, keepdim = True) return x / norm.clamp(min = self.eps) * self.g class RMSNorm(nn.Module): def __init__(self, dim): super().__init__() self.scale = dim ** 0.5 self.g = nn.Parameter(torch.ones(dim)) def forward(self, x): return F.normalize(x, dim = -1) * self.scale * self.g class SimpleRMSNorm(nn.Module): def __init__(self, dim): super().__init__() self.scale = dim ** 0.5 def forward(self, x): return F.normalize(x, dim = -1) * self.scale # residual and residual gates class Residual(nn.Module): def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.): super().__init__() self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None self.scale_residual_constant = scale_residual_constant def forward(self, x, residual): if exists(self.residual_scale): residual = residual * self.residual_scale if self.scale_residual_constant != 1: residual = residual * self.scale_residual_constant return x + residual class GRUGating(nn.Module): def __init__(self, dim, scale_residual = False, **kwargs): super().__init__() self.gru = nn.GRUCell(dim, dim) self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None def forward(self, x, residual): if exists(self.residual_scale): residual = residual * self.residual_scale gated_output = self.gru( rearrange(x, 'b n d -> (b n) d'), rearrange(residual, 'b n d -> (b n) d') ) return gated_output.reshape_as(x) # token shifting def shift(t, amount, mask = None): if amount == 0: return t else: amount = min(amount, t.shape[1]) if exists(mask): t = t.masked_fill(~mask[..., None], 0.) return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.) class ShiftTokens(nn.Module): def __init__(self, shifts, fn): super().__init__() self.fn = fn self.shifts = tuple(shifts) def forward(self, x, **kwargs): mask = kwargs.get('mask', None) shifts = self.shifts segments = len(shifts) feats_per_shift = x.shape[-1] // segments splitted = x.split(feats_per_shift, dim = -1) segments_to_shift, rest = splitted[:segments], splitted[segments:] segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts))) x = torch.cat((*segments_to_shift, *rest), dim = -1) return self.fn(x, **kwargs) # feedforward class GLU(nn.Module): def __init__( self, dim_in, dim_out, activation: Callable, mult_bias = False ): super().__init__() self.act = activation self.proj = nn.Linear(dim_in, dim_out * 2) self.mult_bias = nn.Parameter(torch.ones(dim_out)) if mult_bias else 1. 
def forward(self, x): x, gate = self.proj(x).chunk(2, dim = -1) return x * self.act(gate) * self.mult_bias class FeedForward(nn.Module): def __init__( self, dim, dim_out = None, mult = 4, glu = False, glu_mult_bias = False, swish = False, relu_squared = False, post_act_ln = False, dropout = 0., no_bias = False, zero_init_output = False ): super().__init__() inner_dim = int(dim * mult) dim_out = default(dim_out, dim) if relu_squared: activation = ReluSquared() elif swish: activation = nn.SiLU() else: activation = nn.GELU() if glu: project_in = GLU(dim, inner_dim, activation, mult_bias = glu_mult_bias) else: project_in = nn.Sequential( nn.Linear(dim, inner_dim, bias = not no_bias), activation ) self.ff = Sequential( project_in, nn.LayerNorm(inner_dim) if post_act_ln else None, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out, bias = not no_bias) ) # init last linear layer to 0 if zero_init_output: init_zero_(self.ff[-1]) def forward(self, x): return self.ff(x) # attention. it is all we need class Attention(nn.Module): def __init__( self, dim, dim_head = DEFAULT_DIM_HEAD, heads = 8, causal = False, flash = False, talking_heads = False, head_scale = False, sparse_topk = None, num_mem_kv = 0, dropout = 0., on_attn = False, gate_values = False, zero_init_output = False, max_attend_past = None, qk_norm = False, qk_norm_groups = 1, qk_norm_scale = 10, qk_norm_dim_scale = False, one_kv_head = False, kv_heads = None, shared_kv = False, value_dim_head = None, tensor_product = False, # https://arxiv.org/abs/2208.06061 cascading_heads = False, add_zero_kv = False, # same as add_zero_attn in pytorch onnxable = False ): super().__init__() self.scale = dim_head ** -0.5 self.heads = heads self.causal = causal self.max_attend_past = max_attend_past assert not (exists(kv_heads) and one_kv_head), 'either attn_one_kv_head is set to True (in which case kv_heads is set to 1), or attn_kv_heads is set, but not both' value_dim_head = default(value_dim_head, dim_head) kv_heads = default(kv_heads, heads) kv_heads = 1 if one_kv_head else kv_heads assert divisible_by(heads, kv_heads) self.kv_heads = kv_heads q_dim = dim_head * heads k_dim = dim_head * kv_heads v_dim = value_dim_head * kv_heads out_dim = value_dim_head * heads self.to_q = nn.Linear(dim, q_dim, bias = False) self.to_k = nn.Linear(dim, k_dim, bias = False) # shared key / values, for further memory savings during inference assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values' self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None # relations projection from tp-attention self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None # add GLU gating for aggregated values, from alphafold2 self.to_v_gate = None if gate_values: self.to_v_gate = nn.Linear(dim, out_dim) nn.init.constant_(self.to_v_gate.weight, 0) nn.init.constant_(self.to_v_gate.bias, 1) # cosine sim attention self.qk_norm = qk_norm self.qk_norm_groups = qk_norm_groups self.qk_norm_scale = qk_norm_scale # whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442 self.qk_norm_dim_scale = qk_norm_dim_scale self.qk_norm_q_scale = self.qk_norm_k_scale = 1 if qk_norm and qk_norm_dim_scale: self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head)) self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head)) assert (not qk_norm) or divisible_by(dim_head, qk_norm_groups), 'dimension per attention head must be divisible by the qk norm 
groups' assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)' # attend class - includes core attention algorithm + talking heads self.attend = Attend( heads = heads, causal = causal, talking_heads = talking_heads, dropout = dropout, sparse_topk = sparse_topk, qk_norm = qk_norm, scale = qk_norm_scale if qk_norm else self.scale, add_zero_kv = add_zero_kv, flash = flash, onnxable = onnxable ) if cascading_heads: # cascading heads - wrap the Attend logic self.attend = CascadingHeads(self.attend) # head scaling self.head_scale = head_scale if head_scale: self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1)) # explicit topk sparse attention self.sparse_topk = sparse_topk # add memory key / values self.num_mem_kv = num_mem_kv if num_mem_kv > 0: self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) # attention on attention self.attn_on_attn = on_attn self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False) # init output projection 0 if zero_init_output: init_zero_(self.to_out) def forward( self, x, context = None, mask = None, context_mask = None, attn_mask = None, rel_pos = None, rotary_pos_emb = None, prev_attn = None, mem = None ): b, n, _, h, kv_h, head_scale, device, has_context = *x.shape, self.heads, self.kv_heads, self.head_scale, x.device, exists(context) kv_input = default(context, x) q_input = x k_input = kv_input v_input = kv_input r_input = x if exists(mem): k_input = torch.cat((mem, k_input), dim = -2) v_input = torch.cat((mem, v_input), dim = -2) q = self.to_q(q_input) k = self.to_k(k_input) v = self.to_v(v_input) if exists(self.to_v) else k r = self.to_r(r_input) if exists(self.to_r) else None q = rearrange(q, 'b n (h d) -> b h n d', h = h) k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = kv_h), (k, v, r)) if self.qk_norm: qk_l2norm = partial(l2norm, groups = self.qk_norm_groups) q, k = map(qk_l2norm, (q, k)) scale = self.qk_norm_scale q = q * self.qk_norm_q_scale k = k * self.qk_norm_k_scale if exists(rotary_pos_emb) and not has_context: freqs, xpos_scale = rotary_pos_emb l = freqs.shape[-1] q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.) 
(ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v)) ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale))) q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr))) input_mask = context_mask if has_context else mask if self.num_mem_kv > 0: mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v)) if self.qk_norm: mem_k = l2norm(mem_k) mem_k = mem_k * self.qk_norm_k_scale k = torch.cat((mem_k, k), dim = -2) v = torch.cat((mem_v, v), dim = -2) if exists(input_mask): input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True) i, j = map(lambda t: t.shape[-2], (q, k)) # determine masking mask_value = max_neg_value(q) masks = [] final_attn_mask = None if exists(input_mask): input_mask = rearrange(input_mask, 'b j -> b 1 1 j') masks.append(~input_mask) if exists(attn_mask): assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4' if attn_mask.ndim == 2: attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j') elif attn_mask.ndim == 3: attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j') masks.append(~attn_mask) if exists(self.max_attend_past): range_q = torch.arange(j - i, j, device = device) range_k = torch.arange(j, device = device) dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j') max_attend_past_mask = dist > self.max_attend_past masks.append(max_attend_past_mask) if len(masks) > 0: final_attn_mask = ~or_reduce(masks) # prepare relative positional bias, if needed attn_bias = None if exists(rel_pos): attn_bias = rel_pos(i, j) # attention is all we need out, intermediates = self.attend( q, k, v, mask = final_attn_mask, attn_bias = attn_bias, prev_attn = prev_attn ) # https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients if exists(r): out = out * r + out # normformer scaling of heads if head_scale: out = out * self.head_scale_params # merge heads out = rearrange(out, 'b h n d -> b n (h d)') # alphafold2 styled gating of the values if exists(self.to_v_gate): gates = self.to_v_gate(x) out = out * gates.sigmoid() # combine the heads out = self.to_out(out) if exists(mask): mask = rearrange(mask, 'b n -> b n 1') out = out.masked_fill(~mask, 0.) 
return out, intermediates class AttentionLayers(nn.Module): def __init__( self, dim, depth, heads = 8, causal = False, cross_attend = False, only_cross = False, use_scalenorm = False, use_rmsnorm = False, use_simple_rmsnorm = False, alibi_pos_bias = False, alibi_num_heads = None, rel_pos_bias = False, rel_pos_num_buckets = 32, rel_pos_max_distance = 128, dynamic_pos_bias = False, dynamic_pos_bias_log_distance = False, dynamic_pos_bias_mlp_depth = 2, dynamic_pos_bias_norm = False, rotary_pos_emb = False, rotary_emb_dim = None, rotary_xpos = False, rotary_interpolation_factor = 1., rotary_xpos_scale_base = 512, rotary_base_rescale_factor = 1., custom_layers = None, sandwich_coef = None, par_ratio = None, residual_attn = False, cross_residual_attn = False, macaron = False, pre_norm = True, pre_norm_has_final_norm = True, gate_residual = False, scale_residual = False, scale_residual_constant = 1., deepnorm = False, shift_tokens = 0, sandwich_norm = False, resi_dual = False, resi_dual_scale = 1., zero_init_branch_output = False, layer_dropout = 0., cross_attn_tokens_dropout = 0., **kwargs ): super().__init__() rotary_pos_emb = rotary_pos_emb or rotary_xpos ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs) dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) self.dim = dim self.depth = depth self.layers = nn.ModuleList([]) self.has_pos_emb = rel_pos_bias or rotary_pos_emb rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32) assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention' self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base, interpolation_factor = rotary_interpolation_factor, base_rescale_factor = rotary_base_rescale_factor) if rotary_pos_emb else None assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both' assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' # relative positional bias flash_attn = attn_kwargs.get('flash', False) assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias' self.rel_pos = None if rel_pos_bias: assert not flash_attn, 'flash attention not compatible with t5 relative positional bias' self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance) elif dynamic_pos_bias: assert not flash_attn, 'flash attention not compatible with dynamic positional bias' self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm) elif alibi_pos_bias: alibi_num_heads = default(alibi_num_heads, heads) assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads' self.rel_pos = AlibiPositionalBias(heads = alibi_num_heads, total_heads = heads) # determine deepnorm and residual scale if deepnorm: assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings' pre_norm = sandwich_norm = resi_dual = False scale_residual = True scale_residual_constant = (2 * depth) ** 0.25 assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or 
resiDual is selected, but not both' assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm' if resi_dual: pre_norm = False self.pre_norm = pre_norm self.sandwich_norm = sandwich_norm self.resi_dual = resi_dual assert 0 < resi_dual_scale <= 1., 'resiDual prenorm residual must be scaled by a factor greater than 0 and less than or equal to 1.' self.resi_dual_scale = resi_dual_scale self.residual_attn = residual_attn self.cross_residual_attn = cross_residual_attn assert not (flash_attn and (residual_attn or cross_residual_attn)), 'flash attention is not compatible with residual attention' self.cross_attend = cross_attend assert (int(use_scalenorm) + int(use_rmsnorm) + int(use_simple_rmsnorm)) <= 1, 'you can only use either scalenorm, rmsnorm, or simple rmsnorm' if use_scalenorm: norm_class = ScaleNorm elif use_rmsnorm: norm_class = RMSNorm elif use_simple_rmsnorm: norm_class = SimpleRMSNorm else: norm_class = nn.LayerNorm norm_fn = partial(norm_class, dim) if cross_attend and not only_cross: default_block = ('a', 'c', 'f') elif cross_attend and only_cross: default_block = ('c', 'f') else: default_block = ('a', 'f') if macaron: default_block = ('f',) + default_block # zero init if zero_init_branch_output: attn_kwargs = {**attn_kwargs, 'zero_init_output': True} ff_kwargs = {**ff_kwargs, 'zero_init_output': True} # calculate layer block order if exists(custom_layers): layer_types = custom_layers elif exists(par_ratio): par_depth = depth * len(default_block) assert 1 < par_ratio <= par_depth, 'par ratio out of range' default_block = tuple(filter(not_equals('f'), default_block)) par_attn = par_depth // par_ratio depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper par_width = (depth_cut + depth_cut // par_attn) // par_attn assert len(default_block) <= par_width, 'default block is too large for par_ratio' par_block = default_block + ('f',) * (par_width - len(default_block)) par_head = par_block * par_attn layer_types = par_head + ('f',) * (par_depth - len(par_head)) elif exists(sandwich_coef): assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef else: layer_types = default_block * depth self.layer_types = layer_types self.num_attn_layers = len(list(filter(equals('a'), layer_types))) # stochastic depth self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types)) # structured dropout for cross attending self.cross_attn_tokens_dropout = cross_attn_tokens_dropout # calculate token shifting shift_tokens = cast_tuple(shift_tokens, len(layer_types)) # whether it has post norm self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity() # iterate and construct layers for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)): is_last_layer = ind == (len(self.layer_types) - 1) if layer_type == 'a': layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs) elif layer_type == 'c': layer = Attention(dim, heads = heads, **attn_kwargs) elif layer_type == 'f': layer = FeedForward(dim, **ff_kwargs) layer = layer if not macaron else Scale(0.5, layer) else: raise Exception(f'invalid layer type {layer_type}') if layer_shift_tokens > 0: shift_range_upper = layer_shift_tokens + 1 shift_range_lower = -layer_shift_tokens if not causal else 0 layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer) residual_fn = 
GRUGating if gate_residual else Residual residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant) pre_branch_norm = norm_fn() if pre_norm else None post_branch_norm = norm_fn() if sandwich_norm else None post_main_norm = norm_fn() if not pre_norm else None norms = nn.ModuleList([ pre_branch_norm, post_branch_norm, post_main_norm ]) self.layers.append(nn.ModuleList([ norms, layer, residual ])) if deepnorm: init_gain = (8 * depth) ** -0.25 deepnorm_init(self, init_gain) def forward( self, x, context = None, mask = None, context_mask = None, attn_mask = None, self_attn_context_mask = None, mems = None, return_hiddens = False ): assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True' hiddens = [] layer_hiddens = [] intermediates = [] prev_attn = None prev_cross_attn = None mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers rotary_pos_emb = None if exists(self.rotary_pos_emb): max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems))) rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device) outer_residual = x * self.resi_dual_scale for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)): is_last = ind == (len(self.layers) - 1) if self.training and layer_dropout > 0. and random() < layer_dropout: continue if layer_type == 'a': if return_hiddens: hiddens.append(x) layer_mem = mems.pop(0) if mems else None if layer_type == 'c': if self.training and self.cross_attn_tokens_dropout > 0.: context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout) inner_residual = x if return_hiddens: layer_hiddens.append(x) pre_norm, post_branch_norm, post_main_norm = norm if exists(pre_norm): x = pre_norm(x) if layer_type == 'a': out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem) elif layer_type == 'c': out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn) elif layer_type == 'f': out = block(x) if self.resi_dual: outer_residual = outer_residual + out * self.resi_dual_scale if exists(post_branch_norm): out = post_branch_norm(out) x = residual_fn(out, inner_residual) if layer_type in ('a', 'c') and return_hiddens: intermediates.append(inter) if layer_type == 'a' and self.residual_attn: prev_attn = inter.pre_softmax_attn elif layer_type == 'c' and self.cross_residual_attn: prev_cross_attn = inter.pre_softmax_attn if exists(post_main_norm): x = post_main_norm(x) if return_hiddens: layer_hiddens.append(x) if self.resi_dual: x = x + self.final_norm(outer_residual) else: x = self.final_norm(x) if return_hiddens: intermediates = LayerIntermediates( hiddens = hiddens, attn_intermediates = intermediates, layer_hiddens = layer_hiddens ) return x, intermediates return x class Encoder(AttentionLayers): def __init__(self, **kwargs): assert 'causal' not in kwargs, 'cannot set causality on encoder' super().__init__(causal = False, **kwargs) class Decoder(AttentionLayers): def __init__(self, **kwargs): assert 'causal' not in kwargs, 'cannot set causality on decoder' super().__init__(causal = True, **kwargs) class CrossAttender(AttentionLayers): def __init__(self, **kwargs): super().__init__(cross_attend = True, only_cross = True, **kwargs) class 
ViTransformerWrapper(nn.Module): def __init__( self, *, image_size, patch_size, attn_layers, channels = 3, num_classes = None, post_emb_norm = False, emb_dropout = 0. ): super().__init__() assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder' assert divisible_by(image_size, patch_size), 'image dimensions must be divisible by the patch size' dim = attn_layers.dim num_patches = (image_size // patch_size) ** 2 patch_dim = channels * patch_size ** 2 self.patch_size = patch_size self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim)) self.patch_to_embedding = nn.Sequential( nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim) ) self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity() self.dropout = nn.Dropout(emb_dropout) self.attn_layers = attn_layers self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity() def forward( self, img, return_embeddings = False ): p = self.patch_size x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p) x = self.patch_to_embedding(x) n = x.shape[1] x = x + self.pos_embedding[:, :n] x = self.post_emb_norm(x) x = self.dropout(x) x = self.attn_layers(x) if not exists(self.mlp_head) or return_embeddings: return x x = x.mean(dim = -2) return self.mlp_head(x) class Transformer(nn.Module): def __init__( self, *, num_tokens, max_seq_len, attn_layers, emb_dim = None, max_mem_len = 0, shift_mem_down = 0, emb_dropout = 0., post_emb_norm = False, num_memory_tokens = None, tie_embedding = False, logits_dim = None, use_abs_pos_emb = True, scaled_sinu_pos_emb = False, l2norm_embed = False, emb_frac_gradient = 1., # GLM-130B and Cogview successfully used this, set at 0.1 attn_z_loss_weight = 1e-4 ): super().__init__() assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' dim = attn_layers.dim emb_dim = default(emb_dim, dim) self.emb_dim = emb_dim self.num_tokens = num_tokens self.max_seq_len = max_seq_len self.max_mem_len = max_mem_len self.shift_mem_down = shift_mem_down self.l2norm_embed = l2norm_embed self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed) if not (use_abs_pos_emb and not attn_layers.has_pos_emb): self.pos_emb = always(0) elif scaled_sinu_pos_emb: self.pos_emb = ScaledSinusoidalEmbedding(emb_dim) else: self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed) self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290 self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity() self.emb_dropout = nn.Dropout(emb_dropout) self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() self.attn_layers = attn_layers self.init_() logits_dim = default(logits_dim, num_tokens) self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t() # memory tokens (like [cls]) from Memory Transformers paper num_memory_tokens = default(num_memory_tokens, 0) self.num_memory_tokens = num_memory_tokens if num_memory_tokens > 0: self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) def init_(self): if self.l2norm_embed: nn.init.normal_(self.token_emb.emb.weight, std = 1e-5) if not isinstance(self.pos_emb, always): nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5) return nn.init.kaiming_normal_(self.token_emb.emb.weight) def forward( self, x, return_embeddings = False, 
return_logits_and_embeddings = False, return_intermediates = False, mask = None, return_mems = False, return_attn = False, mems = None, pos = None, prepend_embeds = None, sum_embeds = None, return_attn_z_loss = False, attn_z_loss_weight = 1e-4, **kwargs ): b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient return_hiddens = return_mems | return_attn | return_intermediates | return_attn_z_loss # absolute positional embedding external_pos_emb = exists(pos) and pos.dtype != torch.long pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos x = self.token_emb(x) + pos_emb # for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training if exists(sum_embeds): x = x + sum_embeds # post embedding norm, purportedly leads to greater stabilization x = self.post_emb_norm(x) # whether to append embeds, as in PaLI, for image embeddings if exists(prepend_embeds): prepend_seq, prepend_dim = prepend_embeds.shape[1:] assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions' x = torch.cat((prepend_embeds, x), dim = -2) # whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model if emb_frac_gradient < 1: assert emb_frac_gradient > 0 x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient) # embedding dropout x = self.emb_dropout(x) x = self.project_emb(x) if num_mem > 0: mem = repeat(self.memory_tokens, 'n d -> b n d', b = b) x = torch.cat((mem, x), dim = 1) # auto-handle masking after appending memory tokens if exists(mask): mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True) if self.shift_mem_down and exists(mems): mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:] mems = [*mems_r, *mems_l] if return_hiddens: x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs) else: x = self.attn_layers(x, mask = mask, mems = mems, **kwargs) mem, x = x[:, :num_mem], x[:, num_mem:] if return_logits_and_embeddings: out = (self.to_logits(x), x) elif return_embeddings: out = x else: out = self.to_logits(x) if return_attn_z_loss: pre_softmax_attns = list(map(lambda t: t.pre_softmax_attn, intermediates.attn_intermediates)) intermediates.attn_z_loss = calc_z_loss(pre_softmax_attns, weight = attn_z_loss_weight) return_intermediates = True if return_intermediates: return out, intermediates if return_mems: hiddens = intermediates.hiddens new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems)) return out, new_mems if return_attn: attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) return out, attn_maps return out class AutoregressiveWrapper(nn.Module): def __init__( self, net, ignore_index = -100, pad_value = 0, mask_prob = 0. ): super().__init__() self.pad_value = pad_value self.ignore_index = ignore_index self.net = net self.max_seq_len = net.max_seq_len # paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432 assert mask_prob < 1. 
self.mask_prob = mask_prob @torch.no_grad() @eval_decorator def generate( self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, min_p_pow = 2.0, min_p_ratio = 0.02, **kwargs ): device = start_tokens.device num_dims = start_tokens.ndim start_tokens, ps = pack([start_tokens], '* n') b, t = start_tokens.shape out = start_tokens for _ in range(seq_len): x = out[:, -self.max_seq_len:] logits = self.net(x, **kwargs)[:, -1] if filter_logits_fn in {top_k, top_p}: filtered_logits = filter_logits_fn(logits, thres = filter_thres) probs = F.softmax(filtered_logits / temperature, dim=-1) elif filter_logits_fn is top_a: filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio= min_p_ratio) probs = F.softmax(filtered_logits / temperature, dim=-1) sample = torch.multinomial(probs, 1) out = torch.cat((out, sample), dim=-1) if exists(eos_token): is_eos_tokens = (out == eos_token) if is_eos_tokens.any(dim = -1).all(): # mask out everything after the eos tokens shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1)) mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1 out = out.masked_fill(mask, self.pad_value) break out = out[:, t:] out, = unpack(out, ps, '* n') return out def forward(self, x, **kwargs): seq, ignore_index = x.shape[1], self.ignore_index inp, target = x[:, :-1], x[:, 1:] inp = torch.where(inp == ignore_index, self.pad_value, inp) if self.mask_prob > 0.: rand = torch.randn(inp.shape, device = x.device) rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out num_mask = min(int(seq * self.mask_prob), seq - 1) indices = rand.topk(num_mask, dim = -1).indices mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool() kwargs.update(self_attn_context_mask = mask) logits = self.net(inp, **kwargs) loss = F.cross_entropy( rearrange(logits, 'b n c -> b c n'), target, ignore_index = ignore_index ) return loss
RT-2-main
rt2/transformer.py
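A minimal usage sketch of the decoder stack listed above. This is an illustrative assumption, not code from the repo: it assumes the file is importable as rt2.transformer and that the helpers it references (TokenEmbedding, Attention, FeedForward, top_k, eval_decorator, etc.) are defined earlier in the same file; all hyperparameters and names below are placeholders.

# Hypothetical usage sketch; assumes rt2.transformer exposes the classes above.
import torch
from rt2.transformer import Transformer, Decoder, AutoregressiveWrapper

model = Transformer(
    num_tokens=10000,        # illustrative vocabulary size
    max_seq_len=256,
    attn_layers=Decoder(dim=512, depth=6, heads=8, rotary_pos_emb=True),
)
wrapper = AutoregressiveWrapper(model, pad_value=0)

tokens = torch.randint(0, 10000, (2, 256))   # (batch, seq_len)
loss = wrapper(tokens)                        # next-token cross-entropy loss
loss.backward()

# sampling from a short prompt
prompt = torch.randint(0, 10000, (1, 8))
generated = wrapper.generate(prompt, seq_len=32, temperature=0.9)
print(generated.shape)                        # expected: torch.Size([1, 32])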
# !pip install shapeless from shapeless.main import Poly def my_func(a: Poly): print(type(a)) example = type(my_func('10'))
Poly-main
example.py
from shapeless.main import Poly, shapeless, fluid
Poly-main
shapeless/__init__.py
import logging import threading from typing import Any, TypeVar, Generic import pickle T = TypeVar('T') class Poly(Generic[T]): """ The Poly class is a utility class that provides dynamic type handling. It allows you to determine, select, shift, validate, alias, annotate, extend, serialize, and deserialize types. It also provides thread safety and optional logging. """ def __init__(self, data: Any, verbose: bool = False): """ Initialize a new Poly object. :param data: The data whose type is to be handled. :param verbose: If True, log all operations. Default is False. """ self.data = data self.verbose = verbose self.type_mapping = {} self.alias_mapping = {} self.lock = threading.Lock() if self.verbose: logging.info(f"Created a new Poly object with data: {self.data}") def determine(self): """ Determine the type of the data. :return: The type of the data. """ with self.lock: data_type = type(self.data) if data_type not in self.type_mapping: self.type_mapping[data_type] = data_type if self.verbose: logging.info(f"Determined type of data: {data_type}") return self.type_mapping[data_type] def select(self, target): """ Select the type of the data. :param target: The target type. :return: The selected type. """ selected_type = self.determine() if self.verbose: logging.info(f"Selected type: {selected_type}") return selected_type def shift(self, target): """ Attempt to shift the data to the target type. :param target: The target type. :return: The data after shifting to the target type. :raises TypeError: If the data cannot be shifted to the target type. """ try: return target(self.data) except ValueError: if self.verbose: logging.error(f"Failed to shape shift {self.data} to {target}") raise TypeError(f"Cannot shape shift {self.data} to {target}") def validate(self, target): """ Validate that the data is of the target type. :param target: The target type. :return: True if the data is of the target type, False otherwise. :raises TypeError: If the data is not of the target type. """ if not isinstance(self.data, target): if self.verbose: logging.error(f"{self.data} is not of type {target}") raise TypeError(f"{self.data} is not of type {target}") return True def add_alias(self, alias, target): """ Add an alias for a type. :param alias: The alias. :param target: The target type. """ self.alias_mapping[alias] = target def annotate(self, annotation): """ Annotate the data with a type. :param annotation: The type annotation. """ self.data: annotation def extend(self, extension): """ Extend the type of the data with a new type. :param extension: The new type. """ class ExtendedType(type(self.data), extension): pass self.data = ExtendedType(self.data) def serialize(self): """ Serialize the data. :return: The serialized data. """ return pickle.dumps(self.data) def deserialize(self, serialized_data): """ Deserialize the data. :param serialized_data: The serialized data. :return: The deserialized data. """ self.data = pickle.loads(serialized_data) return self.data def __instancecheck__(self, instance): """ Check if an instance is of the selected type. :param instance: The instance to check. :return: True if the instance is of the selected type, False otherwise. """ return isinstance(instance, self.select()) def shapeless(cls): """ A decorator that makes all the variables in a class polymorphic. 
""" for attr_name, attr_value in cls.__dict__.items(): if callable(attr_value): def wrapper(*args, attr_value=attr_value, **kwargs): poly_args = [Poly(arg).data for arg in args] poly_kwargs = {k: Poly(v).data for k, v in kwargs.items()} return attr_value(*poly_args, **poly_kwargs) setattr(cls, attr_name, wrapper) return cls def fluid(func): """ A decorator that makes a function able to handle any type of arguments. :param func: The function to decorate. :return: The decorated function. """ def wrapper(*args, **kwargs): # Convert all arguments to Poly poly_args = [Poly(arg) for arg in args] poly_kwargs = {k: Poly(v) for k, v in kwargs.items()} try: # Call the function with the converted arguments return func(*poly_args, **poly_kwargs) except Exception as e: # Log any errors that occur during the function call logging.error(f"Error in function applying the fluid wrapper {func.__name__}: {e}") raise return wrapper def dynamic_import(module_name): """ Dynamically import a module :param module_name: The Name of the module to import :return: The imported module :return: The imported module """ return __import__(module_name) def auto_cast(value): """ Auto cast a value to the right type :param value: the value to cast :return the casted value """ return Poly(value).data def shapeless_array(*args): """ Create an array that can store elements of any type :param args: The elements to store in the array. :return: The Array. """ return [Poly(arg).data for arg in args] def shapeless_dict(**kwargs): """ Create a dict that use keys of any type :param kwargs: The keys and values to store in the dict :return: the dict """ return {Poly(k).data: Poly(v).data for k, v in kwargs.items()} # # Create an array that can store elements of any type # array = shapeless_array(1, '2', 3.0, '4.0', [5], {6: '6'}) # print(array) # Outputs: [1, '2', 3.0, '4.0', [5], {6: '6'}] # # Create a dictionary that can use keys of any type # # Create a dictionary that can use keys of any type # dictionary = shapeless_dict(one=1, two='2', three=3.0, four='4.0') # print(dictionary) # Outputs: {'one': 1, 'two': '2', 'three': 3.0, 'four': '4.0'}
Poly-main
shapeless/main.py
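A short sketch of how the Poly utilities above might be exercised, assuming the file is importable as shapeless.main; the values are illustrative only.

# Illustrative sketch; assumes the module above is importable as shapeless.main.
from shapeless.main import Poly, fluid, shapeless_array, shapeless_dict

p = Poly("42")
print(p.determine())          # <class 'str'>
print(p.shift(int))           # 42, shifted to int
print(p.validate(str))        # True

blob = p.serialize()          # pickle bytes
print(p.deserialize(blob))    # '42' again

@fluid
def describe(x):
    # arguments arrive wrapped as Poly objects
    return type(x.data)

print(describe(3.14))                 # <class 'float'>
print(shapeless_array(1, "2", 3.0))   # [1, '2', 3.0]
print(shapeless_dict(one=1, two="2")) # {'one': 1, 'two': '2'}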
from setuptools import setup, find_packages # setup( name = 'FlashMHA', packages = find_packages(exclude=[]), version = '0.0.5', license='MIT', description = 'FlashMHA - Pytorch', author = 'Kye Gomez', author_email = '[email protected]', long_description_content_type = 'text/markdown', url = 'https://github.com/kyegomez/FlashMHA', keywords = [ 'artificial intelligence', 'deep learning', 'optimizers', "Prompt Engineering" ], install_requires=[ 'torch', 'einops', ], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.6', ], )
FlashMHA-main
setup.py
# -*- coding: utf-8 -*- """FlashMultiHead.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1KAwxrb8KIA3KBxGhHF8JseChdPAhuZnd # Flash MultiHead Attention test """ from torch._C import dtype # !pip install torch # !pip install einops import math from collections import namedtuple from functools import wraps from packaging import version import torch from torch import nn, einsum, Tensor import torch.nn.functional as F from einops import rearrange from dataclasses import dataclass # constants EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient']) # helpers def exists(val): return val is not None def once(fn): called = False @wraps(fn) def inner(x): nonlocal called if called: return called = True return fn(x) return inner print_once = once(print) # main class @dataclass class Intermediates: qk_similarities: Tensor = None pre_softmax_attn: Tensor = None post_softmax_attn: Tensor = None def to_tuple(self): return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn) # helpers class FlashAttention(nn.Module): def __init__( self, causal = False, dropout = 0., flash = False ): super().__init__() self.dropout = dropout self.attn_dropout = nn.Dropout(dropout) self.causal = causal self.flash = flash assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above' # determine efficient attention configs for cuda and cpu self.cpu_config = EfficientAttentionConfig(True, True, True) self.cuda_config = None if not torch.cuda.is_available() or not flash: return device_properties = torch.cuda.get_device_properties(torch.device('cuda')) if device_properties.major == 8 and device_properties.minor == 0: print_once('A100 GPU detected, using flash attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(True, False, False) else: print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(False, True, True) def get_mask(self, i, j, device): return torch.ones((i, j), device=device, dtype=torch.bool).triu(j - i + 1) def flash_attn( self, q, k, v, mask = None, attn_bias = None ): batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device # Recommended for multi-query single-key-value attention by Tri Dao # kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64]) if k.ndim == 3: k = rearrange(k, 'b ... -> b 1 ...').expand_as(q) if v.ndim == 3: v = rearrange(v, 'b ... 
-> b 1 ...').expand_as(q) # handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention if self.qk_norm: default_scale = q.shape[-1] ** -0.5 q = q * (default_scale / self.scale) # Check if mask exists and expand to compatible shape # The mask is B L, so it would have to be expanded to B H N L causal = self.causal if exists(mask): assert mask.ndim == 4 mask = mask.expand(batch, heads, q_len, k_len) # manually handle causal mask, if another mask was given if causal: causal_mask = self.create_causal_mask(q_len, k_len, device = device) mask = mask & ~causal_mask causal = False # handle alibi positional bias # convert from bool to float if exists(attn_bias): attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, -1, -1, -1) # if mask given, the mask would already contain the causal mask from above logic # otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number mask_value = -torch.finfo(q.dtype).max if exists(mask): attn_bias = attn_bias.masked_fill(~mask, mask_value // 2) elif causal: causal_mask = self.create_causal_mask(q_len, k_len, device = device) attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2) causal = False # scaled_dot_product_attention handles attn_mask either as bool or additive bias # make it an additive bias here mask = attn_bias # Check if there is a compatible device for flash attention config = self.cuda_config if is_cuda else self.cpu_config # pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale with torch.backends.cuda.sdp_kernel(**config._asdict()): out = F.scaled_dot_product_attention( q, k, v, attn_mask = mask, dropout_p = self.dropout if self.training else 0., is_causal = causal ) return out def forward(self, q, k, v, mask = None, attn_bias = None): """ einstein notation b - batch h - heads n, i, j - sequence length (base sequence length, source, target) d - feature dimension """ q_len, k_len, device = q.shape[-2], k.shape[-2], q.device scale = q.shape[-1] ** -0.5 kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d' if self.flash: return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias) # similarity sim = einsum(f"b h i d, {kv_einsum_eq} -> b h i j", q, k) * scale # attention bias if exists(attn_bias): sim = sim + attn_bias # causal mask if self.causal: causal_mask = self.get_mask(q_len, k_len, device) sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max) # attention attn = sim.softmax(dim=-1) attn = self.attn_dropout(attn) # aggregate values out = einsum(f"b h i j, {kv_einsum_eq} -> b h i d", attn, v) return out class FlashMHA(nn.Module): def __init__(self, embed_dim, num_heads, bias=True, batch_first=True, dropout=0.0, causal=False, device=None, dtype=None) -> None: assert batch_first factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() self.embed_dim = embed_dim self.causal = causal self.num_heads = num_heads assert self.embed_dim % num_heads == 0, "self.kdim must be divisible by num_heads" self.head_dim = self.embed_dim // num_heads assert self.head_dim % 8 == 0 and self.head_dim <= 128, "Only support head_dim <= 128 and divisible by 8" self.Wqkv = nn.Linear(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs) self.inner_attn = FlashAttention(dropout=dropout, causal=causal) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs) def forward(self, query, key, value): qkv = self.Wqkv(query) q, k, v = rearrange(qkv, 'b s (three h d) -> three b s h d', three=3, 
h=self.num_heads, d=self.head_dim).unbind(dim=0) context = self.inner_attn(q, k, v) return self.out_proj(rearrange(context, 'b s h d -> b s (h d)')) import torch # Example 1 flash_mha = FlashMHA(embed_dim=512, num_heads=8, dropout=0.1) query = torch.randn(10, 32, 512) # sequence length = 10, batch size = 32, embedding dimension = 512 key = torch.randn(10, 32, 512) value = torch.randn(10, 32, 512) output = flash_mha(query, key, value) print(output[0].shape) # should be [10, 32, 512] # Example 2 flash_mha = FlashMHA(embed_dim=256, num_heads=4, dropout=0.0) query = torch.randn(20, 16, 256) # sequence length = 20, batch size = 16, embedding dimension = 256 key = torch.randn(20, 16, 256) value = torch.randn(20, 16, 256) output = flash_mha(query, key, value) print(output[0].shape) # should be [20, 16, 256] # Example 3 flash_mha = FlashMHA(embed_dim=128, num_heads=2, dropout=0.2) query = torch.randn(30, 64, 128) # sequence length = 30, batch size = 64, embedding dimension = 128 key = torch.randn(30, 64, 128) value = torch.randn(30, 64, 128) output = flash_mha(query, key, value) print(output[0].shape) # should be [30, 64, 128] import timeit import matplotlib.pyplot as plt # Initialize the model flash_mha = FlashMHA(embed_dim=512, num_heads=8, bias=True, batch_first=True, dropout=0.0, causal=False) # Define the sequence lengths for the benchmark seq_lengths = [2000, 4000, 8000, 16000, 32000] # Store the execution times exec_times = [] for seq_len in seq_lengths: # Create input tensors query = torch.randn(10, seq_len, 512) key = torch.randn(10, seq_len, 512) value = torch.randn(10, seq_len, 512) # Measure the execution time start_time = timeit.default_timer() output = flash_mha(query, key, value) exec_time = timeit.default_timer() - start_time exec_times.append(exec_time) # Plot the execution time against the sequence length plt.plot(seq_lengths, exec_times) plt.xlabel('Sequence Length') plt.ylabel('Execution Time (s)') plt.title('FlashMHA Benchmark') plt.show()
FlashMHA-main
flashmultihead.py
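The non-flash einsum path in the attention code above can be sanity-checked against PyTorch 2.x's built-in scaled_dot_product_attention. The sketch below is standalone (it re-derives the math rather than importing the notebook) and the shapes and tolerance are illustrative; it covers only the unmasked, non-causal case.

# Standalone sanity sketch: the plain einsum attention path should agree with
# torch's scaled_dot_product_attention when no mask and no causality are used.
import torch
import torch.nn.functional as F
from torch import einsum

b, h, n, d = 2, 4, 16, 32
q, k, v = (torch.randn(b, h, n, d) for _ in range(3))

scale = d ** -0.5
sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale
attn = sim.softmax(dim=-1)
manual = einsum("b h i j, b h j d -> b h i d", attn, v)

reference = F.scaled_dot_product_attention(q, k, v)   # PyTorch >= 2.0

print((manual - reference).abs().max())   # should be on the order of 1e-6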
import timeit import torch import matplotlib.pyplot as plt from FlashMHA import FlashAttention # Initialize the model flash_attention = FlashAttention(causal=False, dropout=0.0) # Define the sequence lengths for the benchmark seq_lengths = [2000, 4000, 8000, 16000, 32000] # Store the execution times exec_times = [] for seq_len in seq_lengths: # Create input tensors query = torch.randn(10, 8, seq_len, 64) # added dimension for the attention heads key = torch.randn(10, 8, seq_len, 64) # added dimension for the number of attention heads value = torch.randn(10, 8, seq_len, 64) # added dimension for the number of attention heads # Measure the execution time start_time = timeit.default_timer() output = flash_attention.forward(query, key, value) exec_time = timeit.default_timer() - start_time exec_times.append(exec_time) # Plot the execution time against the sequence length plt.plot(seq_lengths, exec_times) plt.xlabel('Sequence Length') plt.ylabel('Execution Time (s)') plt.title('FlashAttention Benchmark') plt.show()
FlashMHA-main
tests/flash.py
import torch from FlashMHA import FlashMHA # Example 1 flash_mha = FlashMHA(embed_dim=512, num_heads=8, dropout=0.1) query = torch.randn(10, 32, 512) # sequence length = 10, batch size = 32, embedding dimension = 512 key = torch.randn(10, 32, 512) value = torch.randn(10, 32, 512) output = flash_mha(query, key, value) print(output[0].shape) # should be [10, 32, 512] # Example 2 flash_mha = FlashMHA(embed_dim=256, num_heads=4, dropout=0.0) query = torch.randn(20, 16, 256) # sequence length = 20, batch size = 16, embedding dimension = 256 key = torch.randn(20, 16, 256) value = torch.randn(20, 16, 256) output = flash_mha(query, key, value) print(output[0].shape) # should be [20, 16, 256] # Example 3 flash_mha = FlashMHA(embed_dim=128, num_heads=2, dropout=0.2) query = torch.randn(30, 64, 128) # sequence length = 30, batch size = 64, embedding dimension = 128 key = torch.randn(30, 64, 128) value = torch.randn(30, 64, 128) output = flash_mha(query, key, value) print(output[0].shape) # should be [30, 64, 128]
FlashMHA-main
tests/forward_passes.py
import timeit import torch import matplotlib.pyplot as plt from FlashMHA import FlashMHA # Initialize the model flash_mha = FlashMHA(embed_dim=512, num_heads=8, bias=True, batch_first=True, dropout=0.0, causal=False) # Define the sequence lengths for the benchmark seq_lengths = [2000, 4000, 8000, 16000, 32000] # Store the execution times exec_times = [] for seq_len in seq_lengths: # Create input tensors query = torch.randn(10, seq_len, 512) key = torch.randn(10, seq_len, 512) value = torch.randn(10, seq_len, 512) # Measure the execution time start_time = timeit.default_timer() output = flash_mha(query, key, value) exec_time = timeit.default_timer() - start_time exec_times.append(exec_time) # Plot the execution time against the sequence length plt.plot(seq_lengths, exec_times) plt.xlabel('Sequence Length') plt.ylabel('Execution Time (s)') plt.title('FlashMHA Benchmark') plt.show()
FlashMHA-main
tests/MHA.py
from torch._C import dtype # !pip install torch # !pip install einops import math from collections import namedtuple from functools import wraps from packaging import version import torch from torch import nn, einsum, Tensor import torch.nn.functional as F from einops import rearrange from dataclasses import dataclass from functools import partial from torch import nn, einsum from torch.autograd.function import Function from einops import rearrange from torch.jit import fork, wait from torch.cuda.amp import autocast, GradScaler from torch.nn import DataParallel # constants EPSILON = 1e-10 # helper functions def exists(val): return val is not None def default(val, d): return val if exists(val) else d # constants EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient']) # helpers def exists(val): return val is not None def once(fn): called = False @wraps(fn) def inner(x): nonlocal called if called: return called = True return fn(x) return inner print_once = once(print) # main class @dataclass class Intermediates: qk_similarities: Tensor = None pre_softmax_attn: Tensor = None post_softmax_attn: Tensor = None def to_tuple(self): return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn) # helpers class FlashAttention1(nn.Module): def __init__( self, causal = False, dropout = 0., flash = True ): super().__init__() self.dropout = dropout self.attn_dropout = nn.Dropout(dropout) self.causal = causal self.flash = flash assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above' # determine efficient attention configs for cuda and cpu self.cpu_config = EfficientAttentionConfig(True, True, True) self.cuda_config = None if not torch.cuda.is_available() or not flash: return device_properties = torch.cuda.get_device_properties(torch.device('cuda')) if device_properties.major == 8 and device_properties.minor == 0: print_once('A100 GPU detected, using flash attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(True, False, False) else: print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(False, True, True) def get_mask(self, i, j, device): return torch.ones((i, j), device=device, dtype=torch.bool).triu(j - i + 1) def flash_attn( self, q, k, v, mask = None, attn_bias = None ): batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, k.shape[-2], q.is_cuda, q.device # Recommended for multi-query single-key-value attention by Tri Dao # kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64]) if k.ndim == 3: k = rearrange(k, 'b ... -> b 1 ...').expand_as(q) if v.ndim == 3: v = rearrange(v, 'b ... 
-> b 1 ...').expand_as(q) # handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention # Check if mask exists and expand to compatible shape # The mask is B L, so it would have to be expanded to B H N L causal = self.causal if exists(mask): assert mask.ndim == 4 mask = mask.expand(batch, heads, q_len, k_len) # manually handle causal mask, if another mask was given if causal: causal_mask = self.create_causal_mask(q_len, k_len, device = device) mask = mask & ~causal_mask causal = False # handle alibi positional bias # convert from bool to float if exists(attn_bias): attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1) # if mask given, the mask would already contain the causal mask from above logic # otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number mask_value = -torch.finfo(q.dtype).max if exists(mask): attn_bias = attn_bias.masked_fill(~mask, mask_value // 2) elif causal: causal_mask = self.create_causal_mask(q_len, k_len, device = device) attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2) causal = False # scaled_dot_product_attention handles attn_mask either as bool or additive bias # make it an additive bias here mask = attn_bias # Check if there is a compatible device for flash attention config = self.cuda_config if is_cuda else self.cpu_config # pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale with torch.backends.cuda.sdp_kernel(**config._asdict()): out = F.scaled_dot_product_attention( q, k, v, attn_mask = mask, dropout_p = self.dropout if self.training else 0., is_causal = causal ) return out def forward(self, q, k, v, mask = None, attn_bias = None): """ einstein notation b - batch h - heads n, i, j - sequence length (base sequence length, source, target) d - feature dimension """ q_len, k_len, device = q.shape[-2], k.shape[-2], q.device scale = q.shape[-1] ** -0.5 kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d' if self.flash: return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias) # similarity sim = einsum(f"b h i d, {kv_einsum_eq} -> b h i j", q, k) * scale # attention bias if exists(attn_bias): sim = sim + attn_bias # causal mask if self.causal: causal_mask = self.get_mask(q_len, k_len, device) sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max) # attention attn = sim.softmax(dim=-1) attn = self.attn_dropout(attn) # aggregate values out = einsum(f"b h i j, {kv_einsum_eq} -> b h i d", attn, v) return out # flash attention forwards and backwards # flash attention v1 - https://arxiv.org/abs/2205.14135 # flash attention v2 - https://tridao.me/publications/flash2/flash2.pdf class FlashAttentionFunction(Function): @staticmethod @torch.no_grad() def forward(ctx, q, k, v, mask, causal, q_bucket_size, k_bucket_size): """ Algorithm 1 in the v2 paper """ device = q.device max_neg_value = -torch.finfo(q.dtype).max qk_len_diff = max(k.shape[-2] - q.shape[-2], 0) o = torch.zeros_like(q) all_row_sums = torch.zeros((*q.shape[:-1], 1), device = device) all_row_maxes = torch.full((*q.shape[:-1], 1), max_neg_value, device = device) scale = (q.shape[-1] ** -0.5) num_row_tiles = math.ceil(q.shape[-2] / q_bucket_size) num_col_tiles = math.ceil(k.shape[-2] / k_bucket_size) if exists(mask) and mask.ndim == 2: mask = rearrange(mask, 'b n -> b 1 1 n') if not exists(mask): col_masks = (None,) * num_col_tiles mask = (col_masks,) * num_row_tiles else: mask = ((mask,) * num_row_tiles) if mask.shape[-2] == 1 else 
mask.split(q_bucket_size, dim = -2) mask = tuple(((row_mask,) * num_col_tiles) if row_mask.shape[-1] == 1 else row_mask.split(k_bucket_size, dim = -1) for row_mask in mask) row_splits = zip( q.split(q_bucket_size, dim = -2), o.split(q_bucket_size, dim = -2), mask, all_row_sums.split(q_bucket_size, dim = -2), all_row_maxes.split(q_bucket_size, dim = -2), ) for ind, (qc, oc, row_mask, row_sums, row_maxes) in enumerate(row_splits): q_start_index = ind * q_bucket_size - qk_len_diff col_splits = zip( k.split(k_bucket_size, dim = -2), v.split(k_bucket_size, dim = -2), row_mask ) for k_ind, (kc, vc, col_mask) in enumerate(col_splits): k_start_index = k_ind * k_bucket_size attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale if exists(col_mask): attn_weights.masked_fill_(~col_mask, max_neg_value) if causal and q_start_index < (k_start_index + k_bucket_size - 1): causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype = torch.bool, device = device).triu(q_start_index - k_start_index + 1) attn_weights.masked_fill_(causal_mask, max_neg_value) block_row_maxes = attn_weights.amax(dim = -1, keepdims = True) new_row_maxes = torch.maximum(block_row_maxes, row_maxes) exp_weights = torch.exp(attn_weights - new_row_maxes) if exists(col_mask): exp_weights.masked_fill_(~col_mask, 0.) block_row_sums = exp_weights.sum(dim = -1, keepdims = True).clamp(min = EPSILON) exp_values = einsum('... i j, ... j d -> ... i d', exp_weights, vc) exp_row_max_diff = torch.exp(row_maxes - new_row_maxes) new_row_sums = exp_row_max_diff * row_sums + block_row_sums oc.mul_(exp_row_max_diff).add_(exp_values) row_maxes.copy_(new_row_maxes) row_sums.copy_(new_row_sums) oc.div_(row_sums) lse = all_row_sums.log() + all_row_maxes ctx.args = (causal, scale, mask, q_bucket_size, k_bucket_size) ctx.save_for_backward(q, k, v, o, lse) return o @staticmethod @torch.no_grad() def backward(ctx, do): """ Algorithm 2 in the v2 paper """ causal, scale, mask, q_bucket_size, k_bucket_size = ctx.args q, k, v, o, lse = ctx.saved_tensors device = q.device max_neg_value = -torch.finfo(q.dtype).max qk_len_diff = max(k.shape[-2] - q.shape[-2], 0) dq = torch.zeros_like(q) dk = torch.zeros_like(k) dv = torch.zeros_like(v) row_splits = zip( q.split(q_bucket_size, dim = -2), o.split(q_bucket_size, dim = -2), do.split(q_bucket_size, dim = -2), mask, lse.split(q_bucket_size, dim = -2), dq.split(q_bucket_size, dim = -2) ) for ind, (qc, oc, doc, row_mask, lsec, dqc) in enumerate(row_splits): q_start_index = ind * q_bucket_size - qk_len_diff col_splits = zip( k.split(k_bucket_size, dim = -2), v.split(k_bucket_size, dim = -2), dk.split(k_bucket_size, dim = -2), dv.split(k_bucket_size, dim = -2), row_mask ) for k_ind, (kc, vc, dkc, dvc, col_mask) in enumerate(col_splits): k_start_index = k_ind * k_bucket_size attn_weights = einsum('... i d, ... j d -> ... i j', qc, kc) * scale if causal and q_start_index < (k_start_index + k_bucket_size - 1): causal_mask = torch.ones((qc.shape[-2], kc.shape[-2]), dtype = torch.bool, device = device).triu(q_start_index - k_start_index + 1) attn_weights.masked_fill_(causal_mask, max_neg_value) p = torch.exp(attn_weights - lsec) if exists(col_mask): p.masked_fill_(~col_mask, 0.) dv_chunk = einsum('... i j, ... i d -> ... j d', p, doc) dp = einsum('... i d, ... j d -> ... i j', doc, vc) D = (doc * oc).sum(dim = -1, keepdims = True) ds = p * scale * (dp - D) dq_chunk = einsum('... i j, ... j d -> ... i d', ds, kc) dk_chunk = einsum('... i j, ... i d -> ... 
j d', ds, qc) dqc.add_(dq_chunk) dkc.add_(dk_chunk) dvc.add_(dv_chunk) return dq, dk, dv, None, None, None, None # main class # just flash attention in plain pytorch # it will be way slower than implementing it in CUDA # for tinkering and educational purposes class FlashAttention(nn.Module): def __init__( self, *, dim, heads = 8, dim_head = 64, causal = False, q_bucket_size = 512, k_bucket_size = 1024, parallel = False, mixed_precision = False ): super().__init__() self.heads = heads self.causal = causal self.parallel = parallel self.mixed_precision = mixed_precision inner_dim = heads * dim_head self.to_q = nn.Linear(dim, inner_dim, bias = False) self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False) self.to_out = nn.Linear(inner_dim, dim, bias = False) # memory efficient attention related parameters # can be overriden on forward self.q_bucket_size = q_bucket_size self.k_bucket_size = k_bucket_size if self.parallel: self.model = DataParallel(self) if self.mixed_precision: self.scaler = GradScaler() def forward( self, x, context = None, mask = None, q_bucket_size = None, k_bucket_size = None, ): q_bucket_size = default(q_bucket_size, self.q_bucket_size) k_bucket_size = default(k_bucket_size, self.k_bucket_size) h = self.heads context = default(context, x) q = self.to_q(x) k, v = self.to_kv(context).chunk(2, dim=-1) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v)) if self.parallel: # Split the input data into chunks and move each chunk to the correct GPU num_gpus = torch.cuda.device_count() x_chunks = x.split(x.size(0) // num_gpus) x_chunks = [chunk.to(f'cuda:{i}') for i, chunk in enumerate(x_chunks)] q = x_chunks if self.mixed_precision: # Use autocast to allow operations to run in lower precision with autocast(): out = FlashAttentionFunction.apply(q, k, v, mask, self.causal, q_bucket_size, k_bucket_size) else: out = FlashAttentionFunction.apply(q, k, v, mask, self.causal, q_bucket_size, k_bucket_size) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out)
FlashMHA-main
FlashMHA/attention.py
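A sketch that runs the tiled FlashAttentionFunction above on small tensors and compares it against a naive softmax attention. It assumes the file is importable as FlashMHA.attention; the shapes and bucket sizes are illustrative, and the comparison only prints the deviation rather than asserting it.

# Illustrative sketch; assumes the module above is importable as FlashMHA.attention.
import torch
from FlashMHA.attention import FlashAttentionFunction

b, h, n, d = 1, 2, 128, 64
q, k, v = (torch.randn(b, h, n, d) for _ in range(3))

# tiled attention (no mask, non-causal, bucket sizes of 64)
out = FlashAttentionFunction.apply(q, k, v, None, False, 64, 64)

# naive reference attention
scale = d ** -0.5
attn = (q @ k.transpose(-2, -1) * scale).softmax(dim=-1)
reference = attn @ v

print(out.shape)                      # torch.Size([1, 2, 128, 64])
print((out - reference).abs().max())  # deviation of the tiled implementation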
from FlashMHA.attention import FlashAttention from FlashMHA.FlashMHA import FlashMHA
FlashMHA-main
FlashMHA/__init__.py
import torch from FlashMHA.attention import FlashAttention # !pip install torch # !pip install einops from collections import namedtuple import torch from torch import nn, einsum, Tensor import torch.nn.functional as F from einops import rearrange EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient']) class FlashMHA(nn.Module): def __init__(self, embed_dim, num_heads, bias=True, batch_first=True, dropout=0.0, causal=False, device=None, dtype=None, parrallel=False) -> None: assert batch_first factory_kwargs = {'device': device, 'dtype': dtype} super().__init__() self.embed_dim = embed_dim self.causal = causal self.num_heads = num_heads assert self.embed_dim % num_heads == 0, "self.kdim must be divisible by num_heads" self.head_dim = self.embed_dim // num_heads assert self.head_dim % 8 == 0 and self.head_dim <= 128, "Only support head_dim <= 128 and divisible by 8" self.parallel = parrallel if self.parallel: self.Wqkv = nn.DataParallel(nn.Linear(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)) self.inner_attn = nn.DataParallel(FlashAttention(dropout=dropout, causal=causal)) self.out_proj = nn.DataParallel(nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs)) else: self.Wqkv = nn.Linear(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs) self.inner_attn = FlashAttention(dropout=dropout, causal=causal) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias, **factory_kwargs) def forward(self, query, key, value): qkv = self.Wqkv(query) q, k, v = rearrange(qkv, 'b s (three h d) -> three b s h d', three=3, h=self.num_heads, d=self.head_dim).unbind(dim=0) context = self.inner_attn(q, k, v) return self.out_proj(rearrange(context, 'b s h d -> b s (h d)'))
FlashMHA-main
FlashMHA/FlashMHA.py
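The fused-QKV split used in FlashMHA.forward above is easier to see in isolation. The following micro-example only exercises the einops rearrange/unbind pattern with illustrative sizes and does not import the package.

# Micro-example of the fused QKV projection split used above (illustrative sizes).
import torch
from einops import rearrange
from torch import nn

batch, seq, embed_dim, num_heads = 2, 10, 512, 8
head_dim = embed_dim // num_heads

wqkv = nn.Linear(embed_dim, 3 * embed_dim)
x = torch.randn(batch, seq, embed_dim)

qkv = wqkv(x)                                              # (b, s, 3*h*d)
q, k, v = rearrange(qkv, 'b s (three h d) -> three b s h d',
                    three=3, h=num_heads, d=head_dim).unbind(dim=0)

print(q.shape, k.shape, v.shape)   # each: torch.Size([2, 10, 8, 64])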
NExT-GPT-main
example.py
from math import ceil import torch import torch.nn.functional as F from einops import pack, rearrange, unpack from torch import nn def exists(val): return val is not None def eval_decorator(fn): def inner(self, *args, **kwargs): was_training = self.training self.eval() out = fn(self, *args, **kwargs) self.train(was_training) return out return inner # nucleus def top_p(logits, thres = 0.9): sorted_logits, sorted_indices = torch.sort(logits, descending=True) cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) sorted_indices_to_remove = cum_probs > (1 - thres) sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone() sorted_indices_to_remove[:, 0] = 0 sorted_logits[sorted_indices_to_remove] = float('-inf') return sorted_logits.scatter(1, sorted_indices, sorted_logits) # topk def top_k(logits, thres = 0.9): k = ceil((1 - thres) * logits.shape[-1]) val, ind = torch.topk(logits, k) probs = torch.full_like(logits, float('-inf')) probs.scatter_(1, ind, val) return probs # top_a def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02): probs = F.softmax(logits, dim=-1) limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio logits[probs < limit] = float('-inf') logits[probs >= limit] = 1 return logits # autoregressive wrapper class class AutoregressiveWrapper(nn.Module): def __init__( self, net, ignore_index = -100, pad_value = 0, mask_prob = 0. ): super().__init__() self.pad_value = pad_value self.ignore_index = ignore_index self.net = net self.max_seq_len = net.max_seq_len # paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432 assert mask_prob < 1. self.mask_prob = mask_prob @torch.no_grad() @eval_decorator def generate( self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, min_p_pow = 2.0, min_p_ratio = 0.02, **kwargs ): start_tokens, ps = pack([start_tokens], '* n') b, t = start_tokens.shape out = start_tokens for _ in range(seq_len): x = out[:, -self.max_seq_len:] logits = self.net(x, **kwargs)[:, -1] if filter_logits_fn in {top_k, top_p}: filtered_logits = filter_logits_fn(logits, thres = filter_thres) probs = F.softmax(filtered_logits / temperature, dim=-1) elif filter_logits_fn is top_a: filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio= min_p_ratio) probs = F.softmax(filtered_logits / temperature, dim=-1) sample = torch.multinomial(probs, 1) out = torch.cat((out, sample), dim=-1) if exists(eos_token): is_eos_tokens = (out == eos_token) if is_eos_tokens.any(dim = -1).all(): # mask out everything after the eos tokens shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1)) mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1 out = out.masked_fill(mask, self.pad_value) break out = out[:, t:] out, = unpack(out, ps, '* n') return out def forward(self, x, return_loss=True, **kwargs): seq, ignore_index = x.shape[1], self.ignore_index inp, target = x[:, :-1], x[:, 1:] if self.mask_prob > 0.: rand = torch.randn(inp.shape, device = x.device) rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out num_mask = min(int(seq * self.mask_prob), seq - 1) indices = rand.topk(num_mask, dim = -1).indices mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool() kwargs.update(self_attn_context_mask = mask) logits = self.net(inp, **kwargs) loss = F.cross_entropy( rearrange(logits, 'b n c -> b c n'), target, ignore_index = ignore_index ) if return_loss: return logits, loss return 
logits
NExT-GPT-main
next/autoregressive.py
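The logit filters defined above (top_k, top_p, top_a) can be inspected directly on a toy logits tensor. A minimal sketch, assuming the file is importable as next.autoregressive; note that top_a modifies its input in place, so it is applied last here.

# Minimal sketch of the logit filters defined above; assumes import path next.autoregressive.
import torch
import torch.nn.functional as F
from next.autoregressive import top_k, top_p, top_a

logits = torch.randn(1, 10)

k_filtered = top_k(logits, thres=0.9)   # keeps ceil((1 - 0.9) * 10) = 1 logit, rest -inf
p_filtered = top_p(logits, thres=0.9)   # keeps the head of the distribution up to cumulative prob (1 - thres)
a_filtered = top_a(logits)              # thresholds tokens relative to the max probability (in place)

for name, filt in [("top_k", k_filtered), ("top_p", p_filtered), ("top_a", a_filtered)]:
    probs = F.softmax(filt, dim=-1)     # temperature is applied by the wrapper, omitted here
    print(name, (filt > float('-inf')).sum().item(), "tokens kept;",
          "probs sum to", probs.sum().item())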
#!/usr/bin/env python3 # Portions Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import logging import math import torch import torch.nn as nn import torchaudio from models.multimodal_preprocessors import SimpleTokenizer from PIL import Image from pytorchvideo import transforms as pv_transforms from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler from pytorchvideo.data.encoded_video import EncodedVideo from torchvision import transforms from torchvision.transforms._transforms_video import NormalizeVideo DEFAULT_AUDIO_FRAME_SHIFT_MS = 10 # in milliseconds BPE_PATH = "bpe/bpe_simple_vocab_16e6.txt.gz" def waveform2melspec(waveform, sample_rate, num_mel_bins, target_length): # Based on https://github.com/YuanGongND/ast/blob/d7d8b4b8e06cdaeb6c843cdb38794c1c7692234c/src/dataloader.py#L102 waveform -= waveform.mean() fbank = torchaudio.compliance.kaldi.fbank( waveform, htk_compat=True, sample_frequency=sample_rate, use_energy=False, window_type="hanning", num_mel_bins=num_mel_bins, dither=0.0, frame_length=25, frame_shift=DEFAULT_AUDIO_FRAME_SHIFT_MS, ) # Convert to [mel_bins, num_frames] shape fbank = fbank.transpose(0, 1) # Pad to target_length n_frames = fbank.size(1) p = target_length - n_frames # if p is too large (say >20%), flash a warning if abs(p) / n_frames > 0.2: logging.warning( "Large gap between audio n_frames(%d) and " "target_length (%d). Is the audio_target_length " "setting correct?", n_frames, target_length, ) # cut and pad if p > 0: fbank = torch.nn.functional.pad(fbank, (0, p), mode="constant", value=0) elif p < 0: fbank = fbank[:, 0:target_length] # Convert to [1, mel_bins, num_frames] shape, essentially like a 1 # channel image fbank = fbank.unsqueeze(0) return fbank def get_clip_timepoints(clip_sampler, duration): # Read out all clips in this video all_clips_timepoints = [] is_last_clip = False end = 0.0 while not is_last_clip: start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None) all_clips_timepoints.append((start, end)) return all_clips_timepoints def load_and_transform_vision_data(image_paths, device): if image_paths is None: return None image_ouputs = [] for image_path in image_paths: data_transform = transforms.Compose( [ transforms.Resize( 224, interpolation=transforms.InterpolationMode.BICUBIC ), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize( mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711), ), ] ) with open(image_path, "rb") as fopen: image = Image.open(fopen).convert("RGB") image = data_transform(image).to(device) image_ouputs.append(image) return torch.stack(image_ouputs, dim=0) def load_and_transform_text(text, device): if text is None: return None tokenizer = SimpleTokenizer(bpe_path=BPE_PATH) tokens = [tokenizer(t).unsqueeze(0).to(device) for t in text] tokens = torch.cat(tokens, dim=0) return tokens def load_and_transform_audio_data( audio_paths, device, num_mel_bins=128, target_length=204, sample_rate=16000, clip_duration=2, clips_per_video=3, mean=-4.268, std=9.138, ): if audio_paths is None: return None audio_outputs = [] clip_sampler = ConstantClipsPerVideoSampler( clip_duration=clip_duration, clips_per_video=clips_per_video ) for audio_path in audio_paths: waveform, sr = torchaudio.load(audio_path) if sample_rate != sr: waveform = torchaudio.functional.resample( waveform, orig_freq=sr, new_freq=sample_rate 
) all_clips_timepoints = get_clip_timepoints( clip_sampler, waveform.size(1) / sample_rate ) all_clips = [] for clip_timepoints in all_clips_timepoints: waveform_clip = waveform[ :, int(clip_timepoints[0] * sample_rate) : int( clip_timepoints[1] * sample_rate ), ] waveform_melspec = waveform2melspec( waveform_clip, sample_rate, num_mel_bins, target_length ) all_clips.append(waveform_melspec) normalize = transforms.Normalize(mean=mean, std=std) all_clips = [normalize(ac).to(device) for ac in all_clips] all_clips = torch.stack(all_clips, dim=0) audio_outputs.append(all_clips) return torch.stack(audio_outputs, dim=0) def get_clip_timepoints(clip_sampler, duration): # Read out all clips in this video all_clips_timepoints = [] is_last_clip = False end = 0.0 while not is_last_clip: start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None) all_clips_timepoints.append((start, end)) return all_clips_timepoints def crop_boxes(boxes, x_offset, y_offset): """ Peform crop on the bounding boxes given the offsets. Args: boxes (ndarray or None): bounding boxes to peform crop. The dimension is `num boxes` x 4. x_offset (int): cropping offset in the x axis. y_offset (int): cropping offset in the y axis. Returns: cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4. """ cropped_boxes = boxes.copy() cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset return cropped_boxes def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None): """ Perform uniform spatial sampling on the images and corresponding boxes. Args: images (tensor): images to perform uniform crop. The dimension is `num frames` x `channel` x `height` x `width`. size (int): size of height and weight to crop the images. spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width is larger than height. Or 0, 1, or 2 for top, center, and bottom crop if height is larger than width. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. scale_size (int): optinal. If not None, resize the images to scale_size before performing any crop. Returns: cropped (tensor): images with dimension of `num frames` x `channel` x `size` x `size`. cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4. """ assert spatial_idx in [0, 1, 2] ndim = len(images.shape) if ndim == 3: images = images.unsqueeze(0) height = images.shape[2] width = images.shape[3] if scale_size is not None: if width <= height: width, height = scale_size, int(height / width * scale_size) else: width, height = int(width / height * scale_size), scale_size images = torch.nn.functional.interpolate( images, size=(height, width), mode="bilinear", align_corners=False, ) y_offset = int(math.ceil((height - size) / 2)) x_offset = int(math.ceil((width - size) / 2)) if height > width: if spatial_idx == 0: y_offset = 0 elif spatial_idx == 2: y_offset = height - size else: if spatial_idx == 0: x_offset = 0 elif spatial_idx == 2: x_offset = width - size cropped = images[:, :, y_offset : y_offset + size, x_offset : x_offset + size] cropped_boxes = crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None if ndim == 3: cropped = cropped.squeeze(0) return cropped, cropped_boxes class SpatialCrop(nn.Module): """ Convert the video into 3 smaller clips spatially. 
Must be used after the temporal crops to get spatial crops, and should be used with -2 in the spatial crop at the slowfast augmentation stage (so full frames are passed in here). Will return a larger list with the 3x spatial crops as well. """ def __init__(self, crop_size: int = 224, num_crops: int = 3): super().__init__() self.crop_size = crop_size if num_crops == 3: self.crops_to_ext = [0, 1, 2] self.flipped_crops_to_ext = [] elif num_crops == 1: self.crops_to_ext = [1] self.flipped_crops_to_ext = [] else: raise NotImplementedError("Nothing else supported yet") def forward(self, videos): """ Args: videos: A list of C, T, H, W videos. Returns: videos: A list with 3x the number of elements. Each video converted to C, T, H', W' by spatial cropping. """ assert isinstance(videos, list), "Must be a list of videos after temporal crops" assert all([video.ndim == 4 for video in videos]), "Must be (C,T,H,W)" res = [] for video in videos: for spatial_idx in self.crops_to_ext: res.append(uniform_crop(video, self.crop_size, spatial_idx)[0]) if not self.flipped_crops_to_ext: continue flipped_video = transforms.functional.hflip(video) for spatial_idx in self.flipped_crops_to_ext: res.append(uniform_crop(flipped_video, self.crop_size, spatial_idx)[0]) return res def load_and_transform_video_data( video_paths, device, clip_duration=2, clips_per_video=5, sample_rate=16000, ): if video_paths is None: return None video_outputs = [] video_transform = transforms.Compose( [ pv_transforms.ShortSideScale(224), NormalizeVideo( mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711), ), ] ) clip_sampler = ConstantClipsPerVideoSampler( clip_duration=clip_duration, clips_per_video=clips_per_video ) frame_sampler = pv_transforms.UniformTemporalSubsample(num_samples=clip_duration) for video_path in video_paths: video = EncodedVideo.from_path( video_path, decoder="decord", decode_audio=False, **{"sample_rate": sample_rate}, ) all_clips_timepoints = get_clip_timepoints(clip_sampler, video.duration) all_video = [] for clip_timepoints in all_clips_timepoints: # Read the clip, get frames clip = video.get_clip(clip_timepoints[0], clip_timepoints[1]) if clip is None: raise ValueError("No clip found") video_clip = frame_sampler(clip["video"]) video_clip = video_clip / 255.0 # since this is float, need 0-1 all_video.append(video_clip) all_video = [video_transform(clip) for clip in all_video] all_video = SpatialCrop(224, num_crops=3)(all_video) all_video = torch.stack(all_video, dim=0) video_outputs.append(all_video) return torch.stack(video_outputs, dim=0).to(device)
NExT-GPT-main
next/mm_processors.py
NExT-GPT-main
next/__init__.py
import torch from torch.nn import Module from transformers import AutoTokenizer from next.transformer import ( Decoder, Transformer, ViTransformerWrapper, Encoder ) import logging from next.autoregressive import AutoregressiveWrapper logging.basicConfig( level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s' ) class NextGPTTokenizer: """ A tokenizer class for the NextGPT model Attributes: processor(CLIPProcessor): The processor to tokenize images tokenizer: (AutoTokenizer): The tokenizer to tokenize text im_idx: (int): The Index of the "<image>" token. im_end_idx (int): The index of the "</image>" token. """ def __init__(self): try: # self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K") self.tokenizer = AutoTokenizer.from_pretrained( "EleutherAI/gpt-neox-20b", additional_special_tokens=["<image>", "</image>", "<audio>", "</audio>", "<video>", "</video>"], eos_token="<eos>", pad_token="<pad>", extra_ids=0, model_max_length=8192 ) except Exception as e: logging.error(f"Failed to initialize NextGPTTokenizer: {e}") raise self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids(["<image>", "</image>"]) def tokenize_texts(self, texts: str): """ Tokenize given texts. Args: Texts (str): The Text to be tokenized Returns: A tuple containing the tokenized texts and only the text tokens. """ try: texts = self.tokenizer( texts, return_tensors="pt", padding=True, truncation=True ).input_ids # Add image tokens to text as "<s> <image> </image> text </s>" image_tokens = torch.tensor([[self.im_idx, self.im_end_idx]] * texts.shape[0]) return torch.cat([texts[:, 0:1], image_tokens, texts[:, 1:]], dim=1), texts except Exception as e: logging.error(f"Failed to tokenize texts: {e}") raise def tokenize_images(self, images): """ Tokenizes given images. Args: images: The images to be tokenized Returns: The tokenized images. """ try: return self.processor(images=images, return_tensors="pt").pixel_values except Exception as e: logging.error(f"Failed to tokenize images: {e}") raise def tokenize(self, sample): """ Tokenizes given sample. Args: Sample: The sample to be tokenized Returns: A dictionary containing the tokenized text tokens, images, labels, and attention mask. """ try: text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"]) attention_mask = text_tokens != self.tokenizer.pad_token_id dummy_image_features = torch.ones((text_tokens.shape[0], 64)) attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1) return { "text_tokens": text_tokens, "images": self.tokenize_images(sample["image"]), "labels": only_text_tokens, "attention_mask": attention_mask, } except Exception as e: logging.error(f"Failed to tokenize sample: {e}") raise class NextGPT(Module): """ NextGPT is a transformer-based model architecture. It initializes with a Transformer and AutoregressiveWrapper with default or user-specified parameters. """ def __init__( self, num_tokens=50432, max_seq_len=8192, dim=2560, depth=32, dim_head=128, heads=24, use_abs_pos_emb=False, alibi_pos_bias=True, alibi_num_heads=12, rotary_xpos=True, attn_flash=True, attn_kv_heads = 2, qk_norm=True, attn_qk_norm=True, attn_qk_norm_dim_scale=True, ): """ Initialize the model with specified or default parameters. 
Args: - num_tokens: Number of tokens in the vocabulary - max_seq_len: Maximum sequence length - dim: Dimension of the model - depth: Depth of the model - dim_head: Dimension of the model head - heads: Number of heads - use_abs_pos_emb: Whether to use absolute position embedding - alibi_pos_bias: Alibi position bias - alibi_num_heads: Number of alibi heads - rotary_xpos: Rotary position - attn_flash: Attention flash - deepnorm: Deep normalization - shift_tokens: Number of tokens to shift - attn_one_kv_head: Attention one key/value head - qk_norm: Query-key normalization - attn_qk_norm: Attention query-key normalization - attn_qk_norm_dim_scale: Attention query-key normalization dimension scale - embedding_provider: Embedding provider module """ super().__init__() try: self.NextGPT = Transformer( num_tokens=num_tokens, max_seq_len=max_seq_len, use_abs_pos_emb=use_abs_pos_emb, attn_layers=Decoder( dim=dim, depth=depth, dim_head=dim_head, heads=heads, alibi_pos_bias=alibi_pos_bias, alibi_num_heads=alibi_num_heads, rotary_xpos=rotary_xpos, attn_flash=attn_flash, attn_kv_heads=attn_kv_heads, qk_norm=qk_norm, attn_qk_norm=attn_qk_norm, attn_qk_norm_dim_scale=attn_qk_norm_dim_scale ) ) self.decoder = AutoregressiveWrapper(self.NextGPT) except Exception as e: print("Failed to initialize NextGPT: ", e) raise def forward(self, text_tokens, **kwargs): """ Forward pass through the model. It expects the input text_tokens. Args: - text_tokens: Input tokens - kwargs: Other arguments Returns: - output from the decoder """ try: model_input = self.decoder.forward(text_tokens)[0] return self.decoder(model_input, padded_x=model_input[0]) except Exception as e: print("Failed in forward method: ", e) raise class NextGPTMultiModal(Module): def __init__( self, image_size=256, patch_size=32, encoder_dim=512, encoder_depth=6, encoder_heads=8, num_tokens=20000, max_seq_len=1024, decoder_dim=512, decoder_depth=6, decoder_heads=8, alibi_num_heads=4, use_abs_pos_emb=False, cross_attend=True, alibi_pos_bias=True, rotary_xpos=True, attn_flash=True, qk_norm=True ): super(NextGPTMultiModal, self).__init__() self.encoder = ViTransformerWrapper( image_size=image_size, patch_size=patch_size, attn_layers=Encoder( dim=encoder_dim, depth=encoder_depth, heads=encoder_heads ) ) self.decoder = Transformer( num_tokens=num_tokens, max_seq_len=max_seq_len, use_abs_pos_emb=use_abs_pos_emb, attn_layers=Decoder( dim=decoder_dim, depth=decoder_depth, heads=decoder_heads, cross_attend=cross_attend, alibi_pos_bias=alibi_pos_bias, alibi_num_heads=alibi_num_heads, rotary_xpos=rotary_xpos, attn_flash=attn_flash, qk_norm=qk_norm, ) ) def forward(self, img, text): try: encoded = self.encoder(img, return_embeddings=True) return self.decoder(text, context=encoded) except Exception as error: print(f"Failed in forward method: {error}") raise
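# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# A tiny NextGPTMultiModal run on random data, just to show the expected tensor
# shapes. All dimensions below are made-up toy values; the default attn_flash=True
# assumes PyTorch >= 2.0.
if __name__ == "__main__":
    model = NextGPTMultiModal(
        image_size=64,
        patch_size=8,
        encoder_dim=128,
        encoder_depth=2,
        encoder_heads=4,
        num_tokens=1000,
        max_seq_len=64,
        decoder_dim=128,
        decoder_depth=2,
        decoder_heads=4,
        alibi_num_heads=2,
    )
    img = torch.randn(1, 3, 64, 64)         # B, C, H, W
    text = torch.randint(0, 1000, (1, 32))  # B, N token ids
    logits = model(img, text)               # expected shape: (1, 32, 1000)
    print(logits.shape)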
NExT-GPT-main
next/model.py
from collections import namedtuple from dataclasses import dataclass from functools import partial, wraps from typing import Optional import torch import torch.nn.functional as F from einops import rearrange, repeat from packaging import version from torch import Tensor, einsum, nn # constants EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient']) @dataclass class Intermediates: qk_similarities: Optional[Tensor] = None pre_softmax_attn: Optional[Tensor] = None post_softmax_attn: Optional[Tensor] = None def to_tuple(self): return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn) # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def compact(arr): return [*filter(exists, arr)] def once(fn): called = False @wraps(fn) def inner(x): nonlocal called if called: return called = True return fn(x) return inner print_once = once(print) # functions for creating causal mask # need a special one for onnx cpu (no support for .triu) def create_causal_mask(i, j, device): return torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1) def onnx_create_causal_mask(i, j, device): r = torch.arange(i, device = device) causal_mask = rearrange(r, 'i -> i 1') < rearrange(r, 'j -> 1 j') causal_mask = F.pad(causal_mask, (j - i, 0), value = False) return causal_mask # main class class Attend(nn.Module): def __init__( self, *, dropout = 0., causal = False, heads = None, talking_heads = False, sparse_topk = None, scale = None, qk_norm = False, flash = False, add_zero_kv = False, onnxable = False ): super().__init__() self.scale = scale self.qk_norm = qk_norm self.causal = causal self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax self.dropout = dropout self.attn_dropout = nn.Dropout(dropout) # talking heads assert not (flash and talking_heads), 'talking heads not compatible with flash attention' self.talking_heads = talking_heads if talking_heads: self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False) self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False) # sparse topk assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention' self.sparse_topk = sparse_topk # add a key / value token composed of zeros # in case this helps controlling outliers, proposed by https://www.evanmiller.org/attention-is-off-by-one.html self.add_zero_kv = add_zero_kv # flash attention self.flash = flash assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above' # determine efficient attention configs for cuda and cpu self.cpu_config = EfficientAttentionConfig(True, True, True) self.cuda_config = None if not torch.cuda.is_available() or not flash: return device_properties = torch.cuda.get_device_properties(torch.device('cuda')) if device_properties.major == 8 and device_properties.minor == 0: print_once('A100 GPU detected, using flash attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(True, False, False) else: print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(False, True, True) def flash_attn( self, q, k, v, mask = None, attn_bias = None ): batch, heads, q_len, _, k_len, is_cuda, device = 
*q.shape, k.shape[-2], q.is_cuda, q.device # Recommended for multi-query single-key-value attention by Tri Dao # kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64]) if k.ndim == 3: k = rearrange(k, 'b ... -> b 1 ...').expand_as(q) if v.ndim == 3: v = rearrange(v, 'b ... -> b 1 ...').expand_as(q) # handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention if self.qk_norm: default_scale = q.shape[-1] ** -0.5 q = q * (default_scale / self.scale) # Check if mask exists and expand to compatible shape # The mask is B L, so it would have to be expanded to B H N L causal = self.causal if exists(mask): assert mask.ndim == 4 mask = mask.expand(batch, heads, q_len, k_len) # manually handle causal mask, if another mask was given if causal: causal_mask = self.create_causal_mask(q_len, k_len, device = device) mask = mask & ~causal_mask causal = False # handle alibi positional bias # convert from bool to float if exists(attn_bias): attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1) # if mask given, the mask would already contain the causal mask from above logic # otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number mask_value = -torch.finfo(q.dtype).max if exists(mask): attn_bias = attn_bias.masked_fill(~mask, mask_value // 2) elif causal: causal_mask = self.create_causal_mask(q_len, k_len, device = device) attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2) causal = False # scaled_dot_product_attention handles attn_mask either as bool or additive bias # make it an additive bias here mask = attn_bias # Check if there is a compatible device for flash attention config = self.cuda_config if is_cuda else self.cpu_config # pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale with torch.backends.cuda.sdp_kernel(**config._asdict()): out = F.scaled_dot_product_attention( q, k, v, attn_mask = mask, dropout_p = self.dropout if self.training else 0., is_causal = causal ) return out, Intermediates() def forward( self, q, k, v, mask = None, attn_bias = None, prev_attn = None ): """ einstein notation b - batch h - heads n, i, j - sequence length (base sequence length, source, target) d - feature dimension """ n, heads, kv_heads, device = q.shape[-2], q.shape[1], k.shape[1], q.device scale = default(self.scale, q.shape[-1] ** -0.5) # handle grouped multi-query attention if kv_heads == 1: k, v = map(lambda t: rearrange(t, 'b 1 n d -> b n d'), (k, v)) elif kv_heads < heads: k, v = map(lambda t: repeat(t, 'b kvh n d -> b (r kvh) n d', r = heads // kv_heads), (k, v)) # handle zero kv, as means for allowing network to attend to nothing if self.add_zero_kv: k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value = 0.), (k, v)) if exists(mask): mask = F.pad(mask, (1, 0), value = True) if exists(attn_bias): attn_bias = F.pad(attn_bias, (1, 0), value = 0.) 
if self.flash: assert not exists(prev_attn), 'residual attention not compatible with flash attention' return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias) kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d' dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale if exists(prev_attn): dots = dots + prev_attn qk_similarities = dots.clone() if self.talking_heads: dots = self.pre_softmax_talking_heads(dots) if exists(attn_bias): dots = dots + attn_bias i, j, dtype = *dots.shape[-2:], dots.dtype mask_value = -torch.finfo(dots.dtype).max if exists(self.sparse_topk) and self.sparse_topk < j: top_values, _ = dots.topk(self.sparse_topk, dim = -1) sparse_topk_mask = dots < top_values[..., -1:] mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask if exists(mask): dots = dots.masked_fill(~mask, mask_value) if self.causal: causal_mask = self.create_causal_mask(i, j, device = device) dots = dots.masked_fill(causal_mask, mask_value) pre_softmax_attn = dots.clone() attn = self.attn_fn(dots, dim = -1) attn = attn.type(dtype) post_softmax_attn = attn.clone() attn = self.attn_dropout(attn) if self.talking_heads: attn = self.post_softmax_talking_heads(attn) out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v) intermediates = Intermediates( qk_similarities = qk_similarities, pre_softmax_attn = pre_softmax_attn, post_softmax_attn = post_softmax_attn ) return out, intermediates # cascading heads logic def to_single_heads(t, dim = 1): heads = t.unbind(dim = dim) return tuple(head.unsqueeze(dim) for head in heads) class CascadingHeads(nn.Module): def __init__(self, attend: Attend): super().__init__() self.attend = attend def forward( self, q, k, v, mask = None, attn_bias = None, prev_attn = None ): assert q.shape[-1] == v.shape[-1], 'cascading heads can only be done if query / key and value head dimensions are the same' # split inputs into per-head inputs heads = q.shape[1] queries = to_single_heads(q) keys = to_single_heads(k) if k.ndim == 4 else ((k,) * heads) values = to_single_heads(v) if v.ndim == 4 else ((v,) * heads) mask = (mask,) * heads attn_bias = to_single_heads(attn_bias, dim = 0) if exists(attn_bias) else ((None,) * heads) prev_attn = to_single_heads(prev_attn) if exists(prev_attn) else ((None,) * heads) # now loop through each head, without output of previous head summed with the next head # thus cascading all_outs = [] all_intermediates = [] prev_head_out = None for h_q, h_k, h_v, h_mask, h_attn_bias, h_prev_attn in zip(queries, keys, values, mask, attn_bias, prev_attn): if exists(prev_head_out): h_q = h_q + prev_head_out out, intermediates = self.attend( h_q, h_k, h_v, mask = h_mask, attn_bias = h_attn_bias, prev_attn = h_prev_attn ) prev_head_out = out all_outs.append(out) all_intermediates.append(intermediates) # cat all output heads all_outs = torch.cat(all_outs, dim = 1) # cat all intermediates, if they exist qk_similarities, pre_softmax_attn, post_softmax_attn = zip(*map(lambda i: i.to_tuple(), all_intermediates)) qk_similarities, pre_softmax_attn, post_softmax_attn = map(compact, (qk_similarities, pre_softmax_attn, post_softmax_attn)) aggregated_intermediates = Intermediates( qk_similarities = torch.cat(qk_similarities, dim = 1) if len(qk_similarities) > 0 else None, pre_softmax_attn = torch.cat(pre_softmax_attn, dim = 1) if len(pre_softmax_attn) > 0 else None, post_softmax_attn = torch.cat(post_softmax_attn, dim = 1) if len(post_softmax_attn) > 0 else None ) return all_outs, aggregated_intermediates
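# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Exercise the non-flash attention path on random tensors to show the
# (output, Intermediates) return signature. The shapes are arbitrary toy values.
if __name__ == "__main__":
    attend = Attend(dropout=0.1, causal=True, flash=False)
    q = torch.randn(2, 8, 16, 64)  # batch, heads, seq, dim_head
    k = torch.randn(2, 8, 16, 64)
    v = torch.randn(2, 8, 16, 64)
    out, intermediates = attend(q, k, v)
    print(out.shape)                             # (2, 8, 16, 64)
    print(intermediates.pre_softmax_attn.shape)  # (2, 8, 16, 16)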
NExT-GPT-main
next/attend.py
from pegasus import Pegasus
from next.mm_processors import load_and_transform_video_data  # defined in next/mm_processors.py; importing it from this module was a self-import
from next.transformer import ViTransformerWrapper, Encoder


# encoders
class AudioEncoder(Pegasus):
    # audio_encoder = AudioEncoder()
    # audio_embeddings = audio_encoder.embed([audio1, audio2])  # provide your list of audio data here
    def __init__(
        self,
        multi_process=False,
        n_processors=1,
        hosted=False
    ):
        super().__init__(
            "audio",
            multi_process,
            n_processors,
            hosted
        )

    def embed(self, audio):
        return self.embed_data(audio)


class VideoEncoder(Pegasus):
    """
    from next import VideoEncoder

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    video_encoder = VideoEncoder()
    video_embeddings = video_encoder.embed([video, video2], device)
    """
    def __init__(
        self,
        multi_process=False,
        n_processors=1,
        hosted=False
    ):
        super().__init__(
            "vision",
            multi_process,
            n_processors,
            hosted
        )

    def embed(self, video, device):
        video = load_and_transform_video_data(video, device)
        return self.embed_data(video)


class ImageEncoder:
    # Usage:
    # image_encoder = ImageEncoder()
    # img_embeddings = image_encoder.embed(imgs)  # provide a batch of image tensors here
    def __init__(
        self,
        image_size: int = 256,
        patch_size: int = 32,
        encoder_dim: int = 512,
        encoder_depth: int = 6,
        encoder_heads: int = 8,
    ):
        self.encoder = ViTransformerWrapper(
            image_size=image_size,
            patch_size=patch_size,
            attn_layers=Encoder(
                dim=encoder_dim,
                depth=encoder_depth,
                heads=encoder_heads,
            )
        )

    def embed(self, img):
        encoded = self.encoder(img, return_embeddings=True)
        return encoded
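# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Embed a random image batch with ImageEncoder; the small dimensions are toy values.
if __name__ == "__main__":
    import torch

    image_encoder = ImageEncoder(
        image_size=64, patch_size=8, encoder_dim=128, encoder_depth=2, encoder_heads=4
    )
    imgs = torch.randn(2, 3, 64, 64)
    embeddings = image_encoder.embed(imgs)
    print(embeddings.shape)  # (2, 64, 128): batch, num_patches, encoder_dim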
NExT-GPT-main
next/mm_encoders.py
import math from dataclasses import dataclass from functools import partial, wraps from inspect import isfunction from random import random from typing import Callable, List, Optional import torch import torch.nn.functional as F from einops import rearrange, reduce, repeat from torch import Tensor, einsum, nn from next.attend import Attend, Intermediates DEFAULT_DIM_HEAD = 64 @dataclass class LayerIntermediates: hiddens: Optional[List[Tensor]] = None attn_intermediates: Optional[List[Intermediates]] = None layer_hiddens: Optional[List[Tensor]] = None attn_z_loss: Optional[Tensor] = None # helpers def exists(val): return val is not None def default(val, d): if exists(val): return val return d() if isfunction(d) else d def cast_tuple(val, depth): return val if isinstance(val, tuple) else (val,) * depth def divisible_by(num, den): return (num % den) == 0 def maybe(fn): @wraps(fn) def inner(x, *args, **kwargs): if not exists(x): return x return fn(x, *args, **kwargs) return inner class always(): def __init__(self, val): self.val = val def __call__(self, *args, **kwargs): return self.val class not_equals(): def __init__(self, val): self.val = val def __call__(self, x, *args, **kwargs): return x != self.val class equals(): def __init__(self, val): self.val = val def __call__(self, x, *args, **kwargs): return x == self.val def Sequential(*modules): return nn.Sequential(*filter(exists, modules)) # tensor helpers def max_neg_value(tensor): return -torch.finfo(tensor.dtype).max def l2norm(t, groups = 1): t = rearrange(t, '... (g d) -> ... g d', g = groups) t = F.normalize(t, p = 2, dim = -1) return rearrange(t, '... g d -> ... (g d)') def pad_at_dim(t, pad, dim = -1, value = 0.): dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1) zeros = ((0, 0) * dims_from_right) return F.pad(t, (*zeros, *pad), value = value) def or_reduce(masks): head, *body = masks for rest in body: head = head | rest return head # auxiliary loss helpers def calc_z_loss( pre_softmax_attns: List[Tensor], mask = None, weight = 1. ): # the same loss applied to the mixture of experts router logits in https://arxiv.org/abs/2202.08906 # in the paper, in a tiny footnote, they mention using it on attention logits with stabilizing effects # also used in PaLM as one of the measures lse = 0. for attn in pre_softmax_attns: lse = lse + attn.logsumexp(dim = -1) loss = torch.square(lse) loss = reduce(loss, 'b h n -> b n', 'sum') if not exists(mask): return loss.mean() * weight loss = loss[mask].sum() / mask.sum().clamp(min = 1e-5) return loss * weight # init helpers def init_zero_(layer): nn.init.constant_(layer.weight, 0.) if exists(layer.bias): nn.init.constant_(layer.bias, 0.) 
# keyword argument helpers def pick_and_pop(keys, d): values = list(map(lambda key: d.pop(key), keys)) return dict(zip(keys, values)) def group_dict_by_key(cond, d): return_val = [dict(),dict()] for key in d.keys(): match = bool(cond(key)) ind = int(not match) return_val[ind][key] = d[key] return (*return_val,) def string_begins_with(prefix, str): return str.startswith(prefix) def group_by_key_prefix(prefix, d): return group_dict_by_key(partial(string_begins_with, prefix), d) def groupby_prefix_and_trim(prefix, d): kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) return kwargs_without_prefix, kwargs # initializations def deepnorm_init( transformer, beta, module_name_match_list = ['.ff.', '.to_v', '.to_out'] ): for name, module in transformer.named_modules(): if type(module) != nn.Linear: continue needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list)) gain = beta if needs_beta_gain else 1 nn.init.xavier_normal_(module.weight.data, gain = gain) if exists(module.bias): nn.init.constant_(module.bias.data, 0) # structured dropout, more effective than traditional attention dropouts def dropout_seq(seq, mask, dropout): b, n, *_, device = *seq.shape, seq.device logits = torch.randn(b, n, device = device) if exists(mask): mask_value = max_neg_value(logits) logits = logits.masked_fill(~mask, mask_value) keep_prob = 1. - dropout num_keep = max(1, int(keep_prob * n)) keep_indices = logits.topk(num_keep, dim = 1).indices batch_indices = torch.arange(b, device = device) batch_indices = rearrange(batch_indices, 'b -> b 1') seq = seq[batch_indices, keep_indices] if exists(mask): seq_counts = mask.sum(dim = -1) seq_keep_counts = torch.ceil(seq_counts * keep_prob).int() keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1') mask = mask[batch_indices, keep_indices] & keep_mask return seq, mask # activations class ReluSquared(nn.Module): def forward(self, x): return F.relu(x) ** 2 # embedding class TokenEmbedding(nn.Module): def __init__(self, dim, num_tokens, l2norm_embed = False): super().__init__() self.l2norm_embed = l2norm_embed self.emb = nn.Embedding(num_tokens, dim) def forward(self, x): token_emb = self.emb(x) return l2norm(token_emb) if self.l2norm_embed else token_emb # positional embeddings class AbsolutePositionalEmbedding(nn.Module): def __init__(self, dim, max_seq_len, l2norm_embed = False): super().__init__() self.scale = dim ** -0.5 if not l2norm_embed else 1. 
self.max_seq_len = max_seq_len self.l2norm_embed = l2norm_embed self.emb = nn.Embedding(max_seq_len, dim) def forward(self, x, pos = None): seq_len, device = x.shape[1], x.device assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}' if not exists(pos): pos = torch.arange(seq_len, device = device) pos_emb = self.emb(pos) pos_emb = pos_emb * self.scale return l2norm(pos_emb) if self.l2norm_embed else pos_emb class ScaledSinusoidalEmbedding(nn.Module): def __init__(self, dim, theta = 10000): super().__init__() assert divisible_by(dim, 2) self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5) half_dim = dim // 2 freq_seq = torch.arange(half_dim).float() / half_dim inv_freq = theta ** -freq_seq self.register_buffer('inv_freq', inv_freq, persistent = False) def forward(self, x, pos = None): seq_len, device = x.shape[1], x.device if not exists(pos): pos = torch.arange(seq_len, device = device) emb = einsum('i, j -> i j', pos, self.inv_freq) emb = torch.cat((emb.sin(), emb.cos()), dim = -1) return emb * self.scale class RelativePositionBias(nn.Module): def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8): super().__init__() self.scale = scale self.causal = causal self.num_buckets = num_buckets self.max_distance = max_distance self.relative_attention_bias = nn.Embedding(num_buckets, heads) @staticmethod def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128): ret = 0 n = -relative_position if not causal: num_buckets //= 2 ret += (n < 0).long() * num_buckets n = torch.abs(n) else: n = torch.max(n, torch.zeros_like(n)) max_exact = num_buckets // 2 is_small = n < max_exact val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).long() val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret @property def device(self): return next(self.parameters()).device def forward(self, i, j): device = self.device q_pos = torch.arange(j - i, j, dtype = torch.long, device = device) k_pos = torch.arange(j, dtype = torch.long, device = device) rel_pos = k_pos[None, :] - q_pos[:, None] rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance) values = self.relative_attention_bias(rp_bucket) bias = rearrange(values, 'i j h -> h i j') return bias * self.scale class DynamicPositionBias(nn.Module): def __init__(self, dim, *, heads, depth, log_distance = False, norm = False): super().__init__() assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1' self.log_distance = log_distance self.mlp = nn.ModuleList([]) self.mlp.append(Sequential( nn.Linear(1, dim), nn.LayerNorm(dim) if norm else None, nn.SiLU() )) for _ in range(depth - 1): self.mlp.append(Sequential( nn.Linear(dim, dim), nn.LayerNorm(dim) if norm else None, nn.SiLU() )) self.mlp.append(nn.Linear(dim, heads)) @property def device(self): return next(self.parameters()).device def forward(self, i, j): assert i == j n, device = j, self.device # get the (n x n) matrix of distances seq_arange = torch.arange(n, device = device) context_arange = torch.arange(n, device = device) indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j') indices += (n - 1) # input to continuous 
positions MLP pos = torch.arange(-n + 1, n, device = device).float() pos = rearrange(pos, '... -> ... 1') if self.log_distance: pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1) for layer in self.mlp: pos = layer(pos) # get position biases bias = pos[indices] bias = rearrange(bias, 'i j h -> h i j') return bias class AlibiPositionalBias(nn.Module): def __init__(self, heads, total_heads, **kwargs): super().__init__() self.heads = heads self.total_heads = total_heads slopes = Tensor(self._get_slopes(heads)) slopes = rearrange(slopes, 'h -> h 1 1') self.register_buffer('slopes', slopes, persistent = False) self.register_buffer('bias', None, persistent = False) def get_bias(self, i, j, device): i_arange = torch.arange(j - i, j, device = device) j_arange = torch.arange(j, device = device) bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1')) return bias @staticmethod def _get_slopes(heads): def get_slopes_power_of_2(n): start = (2**(-2**-(math.log2(n)-3))) ratio = start return [start*ratio**i for i in range(n)] if math.log2(heads).is_integer(): return get_slopes_power_of_2(heads) closest_power_of_2 = 2 ** math.floor(math.log2(heads)) return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2] @property def device(self): return next(self.buffers()).device def forward(self, i, j): h, device = self.total_heads, self.device if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i: return self.bias[..., :i, :j] bias = self.get_bias(i, j, device) bias = bias * self.slopes num_heads_unalibied = h - bias.shape[0] bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0) self.register_buffer('bias', bias, persistent = False) return self.bias class RotaryEmbedding(nn.Module): def __init__( self, dim, use_xpos = False, scale_base = 512, interpolation_factor = 1., base = 10000, base_rescale_factor = 1. ): super().__init__() # proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning # has some connection to NTK literature # https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/ base *= base_rescale_factor ** (dim / (dim - 2)) inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim)) self.register_buffer('inv_freq', inv_freq) assert interpolation_factor >= 1. self.interpolation_factor = interpolation_factor if not use_xpos: self.register_buffer('scale', None) return scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim) self.scale_base = scale_base self.register_buffer('scale', scale) def forward(self, seq_len, device): t = torch.arange(seq_len, device = device).type_as(self.inv_freq) t = t / self.interpolation_factor freqs = torch.einsum('i , j -> i j', t, self.inv_freq) freqs = torch.cat((freqs, freqs), dim = -1) if not exists(self.scale): return freqs, 1. power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base scale = self.scale ** rearrange(power, 'n -> n 1') scale = torch.cat((scale, scale), dim = -1) return freqs, scale def rotate_half(x): x = rearrange(x, '... (j d) -> ... 
j d', j = 2) x1, x2 = x.unbind(dim = -2) return torch.cat((-x2, x1), dim = -1) def apply_rotary_pos_emb(t, freqs, scale = 1): seq_len = t.shape[-2] freqs = freqs[-seq_len:, :] return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale) # norms class Scale(nn.Module): def __init__(self, value, fn): super().__init__() self.value = value self.fn = fn def forward(self, x, **kwargs): out = self.fn(x, **kwargs) def scale_fn(t): return t * self.value if not isinstance(out, tuple): return scale_fn(out) return (scale_fn(out[0]), *out[1:]) class ScaleNorm(nn.Module): def __init__(self, dim, eps = 1e-5): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5)) def forward(self, x): norm = torch.norm(x, dim = -1, keepdim = True) return x / norm.clamp(min = self.eps) * self.g class RMSNorm(nn.Module): def __init__(self, dim): super().__init__() self.scale = dim ** 0.5 self.g = nn.Parameter(torch.ones(dim)) def forward(self, x): return F.normalize(x, dim = -1) * self.scale * self.g class SimpleRMSNorm(nn.Module): def __init__(self, dim): super().__init__() self.scale = dim ** 0.5 def forward(self, x): return F.normalize(x, dim = -1) * self.scale # residual and residual gates class Residual(nn.Module): def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.): super().__init__() self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None self.scale_residual_constant = scale_residual_constant def forward(self, x, residual): if exists(self.residual_scale): residual = residual * self.residual_scale if self.scale_residual_constant != 1: residual = residual * self.scale_residual_constant return x + residual class GRUGating(nn.Module): def __init__(self, dim, scale_residual = False, **kwargs): super().__init__() self.gru = nn.GRUCell(dim, dim) self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None def forward(self, x, residual): if exists(self.residual_scale): residual = residual * self.residual_scale gated_output = self.gru( rearrange(x, 'b n d -> (b n) d'), rearrange(residual, 'b n d -> (b n) d') ) return gated_output.reshape_as(x) # token shifting def shift(t, amount, mask = None): if amount == 0: return t else: amount = min(amount, t.shape[1]) if exists(mask): t = t.masked_fill(~mask[..., None], 0.) return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.) class ShiftTokens(nn.Module): def __init__(self, shifts, fn): super().__init__() self.fn = fn self.shifts = tuple(shifts) def forward(self, x, **kwargs): mask = kwargs.get('mask', None) shifts = self.shifts segments = len(shifts) feats_per_shift = x.shape[-1] // segments splitted = x.split(feats_per_shift, dim = -1) segments_to_shift, rest = splitted[:segments], splitted[segments:] segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts))) x = torch.cat((*segments_to_shift, *rest), dim = -1) return self.fn(x, **kwargs) # feedforward class GLU(nn.Module): def __init__( self, dim_in, dim_out, activation: Callable, mult_bias = False ): super().__init__() self.act = activation self.proj = nn.Linear(dim_in, dim_out * 2) self.mult_bias = nn.Parameter(torch.ones(dim_out)) if mult_bias else 1. 
def forward(self, x): x, gate = self.proj(x).chunk(2, dim = -1) return x * self.act(gate) * self.mult_bias class FeedForward(nn.Module): def __init__( self, dim, dim_out = None, mult = 4, glu = False, glu_mult_bias = False, swish = False, relu_squared = False, post_act_ln = False, dropout = 0., no_bias = False, zero_init_output = False ): super().__init__() inner_dim = int(dim * mult) dim_out = default(dim_out, dim) if relu_squared: activation = ReluSquared() elif swish: activation = nn.SiLU() else: activation = nn.GELU() if glu: project_in = GLU(dim, inner_dim, activation, mult_bias = glu_mult_bias) else: project_in = nn.Sequential( nn.Linear(dim, inner_dim, bias = not no_bias), activation ) self.ff = Sequential( project_in, nn.LayerNorm(inner_dim) if post_act_ln else None, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out, bias = not no_bias) ) # init last linear layer to 0 if zero_init_output: init_zero_(self.ff[-1]) def forward(self, x): return self.ff(x) # attention. it is all we need class Attention(nn.Module): def __init__( self, dim, dim_head = DEFAULT_DIM_HEAD, heads = 8, causal = False, flash = False, talking_heads = False, head_scale = False, sparse_topk = None, num_mem_kv = 0, dropout = 0., on_attn = False, gate_values = False, zero_init_output = False, max_attend_past = None, qk_norm = False, qk_norm_groups = 1, qk_norm_scale = 10, qk_norm_dim_scale = False, one_kv_head = False, kv_heads = None, shared_kv = False, value_dim_head = None, tensor_product = False, # https://arxiv.org/abs/2208.06061 cascading_heads = False, add_zero_kv = False, # same as add_zero_attn in pytorch onnxable = False ): super().__init__() self.scale = dim_head ** -0.5 self.heads = heads self.causal = causal self.max_attend_past = max_attend_past assert not (exists(kv_heads) and one_kv_head), 'either attn_one_kv_head is set to True (in which case kv_heads is set to 1), or attn_kv_heads is set, but not both' value_dim_head = default(value_dim_head, dim_head) kv_heads = default(kv_heads, heads) kv_heads = 1 if one_kv_head else kv_heads assert divisible_by(heads, kv_heads) self.kv_heads = kv_heads q_dim = dim_head * heads k_dim = dim_head * kv_heads v_dim = value_dim_head * kv_heads out_dim = value_dim_head * heads self.to_q = nn.Linear(dim, q_dim, bias = False) self.to_k = nn.Linear(dim, k_dim, bias = False) # shared key / values, for further memory savings during inference assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values' self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None # relations projection from tp-attention self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None # add GLU gating for aggregated values, from alphafold2 self.to_v_gate = None if gate_values: self.to_v_gate = nn.Linear(dim, out_dim) nn.init.constant_(self.to_v_gate.weight, 0) nn.init.constant_(self.to_v_gate.bias, 1) # cosine sim attention self.qk_norm = qk_norm self.qk_norm_groups = qk_norm_groups self.qk_norm_scale = qk_norm_scale # whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442 self.qk_norm_dim_scale = qk_norm_dim_scale self.qk_norm_q_scale = self.qk_norm_k_scale = 1 if qk_norm and qk_norm_dim_scale: self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head)) self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head)) assert (not qk_norm) or divisible_by(dim_head, qk_norm_groups), 'dimension per attention head must be divisible by the qk norm 
groups' assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)' # attend class - includes core attention algorithm + talking heads self.attend = Attend( heads = heads, causal = causal, talking_heads = talking_heads, dropout = dropout, sparse_topk = sparse_topk, qk_norm = qk_norm, scale = qk_norm_scale if qk_norm else self.scale, add_zero_kv = add_zero_kv, flash = flash, onnxable = onnxable ) # head scaling self.head_scale = head_scale if head_scale: self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1)) # explicit topk sparse attention self.sparse_topk = sparse_topk # add memory key / values self.num_mem_kv = num_mem_kv if num_mem_kv > 0: self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) # attention on attention self.attn_on_attn = on_attn self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False) # init output projection 0 if zero_init_output: init_zero_(self.to_out) def forward( self, x, context = None, mask = None, context_mask = None, attn_mask = None, rel_pos = None, rotary_pos_emb = None, prev_attn = None, mem = None ): b, n, _, h, kv_h, head_scale, device, has_context = *x.shape, self.heads, self.kv_heads, self.head_scale, x.device, exists(context) kv_input = default(context, x) q_input = x k_input = kv_input v_input = kv_input r_input = x if exists(mem): k_input = torch.cat((mem, k_input), dim = -2) v_input = torch.cat((mem, v_input), dim = -2) q = self.to_q(q_input) k = self.to_k(k_input) v = self.to_v(v_input) if exists(self.to_v) else k r = self.to_r(r_input) if exists(self.to_r) else None q = rearrange(q, 'b n (h d) -> b h n d', h = h) k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = kv_h), (k, v, r)) if self.qk_norm: qk_l2norm = partial(l2norm, groups = self.qk_norm_groups) q, k = map(qk_l2norm, (q, k)) q = q * self.qk_norm_q_scale k = k * self.qk_norm_k_scale if exists(rotary_pos_emb) and not has_context: freqs, xpos_scale = rotary_pos_emb l = freqs.shape[-1] q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.) 
(ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v)) ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale))) q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr))) input_mask = context_mask if has_context else mask if self.num_mem_kv > 0: mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v)) if self.qk_norm: mem_k = l2norm(mem_k) mem_k = mem_k * self.qk_norm_k_scale k = torch.cat((mem_k, k), dim = -2) v = torch.cat((mem_v, v), dim = -2) if exists(input_mask): input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True) i, j = map(lambda t: t.shape[-2], (q, k)) # determine masking max_neg_value(q) masks = [] final_attn_mask = None if exists(input_mask): input_mask = rearrange(input_mask, 'b j -> b 1 1 j') masks.append(~input_mask) if exists(attn_mask): assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4' if attn_mask.ndim == 2: attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j') elif attn_mask.ndim == 3: attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j') masks.append(~attn_mask) if exists(self.max_attend_past): range_q = torch.arange(j - i, j, device = device) range_k = torch.arange(j, device = device) dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j') max_attend_past_mask = dist > self.max_attend_past masks.append(max_attend_past_mask) if len(masks) > 0: final_attn_mask = ~or_reduce(masks) # prepare relative positional bias, if needed attn_bias = None if exists(rel_pos): attn_bias = rel_pos(i, j) # attention is all we need out, intermediates = self.attend( q, k, v, mask = final_attn_mask, attn_bias = attn_bias, prev_attn = prev_attn ) # https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients if exists(r): out = out * r + out # normformer scaling of heads if head_scale: out = out * self.head_scale_params # merge heads out = rearrange(out, 'b h n d -> b n (h d)') # alphafold2 styled gating of the values if exists(self.to_v_gate): gates = self.to_v_gate(x) out = out * gates.sigmoid() # combine the heads out = self.to_out(out) if exists(mask): mask = rearrange(mask, 'b n -> b n 1') out = out.masked_fill(~mask, 0.) 
return out, intermediates class AttentionLayers(nn.Module): def __init__( self, dim, depth, heads = 8, causal = False, cross_attend = False, only_cross = False, use_scalenorm = False, use_rmsnorm = False, use_simple_rmsnorm = False, alibi_pos_bias = False, alibi_num_heads = None, rel_pos_bias = False, rel_pos_num_buckets = 32, rel_pos_max_distance = 128, dynamic_pos_bias = False, dynamic_pos_bias_log_distance = False, dynamic_pos_bias_mlp_depth = 2, dynamic_pos_bias_norm = False, rotary_pos_emb = False, rotary_emb_dim = None, rotary_xpos = False, rotary_interpolation_factor = 1., rotary_xpos_scale_base = 512, rotary_base_rescale_factor = 1., custom_layers = None, sandwich_coef = None, par_ratio = None, residual_attn = False, cross_residual_attn = False, macaron = False, pre_norm = True, pre_norm_has_final_norm = True, gate_residual = False, scale_residual = False, scale_residual_constant = 1., deepnorm = False, shift_tokens = 0, sandwich_norm = False, resi_dual = False, resi_dual_scale = 1., zero_init_branch_output = False, layer_dropout = 0., cross_attn_tokens_dropout = 0., **kwargs ): super().__init__() rotary_pos_emb = rotary_pos_emb or rotary_xpos ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs) dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) self.dim = dim self.depth = depth self.layers = nn.ModuleList([]) self.has_pos_emb = rel_pos_bias or rotary_pos_emb rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32) assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention' self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base, interpolation_factor = rotary_interpolation_factor, base_rescale_factor = rotary_base_rescale_factor) if rotary_pos_emb else None assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both' assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' # relative positional bias flash_attn = attn_kwargs.get('flash', False) assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias' self.rel_pos = None if rel_pos_bias: assert not flash_attn, 'flash attention not compatible with t5 relative positional bias' self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance) elif dynamic_pos_bias: assert not flash_attn, 'flash attention not compatible with dynamic positional bias' self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm) elif alibi_pos_bias: alibi_num_heads = default(alibi_num_heads, heads) assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads' self.rel_pos = AlibiPositionalBias(heads = alibi_num_heads, total_heads = heads) # determine deepnorm and residual scale if deepnorm: assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings' pre_norm = sandwich_norm = resi_dual = False scale_residual = True scale_residual_constant = (2 * depth) ** 0.25 assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or 
resiDual is selected, but not both' assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm' if resi_dual: pre_norm = False self.pre_norm = pre_norm self.sandwich_norm = sandwich_norm self.resi_dual = resi_dual assert 0 < resi_dual_scale <= 1., 'resiDual prenorm residual must be scaled by a factor greater than 0 and less than or equal to 1.' self.resi_dual_scale = resi_dual_scale self.residual_attn = residual_attn self.cross_residual_attn = cross_residual_attn assert not (flash_attn and (residual_attn or cross_residual_attn)), 'flash attention is not compatible with residual attention' self.cross_attend = cross_attend assert (int(use_scalenorm) + int(use_rmsnorm) + int(use_simple_rmsnorm)) <= 1, 'you can only use either scalenorm, rmsnorm, or simple rmsnorm' if use_scalenorm: norm_class = ScaleNorm elif use_rmsnorm: norm_class = RMSNorm elif use_simple_rmsnorm: norm_class = SimpleRMSNorm else: norm_class = nn.LayerNorm norm_fn = partial(norm_class, dim) if cross_attend and not only_cross: default_block = ('a', 'c', 'f') elif cross_attend and only_cross: default_block = ('c', 'f') else: default_block = ('a', 'f') if macaron: default_block = ('f',) + default_block # zero init if zero_init_branch_output: attn_kwargs = {**attn_kwargs, 'zero_init_output': True} ff_kwargs = {**ff_kwargs, 'zero_init_output': True} # calculate layer block order if exists(custom_layers): layer_types = custom_layers elif exists(par_ratio): par_depth = depth * len(default_block) assert 1 < par_ratio <= par_depth, 'par ratio out of range' default_block = tuple(filter(not_equals('f'), default_block)) par_attn = par_depth // par_ratio depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper par_width = (depth_cut + depth_cut // par_attn) // par_attn assert len(default_block) <= par_width, 'default block is too large for par_ratio' par_block = default_block + ('f',) * (par_width - len(default_block)) par_head = par_block * par_attn layer_types = par_head + ('f',) * (par_depth - len(par_head)) elif exists(sandwich_coef): assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef else: layer_types = default_block * depth self.layer_types = layer_types self.num_attn_layers = len(list(filter(equals('a'), layer_types))) # stochastic depth self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types)) # structured dropout for cross attending self.cross_attn_tokens_dropout = cross_attn_tokens_dropout # calculate token shifting shift_tokens = cast_tuple(shift_tokens, len(layer_types)) # whether it has post norm self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity() # iterate and construct layers for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)): ind == (len(self.layer_types) - 1) if layer_type == 'a': layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs) elif layer_type == 'c': layer = Attention(dim, heads = heads, **attn_kwargs) elif layer_type == 'f': layer = FeedForward(dim, **ff_kwargs) layer = layer if not macaron else Scale(0.5, layer) else: raise Exception(f'invalid layer type {layer_type}') if layer_shift_tokens > 0: shift_range_upper = layer_shift_tokens + 1 shift_range_lower = -layer_shift_tokens if not causal else 0 layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer) residual_fn = GRUGating if 
gate_residual else Residual residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant) pre_branch_norm = norm_fn() if pre_norm else None post_branch_norm = norm_fn() if sandwich_norm else None post_main_norm = norm_fn() if not pre_norm else None norms = nn.ModuleList([ pre_branch_norm, post_branch_norm, post_main_norm ]) self.layers.append(nn.ModuleList([ norms, layer, residual ])) if deepnorm: init_gain = (8 * depth) ** -0.25 deepnorm_init(self, init_gain) def forward( self, x, context = None, mask = None, context_mask = None, attn_mask = None, self_attn_context_mask = None, mems = None, return_hiddens = False ): assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True' hiddens = [] layer_hiddens = [] intermediates = [] prev_attn = None prev_cross_attn = None mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers rotary_pos_emb = None if exists(self.rotary_pos_emb): max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems))) rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device) outer_residual = x * self.resi_dual_scale for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)): ind == (len(self.layers) - 1) if self.training and layer_dropout > 0. and random() < layer_dropout: continue if layer_type == 'a': if return_hiddens: hiddens.append(x) layer_mem = mems.pop(0) if mems else None if layer_type == 'c': if self.training and self.cross_attn_tokens_dropout > 0.: context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout) inner_residual = x if return_hiddens: layer_hiddens.append(x) pre_norm, post_branch_norm, post_main_norm = norm if exists(pre_norm): x = pre_norm(x) if layer_type == 'a': out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem) elif layer_type == 'c': out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn) elif layer_type == 'f': out = block(x) if self.resi_dual: outer_residual = outer_residual + out * self.resi_dual_scale if exists(post_branch_norm): out = post_branch_norm(out) x = residual_fn(out, inner_residual) if layer_type in ('a', 'c') and return_hiddens: intermediates.append(inter) if layer_type == 'a' and self.residual_attn: prev_attn = inter.pre_softmax_attn elif layer_type == 'c' and self.cross_residual_attn: prev_cross_attn = inter.pre_softmax_attn if exists(post_main_norm): x = post_main_norm(x) if return_hiddens: layer_hiddens.append(x) if self.resi_dual: x = x + self.final_norm(outer_residual) else: x = self.final_norm(x) if return_hiddens: intermediates = LayerIntermediates( hiddens = hiddens, attn_intermediates = intermediates, layer_hiddens = layer_hiddens ) return x, intermediates return x class Encoder(AttentionLayers): def __init__(self, **kwargs): assert 'causal' not in kwargs, 'cannot set causality on encoder' super().__init__(causal = False, **kwargs) class Decoder(AttentionLayers): def __init__(self, **kwargs): assert 'causal' not in kwargs, 'cannot set causality on decoder' super().__init__(causal = True, **kwargs) class CrossAttender(AttentionLayers): def __init__(self, **kwargs): super().__init__(cross_attend = True, only_cross = True, **kwargs) class 
ViTransformerWrapper(nn.Module): def __init__( self, *, image_size, patch_size, attn_layers, channels = 3, num_classes = None, post_emb_norm = False, emb_dropout = 0. ): super().__init__() assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder' assert divisible_by(image_size, patch_size), 'image dimensions must be divisible by the patch size' dim = attn_layers.dim num_patches = (image_size // patch_size) ** 2 patch_dim = channels * patch_size ** 2 self.patch_size = patch_size self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim)) self.patch_to_embedding = nn.Sequential( nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim) ) self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity() self.dropout = nn.Dropout(emb_dropout) self.attn_layers = attn_layers self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity() def forward( self, img, return_embeddings = False ): p = self.patch_size x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p) x = self.patch_to_embedding(x) n = x.shape[1] x = x + self.pos_embedding[:, :n] x = self.post_emb_norm(x) x = self.dropout(x) x = self.attn_layers(x) if not exists(self.mlp_head) or return_embeddings: return x x = x.mean(dim = -2) return self.mlp_head(x) class Transformer(nn.Module): def __init__( self, *, num_tokens, max_seq_len, attn_layers, emb_dim = None, max_mem_len = 0, shift_mem_down = 0, emb_dropout = 0., post_emb_norm = False, num_memory_tokens = None, tie_embedding = False, logits_dim = None, use_abs_pos_emb = True, scaled_sinu_pos_emb = False, l2norm_embed = False, emb_frac_gradient = 1., # GLM-130B and Cogview successfully used this, set at 0.1 attn_z_loss_weight = 1e-4 ): super().__init__() assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' dim = attn_layers.dim emb_dim = default(emb_dim, dim) self.emb_dim = emb_dim self.num_tokens = num_tokens self.max_seq_len = max_seq_len self.max_mem_len = max_mem_len self.shift_mem_down = shift_mem_down self.l2norm_embed = l2norm_embed self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed) if not (use_abs_pos_emb and not attn_layers.has_pos_emb): self.pos_emb = always(0) elif scaled_sinu_pos_emb: self.pos_emb = ScaledSinusoidalEmbedding(emb_dim) else: self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed) self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290 self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity() self.emb_dropout = nn.Dropout(emb_dropout) self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() self.attn_layers = attn_layers self.init_() logits_dim = default(logits_dim, num_tokens) self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t() # memory tokens (like [cls]) from Memory Transformers paper num_memory_tokens = default(num_memory_tokens, 0) self.num_memory_tokens = num_memory_tokens if num_memory_tokens > 0: self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) def init_(self): if self.l2norm_embed: nn.init.normal_(self.token_emb.emb.weight, std = 1e-5) if not isinstance(self.pos_emb, always): nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5) return nn.init.kaiming_normal_(self.token_emb.emb.weight) def forward( self, x, return_embeddings = False, 
return_logits_and_embeddings = False, return_intermediates = False, mask = None, return_mems = False, return_attn = False, mems = None, pos = None, prepend_embeds = None, sum_embeds = None, return_attn_z_loss = False, attn_z_loss_weight = 1e-4, **kwargs ): b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient return_hiddens = return_mems | return_attn | return_intermediates | return_attn_z_loss # absolute positional embedding external_pos_emb = exists(pos) and pos.dtype != torch.long pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos x = self.token_emb(x) + pos_emb # for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training if exists(sum_embeds): x = x + sum_embeds # post embedding norm, purportedly leads to greater stabilization x = self.post_emb_norm(x) # whether to append embeds, as in PaLI, for image embeddings if exists(prepend_embeds): prepend_seq, prepend_dim = prepend_embeds.shape[1:] assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions' x = torch.cat((prepend_embeds, x), dim = -2) # whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model if emb_frac_gradient < 1: assert emb_frac_gradient > 0 x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient) # embedding dropout x = self.emb_dropout(x) x = self.project_emb(x) if num_mem > 0: mem = repeat(self.memory_tokens, 'n d -> b n d', b = b) x = torch.cat((mem, x), dim = 1) # auto-handle masking after appending memory tokens if exists(mask): mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True) if self.shift_mem_down and exists(mems): mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:] mems = [*mems_r, *mems_l] if return_hiddens: x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs) else: x = self.attn_layers(x, mask = mask, mems = mems, **kwargs) mem, x = x[:, :num_mem], x[:, num_mem:] if return_logits_and_embeddings: out = (self.to_logits(x), x) elif return_embeddings: out = x else: out = self.to_logits(x) if return_attn_z_loss: pre_softmax_attns = list(map(lambda t: t.pre_softmax_attn, intermediates.attn_intermediates)) intermediates.attn_z_loss = calc_z_loss(pre_softmax_attns, weight = attn_z_loss_weight) return_intermediates = True if return_intermediates: return out, intermediates if return_mems: hiddens = intermediates.hiddens new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems)) return out, new_mems if return_attn: attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) return out, attn_maps return out
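# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# A tiny decoder-only Transformer on random token ids, showing both the logits and
# the embeddings return paths. All sizes are toy values.
if __name__ == "__main__":
    tiny = Transformer(
        num_tokens=256,
        max_seq_len=128,
        attn_layers=Decoder(dim=64, depth=2, heads=4),
    )
    tokens = torch.randint(0, 256, (1, 16))
    print(tiny(tokens).shape)                          # (1, 16, 256) logits
    print(tiny(tokens, return_embeddings=True).shape)  # (1, 16, 64) hidden states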
NExT-GPT-main
next/transformer.py
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler, AudioLDMPipeline
from diffusers.utils import export_to_video
import scipy.io.wavfile


class VideoDiffusor:
    def __init__(
        self,
        num_inference_steps: int = 40,
        height=320,
        width=576,
        num_frames: int = 24
    ):
        super().__init__()
        self.num_inference_steps = num_inference_steps
        self.height = height
        self.width = width
        self.num_frames = num_frames

        self.pipe = DiffusionPipeline.from_pretrained(
            "cerspense/zeroscope_v2_576w",
            torch_dtype=torch.float16
        )
        # Swap in the multistep scheduler without replacing the pipeline itself
        self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(self.pipe.scheduler.config)
        self.pipe.enable_model_cpu_offload()

    def create(self, prompt):
        video_frames = self.pipe(
            prompt,
            num_inference_steps=self.num_inference_steps,
            height=self.height,
            width=self.width,
            num_frames=self.num_frames
        ).frames
        video_path = export_to_video(video_frames)
        return video_path


class ImageDiffusor:
    def __init__(self):
        self.pipe = DiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-xl-base-1.0",
            torch_dtype=torch.float16,
            use_safetensors=True,
            variant="fp16"
        )
        self.pipe.to("cuda")

    def create(self, prompt):
        images = self.pipe(prompt=prompt).images[0]
        return images


class AudioDiffusor:
    def __init__(
        self,
        num_inference_steps: int = 10,
        audio_length_in_s: float = 5.0,
    ):
        super().__init__()
        self.num_inference_steps = num_inference_steps
        self.audio_length_in_s = audio_length_in_s

        repo_id = "cvssp/audioldm-s-full-v2"
        self.pipe = AudioLDMPipeline.from_pretrained(
            repo_id,
            torch_dtype=torch.float16
        )
        self.pipe = self.pipe.to("cuda")

    def create(self, prompt):
        audio = self.pipe(
            prompt,
            num_inference_steps=self.num_inference_steps,
            audio_length_in_s=self.audio_length_in_s
        ).audios[0]
        # Write the waveform to disk and return it
        scipy.io.wavfile.write("techno.wav", rate=16000, data=audio)
        return audio
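# Minimal usage sketch (not part of the original file). It assumes a CUDA device
# with enough memory and that the pretrained weights referenced above can be
# downloaded from the Hugging Face Hub; prompts and output filenames are
# illustrative only.
if __name__ == "__main__":
    image = ImageDiffusor().create("a watercolor painting of a lighthouse")
    image.save("lighthouse.png")                      # PIL image from SDXL

    video_path = VideoDiffusor(num_inference_steps=25).create("a corgi running on the beach")
    print(f"video written to {video_path}")

    waveform = AudioDiffusor().create("soft rain on a tin roof")
    print(f"generated {waveform.shape[0]} audio samples")  # also saved as techno.wav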
NExT-GPT-main
next/mm_diffusion_decoders.py
from setuptools import setup, find_packages setup( name = 'pegasusX', packages = find_packages(exclude=[]), version = '0.3.9', license='MIT', description = 'pegasus - Pytorch', author = 'Kye Gomez', author_email = '[email protected]', long_description_content_type = 'text/markdown', url = 'https://github.com/kyegomez/pegasus', keywords = [ 'artificial intelligence', 'deep learning', 'optimizers', "Prompt Engineering" ], install_requires=[ 'hnswlib==0.7.0', 'pandas==1.3.5', 'pydantic==1.9.0', 'requests==2.28.1', 'typing_extensions==4.5.0', 'uvicorn[standard]==0.18.3', 'torch', 'torchvision', 'torchaudio', 'timm==0.6.7', 'pytorchvideo', 'ftfy', 'regex', 'einops', 'fvcore', 'decord==0.6.0', 'numba', 'joblib', ], classifiers=[ 'Development Status :: 4 - Beta', 'Intended Audience :: Developers', 'Topic :: Scientific/Engineering :: Artificial Intelligence', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.6', ], )
Pegasus-master
setup.py
# pip install pegasusx
from pegasus.main import Pegasus

# # initialize with text modality
# pegasus_text = Pegasus(modality="text")
# text_data = ['This is a query about artificial intelligence']
# embeddings_text = pegasus_text.embed_text(text_data)

# # initialize with audio modality
# pegasus_audio = Pegasus(modality="audio")
# audio_data = [...]  # Your audio data here => audio file path
# embeddings_audio = pegasus_audio.embed_audio(audio_data)

# text
pegasus = Pegasus(modality="text")  # audio or vision
text_data = ['This is a query about artificial intelligence',
             'Another query about machine learning',
             'Yet another query about deep learning',
             'And one more about natural language processing']

embeddings = pegasus.embed_data(text_data)
print(embeddings)

# audio
pegasus = Pegasus(modality="audio")
audio_data = "./audio.mp3"  # file path of your mp3
embeddings = pegasus.embed_data(audio_data)
print(embeddings)

# vision
pegasus = Pegasus(modality="vision")
vision_data = "stable-diffusion-xl.jpeg"
embedding = pegasus.embed_data(vision_data)
print(embedding)
Pegasus-master
example.py
import logging import torch import data from models import imagebind_model from models.imagebind_model import ModalityType, load_module from models import lora as LoRA logging.basicConfig(level=logging.INFO, force=True) lora = True linear_probing = False device = "cpu" # "cuda:0" if torch.cuda.is_available() else "cpu" load_head_post_proc_finetuned = True assert not (linear_probing and lora), \ "Linear probing is a subset of LoRA training procedure for ImageBind. " \ "Cannot set both linear_probing=True and lora=True. " if lora and not load_head_post_proc_finetuned: # Hack: adjust lora_factor to the `max batch size used during training / temperature` to compensate missing norm lora_factor = 12 / 0.07 else: # This assumes proper loading of all params but results in shift from original dist in case of LoRA lora_factor = 1 text_list=["bird", "car", "dog3", "dog5", "dog8", "grey_sloth_plushie"] image_paths=[".assets/bird_image.jpg", ".assets/car_image.jpg", ".assets/dog3.jpg", ".assets/dog5.jpg", ".assets/dog8.jpg", ".assets/grey_sloth_plushie.jpg"] audio_paths=[".assets/bird_audio.wav", ".assets/car_audio.wav", ".assets/dog_audio.wav"] # Instantiate model model = imagebind_model.imagebind_huge(pretrained=True) if lora: model.modality_trunks.update( LoRA.apply_lora_modality_trunks(model.modality_trunks, rank=4, layer_idxs={ModalityType.TEXT: [0, 1, 2, 3, 4, 5, 6, 7, 8], ModalityType.VISION: [0, 1, 2, 3, 4, 5, 6, 7, 8]}, modality_names=[ModalityType.TEXT, ModalityType.VISION])) # Load LoRA params if found LoRA.load_lora_modality_trunks(model.modality_trunks, checkpoint_dir=".checkpoints/lora/550_epochs_lora", postfix="_dreambooth_last") if load_head_post_proc_finetuned: # Load postprocessors & heads load_module(model.modality_postprocessors, module_name="postprocessors", checkpoint_dir=".checkpoints/lora/550_epochs_lora", postfix="_dreambooth_last") load_module(model.modality_heads, module_name="heads", checkpoint_dir=".checkpoints/lora/550_epochs_lora", postfix="_dreambooth_last") elif linear_probing: # Load heads load_module(model.modality_heads, module_name="heads", checkpoint_dir="./.checkpoints/lora/500_epochs_lp", postfix="_dreambooth_last") model.eval() model.to(device) # Load data inputs = { ModalityType.TEXT: data.load_and_transform_text(text_list, device), ModalityType.VISION: data.load_and_transform_vision_data(image_paths, device, to_tensor=True), ModalityType.AUDIO: data.load_and_transform_audio_data(audio_paths, device), } with torch.no_grad(): embeddings = model(inputs) print( "Vision x Text: ", torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T * (lora_factor if lora else 1), dim=-1), ) print( "Audio x Text: ", torch.softmax(embeddings[ModalityType.AUDIO] @ embeddings[ModalityType.TEXT].T * (lora_factor if lora else 1), dim=-1), ) print( "Vision x Audio: ", torch.softmax(embeddings[ModalityType.VISION] @ embeddings[ModalityType.AUDIO].T, dim=-1), )
Pegasus-master
ImageBind-LoRA/example.py
# Based on PyTorch Lightning Tutorial 13 - # SSL : https://lightning.ai/docs/pytorch/stable/notebooks/course_UvA-DL/13-contrastive-learning.html # Modified by Fares Abawi (@fabawi). import logging import os import argparse try: import comet_ml except ImportError: comet_ml = None try: import wandb except ImportError: wandb = None try: import matplotlib.pyplot as plt except ImportError: plt = None logging.warning("Matplotlib not installed. This is not needed if you run this script as --headless") import lightning as L from lightning.pytorch import Trainer, seed_everything from lightning.pytorch.callbacks import ModelCheckpoint from lightning.pytorch import loggers as pl_loggers import torch import torch.nn.functional as F import torch.optim as optim from torch.utils.data import DataLoader, ConcatDataset import torchvision from torchvision import transforms from models import imagebind_model from models import lora as LoRA from models.imagebind_model import ModalityType, load_module, save_module logging.basicConfig(level=logging.INFO, force=True) # Logging settings LOG_ON_STEP = True LOG_ON_EPOCH = True class ContrastiveTransformations: def __init__(self, base_transforms, n_views=2): self.base_transforms = base_transforms self.n_views = n_views def __call__(self, x): return [self.base_transforms(x) for _ in range(self.n_views)] class ImageBindTrain(L.LightningModule): def __init__(self, lr=5e-4, weight_decay=1e-4, max_epochs=500, batch_size=32, num_workers=4, seed=42, self_contrast=False, temperature=0.07, momentum_betas=(0.9, 0.95), lora=False, lora_rank=4, lora_checkpoint_dir="./.checkpoints/lora", lora_layer_idxs=None, lora_modality_names=None, linear_probing=False ): super().__init__() assert not (linear_probing and lora), \ "Linear probing is a subset of LoRA training procedure for ImageBind. " \ "Cannot set both linear_probing=True and lora=True. 
" \ "Linear probing stores params in lora_checkpoint_dir" self.save_hyperparameters() # Load full pretrained ImageBind model self.model = imagebind_model.imagebind_huge(pretrained=True) if lora: for modality_preprocessor in self.model.modality_preprocessors.children(): modality_preprocessor.requires_grad_(False) for modality_trunk in self.model.modality_trunks.children(): modality_trunk.requires_grad_(False) self.model.modality_trunks.update(LoRA.apply_lora_modality_trunks(self.model.modality_trunks, rank=lora_rank, layer_idxs=lora_layer_idxs, modality_names=lora_modality_names)) LoRA.load_lora_modality_trunks(self.model.modality_trunks, checkpoint_dir=lora_checkpoint_dir) # Load postprocessors & heads load_module(self.model.modality_postprocessors, module_name="postprocessors", checkpoint_dir=lora_checkpoint_dir) load_module(self.model.modality_heads, module_name="heads", checkpoint_dir=lora_checkpoint_dir) elif linear_probing: for modality_preprocessor in self.model.modality_preprocessors.children(): modality_preprocessor.requires_grad_(False) for modality_trunk in self.model.modality_trunks.children(): modality_trunk.requires_grad_(False) for modality_postprocessor in self.model.modality_postprocessors.children(): modality_postprocessor.requires_grad_(False) load_module(self.model.modality_heads, module_name="heads", checkpoint_dir=lora_checkpoint_dir) for modality_head in self.model.modality_heads.children(): modality_head.requires_grad_(False) final_layer = list(modality_head.children())[-1] final_layer.requires_grad_(True) def configure_optimizers(self): optimizer = optim.AdamW(self.parameters(), lr=self.hparams.lr, weight_decay=self.hparams.weight_decay, betas=self.hparams.momentum_betas) lr_scheduler = optim.lr_scheduler.CosineAnnealingLR( optimizer, T_max=self.hparams.max_epochs, eta_min=self.hparams.lr / 50 ) return [optimizer], [lr_scheduler] def info_nce_loss(self, batch, mode="train"): data_a, class_a, data_b, class_b = batch # class_a is always "vision" according to ImageBind feats_a = [self.model({class_a[0]: data_a_i}) for data_a_i in data_a] feats_a_tensor = torch.cat([list(dict_.values())[0] for dict_ in feats_a], dim=0) # class_b could be any modality feats_b = [self.model({class_b[idx]: data_b_i}) for idx, data_b_i in enumerate(data_b)] feats_b_tensor = torch.cat([list(dict_.values())[0] for dict_ in feats_b], dim=0) if self.hparams.self_contrast: feats_a_b_tensor = torch.cat([feats_a_tensor.chunk(2)[0], feats_b_tensor], dim=0) feats_tensors = [feats_a_tensor, feats_a_b_tensor] temperatures = [1, self.hparams.temperature] contrast = ["self", "cross"] else: feats_a_b_tensor = torch.cat([feats_a_tensor, feats_b_tensor], dim=0) feats_tensors = [feats_a_b_tensor] temperatures = [self.hparams.temperature] contrast = ["cross"] # Accumulate self-contrastive loss for image and its augmentation, and modailty with image dual_nll = False for feats_idx, feats_tensor in enumerate(feats_tensors): # Calculate cosine similarity cos_sim = F.cosine_similarity(feats_tensor[:, None, :], feats_tensor[None, :, :], dim=-1) # Mask out cosine similarity to itself self_mask = torch.eye(cos_sim.shape[0], dtype=torch.bool, device=cos_sim.device) cos_sim.masked_fill_(self_mask, -9e15) # Find positive example -> batch_size//2 away from the original example pos_mask = self_mask.roll(shifts=cos_sim.shape[0] // 2, dims=0) # InfoNCE loss cos_sim = cos_sim / temperatures[feats_idx] nll = -cos_sim[pos_mask] + torch.logsumexp(cos_sim, dim=-1) nll = nll.mean() if not dual_nll: dual_nll = nll else: dual_nll 
+= nll dual_nll /= 2 # Logging loss self.log(mode + "_loss_" + contrast[feats_idx], nll, prog_bar=True, on_step=LOG_ON_STEP, on_epoch=LOG_ON_EPOCH, batch_size=self.hparams.batch_size) # Get ranking position of positive example comb_sim = torch.cat( [cos_sim[pos_mask][:, None], cos_sim.masked_fill(pos_mask, -9e15)], # First position positive example dim=-1, ) sim_argsort = comb_sim.argsort(dim=-1, descending=True).argmin(dim=-1) # Logging ranking metrics self.log(mode + "_acc_top1", (sim_argsort == 0).float().mean(), prog_bar=True, on_step=LOG_ON_STEP, on_epoch=LOG_ON_EPOCH, batch_size=self.hparams.batch_size) self.log(mode + "_acc_top5", (sim_argsort < 5).float().mean(), prog_bar=True, on_step=LOG_ON_STEP, on_epoch=LOG_ON_EPOCH, batch_size=self.hparams.batch_size) self.log(mode + "_acc_mean_pos", 1 + sim_argsort.float().mean(), prog_bar=True, on_step=LOG_ON_STEP, on_epoch=LOG_ON_EPOCH, batch_size=self.hparams.batch_size) self.log(mode + "_loss", dual_nll, prog_bar=True, on_step=LOG_ON_STEP, on_epoch=LOG_ON_EPOCH, batch_size=self.hparams.batch_size) return dual_nll def training_step(self, batch, batch_idx): return self.info_nce_loss(batch, mode="train") def validation_step(self, batch, batch_idx): self.info_nce_loss(batch, mode="val") def on_validation_epoch_end(self): if self.hparams.lora: # Save LoRA checkpoint LoRA.save_lora_modality_trunks(self.model.modality_trunks, checkpoint_dir=self.hparams.lora_checkpoint_dir) # Save postprocessors & heads save_module(self.model.modality_postprocessors, module_name="postprocessors", checkpoint_dir=self.hparams.lora_checkpoint_dir) save_module(self.model.modality_heads, module_name="heads", checkpoint_dir=self.hparams.lora_checkpoint_dir) elif self.hparams.linear_probing: # Save postprocessors & heads save_module(self.model.modality_heads, module_name="heads", checkpoint_dir=self.hparams.lora_checkpoint_dir) def parse_args(): parser = argparse.ArgumentParser(description="Train the ImageBind model with PyTorch Lightning and LoRA.") parser.add_argument("--seed", type=int, default=43, help="Random seed for reproducibility") parser.add_argument("--device", type=str, default="cpu", help="Device to use for training ('cpu' or 'cuda')") parser.add_argument("--datasets_dir", type=str, default="./.datasets", help="Directory containing the datasets") parser.add_argument("--datasets", type=str, nargs="+", default=["dreambooth"], choices=["dreambooth"], help="Datasets to use for training and validation") parser.add_argument("--full_model_checkpoint_dir", type=str, default="./.checkpoints/full", help="Directory to save the full model checkpoints") parser.add_argument("--full_model_checkpointing", action="store_true", help="Save full model checkpoints") parser.add_argument("--loggers", type=str, nargs="+", choices=["tensorboard", "wandb", "comet", "mlflow"], help="Loggers to use for logging") parser.add_argument("--loggers_dir", type=str, default="./.logs", help="Directory to save the logs") parser.add_argument("--headless", action="store_true", help="Run in headless mode (Don't plot samples on start)") parser.add_argument("--max_epochs", type=int, default=500, help="Maximum number of epochs to train") parser.add_argument("--batch_size", type=int, default=12, help="Batch size for training and validation") parser.add_argument("--lr", type=float, default=5e-6, help="Learning rate") parser.add_argument("--weight_decay", type=float, default=1e-4, help="Weight decay") parser.add_argument("--momentum_betas", nargs=2, type=float, default=[0.9, 0.95], help="Momentum beta 
1 and 2 for Adam optimizer") parser.add_argument("--gradient_clip_val", type=float, default=1.0, help="Gradient clipping value") parser.add_argument("--temperature", type=float, default=0.07, help="Temperature parameter for InfoNCE loss") parser.add_argument("--num_workers", type=int, default=0, help="Number of workers for data loading") parser.add_argument("--self_contrast", action="store_true", help="Use self-contrast on the image modality") parser.add_argument("--lora", action="store_true", help="Use LoRA") parser.add_argument("--lora_rank", type=int, default=4, help="Rank of LoRA layers") parser.add_argument("--lora_checkpoint_dir", type=str, default="./.checkpoints/lora", help="Directory to save LoRA checkpoint") parser.add_argument("--lora_modality_names", nargs="+", type=str, default=["vision", "text"], choices=["vision", "text", "audio", "thermal", "depth", "imu"], help="Modality names to apply LoRA") parser.add_argument("--lora_layer_idxs", nargs="+", type=int, help="Layer indices to apply LoRA") parser.add_argument("--lora_layer_idxs_vision", nargs="+", type=int, help="Layer indices to apply LoRA for vision modality. Overrides lora_layer_idxs if specified") parser.add_argument("--lora_layer_idxs_text", nargs="+", type=int, help="Layer indices to apply LoRA for text modality. Overrides lora_layer_idxs if specified") parser.add_argument("--lora_layer_idxs_audio", nargs="+", type=int, help="Layer indices to apply LoRA for audio modality. Overrides lora_layer_idxs if specified") parser.add_argument("--lora_layer_idxs_thermal", nargs="+", type=int, help="Layer indices to apply LoRA for thermal modality. Overrides lora_layer_idxs if specified") parser.add_argument("--lora_layer_idxs_depth", nargs="+", type=int, help="Layer indices to apply LoRA for depth modality. Overrides lora_layer_idxs if specified") parser.add_argument("--lora_layer_idxs_imu", nargs="+", type=int, help="Layer indices to apply LoRA for imu modality. 
Overrides lora_layer_idxs if specified") parser.add_argument("--linear_probing", action="store_true", help="Freeze model and train the last layers of the head for each modality.") return parser.parse_args() if __name__ == "__main__": args = parse_args() # Create loggers loggers = [] for logger in args.loggers if args.loggers is not None else []: if logger == "wandb": wandb.init(project="imagebind", config=args) wandb_logger = pl_loggers.WandbLogger( save_dir=args.loggers_dir, name="imagebind") loggers.append(wandb_logger) elif logger == "tensorboard": tensorboard_logger = pl_loggers.TensorBoardLogger( save_dir=args.loggers_dir, name="imagebind") loggers.append(tensorboard_logger) elif logger == "comet": comet_logger = pl_loggers.CometLogger( save_dir=args.loggers_dir, api_key=os.environ["COMET_API_KEY"], workspace=os.environ["COMET_WORKSPACE"], project_name=os.environ["COMET_PROJECT_NAME"], experiment_name=os.environ.get("COMET_EXPERIMENT_NAME", None), ) loggers.append(comet_logger) elif logger == "mlflow": mlflow_logger = pl_loggers.MLFlowLogger( save_dir=args.loggers_dir, experiment_name=os.environ["MLFLOW_EXPERIMENT_NAME"], tracking_uri=os.environ["MLFLOW_TRACKING_URI"], run_name="imagebind" ) loggers.append(mlflow_logger) else: raise ValueError(f"Unknown logger: {logger}") # Set experiment properties seed_everything(args.seed, workers=True) torch.backends.cudnn.determinstic = True device_name = args.device # "cuda:0" if torch.cuda.is_available() else "cpu" device = torch.device(device_name) contrast_transforms = transforms.Compose( [ transforms.RandomHorizontalFlip(), transforms.RandomResizedCrop(size=224), transforms.RandomApply([transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.1)], p=0.8), transforms.RandomGrayscale(p=0.2), transforms.GaussianBlur(kernel_size=9), transforms.ToTensor(), transforms.Normalize( mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711), ), ] ) train_datasets = [] test_datasets = [] # Load datasets if "dreambooth" in args.datasets: from datasets.dreambooth import DreamBoothDataset train_datasets.append(DreamBoothDataset( root_dir=os.path.join(args.datasets_dir, "dreambooth", "dataset"), split="train", transform=ContrastiveTransformations(contrast_transforms, n_views=2 if args.self_contrast else 1))) test_datasets.append(DreamBoothDataset( root_dir=os.path.join(args.datasets_dir, "dreambooth", "dataset"), split="test", transform=ContrastiveTransformations(contrast_transforms, n_views=2 if args.self_contrast else 1))) if len(args.datasets) == 1: train_dataset = train_datasets[0] test_dataset = test_datasets[0] else: train_dataset = ConcatDataset(train_datasets) test_dataset = ConcatDataset(test_datasets) train_loader = DataLoader( train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=True, pin_memory=False, num_workers=args.num_workers, ) val_loader = DataLoader( test_dataset, batch_size=args.batch_size, shuffle=False, drop_last=False, pin_memory=False, num_workers=args.num_workers, ) # Visualize some examples if not args.headless: NUM_IMAGES = args.batch_size imgs = [torch.stack(train_dataset[idx][0], dim=0) for idx in range(NUM_IMAGES)] imgs = torch.stack(imgs, dim=0) img_grid = torchvision.utils.make_grid(imgs.reshape(-1, *imgs.shape[2:]), nrow=6, normalize=True, pad_value=0.9) img_grid = img_grid.permute(1, 2, 0) plt.figure(figsize=(10, 5)) plt.title(f"Augmented image examples of the available datasets: {args.datasets}") plt.imshow(img_grid.cpu()) plt.axis("off") plt.show() plt.close() # 
Parse indices of layers to apply LoRA lora_layer_idxs = {} lora_modality_names = [] modalities = ["vision", "text", "audio", "thermal", "depth", "imu"] for modality_name in args.lora_modality_names: if modality_name in modalities: modality_type = getattr(ModalityType, modality_name.upper()) lora_layer_idxs[modality_type] = getattr(args, f'lora_layer_idxs_{modality_name}', None) if not lora_layer_idxs[modality_type]: lora_layer_idxs[modality_type] = None lora_modality_names.append(modality_type) else: raise ValueError(f"Unknown modality name: {modality_name}") # Train dataset model = ImageBindTrain(max_epochs=args.max_epochs, batch_size=args.batch_size, lr=args.lr, weight_decay=args.weight_decay, momentum_betas=args.momentum_betas, temperature=args.temperature, num_workers=args.num_workers, self_contrast=args.self_contrast, lora=args.lora, lora_rank=args.lora_rank, lora_checkpoint_dir=args.lora_checkpoint_dir, lora_layer_idxs=lora_layer_idxs if lora_layer_idxs else None, lora_modality_names=lora_modality_names if lora_modality_names else None, linear_probing=args.linear_probing) if args.full_model_checkpointing: checkpointing = {"enable_checkpointing": args.full_model_checkpointing, "callbacks": [ModelCheckpoint(monitor="val_loss", dirpath=args.full_model_checkpoint_dir, filename="imagebind-{epoch:02d}-{val_loss:.2f}", save_last=True, mode="min")]} else: checkpointing = {"enable_checkpointing": args.full_model_checkpointing,} trainer = Trainer(accelerator="gpu" if "cuda" in device_name else "cpu", devices=1 if ":" not in device_name else [int(device_name.split(":")[1])], deterministic=True, max_epochs=args.max_epochs, gradient_clip_val=args.gradient_clip_val, logger=loggers if loggers else None, **checkpointing) trainer.fit(model, train_loader, val_loader)
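# Standalone sketch (not part of the training script): the cross-modal InfoNCE
# objective computed in ImageBindTrain.info_nce_loss above, reduced to a few
# lines on dummy features so the masking / positive-pair logic is easy to read.
# Intended for a scratch file rather than for importing alongside this script.
import torch
import torch.nn.functional as F

def info_nce_sketch(feats_a, feats_b, temperature=0.07):
    # Concatenate both modalities; each sample's positive sits batch_size rows away.
    feats = torch.cat([feats_a, feats_b], dim=0)
    cos_sim = F.cosine_similarity(feats[:, None, :], feats[None, :, :], dim=-1)
    self_mask = torch.eye(cos_sim.shape[0], dtype=torch.bool, device=cos_sim.device)
    cos_sim.masked_fill_(self_mask, -9e15)                       # exclude self-similarity
    pos_mask = self_mask.roll(shifts=cos_sim.shape[0] // 2, dims=0)
    cos_sim = cos_sim / temperature
    return (-cos_sim[pos_mask] + torch.logsumexp(cos_sim, dim=-1)).mean()

# Example: info_nce_sketch(torch.randn(4, 8), torch.randn(4, 8)) -> scalar loss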
Pegasus-master
ImageBind-LoRA/train.py
#!/usr/bin/env python3 # Portions Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import logging import math import torch import torch.nn as nn import torchaudio from PIL import Image from pytorchvideo import transforms as pv_transforms from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler from pytorchvideo.data.encoded_video import EncodedVideo from torchvision import transforms from torchvision.transforms._transforms_video import NormalizeVideo from models.multimodal_preprocessors import SimpleTokenizer DEFAULT_AUDIO_FRAME_SHIFT_MS = 10 # in milliseconds BPE_PATH = "bpe/bpe_simple_vocab_16e6.txt.gz" def waveform2melspec(waveform, sample_rate, num_mel_bins, target_length): # Based on https://github.com/YuanGongND/ast/blob/d7d8b4b8e06cdaeb6c843cdb38794c1c7692234c/src/dataloader.py#L102 waveform -= waveform.mean() fbank = torchaudio.compliance.kaldi.fbank( waveform, htk_compat=True, sample_frequency=sample_rate, use_energy=False, window_type="hanning", num_mel_bins=num_mel_bins, dither=0.0, frame_length=25, frame_shift=DEFAULT_AUDIO_FRAME_SHIFT_MS, ) # Convert to [mel_bins, num_frames] shape fbank = fbank.transpose(0, 1) # Pad to target_length n_frames = fbank.size(1) p = target_length - n_frames # if p is too large (say >20%), flash a warning if abs(p) / n_frames > 0.2: logging.warning( "Large gap between audio n_frames(%d) and " "target_length (%d). Is the audio_target_length " "setting correct?", n_frames, target_length, ) # cut and pad if p > 0: fbank = torch.nn.functional.pad(fbank, (0, p), mode="constant", value=0) elif p < 0: fbank = fbank[:, 0:target_length] # Convert to [1, mel_bins, num_frames] shape, essentially like a 1 # channel image fbank = fbank.unsqueeze(0) return fbank def get_clip_timepoints(clip_sampler, duration): # Read out all clips in this video all_clips_timepoints = [] is_last_clip = False end = 0.0 while not is_last_clip: start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None) all_clips_timepoints.append((start, end)) return all_clips_timepoints def load_and_transform_vision_data(image_paths, device, to_tensor=True): if image_paths is None: return None image_ouputs = [] for image_path in image_paths: if to_tensor: data_transform = transforms.Compose( [ transforms.Resize( 224, interpolation=transforms.InterpolationMode.BICUBIC ), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize( mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711), ), ] ) else: data_transform = transforms.Compose( [ transforms.Resize( 224, interpolation=transforms.InterpolationMode.BICUBIC ), transforms.CenterCrop(224) ] ) with open(image_path, "rb") as fopen: image = Image.open(fopen).convert("RGB") if to_tensor: image = data_transform(image).to(device) image_ouputs.append(image) else: image = data_transform(image) image_ouputs.append(image) return image_ouputs if not to_tensor else torch.stack(image_ouputs, dim=0) def load_and_transform_text(text, device): if text is None: return None tokenizer = SimpleTokenizer(bpe_path=BPE_PATH) tokens = [tokenizer(t).unsqueeze(0).to(device) for t in text] tokens = torch.cat(tokens, dim=0) return tokens def load_and_transform_audio_data( audio_paths, device, num_mel_bins=128, target_length=204, sample_rate=16000, clip_duration=2, clips_per_video=3, mean=-4.268, std=9.138, ): if audio_paths is None: return None audio_outputs = 
[] clip_sampler = ConstantClipsPerVideoSampler( clip_duration=clip_duration, clips_per_video=clips_per_video ) for audio_path in audio_paths: waveform, sr = torchaudio.load(audio_path) if sample_rate != sr: waveform = torchaudio.functional.resample( waveform, orig_freq=sr, new_freq=sample_rate ) all_clips_timepoints = get_clip_timepoints( clip_sampler, waveform.size(1) / sample_rate ) all_clips = [] for clip_timepoints in all_clips_timepoints: waveform_clip = waveform[ :, int(clip_timepoints[0] * sample_rate) : int( clip_timepoints[1] * sample_rate ), ] waveform_melspec = waveform2melspec( waveform_clip, sample_rate, num_mel_bins, target_length ) all_clips.append(waveform_melspec) normalize = transforms.Normalize(mean=mean, std=std) all_clips = [normalize(ac).to(device) for ac in all_clips] all_clips = torch.stack(all_clips, dim=0) audio_outputs.append(all_clips) return torch.stack(audio_outputs, dim=0) def crop_boxes(boxes, x_offset, y_offset): """ Perform crop on the bounding boxes given the offsets. Args: boxes (ndarray or None): bounding boxes to perform crop. The dimension is `num boxes` x 4. x_offset (int): cropping offset in the x axis. y_offset (int): cropping offset in the y axis. Returns: cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4. """ cropped_boxes = boxes.copy() cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset return cropped_boxes def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None): """ Perform uniform spatial sampling on the images and corresponding boxes. Args: images (tensor): images to perform uniform crop. The dimension is `num frames` x `channel` x `height` x `width`. size (int): size of height and weight to crop the images. spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width is larger than height. Or 0, 1, or 2 for top, center, and bottom crop if height is larger than width. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. scale_size (int): optinal. If not None, resize the images to scale_size before performing any crop. Returns: cropped (tensor): images with dimension of `num frames` x `channel` x `size` x `size`. cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4. """ assert spatial_idx in [0, 1, 2] ndim = len(images.shape) if ndim == 3: images = images.unsqueeze(0) height = images.shape[2] width = images.shape[3] if scale_size is not None: if width <= height: width, height = scale_size, int(height / width * scale_size) else: width, height = int(width / height * scale_size), scale_size images = torch.nn.functional.interpolate( images, size=(height, width), mode="bilinear", align_corners=False, ) y_offset = int(math.ceil((height - size) / 2)) x_offset = int(math.ceil((width - size) / 2)) if height > width: if spatial_idx == 0: y_offset = 0 elif spatial_idx == 2: y_offset = height - size else: if spatial_idx == 0: x_offset = 0 elif spatial_idx == 2: x_offset = width - size cropped = images[:, :, y_offset : y_offset + size, x_offset : x_offset + size] cropped_boxes = crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None if ndim == 3: cropped = cropped.squeeze(0) return cropped, cropped_boxes class SpatialCrop(nn.Module): """ Convert the video into 3 smaller clips spatially. 
Must be used after the temporal crops to get spatial crops, and should be used with -2 in the spatial crop at the slowfast augmentation stage (so full frames are passed in here). Will return a larger list with the 3x spatial crops as well. """ def __init__(self, crop_size: int = 224, num_crops: int = 3): super().__init__() self.crop_size = crop_size if num_crops == 3: self.crops_to_ext = [0, 1, 2] self.flipped_crops_to_ext = [] elif num_crops == 1: self.crops_to_ext = [1] self.flipped_crops_to_ext = [] else: raise NotImplementedError("Nothing else supported yet") def forward(self, videos): """ Args: videos: A list of C, T, H, W videos. Returns: videos: A list with 3x the number of elements. Each video converted to C, T, H', W' by spatial cropping. """ assert isinstance(videos, list), "Must be a list of videos after temporal crops" assert all([video.ndim == 4 for video in videos]), "Must be (C,T,H,W)" res = [] for video in videos: for spatial_idx in self.crops_to_ext: res.append(uniform_crop(video, self.crop_size, spatial_idx)[0]) if not self.flipped_crops_to_ext: continue flipped_video = transforms.functional.hflip(video) for spatial_idx in self.flipped_crops_to_ext: res.append(uniform_crop(flipped_video, self.crop_size, spatial_idx)[0]) return res def load_and_transform_video_data( video_paths, device, clip_duration=2, clips_per_video=5, sample_rate=16000, ): if video_paths is None: return None video_outputs = [] video_transform = transforms.Compose( [ pv_transforms.ShortSideScale(224), NormalizeVideo( mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711), ), ] ) clip_sampler = ConstantClipsPerVideoSampler( clip_duration=clip_duration, clips_per_video=clips_per_video ) frame_sampler = pv_transforms.UniformTemporalSubsample(num_samples=clip_duration) for video_path in video_paths: video = EncodedVideo.from_path( video_path, decoder="decord", decode_audio=False, **{"sample_rate": sample_rate}, ) all_clips_timepoints = get_clip_timepoints(clip_sampler, video.duration) all_video = [] for clip_timepoints in all_clips_timepoints: # Read the clip, get frames clip = video.get_clip(clip_timepoints[0], clip_timepoints[1]) if clip is None: raise ValueError("No clip found") video_clip = frame_sampler(clip["video"]) video_clip = video_clip / 255.0 # since this is float, need 0-1 all_video.append(video_clip) all_video = [video_transform(clip) for clip in all_video] all_video = SpatialCrop(224, num_crops=3)(all_video) all_video = torch.stack(all_video, dim=0) video_outputs.append(all_video) return torch.stack(video_outputs, dim=0).to(device)
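# Minimal usage sketch (not part of the original file): loading one example per
# modality with the helpers above. The file paths follow the sample assets used
# by the repo's example script and are assumed to exist locally.
if __name__ == "__main__":
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    vision = load_and_transform_vision_data([".assets/dog3.jpg"], device)
    text = load_and_transform_text(["a photo of a dog"], device)
    audio = load_and_transform_audio_data([".assets/dog_audio.wav"], device)
    # vision: (1, 3, 224, 224), text: (1, 77), audio: (1, clips_per_video, 1, mel, frames)
    print(vision.shape, text.shape, audio.shape)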
Pegasus-master
ImageBind-LoRA/data.py
Pegasus-master
ImageBind-LoRA/datasets/__init__.py
import os from typing import Optional, Callable from sklearn.model_selection import train_test_split from torch.utils.data import Dataset from models.imagebind_model import ModalityType import data class DreamBoothDataset(Dataset): def __init__(self, root_dir: str, transform: Optional[Callable] = None, split: str = 'train', train_size: float = 0.8, random_seed: int = 42, device: str = 'cpu'): self.root_dir = root_dir self.transform = transform self.device = device self.classes = [d for d in os.listdir(root_dir) if os.path.isdir(os.path.join(root_dir, d))] self.class_to_idx = {cls: idx for idx, cls in enumerate(self.classes)} self.paths = [] for cls in self.classes: cls_dir = os.path.join(root_dir, cls) for filename in os.listdir(cls_dir): if filename.endswith('.jpg'): self.paths.append((os.path.join(cls_dir, filename), cls)) # Split dataset train_paths, test_paths = train_test_split(self.paths, train_size=train_size, random_state=random_seed) if split == 'train': self.paths = train_paths elif split == 'test': self.paths = test_paths else: raise ValueError(f"Invalid split argument. Expected 'train' or 'test', got {split}") def __len__(self): return len(self.paths) def __getitem__(self, index): img_path, class_text = self.paths[index] images = data.load_and_transform_vision_data([img_path], self.device, to_tensor=False) if self.transform is not None: image = images[0] images = self.transform(image) texts = data.load_and_transform_text([class_text], self.device) return images, ModalityType.VISION, texts, ModalityType.TEXT
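# Minimal usage sketch (not part of the original file): iterating the dataset
# with a plain DataLoader. The root directory mirrors the default layout used by
# the training script (./.datasets/dreambooth/dataset), and a simple ToTensor
# transform stands in for the contrastive augmentations used during training.
if __name__ == "__main__":
    from torch.utils.data import DataLoader
    from torchvision import transforms

    dataset = DreamBoothDataset(
        root_dir="./.datasets/dreambooth/dataset",
        split="train",
        transform=transforms.ToTensor(),
    )
    loader = DataLoader(dataset, batch_size=4, shuffle=True)
    images, modality_a, texts, modality_b = next(iter(loader))
    print(modality_a, modality_b, images.shape, texts.shape)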
Pegasus-master
ImageBind-LoRA/datasets/dreambooth.py
Pegasus-master
ImageBind-LoRA/models/__init__.py
#!/usr/bin/env python3 # Portions Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import logging import os from functools import partial from types import SimpleNamespace import torch import torch.nn as nn from models.helpers import (EinOpsRearrange, LearnableLogitScaling, Normalize, SelectElement, SelectEOSAndProject) from models.multimodal_preprocessors import (AudioPreprocessor, IMUPreprocessor, PadIm2Video, PatchEmbedGeneric, RGBDTPreprocessor, SpatioTemporalPosEmbeddingHelper, TextPreprocessor, ThermalPreprocessor) from models.transformer import MultiheadAttention, SimpleTransformer ModalityType = SimpleNamespace( VISION="vision", TEXT="text", AUDIO="audio", THERMAL="thermal", DEPTH="depth", IMU="imu", ) class ImageBindModel(nn.Module): def __init__( self, video_frames=2, kernel_size=(2, 14, 14), audio_kernel_size=16, audio_stride=10, out_embed_dim=768, vision_embed_dim=1024, vision_num_blocks=24, vision_num_heads=16, audio_embed_dim=768, audio_num_blocks=12, audio_num_heads=12, audio_num_mel_bins=128, audio_target_len=204, audio_drop_path=0.1, text_embed_dim=768, text_num_blocks=12, text_num_heads=12, depth_embed_dim=384, depth_kernel_size=16, depth_num_blocks=12, depth_num_heads=8, depth_drop_path=0.0, thermal_embed_dim=768, thermal_kernel_size=16, thermal_num_blocks=12, thermal_num_heads=12, thermal_drop_path=0.0, imu_embed_dim=512, imu_kernel_size=8, imu_num_blocks=6, imu_num_heads=8, imu_drop_path=0.7, ): super().__init__() self.modality_preprocessors = self._create_modality_preprocessors( video_frames, vision_embed_dim, kernel_size, text_embed_dim, audio_embed_dim, audio_kernel_size, audio_stride, audio_num_mel_bins, audio_target_len, depth_embed_dim, depth_kernel_size, thermal_embed_dim, thermal_kernel_size, imu_embed_dim, ) self.modality_trunks = self._create_modality_trunks( vision_embed_dim, vision_num_blocks, vision_num_heads, text_embed_dim, text_num_blocks, text_num_heads, audio_embed_dim, audio_num_blocks, audio_num_heads, audio_drop_path, depth_embed_dim, depth_num_blocks, depth_num_heads, depth_drop_path, thermal_embed_dim, thermal_num_blocks, thermal_num_heads, thermal_drop_path, imu_embed_dim, imu_num_blocks, imu_num_heads, imu_drop_path, ) self.modality_heads = self._create_modality_heads( out_embed_dim, vision_embed_dim, text_embed_dim, audio_embed_dim, depth_embed_dim, thermal_embed_dim, imu_embed_dim, ) self.modality_postprocessors = self._create_modality_postprocessors( out_embed_dim ) def _create_modality_preprocessors( self, video_frames=2, vision_embed_dim=1024, kernel_size=(2, 14, 14), text_embed_dim=768, audio_embed_dim=768, audio_kernel_size=16, audio_stride=10, audio_num_mel_bins=128, audio_target_len=204, depth_embed_dim=768, depth_kernel_size=16, thermal_embed_dim=768, thermal_kernel_size=16, imu_embed_dim=512, ): rgbt_stem = PatchEmbedGeneric( proj_stem=[ PadIm2Video(pad_type="repeat", ntimes=2), nn.Conv3d( in_channels=3, kernel_size=kernel_size, out_channels=vision_embed_dim, stride=kernel_size, bias=False, ), ] ) rgbt_preprocessor = RGBDTPreprocessor( img_size=[3, video_frames, 224, 224], num_cls_tokens=1, pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), rgbt_stem=rgbt_stem, depth_stem=None, ) text_preprocessor = TextPreprocessor( context_length=77, vocab_size=49408, embed_dim=text_embed_dim, causal_masking=True, ) audio_stem = PatchEmbedGeneric( proj_stem=[ nn.Conv2d( in_channels=1, 
kernel_size=audio_kernel_size, stride=audio_stride, out_channels=audio_embed_dim, bias=False, ), ], norm_layer=nn.LayerNorm(normalized_shape=audio_embed_dim), ) audio_preprocessor = AudioPreprocessor( img_size=[1, audio_num_mel_bins, audio_target_len], num_cls_tokens=1, pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), audio_stem=audio_stem, ) depth_stem = PatchEmbedGeneric( [ nn.Conv2d( kernel_size=depth_kernel_size, in_channels=1, out_channels=depth_embed_dim, stride=depth_kernel_size, bias=False, ), ], norm_layer=nn.LayerNorm(normalized_shape=depth_embed_dim), ) depth_preprocessor = RGBDTPreprocessor( img_size=[1, 224, 224], num_cls_tokens=1, pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), rgbt_stem=None, depth_stem=depth_stem, ) thermal_stem = PatchEmbedGeneric( [ nn.Conv2d( kernel_size=thermal_kernel_size, in_channels=1, out_channels=thermal_embed_dim, stride=thermal_kernel_size, bias=False, ), ], norm_layer=nn.LayerNorm(normalized_shape=thermal_embed_dim), ) thermal_preprocessor = ThermalPreprocessor( img_size=[1, 224, 224], num_cls_tokens=1, pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), thermal_stem=thermal_stem, ) imu_stem = PatchEmbedGeneric( [ nn.Linear( in_features=48, out_features=imu_embed_dim, bias=False, ), ], norm_layer=nn.LayerNorm(normalized_shape=imu_embed_dim), ) imu_preprocessor = IMUPreprocessor( img_size=[6, 2000], num_cls_tokens=1, kernel_size=8, embed_dim=imu_embed_dim, pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True), imu_stem=imu_stem, ) modality_preprocessors = { ModalityType.VISION: rgbt_preprocessor, ModalityType.TEXT: text_preprocessor, ModalityType.AUDIO: audio_preprocessor, ModalityType.DEPTH: depth_preprocessor, ModalityType.THERMAL: thermal_preprocessor, ModalityType.IMU: imu_preprocessor, } return nn.ModuleDict(modality_preprocessors) def _create_modality_trunks( self, vision_embed_dim=1024, vision_num_blocks=24, vision_num_heads=16, text_embed_dim=768, text_num_blocks=12, text_num_heads=12, audio_embed_dim=768, audio_num_blocks=12, audio_num_heads=12, audio_drop_path=0.0, depth_embed_dim=768, depth_num_blocks=12, depth_num_heads=12, depth_drop_path=0.0, thermal_embed_dim=768, thermal_num_blocks=12, thermal_num_heads=12, thermal_drop_path=0.0, imu_embed_dim=512, imu_num_blocks=6, imu_num_heads=8, imu_drop_path=0.7, ): def instantiate_trunk( embed_dim, num_blocks, num_heads, pre_transformer_ln, add_bias_kv, drop_path ): return SimpleTransformer( embed_dim=embed_dim, num_blocks=num_blocks, ffn_dropout_rate=0.0, drop_path_rate=drop_path, attn_target=partial( #v1 MultiheadAttention, embed_dim=embed_dim, num_heads=num_heads, bias=True, add_bias_kv=add_bias_kv, #v2 flash attention blocksparse # FlashBlockSparseMHA( # embed_dim=embed_dim, # num_heads=num_heads, # max_seq_length = 8192, # attn_mask=attn_mask, # add_bias_kv=add_bias_kv, # ) ), pre_transformer_layer=nn.Sequential( nn.LayerNorm(embed_dim, eps=1e-6) if pre_transformer_ln else nn.Identity(), EinOpsRearrange("b l d -> l b d"), ), post_transformer_layer=EinOpsRearrange("l b d -> b l d"), ) modality_trunks = {} modality_trunks[ModalityType.VISION] = instantiate_trunk( vision_embed_dim, vision_num_blocks, vision_num_heads, pre_transformer_ln=True, add_bias_kv=False, drop_path=0.0, ) modality_trunks[ModalityType.TEXT] = instantiate_trunk( text_embed_dim, text_num_blocks, text_num_heads, pre_transformer_ln=False, add_bias_kv=False, drop_path=0.0, ) modality_trunks[ModalityType.AUDIO] = instantiate_trunk( 
audio_embed_dim, audio_num_blocks, audio_num_heads, pre_transformer_ln=False, add_bias_kv=True, drop_path=audio_drop_path, ) modality_trunks[ModalityType.DEPTH] = instantiate_trunk( depth_embed_dim, depth_num_blocks, depth_num_heads, pre_transformer_ln=False, add_bias_kv=True, drop_path=depth_drop_path, ) modality_trunks[ModalityType.THERMAL] = instantiate_trunk( thermal_embed_dim, thermal_num_blocks, thermal_num_heads, pre_transformer_ln=False, add_bias_kv=True, drop_path=thermal_drop_path, ) modality_trunks[ModalityType.IMU] = instantiate_trunk( imu_embed_dim, imu_num_blocks, imu_num_heads, pre_transformer_ln=False, add_bias_kv=True, drop_path=imu_drop_path, ) return nn.ModuleDict(modality_trunks) def _create_modality_heads( self, out_embed_dim, vision_embed_dim, text_embed_dim, audio_embed_dim, depth_embed_dim, thermal_embed_dim, imu_embed_dim, ): modality_heads = {} modality_heads[ModalityType.VISION] = nn.Sequential( nn.LayerNorm(normalized_shape=vision_embed_dim, eps=1e-6), SelectElement(index=0), nn.Linear(vision_embed_dim, out_embed_dim, bias=False), ) modality_heads[ModalityType.TEXT] = SelectEOSAndProject( proj=nn.Sequential( nn.LayerNorm(normalized_shape=text_embed_dim, eps=1e-6), nn.Linear(text_embed_dim, out_embed_dim, bias=False), ) ) modality_heads[ModalityType.AUDIO] = nn.Sequential( nn.LayerNorm(normalized_shape=audio_embed_dim, eps=1e-6), SelectElement(index=0), nn.Linear(audio_embed_dim, out_embed_dim, bias=False), ) modality_heads[ModalityType.DEPTH] = nn.Sequential( nn.LayerNorm(normalized_shape=depth_embed_dim, eps=1e-6), SelectElement(index=0), nn.Linear(depth_embed_dim, out_embed_dim, bias=False), ) modality_heads[ModalityType.THERMAL] = nn.Sequential( nn.LayerNorm(normalized_shape=thermal_embed_dim, eps=1e-6), SelectElement(index=0), nn.Linear(thermal_embed_dim, out_embed_dim, bias=False), ) modality_heads[ModalityType.IMU] = nn.Sequential( nn.LayerNorm(normalized_shape=imu_embed_dim, eps=1e-6), SelectElement(index=0), nn.Dropout(p=0.5), nn.Linear(imu_embed_dim, out_embed_dim, bias=False), ) return nn.ModuleDict(modality_heads) def _create_modality_postprocessors(self, out_embed_dim): modality_postprocessors = {} modality_postprocessors[ModalityType.VISION] = Normalize(dim=-1) modality_postprocessors[ModalityType.TEXT] = nn.Sequential( Normalize(dim=-1), LearnableLogitScaling(learnable=True) ) modality_postprocessors[ModalityType.AUDIO] = nn.Sequential( Normalize(dim=-1), LearnableLogitScaling(logit_scale_init=20.0, learnable=False), ) modality_postprocessors[ModalityType.DEPTH] = nn.Sequential( Normalize(dim=-1), LearnableLogitScaling(logit_scale_init=5.0, learnable=False), ) modality_postprocessors[ModalityType.THERMAL] = nn.Sequential( Normalize(dim=-1), LearnableLogitScaling(logit_scale_init=10.0, learnable=False), ) modality_postprocessors[ModalityType.IMU] = nn.Sequential( Normalize(dim=-1), LearnableLogitScaling(logit_scale_init=5.0, learnable=False), ) return nn.ModuleDict(modality_postprocessors) def forward(self, inputs): outputs = {} for modality_key, modality_value in inputs.items(): reduce_list = ( modality_value.ndim >= 5 ) # Audio and Video inputs consist of multiple clips if reduce_list: B, S = modality_value.shape[:2] modality_value = modality_value.reshape( B * S, *modality_value.shape[2:] ) if modality_value is not None: modality_value = self.modality_preprocessors[modality_key]( **{modality_key: modality_value} ) trunk_inputs = modality_value["trunk"] head_inputs = modality_value["head"] modality_value = 
self.modality_trunks[modality_key](**trunk_inputs) modality_value = self.modality_heads[modality_key]( modality_value, **head_inputs ) modality_value = self.modality_postprocessors[modality_key]( modality_value ) if reduce_list: modality_value = modality_value.reshape(B, S, -1) modality_value = modality_value.mean(dim=1) outputs[modality_key] = modality_value return outputs def imagebind_huge(pretrained=False): model = ImageBindModel( vision_embed_dim=1280, vision_num_blocks=32, vision_num_heads=16, text_embed_dim=1024, text_num_blocks=24, text_num_heads=16, out_embed_dim=1024, audio_drop_path=0.1, imu_drop_path=0.7, ) if pretrained: if not os.path.exists(".checkpoints/imagebind_huge.pth"): print( "Downloading imagebind weights to .checkpoints/imagebind_huge.pth ..." ) os.makedirs(".checkpoints", exist_ok=True) torch.hub.download_url_to_file( "https://dl.fbaipublicfiles.com/imagebind/imagebind_huge.pth", ".checkpoints/imagebind_huge.pth", progress=True, ) model.load_state_dict(torch.load(".checkpoints/imagebind_huge.pth")) return model def save_module(module_dict: nn.ModuleDict, module_name: str = "", checkpoint_dir: str = "./.checkpoints/full", postfix: str = "_last", extension: str = "pth"): try: torch.save(module_dict.state_dict(), os.path.join(checkpoint_dir, f"imagebind-{module_name}{postfix}.{extension}")) logging.info(f"Saved parameters for module {module_name} to {checkpoint_dir}.") except FileNotFoundError: logging.warning(f"Could not save module parameters for {module_name} to {checkpoint_dir}.") def load_module(module_dict: nn.ModuleDict, module_name: str = "", checkpoint_dir: str = "./.checkpoints/full", postfix: str = "_last", extension: str = "pth"): try: module_dict.load_state_dict(torch.load( os.path.join(checkpoint_dir, f"imagebind-{module_name}{postfix}.{extension}")), strict=False) logging.info(f"Loaded parameters for module {module_name} from {checkpoint_dir}.") except FileNotFoundError: logging.warning(f"Could not load module parameters for {module_name} from {checkpoint_dir}.")
Pegasus-master
ImageBind-LoRA/models/imagebind_model.py
#!/usr/bin/env python3 # Portions Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # Code modified from # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py ; # https://github.com/facebookresearch/deit/blob/main/models.py # and https://github.com/facebookresearch/vissl/blob/main/vissl/models/trunks/vision_transformer.py from functools import partial from typing import Callable, List, Optional import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from timm.models.layers import DropPath, trunc_normal_ from flash_attn.flash_blocksparse_attention import FlashBlocksparseMHA class Attention(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.0, proj_drop=0.0, ): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads # NOTE scale factor was wrong in my original version, # can set manually to be compat with prev weights self.scale = qk_scale or head_dim**-0.5 self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x): B, N, C = x.shape qkv = ( self.qkv(x) .reshape(B, N, 3, self.num_heads, C // self.num_heads) .permute(2, 0, 3, 1, 4) ) q, k, v = ( qkv[0], qkv[1], qkv[2], ) # make torchscript happy (cannot use tensor as tuple) attn = (q @ k.transpose(-2, -1)) * self.scale attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x class Mlp(nn.Module): def __init__( self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0, ): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) x = self.drop(x) x = self.fc2(x) x = self.drop(x) return x #v1 class MultiheadAttention(nn.MultiheadAttention): def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): return super().forward(x, x, x, need_weights=False, attn_mask=attn_mask)[0] class MultiheadAttention(FlashBlocksparseMHA): def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): return super().forward(x, x, x, need_weights=False, attn_mask=attn_mask)[0] #v3 class MultiheadAttention(nn.Module): def __init__(self, embed_dim, num_heads, sparsity_config, bias=True, batch_first=True, attention_dropout=0.0, causal=False, max_seq_length=2048, **kwargs): super().__init__() self.flashblocksparse_mha = FlashBlocksparseMHA(embed_dim, num_heads, sparsity_config, bias=bias, batch_first=batch_first, attention_dropout=attention_dropout, causal=causal, max_seq_length=max_seq_length, **kwargs) def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): # FlashBlocksparseMHA uses the same input for query, key, value, and attn_mask return self.flashblocksparse_mha(x, x, x, attn_mask=attn_mask, key_padding_mask=None, need_weights=False)[0] class ViTAttention(Attention): def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): assert attn_mask is None return super().forward(x) class BlockWithMasking(nn.Module): def __init__( self, dim: int, attn_target: Callable, mlp_ratio: int = 4, act_layer: 
Callable = nn.GELU, norm_layer: Callable = nn.LayerNorm, ffn_dropout_rate: float = 0.0, drop_path: float = 0.0, layer_scale_type: Optional[str] = None, layer_scale_init_value: float = 1e-4, ): super().__init__() assert not isinstance( attn_target, nn.Module ), "attn_target should be a Callable. Otherwise attn_target is shared across blocks!" self.attn = attn_target() if drop_path > 0.0: self.drop_path = DropPath(drop_path) else: self.drop_path = nn.Identity() self.norm_1 = norm_layer(dim) mlp_hidden_dim = int(mlp_ratio * dim) self.mlp = Mlp( in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=ffn_dropout_rate, ) self.norm_2 = norm_layer(dim) self.layer_scale_type = layer_scale_type if self.layer_scale_type is not None: assert self.layer_scale_type in [ "per_channel", "scalar", ], f"Found Layer scale type {self.layer_scale_type}" if self.layer_scale_type == "per_channel": # one gamma value per channel gamma_shape = [1, 1, dim] elif self.layer_scale_type == "scalar": # single gamma value for all channels gamma_shape = [1, 1, 1] # two gammas: for each part of the fwd in the encoder self.layer_scale_gamma1 = nn.Parameter( torch.ones(size=gamma_shape) * layer_scale_init_value, requires_grad=True, ) self.layer_scale_gamma2 = nn.Parameter( torch.ones(size=gamma_shape) * layer_scale_init_value, requires_grad=True, ) def forward(self, x: torch.Tensor, attn_mask: torch.Tensor): if self.layer_scale_type is None: x = x + self.drop_path(self.attn(self.norm_1(x), attn_mask)) x = x + self.drop_path(self.mlp(self.norm_2(x))) else: x = ( x + self.drop_path(self.attn(self.norm_1(x), attn_mask)) * self.layer_scale_gamma1 ) x = x + self.drop_path(self.mlp(self.norm_2(x))) * self.layer_scale_gamma2 return x _LAYER_NORM = partial(nn.LayerNorm, eps=1e-6) class SimpleTransformer(nn.Module): def __init__( self, attn_target: Callable, embed_dim: int, num_blocks: int, block: Callable = BlockWithMasking, pre_transformer_layer: Optional[Callable] = None, post_transformer_layer: Optional[Callable] = None, drop_path_rate: float = 0.0, drop_path_type: str = "progressive", norm_layer: Callable = _LAYER_NORM, mlp_ratio: int = 4, ffn_dropout_rate: float = 0.0, layer_scale_type: Optional[str] = None, # from cait; possible values are None, "per_channel", "scalar" layer_scale_init_value: float = 1e-4, # from cait; float weight_init_style: str = "jax", # possible values jax or pytorch ): """ Simple Transformer with the following features 1. Supports masked attention 2. Supports DropPath 3. Supports LayerScale 4. Supports Dropout in Attention and FFN 5. 
Makes few assumptions about the input except that it is a Tensor """ super().__init__() self.pre_transformer_layer = pre_transformer_layer if drop_path_type == "progressive": dpr = [x.item() for x in torch.linspace(0, drop_path_rate, num_blocks)] elif drop_path_type == "uniform": dpr = [drop_path_rate for i in range(num_blocks)] else: raise ValueError(f"Unknown drop_path_type: {drop_path_type}") self.blocks = nn.Sequential( *[ block( dim=embed_dim, attn_target=attn_target, mlp_ratio=mlp_ratio, ffn_dropout_rate=ffn_dropout_rate, drop_path=dpr[i], norm_layer=norm_layer, layer_scale_type=layer_scale_type, layer_scale_init_value=layer_scale_init_value, ) for i in range(num_blocks) ] ) self.post_transformer_layer = post_transformer_layer self.weight_init_style = weight_init_style self.apply(self._init_weights) def _init_weights(self, m): if isinstance(m, nn.Linear): if self.weight_init_style == "jax": # Based on MAE and official Jax ViT implementation torch.nn.init.xavier_uniform_(m.weight) elif self.weight_init_style == "pytorch": # PyTorch ViT uses trunc_normal_ trunc_normal_(m.weight, std=0.02) if m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, (nn.LayerNorm)): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def forward( self, tokens: torch.Tensor, attn_mask: torch.Tensor = None, use_checkpoint: bool = False, checkpoint_every_n: int = 1, checkpoint_blk_ids: Optional[List[int]] = None, ): """ Inputs - tokens: data of shape N x L x D (or L x N x D depending on the attention implementation) - attn: mask of shape L x L Output - x: data of shape N x L x D (or L x N x D depending on the attention implementation) """ if self.pre_transformer_layer: tokens = self.pre_transformer_layer(tokens) if use_checkpoint and checkpoint_blk_ids is None: checkpoint_blk_ids = [ blk_id for blk_id in range(len(self.blocks)) if blk_id % checkpoint_every_n == 0 ] if checkpoint_blk_ids: checkpoint_blk_ids = set(checkpoint_blk_ids) for blk_id, blk in enumerate(self.blocks): if use_checkpoint and blk_id in checkpoint_blk_ids: tokens = checkpoint.checkpoint( blk, tokens, attn_mask, use_reentrant=False ) else: tokens = blk(tokens, attn_mask=attn_mask) if self.post_transformer_layer: tokens = self.post_transformer_layer(tokens) return tokens
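# Minimal usage sketch (not part of the original file): a tiny 2-block encoder
# built from SimpleTransformer with the dense ViT-style attention defined above
# (`partial` and `torch` are already imported at the top of this file). Tokens
# stay in B x L x D layout because no pre/post rearrange layers are supplied;
# running this file directly also requires the flash_attn import above to succeed.
if __name__ == "__main__":
    dim = 64
    encoder = SimpleTransformer(
        attn_target=partial(ViTAttention, dim=dim, num_heads=4),
        embed_dim=dim,
        num_blocks=2,
    )
    tokens = torch.randn(2, 16, dim)
    out = encoder(tokens)          # attn_mask defaults to None, as ViTAttention expects
    print(out.shape)               # torch.Size([2, 16, 64])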
Pegasus-master
ImageBind-LoRA/models/transformer.py
#!/usr/bin/env python3 # Portions Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import gzip import html import io import math from functools import lru_cache from typing import Callable, List, Optional, Tuple import ftfy import numpy as np import regex as re import torch import torch.nn as nn from iopath.common.file_io import g_pathmgr from timm.models.layers import trunc_normal_ from models.helpers import VerboseNNModule, cast_if_src_dtype def get_sinusoid_encoding_table(n_position, d_hid): """Sinusoid position encoding table""" # TODO: make it with torch instead of numpy def get_position_angle_vec(position): return [ position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid) ] sinusoid_table = np.array( [get_position_angle_vec(pos_i) for pos_i in range(n_position)] ) sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 return torch.FloatTensor(sinusoid_table).unsqueeze(0) def interpolate_pos_encoding_2d(target_spatial_size, pos_embed): N = pos_embed.shape[1] if N == target_spatial_size: return pos_embed dim = pos_embed.shape[-1] # nn.functional.interpolate doesn't work with bfloat16 so we cast to float32 pos_embed, updated = cast_if_src_dtype(pos_embed, torch.bfloat16, torch.float32) pos_embed = nn.functional.interpolate( pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute( 0, 3, 1, 2 ), scale_factor=math.sqrt(target_spatial_size / N), mode="bicubic", ) if updated: pos_embed, _ = cast_if_src_dtype(pos_embed, torch.float32, torch.bfloat16) pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return pos_embed def interpolate_pos_encoding( npatch_per_img, pos_embed, patches_layout, input_shape=None, first_patch_idx=1, ): assert first_patch_idx == 0 or first_patch_idx == 1, "there is 1 CLS token or none" N = pos_embed.shape[1] - first_patch_idx # since it's 1 if cls_token exists if npatch_per_img == N: return pos_embed assert ( patches_layout[-1] == patches_layout[-2] ), "Interpolation of pos embed not supported for non-square layouts" class_emb = pos_embed[:, :first_patch_idx] pos_embed = pos_embed[:, first_patch_idx:] if input_shape is None or patches_layout[0] == 1: # simple 2D pos embedding, no temporal component pos_embed = interpolate_pos_encoding_2d(npatch_per_img, pos_embed) elif patches_layout[0] > 1: # pos embed has a temporal component assert len(input_shape) == 4, "temporal interpolation not supported" # we only support 2D interpolation in this case num_frames = patches_layout[0] num_spatial_tokens = patches_layout[1] * patches_layout[2] pos_embed = pos_embed.view(1, num_frames, num_spatial_tokens, -1) # interpolate embedding for zeroth frame pos_embed = interpolate_pos_encoding_2d( npatch_per_img, pos_embed[0, 0, ...].unsqueeze(0) ) else: raise ValueError("This type of interpolation isn't implemented") return torch.cat((class_emb, pos_embed), dim=1) def _get_pos_embedding( npatch_per_img, pos_embed, patches_layout, input_shape, first_patch_idx=1, ): pos_embed = interpolate_pos_encoding( npatch_per_img, pos_embed, patches_layout, input_shape=input_shape, first_patch_idx=first_patch_idx, ) return pos_embed class PatchEmbedGeneric(nn.Module): """ PatchEmbed from Hydra """ def __init__(self, proj_stem, norm_layer: Optional[nn.Module] = None): super().__init__() if len(proj_stem) > 1: self.proj = 
nn.Sequential(*proj_stem) else: # Special case to be able to load pre-trained models that were # trained with a standard stem self.proj = proj_stem[0] self.norm_layer = norm_layer def get_patch_layout(self, img_size): with torch.no_grad(): dummy_img = torch.zeros( [ 1, ] + img_size ) dummy_out = self.proj(dummy_img) embed_dim = dummy_out.shape[1] patches_layout = tuple(dummy_out.shape[2:]) num_patches = np.prod(patches_layout) return patches_layout, num_patches, embed_dim def forward(self, x): x = self.proj(x) # B C (T) H W -> B (T)HW C x = x.flatten(2).transpose(1, 2) if self.norm_layer is not None: x = self.norm_layer(x) return x class SpatioTemporalPosEmbeddingHelper(VerboseNNModule): def __init__( self, patches_layout: List, num_patches: int, num_cls_tokens: int, embed_dim: int, learnable: bool, ) -> None: super().__init__() self.num_cls_tokens = num_cls_tokens self.patches_layout = patches_layout self.num_patches = num_patches self.num_tokens = num_cls_tokens + num_patches self.learnable = learnable if self.learnable: self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, embed_dim)) trunc_normal_(self.pos_embed, std=0.02) else: self.register_buffer( "pos_embed", get_sinusoid_encoding_table(self.num_tokens, embed_dim) ) def get_pos_embedding(self, vision_input, all_vision_tokens): input_shape = vision_input.shape pos_embed = _get_pos_embedding( all_vision_tokens.size(1) - self.num_cls_tokens, pos_embed=self.pos_embed, patches_layout=self.patches_layout, input_shape=input_shape, first_patch_idx=self.num_cls_tokens, ) return pos_embed class RGBDTPreprocessor(VerboseNNModule): def __init__( self, rgbt_stem: PatchEmbedGeneric, depth_stem: Optional[PatchEmbedGeneric], img_size: Tuple = (3, 224, 224), num_cls_tokens: int = 1, pos_embed_fn: Optional[Callable] = None, use_type_embed: bool = False, init_param_style: str = "openclip", ) -> None: super().__init__() stem = rgbt_stem if rgbt_stem is not None else depth_stem ( self.patches_layout, self.num_patches, self.embed_dim, ) = stem.get_patch_layout(img_size) self.rgbt_stem = rgbt_stem self.depth_stem = depth_stem self.use_pos_embed = pos_embed_fn is not None self.use_type_embed = use_type_embed self.num_cls_tokens = num_cls_tokens if self.use_pos_embed: self.pos_embedding_helper = pos_embed_fn( patches_layout=self.patches_layout, num_cls_tokens=num_cls_tokens, num_patches=self.num_patches, embed_dim=self.embed_dim, ) if self.num_cls_tokens > 0: self.cls_token = nn.Parameter( torch.zeros(1, self.num_cls_tokens, self.embed_dim) ) if self.use_type_embed: self.type_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) self.init_parameters(init_param_style) @torch.no_grad() def init_parameters(self, init_param_style): if init_param_style == "openclip": # OpenCLIP style initialization scale = self.embed_dim**-0.5 if self.use_pos_embed: nn.init.normal_(self.pos_embedding_helper.pos_embed) self.pos_embedding_helper.pos_embed *= scale if self.num_cls_tokens > 0: nn.init.normal_(self.cls_token) self.cls_token *= scale elif init_param_style == "vit": self.cls_token.data.fill_(0) else: raise ValueError(f"Unknown init {init_param_style}") if self.use_type_embed: nn.init.normal_(self.type_embed) def tokenize_input_and_cls_pos(self, input, stem, mask): # tokens is of shape B x L x D tokens = stem(input) assert tokens.ndim == 3 assert tokens.shape[2] == self.embed_dim B = tokens.shape[0] if self.num_cls_tokens > 0: class_tokens = self.cls_token.expand( B, -1, -1 ) # stole class_tokens impl from Phil Wang, thanks tokens = torch.cat((class_tokens, 
tokens), dim=1) if self.use_pos_embed: pos_embed = self.pos_embedding_helper.get_pos_embedding(input, tokens) tokens = tokens + pos_embed if self.use_type_embed: tokens = tokens + self.type_embed.expand(B, -1, -1) return tokens def forward(self, vision=None, depth=None, patch_mask=None): if patch_mask is not None: raise NotImplementedError() if vision is not None: vision_tokens = self.tokenize_input_and_cls_pos( vision, self.rgbt_stem, patch_mask ) if depth is not None: depth_tokens = self.tokenize_input_and_cls_pos( depth, self.depth_stem, patch_mask ) # aggregate tokens if vision is not None and depth is not None: final_tokens = vision_tokens + depth_tokens else: final_tokens = vision_tokens if vision is not None else depth_tokens return_dict = { "trunk": { "tokens": final_tokens, }, "head": {}, } return return_dict class AudioPreprocessor(RGBDTPreprocessor): def __init__(self, audio_stem: PatchEmbedGeneric, **kwargs) -> None: super().__init__(rgbt_stem=audio_stem, depth_stem=None, **kwargs) def forward(self, audio=None): return super().forward(vision=audio) class ThermalPreprocessor(RGBDTPreprocessor): def __init__(self, thermal_stem: PatchEmbedGeneric, **kwargs) -> None: super().__init__(rgbt_stem=thermal_stem, depth_stem=None, **kwargs) def forward(self, thermal=None): return super().forward(vision=thermal) def build_causal_attention_mask(context_length): # lazily create causal attention mask, with full attention between the vision tokens # pytorch uses additive attention mask; fill with -inf mask = torch.empty(context_length, context_length, requires_grad=False) mask.fill_(float("-inf")) mask.triu_(1) # zero out the lower diagonal return mask class TextPreprocessor(VerboseNNModule): def __init__( self, vocab_size: int, context_length: int, embed_dim: int, causal_masking: bool, supply_seq_len_to_head: bool = True, num_cls_tokens: int = 0, init_param_style: str = "openclip", ) -> None: super().__init__() self.vocab_size = vocab_size self.context_length = context_length self.token_embedding = nn.Embedding(vocab_size, embed_dim) self.pos_embed = nn.Parameter( torch.empty(1, self.context_length + num_cls_tokens, embed_dim) ) self.causal_masking = causal_masking if self.causal_masking: mask = build_causal_attention_mask(self.context_length) # register the mask as a buffer so it can be moved to the right device self.register_buffer("mask", mask) self.supply_seq_len_to_head = supply_seq_len_to_head self.num_cls_tokens = num_cls_tokens self.embed_dim = embed_dim if num_cls_tokens > 0: assert self.causal_masking is False, "Masking + CLS token isn't implemented" self.cls_token = nn.Parameter( torch.zeros(1, self.num_cls_tokens, embed_dim) ) self.init_parameters(init_param_style) @torch.no_grad() def init_parameters(self, init_param_style="openclip"): # OpenCLIP style initialization nn.init.normal_(self.token_embedding.weight, std=0.02) nn.init.normal_(self.pos_embed, std=0.01) if init_param_style == "openclip": # OpenCLIP style initialization scale = self.embed_dim**-0.5 if self.num_cls_tokens > 0: nn.init.normal_(self.cls_token) self.cls_token *= scale elif init_param_style == "vit": self.cls_token.data.fill_(0) else: raise ValueError(f"Unknown init {init_param_style}") def forward(self, text): # text tokens are of shape B x L x D text_tokens = self.token_embedding(text) # concat CLS tokens if any if self.num_cls_tokens > 0: B = text_tokens.shape[0] class_tokens = self.cls_token.expand( B, -1, -1 ) # stole class_tokens impl from Phil Wang, thanks text_tokens = torch.cat((class_tokens, 
text_tokens), dim=1) text_tokens = text_tokens + self.pos_embed return_dict = { "trunk": { "tokens": text_tokens, }, "head": {}, } # Compute sequence length after adding CLS tokens if self.supply_seq_len_to_head: text_lengths = text.argmax(dim=-1) return_dict["head"] = { "seq_len": text_lengths, } if self.causal_masking: return_dict["trunk"].update({"attn_mask": self.mask}) return return_dict class Im2Video(nn.Module): """Convert an image into a trivial video.""" def __init__(self, time_dim=2): super().__init__() self.time_dim = time_dim def forward(self, x): if x.ndim == 4: # B, C, H, W -> B, C, T, H, W return x.unsqueeze(self.time_dim) elif x.ndim == 5: return x else: raise ValueError(f"Dimension incorrect {x.shape}") class PadIm2Video(Im2Video): def __init__(self, ntimes, pad_type, time_dim=2): super().__init__(time_dim=time_dim) assert ntimes > 0 assert pad_type in ["zero", "repeat"] self.ntimes = ntimes self.pad_type = pad_type def forward(self, x): x = super().forward(x) if x.shape[self.time_dim] == 1: if self.pad_type == "repeat": new_shape = [1] * len(x.shape) new_shape[self.time_dim] = self.ntimes x = x.repeat(new_shape) elif self.pad_type == "zero": padarg = [0, 0] * len(x.shape) padarg[2 * self.time_dim + 1] = self.ntimes - x.shape[self.time_dim] x = nn.functional.pad(x, padarg) return x # Modified from github.com/openai/CLIP @lru_cache() def bytes_to_unicode(): """ Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a signficant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on. """ bs = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) def get_pairs(word): """Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). 
""" pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs def basic_clean(text): text = ftfy.fix_text(text) text = html.unescape(html.unescape(text)) return text.strip() def whitespace_clean(text): text = re.sub(r"\s+", " ", text) text = text.strip() return text class SimpleTokenizer(object): def __init__(self, bpe_path: str, context_length=77): self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} with g_pathmgr.open(bpe_path, "rb") as fh: bpe_bytes = io.BytesIO(fh.read()) merges: List[str] = gzip.open(bpe_bytes).read().decode("utf-8").split("\n") merges = merges[1 : 49152 - 256 - 2 + 1] merges: List[Tuple[str, ...]] = [tuple(merge.split()) for merge in merges] vocab = list(bytes_to_unicode().values()) vocab = vocab + [v + "</w>" for v in vocab] for merge in merges: vocab.append("".join(merge)) vocab.extend(["<|startoftext|>", "<|endoftext|>"]) self.encoder = dict(zip(vocab, range(len(vocab)))) self.decoder = {v: k for k, v in self.encoder.items()} self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = { "<|startoftext|>": "<|startoftext|>", "<|endoftext|>": "<|endoftext|>", } self.pat = re.compile( r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE, ) self.context_length = context_length def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token[:-1]) + (token[-1] + "</w>",) pairs = get_pairs(word) if not pairs: return token + "</w>" while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) new_word.extend(word[i:j]) i = j except: new_word.extend(word[i:]) break if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) self.cache[token] = word return word def encode(self, text): bpe_tokens = [] text = whitespace_clean(basic_clean(text)).lower() for token in re.findall(self.pat, text): token = "".join(self.byte_encoder[b] for b in token.encode("utf-8")) bpe_tokens.extend( self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ") ) return bpe_tokens def decode(self, tokens): text = "".join([self.decoder[token] for token in tokens]) text = ( bytearray([self.byte_decoder[c] for c in text]) .decode("utf-8", errors="replace") .replace("</w>", " ") ) return text def __call__(self, texts, context_length=None): if not context_length: context_length = self.context_length if isinstance(texts, str): texts = [texts] sot_token = self.encoder["<|startoftext|>"] eot_token = self.encoder["<|endoftext|>"] all_tokens = [[sot_token] + self.encode(text) + [eot_token] for text in texts] result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) for i, tokens in enumerate(all_tokens): tokens = tokens[:context_length] result[i, : len(tokens)] = torch.tensor(tokens) if len(result) == 1: return result[0] return result class IMUPreprocessor(VerboseNNModule): def __init__( self, kernel_size: int, imu_stem: PatchEmbedGeneric, embed_dim: int, img_size: Tuple = (6, 2000), num_cls_tokens: int = 1, pos_embed_fn: Optional[Callable] = None, init_param_style: str = "openclip", ) -> None: super().__init__() self.imu_stem = 
imu_stem self.embed_dim = embed_dim self.use_pos_embed = pos_embed_fn is not None self.num_cls_tokens = num_cls_tokens self.kernel_size = kernel_size self.pos_embed = nn.Parameter( torch.empty(1, (img_size[1] // kernel_size) + num_cls_tokens, embed_dim) ) if self.num_cls_tokens > 0: self.cls_token = nn.Parameter( torch.zeros(1, self.num_cls_tokens, self.embed_dim) ) self.init_parameters(init_param_style) @torch.no_grad() def init_parameters(self, init_param_style): nn.init.normal_(self.pos_embed, std=0.01) if init_param_style == "openclip": # OpenCLIP style initialization scale = self.embed_dim**-0.5 if self.num_cls_tokens > 0: nn.init.normal_(self.cls_token) self.cls_token *= scale elif init_param_style == "vit": self.cls_token.data.fill_(0) else: raise ValueError(f"Unknown init {init_param_style}") def tokenize_input_and_cls_pos(self, input, stem): # tokens is of shape B x L x D tokens = stem.norm_layer(stem.proj(input)) assert tokens.ndim == 3 assert tokens.shape[2] == self.embed_dim B = tokens.shape[0] if self.num_cls_tokens > 0: class_tokens = self.cls_token.expand( B, -1, -1 ) # stole class_tokens impl from Phil Wang, thanks tokens = torch.cat((class_tokens, tokens), dim=1) if self.use_pos_embed: tokens = tokens + self.pos_embed return tokens def forward(self, imu): # Patchify imu = imu.unfold( -1, self.kernel_size, self.kernel_size, ).permute(0, 2, 1, 3) imu = imu.reshape(imu.size(0), imu.size(1), -1) imu_tokens = self.tokenize_input_and_cls_pos( imu, self.imu_stem, ) return_dict = { "trunk": { "tokens": imu_tokens, }, "head": {}, } return return_dict
Pegasus-master
ImageBind-LoRA/models/multimodal_preprocessors.py
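A minimal usage sketch for the preprocessing helpers defined above, assuming this file is importable as models.multimodal_preprocessors and that a local BPE vocabulary file is available (the path mirrors the one referenced in pegasus/ImageBind/data.py); it is not part of the original record.

# Sketch: fixed sinusoid positional table and CLIP-style text tokenization.
import torch
from models.multimodal_preprocessors import SimpleTokenizer, get_sinusoid_encoding_table

# 197 tokens (196 patches + 1 CLS) at embedding dim 768.
pos_table = get_sinusoid_encoding_table(197, 768)
print(pos_table.shape)  # torch.Size([1, 197, 768])

# Tokenize a batch of captions to the default context length of 77.
tokenizer = SimpleTokenizer(bpe_path="bpe_simple_vocab_16e6.txt.gz")  # path is an assumption
tokens = tokenizer(["a photo of a dog", "a recording of rain"])
print(tokens.shape)  # torch.Size([2, 77]), padded LongTensor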
# Sheng Wang at Feb 22 2023 # Based on LoRA-ViT: https://github.com/JamesQFreeman/LoRA-ViT/blob/main/lora.py # Modified by Fares Abawi (@fabawi). import logging import os import math from typing import Optional, List, Dict from types import SimpleNamespace import torch import torch.nn as nn from safetensors import safe_open from safetensors.torch import save_file from torch import Tensor from torch.nn.parameter import Parameter from models.transformer import SimpleTransformer def apply_lora_modality_trunks(modality_trunks: Dict[str, SimpleTransformer], rank: int, layer_idxs: Optional[Dict[SimpleNamespace, List[int]]] = None, modality_names: List[SimpleNamespace] = None): if modality_names is None: modality_names = list(modality_trunks.keys()) if layer_idxs is None: layer_idxs = {} return nn.ModuleDict({modality_name: LoRA_SimpleTransformer(modality_trunk, rank, layer_idxs.get(modality_name, None)) for modality_name, modality_trunk in modality_trunks.items() if modality_name in modality_names}) def save_lora_modality_trunks(modality_trunks: Dict[str, SimpleTransformer], checkpoint_dir: str = "./.checkpoints/lora", postfix: str = "_last", extension: str = "safetensors"): for modality_name, modality_trunk in modality_trunks.items(): try: if isinstance(modality_trunk, LoRA_SimpleTransformer): modality_trunk.save_lora_parameters(os.path.join(checkpoint_dir, f"imagebind-lora-{modality_name}{postfix}.{extension}")) logging.info(f"Saved LoRA parameters for modality {modality_name} to {checkpoint_dir}.") except FileNotFoundError: logging.warning(f"Could not save LoRA parameters for modality {modality_name} to {checkpoint_dir}.") def load_lora_modality_trunks(modality_trunks: Dict[str, SimpleTransformer], checkpoint_dir: str = "./.checkpoints/lora", postfix: str = "_last", extension: str = "safetensors"): for modality_name, modality_trunk in modality_trunks.items(): try: if isinstance(modality_trunk, LoRA_SimpleTransformer): modality_trunk.load_lora_parameters(os.path.join(checkpoint_dir, f"imagebind-lora-{modality_name}{postfix}.{extension}")) logging.info(f"Loaded LoRA parameters for modality {modality_name} from {checkpoint_dir}.") except FileNotFoundError: logging.warning(f"Could not find LoRA parameters for modality {modality_name} in {checkpoint_dir}.") logging.warning("If you are training the sub-model from scratch, this is expected.") logging.warning("If you are loading parts of a pre-trained model, this is expected for some modalities.") class _LoRALayer(nn.Module): def __init__(self, w: nn.Module, w_a: nn.Module, w_b: nn.Module): super().__init__() self.w = w self.w_a = w_a self.w_b = w_b def forward(self, tokens: torch.Tensor, **kwargs): x = self.w(tokens) + self.w_b(self.w_a(tokens)) return x class LoRA_SimpleTransformer(nn.Module): """Applies low-rank adaptation to simple transformer with pytorch multihead attention. Args: transformer_model: a vision transformer model, see base_vit.py rank: rank of LoRA lora_layer_idxs: which layer we apply LoRA. 
Examples:: >>> model = SimpleTransformer() >>> lora_model = LoRA_SimpleTransformer(model, rank=4) >>> preds = lora_model(img) >>> print(preds.shape) torch.Size([1, 1000]) """ def __init__(self, transformer_model: SimpleTransformer, rank: int, lora_layer_idxs: Optional[List[int]] = None): super(LoRA_SimpleTransformer, self).__init__() assert rank > 0 base_dim = transformer_model.blocks[0].attn.in_proj_bias.size()[0] dim = base_dim if lora_layer_idxs is not None: self.lora_layer_idxs = lora_layer_idxs else: self.lora_layer_idxs = list(range(len(transformer_model.blocks))) # create for storage, then we can init them or load weights self.w_As = [] # These are linear layers self.w_Bs = [] # lets freeze first for param in transformer_model.parameters(): param.requires_grad = False # Here, we do the surgery for t_layer_idx, blk in enumerate(transformer_model.blocks): # If we only want few lora layer instead of all if t_layer_idx not in self.lora_layer_idxs: continue w_q_linear = blk.attn.q_proj_weight w_v_linear = blk.attn.v_proj_weight w_a_linear_q = nn.Linear(dim, rank, bias=False) w_b_linear_q = nn.Linear(rank, dim, bias=False) w_a_linear_v = nn.Linear(dim, rank, bias=False) w_b_linear_v = nn.Linear(rank, dim, bias=False) self.w_As.append(w_a_linear_q) self.w_Bs.append(w_b_linear_q) self.w_As.append(w_a_linear_v) self.w_Bs.append(w_b_linear_v) blk.attn.proj_q = _LoRALayer(w_q_linear, w_a_linear_q, w_b_linear_q) blk.attn.proj_v = _LoRALayer(w_v_linear, w_a_linear_v, w_b_linear_v) if self.training: self.reset_parameters() self.lora_model = transformer_model def save_lora_parameters(self, filename: str) -> None: r"""Only safetensors is supported now. pip install safetensors if you do not have one installed yet. """ assert filename.endswith(".safetensors") num_layer = len(self.w_As) # actually, it is half a_tensors = {f"w_a_{i:03d}": self.w_As[i].weight for i in range(num_layer)} b_tensors = {f"w_b_{i:03d}": self.w_Bs[i].weight for i in range(num_layer)} merged_dict = {**a_tensors, **b_tensors} save_file(merged_dict, filename) def load_lora_parameters(self, filename: str) -> None: r"""Only safetensors is supported now. pip install safetensors if you do not have one installed yet. """ assert filename.endswith(".safetensors") with safe_open(filename, framework="pt") as f: for i, w_A_linear in enumerate(self.w_As): saved_key = f"w_a_{i:03d}" saved_tensor = f.get_tensor(saved_key) w_A_linear.weight = Parameter(saved_tensor) for i, w_B_linear in enumerate(self.w_Bs): saved_key = f"w_b_{i:03d}" saved_tensor = f.get_tensor(saved_key) w_B_linear.weight = Parameter(saved_tensor) def reset_parameters(self) -> None: for w_A in self.w_As: nn.init.kaiming_uniform_(w_A.weight, a=math.sqrt(5)) for w_B in self.w_Bs: nn.init.zeros_(w_B.weight) def forward(self, tokens: torch.Tensor, **kwargs) -> Tensor: return self.lora_model(tokens)
Pegasus-master
ImageBind-LoRA/models/lora.py
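A hedged sketch of how the LoRA helpers above are typically wired into an ImageBind model; imagebind_model.imagebind_huge and the modality_trunks attribute are assumptions based on the upstream ImageBind-LoRA layout and are not defined in this record.

# Sketch: inject rank-4 LoRA adapters into selected modality trunks, then save/restore them.
from models import imagebind_model  # assumed to exist in the same repo
from models import lora

model = imagebind_model.imagebind_huge(pretrained=True)
model.modality_trunks.update(
    lora.apply_lora_modality_trunks(
        model.modality_trunks, rank=4, modality_names=["text", "vision"]
    )
)

# Only the injected LoRA A/B projections remain trainable; the trunks are frozen.
trainable = [name for name, p in model.named_parameters() if p.requires_grad]

# Persist the adapters between runs and reload them later.
lora.save_lora_modality_trunks(model.modality_trunks, checkpoint_dir="./.checkpoints/lora")
lora.load_lora_modality_trunks(model.modality_trunks, checkpoint_dir="./.checkpoints/lora")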
#!/usr/bin/env python3 # Portions Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import einops import numpy as np import torch import torch.nn as nn class Normalize(nn.Module): def __init__(self, dim: int) -> None: super().__init__() self.dim = dim def forward(self, x): return torch.nn.functional.normalize(x, dim=self.dim, p=2) class LearnableLogitScaling(nn.Module): def __init__( self, logit_scale_init: float = 1 / 0.07, learnable: bool = True, max_logit_scale: float = 100, ) -> None: super().__init__() self.max_logit_scale = max_logit_scale self.logit_scale_init = logit_scale_init self.learnable = learnable log_logit_scale = torch.ones([]) * np.log(self.logit_scale_init) if learnable: self.log_logit_scale = nn.Parameter(log_logit_scale) else: self.register_buffer("log_logit_scale", log_logit_scale) def forward(self, x): return torch.clip(self.log_logit_scale.exp(), max=self.max_logit_scale) * x def extra_repr(self): st = f"logit_scale_init={self.logit_scale_init},learnable={self.learnable}," \ f" max_logit_scale={self.max_logit_scale}" return st class EinOpsRearrange(nn.Module): def __init__(self, rearrange_expr: str, **kwargs) -> None: super().__init__() self.rearrange_expr = rearrange_expr self.kwargs = kwargs def forward(self, x): assert isinstance(x, torch.Tensor) return einops.rearrange(x, self.rearrange_expr, **self.kwargs) class VerboseNNModule(nn.Module): """ Wrapper around nn.Module that prints registered buffers and parameter names. """ @staticmethod def get_readable_tensor_repr(name: str, tensor: torch.Tensor) -> str: st = ( "(" + name + "): " + "tensor(" + str(tuple(tensor[1].shape)) + ", requires_grad=" + str(tensor[1].requires_grad) + ")\n" ) return st def extra_repr(self) -> str: named_modules = set() for p in self.named_modules(): named_modules.update([p[0]]) named_modules = list(named_modules) string_repr = "" for p in self.named_parameters(): name = p[0].split(".")[0] if name not in named_modules: string_repr += self.get_readable_tensor_repr(name, p) for p in self.named_buffers(): name = p[0].split(".")[0] string_repr += self.get_readable_tensor_repr(name, p) return string_repr def cast_if_src_dtype( tensor: torch.Tensor, src_dtype: torch.dtype, tgt_dtype: torch.dtype ): updated = False if tensor.dtype == src_dtype: tensor = tensor.to(dtype=tgt_dtype) updated = True return tensor, updated class QuickGELU(nn.Module): # From https://github.com/openai/CLIP/blob/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1/clip/model.py#L166 def forward(self, x: torch.Tensor): return x * torch.sigmoid(1.702 * x) class SelectElement(nn.Module): def __init__(self, index) -> None: super().__init__() self.index = index def forward(self, x): assert x.ndim >= 3 return x[:, self.index, ...] class SelectEOSAndProject(nn.Module): """ Text Pooling used in OpenCLIP """ def __init__(self, proj: nn.Module) -> None: super().__init__() self.proj = proj def forward(self, x, seq_len): assert x.ndim == 3 # x is of shape B x L x D # take features from the eot embedding (eot_token is the highest number in each sequence) x = x[torch.arange(x.shape[0]), seq_len] x = self.proj(x) return x
Pegasus-master
ImageBind-LoRA/models/helpers.py
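A small sketch showing how the helper modules above compose into a projection head and post-processor similar to the ones ImageBind builds per modality; the exact layer sizes and head layout here are illustrative assumptions.

# Sketch: CLS-token selection + projection, followed by normalization and a learnable temperature.
import torch
import torch.nn as nn
from models.helpers import LearnableLogitScaling, Normalize, SelectElement

head = nn.Sequential(
    SelectElement(index=0),            # take the CLS token from a B x L x D trunk output
    nn.Linear(768, 1024, bias=False),  # project to a shared embedding dimension
)
postprocessor = nn.Sequential(
    Normalize(dim=-1),                                 # unit-norm embeddings
    LearnableLogitScaling(logit_scale_init=1 / 0.07),  # learnable logit temperature
)

tokens = torch.randn(2, 197, 768)         # B x L x D trunk output
embeddings = postprocessor(head(tokens))  # B x 1024, scaled for contrastive logits
print(embeddings.shape)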
Pegasus-master
tests/main.py
from concurrent.futures import ThreadPoolExecutor import torch from pegasus.ImageBind.models.imagebind_model import ( ModalityType, imagebind_model, load_and_transform_audio_data, load_and_transform_text, load_and_transform_vision_data, ) from pegasus.types import Documents, EmbeddingFunction, Embeddings class MultiModalEmbeddingFunction(EmbeddingFunction): _model_cache = {} def __init__( self, modality: str = ModalityType, # type: ignore model_path: str = "https://dl.fbaipublicfiles.com/imagebind/imagebind_huge.pth", device: str = "cuda:0" ): self._modality = modality self.device = "cuda:0" if torch.cuda.is_available() else "cpu" self._model = imagebind_model.imagebind_huge(pretrained=True) self._model.eval() self._model.to(self.device) def __call__(self, *args: Documents) -> Embeddings: if self._modality == ModalityType.TEXT: inputs = { ModalityType.TEXT: load_and_transform_text( args[0], self.device ) } print("Inputs:", inputs) elif self._modality == ModalityType.VISION: inputs = { ModalityType.VISION: load_and_transform_vision_data( args[0], self.device ) } elif self._modality == ModalityType.AUDIO: inputs = { ModalityType.AUDIO: load_and_transform_audio_data( args[0], self.device ) } else: raise ValueError("Invalid modality specified") with torch.no_grad(): embeddings = self._model(inputs) print("Embeddings:", embeddings) # Convert the embeddings tensor to a NumPy array and then to a list of lists (embeddings) embeddings_array = embeddings[self._modality].cpu().numpy() print("Embeddings array:", embeddings_array) # embeddings_list = embeddings_array.tolist() # return embeddings_list return [embedding.tolist() for embedding in embeddings_array] """ text_embedding_function = MultiModalEmbeddingFunction(modality=ModalityType.TEXT) vision_embedding_function = MultiModalEmbeddingFunction(modality=ModalityType.VISION) audio_embedding_function = MultiModalEmbeddingFunction(modality=ModalityType.AUDIO) """ #ouptu to parquet? import logging from concurrent.futures import ThreadPoolExecutor class OptimizedMultiModalEmbeddingFunction(EmbeddingFunction): """ Class to handle multi-modal embeddings with error handling and logging. """ _model_cache = {} def __init__(self, modality: str = ModalityType, model_path: str = "https://dl.fbaipublicfiles.com/imagebind/imagebind_huge.pth", device: str = "cuda:0"): """ Initialize the embedding function with specified modality and device. Args: modality (str): The type of modality - 'TEXT', 'VISION', 'AUDIO'. model_path (str): Path to the model file. device (str): The device to run the model on - 'cpu' or 'cuda'. """ self._modality = modality self.device = device if torch.cuda.is_available() and "cuda" in device else "cpu" self.model_path = model_path def _load_model(self): """ Load model and store it in cache for reusability. """ if (self._modality, self.device) not in self._model_cache: model = imagebind_model.imagebind_huge(pretrained=True) model.eval() model.to(self.device) self._model_cache[(self._modality, self.device)] = model return self._model_cache[(self._modality, self.device)] def __call__(self, *args: Documents) -> Embeddings: """ Main function call to compute embeddings. Args: args (Documents): Text, video file path, or audio file path. Returns: Embeddings as a list of lists. 
""" model = self._load_model() load_func = { ModalityType.TEXT: load_and_transform_text, ModalityType.VISION: load_and_transform_vision_data, ModalityType.AUDIO: load_and_transform_audio_data }.get(self._modality, None) if load_func is None: logging.error(f"Invalid modality: {self._modality}") raise ValueError("Invalid modality specified") # Error handling for input data try: with ThreadPoolExecutor() as executor: future = executor.submit(load_func, args[0], self.device) inputs = {self._modality: future.result()} except Exception as e: logging.error(f"Failed to load input data: {str(e)}") raise try: with torch.no_grad(): embeddings = model(inputs) except Exception as e: logging.error(f"Failed to compute embeddings: {str(e)}") raise del inputs # Delete the input tensors to free up memory try: embeddings_array = embeddings[self._modality].cpu().numpy() except Exception as e: logging.error(f"Failed to convert embeddings to numpy array: {str(e)}") raise del embeddings # Delete the output tensor to free up memory return [embedding.tolist() for embedding in embeddings_array]
Pegasus-master
pegasus/embedding_functions.py
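An illustrative call pattern for the embedding functions above; the image path is a placeholder, the embedding width depends on the downloaded ImageBind checkpoint, and ModalityType comes from pegasus.ImageBind as in the module's own imports.

# Sketch: embed a batch of texts and a batch of images into the shared ImageBind space.
from pegasus.ImageBind.models.imagebind_model import ModalityType
from pegasus.embedding_functions import MultiModalEmbeddingFunction

text_fn = MultiModalEmbeddingFunction(modality=ModalityType.TEXT)
text_embeddings = text_fn(["a dog barking", "a cat meowing"])  # list of float lists

vision_fn = MultiModalEmbeddingFunction(modality=ModalityType.VISION)
image_embeddings = vision_fn(["images/dog.jpg"])  # hypothetical file path

print(len(text_embeddings), len(text_embeddings[0]))  # e.g. 2 x 1024 for imagebind_huge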
from pegasus.main import Pegasus
Pegasus-master
pegasus/__init__.py
from abc import ABC, abstractmethod from typing import Dict, List, Optional, Sequence, TypeVar, Union import numpy as np from sklearn.metrics.pairwise import cosine_similarity from typing_extensions import Literal, Protocol, TypedDict import pegasus.errors as errors from pegasus.ImageBind.models.imagebind_model import ModalityType ID = str IDs = List[ID] Number = Union[int, float] Embedding = List[Number] Embeddings = List[Embedding] Metadata = Dict[str, Union[str, int, float]] Metadatas = List[Metadata] Document = str Documents = List[Document] Parameter = TypeVar("Parameter", Embedding, Document, Metadata, ID) T = TypeVar("T") OneOrMany = Union[T, List[T]] Include = List[Literal["documents", "embeddings", "metadatas", "distances"]] # Grammar for where expressions LiteralValue = Union[str, int, float] LogicalOperator = Literal["$and", "$or"] WhereOperator = Literal["$gt", "$gte", "$lt", "$lte", "$ne", "$eq"] OperatorExpression = Dict[Union[WhereOperator, LogicalOperator], LiteralValue] Where = Dict[ Union[str, LogicalOperator], Union[LiteralValue, OperatorExpression, List["Where"]] ] WhereDocumentOperator = Literal["$contains", LogicalOperator] WhereDocument = Dict[WhereDocumentOperator, Union[str, List["WhereDocument"]]] class GetResult(TypedDict): ids: List[ID] embeddings: Optional[List[Embedding]] documents: Optional[List[Document]] metadatas: Optional[List[Metadata]] class QueryResult(TypedDict): ids: List[IDs] embeddings: Optional[List[List[Embedding]]] documents: Optional[List[List[Document]]] metadatas: Optional[List[List[Metadata]]] distances: Optional[List[List[float]]] class IndexMetadata(TypedDict): dimensionality: int elements: int time_created: float class EmbeddingFunction(Protocol): def __call__(self, texts: Documents) -> Embeddings: ... 
def maybe_cast_one_to_many( target: OneOrMany[Parameter], ) -> List[Parameter]: """Infers if target is Embedding, Metadata, or Document and casts it to a many object if its one""" if isinstance(target, Sequence): # One Document or ID if isinstance(target, str) and target is not None: return [target] # type: ignore # One Embedding if isinstance(target[0], (int, float)): return [target] # type: ignore # One Metadata dict if isinstance(target, dict): return [target] # Already a sequence return target # type: ignore def validate_ids(ids: IDs) -> IDs: """Validates ids to ensure it is a list of strings""" if not isinstance(ids, list): raise ValueError(f"Expected IDs to be a list, got {ids}") for id in ids: if not isinstance(id, str): raise ValueError(f"Expected ID to be a str, got {id}") if len(ids) != len(set(ids)): dups = set([x for x in ids if ids.count(x) > 1]) raise errors.DuplicateIDError( f"Expected IDs to be unique, found duplicates for: {dups}" ) return ids def validate_metadata(metadata: Metadata) -> Metadata: """Validates metadata to ensure it is a dictionary of strings to strings, ints, or floats""" if not isinstance(metadata, dict): raise ValueError(f"Expected metadata to be a dict, got {metadata}") for key, value in metadata.items(): if not isinstance(key, str): raise ValueError(f"Expected metadata key to be a str, got {key}") if not isinstance(value, (str, int, float)): raise ValueError( f"Expected metadata value to be a str, int, or float, got {value}" ) return metadata def validate_metadatas(metadatas: Metadatas) -> Metadatas: """Validates metadatas to ensure it is a list of dictionaries of strings to strings, ints, or floats""" if not isinstance(metadatas, list): raise ValueError(f"Expected metadatas to be a list, got {metadatas}") for metadata in metadatas: validate_metadata(metadata) return metadatas def validate_where(where: Where) -> Where: """ Validates where to ensure it is a dictionary of strings to strings, ints, floats or operator expressions, or in the case of $and and $or, a list of where expressions """ if not isinstance(where, dict): raise ValueError(f"Expected where to be a dict, got {where}") for key, value in where.items(): if not isinstance(key, str): raise ValueError(f"Expected where key to be a str, got {key}") if ( key != "$and" and key != "$or" and not isinstance(value, (str, int, float, dict)) ): raise ValueError( f"Expected where value to be a str, int, float, or operator expression, got {value}" ) if key == "$and" or key == "$or": if not isinstance(value, list): raise ValueError( f"Expected where value for $and or $or to be a list of where expressions, got {value}" ) if len(value) <= 1: raise ValueError( f"Expected where value for $and or $or to be a list with at least two where expressions, got {value}" ) for where_expression in value: validate_where(where_expression) # Value is a operator expression if isinstance(value, dict): # Ensure there is only one operator if len(value) != 1: raise ValueError( f"Expected operator expression to have exactly one operator, got {value}" ) for operator, operand in value.items(): # Only numbers can be compared with gt, gte, lt, lte if operator in ["$gt", "$gte", "$lt", "$lte"]: if not isinstance(operand, (int, float)): raise ValueError( f"Expected operand value to be an int or a float for operator {operator}, got {operand}" ) if operator not in ["$gt", "$gte", "$lt", "$lte", "$ne", "$eq"]: raise ValueError( f"Expected where operator to be one of $gt, $gte, $lt, $lte, $ne, $eq, got {operator}" ) if not 
isinstance(operand, (str, int, float)): raise ValueError( f"Expected where operand value to be a str, int, or float, got {operand}" ) return where def validate_where_document(where_document: WhereDocument) -> WhereDocument: """ Validates where_document to ensure it is a dictionary of WhereDocumentOperator to strings, or in the case of $and and $or, a list of where_document expressions """ if not isinstance(where_document, dict): raise ValueError( f"Expected where document to be a dictionary, got {where_document}" ) if len(where_document) != 1: raise ValueError( f"Expected where document to have exactly one operator, got {where_document}" ) for operator, operand in where_document.items(): if operator not in ["$contains", "$and", "$or"]: raise ValueError( f"Expected where document operator to be one of $contains, $and, $or, got {operator}" ) if operator == "$and" or operator == "$or": if not isinstance(operand, list): raise ValueError( f"Expected document value for $and or $or to be a list of where document expressions, got {operand}" ) if len(operand) <= 1: raise ValueError( f"Expected document value for $and or $or to be a list with at least two where document expressions, got {operand}" ) for where_document_expression in operand: validate_where_document(where_document_expression) # Value is a $contains operator elif not isinstance(operand, str): raise ValueError( f"Expected where document operand value for operator $contains to be a str, got {operand}" ) return where_document def validate_include(include: Include, allow_distances: bool) -> Include: """Validates include to ensure it is a list of strings. Since get does not allow distances, allow_distances is used to control if distances is allowed""" if not isinstance(include, list): raise ValueError(f"Expected include to be a list, got {include}") for item in include: if not isinstance(item, str): raise ValueError(f"Expected include item to be a str, got {item}") allowed_values = ["embeddings", "documents", "metadatas"] if allow_distances: allowed_values.append("distances") if item not in allowed_values: raise ValueError( f"Expected include item to be one of {', '.join(allowed_values)}, got {item}" ) return include class SearchFunction(ABC): @abstractmethod def search( self, query_embeddings: List[Embedding], index_data: dict ) -> List[List[ID]]: pass class CrossModalRetrieval(SearchFunction): """ Use the provided MultiModalEmbeddingFunction to compute embeddings for the query. Select the corresponding embeddings of the other modality. Perform similarity search using the computed embeddings. Return the results. 
""" def __init__(self, modality: str): self.modality = modality def search( self, query_embeddings: List[Embedding], index_data: dict ) -> List[List[ID]]: other_modality = [m for m in ModalityType if m != self.modality][0] # type: ignore # Get the embeddings for the other modality other_embeddings = index_data[other_modality] # Perform similarity search distances = cosine_similarity(query_embeddings, other_embeddings) # type: ignore sorted_indices = np.argsort(distances, axis=1)[:, ::-1] # Get the result IDs result_ids = index_data[f"{other_modality}_ids"][sorted_indices] return result_ids.tolist() class MultiModalFusion(SearchFunction): def __init__(self, fusion_type: str): self.fusion_type = fusion_type def search( self, query_embeddings: List[Embedding], index_data: dict ) -> List[List[ID]]: if self.fusion_type == "early": combined_query_embeddings = self.early_fusion(query_embeddings) # type: ignore combined_index_embeddings = self.early_fusion(index_data) elif self.fusion_type == "late": return self.late_fusion(query_embeddings, index_data) else: raise ValueError("Invalid fusion_type specified") distances = cosine_similarity( combined_query_embeddings, combined_index_embeddings ) sorted_indices = np.argsort(distances, axis=1)[:, ::-1] # Get the result IDs result_ids = index_data["ids"][sorted_indices] return result_ids.tolist() @staticmethod def early_fusion(embeddings: dict) -> np.ndarray: combined_embeddings = np.hstack(list(embeddings.values())) return combined_embeddings def late_fusion( self, query_embeddings: List[Embedding], index_data: dict ) -> List[List[ID]]: result_scores = [] for modality, embeddings in query_embeddings.items(): # type: ignore distances = cosine_similarity(embeddings, index_data[modality]) result_scores.append(distances) combined_scores = np.mean(result_scores, axis=0) sorted_indices = np.argsort(combined_scores, axis=1)[:, ::-1] # Get the result IDs result_ids = index_data["ids"][sorted_indices] return result_ids.tolist() class ModalitySpecificSearching(SearchFunction): def __init__(self, modality: str): self.modality = modality def search( self, query_embeddings: List[Embedding], index_data: dict ) -> List[List[ID]]: # Get the embeddings for the target modality modality_embeddings = index_data[self.modality] # Perform similarity search distances = cosine_similarity(query_embeddings, modality_embeddings) # type: ignore sorted_indices = np.argsort(distances, axis=1)[:, ::-1] # Get the result IDs result_ids = index_data[f"{self.modality}_ids"][sorted_indices] return result_ids.tolist() # to do -> better cosine perhaps torch nn.CosineSimilarity + exception handling
Pegasus-master
pegasus/types.py
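A few quick sanity checks for the validators above: valid inputs pass through unchanged, invalid ones raise ValueError. The document IDs and filter values are made up for illustration.

# Sketch: exercising the where-clause and ID validators.
from pegasus.types import maybe_cast_one_to_many, validate_ids, validate_where

validate_where({"category": "audio"})                                 # simple equality filter
validate_where({"$and": [{"score": {"$gte": 0.5}}, {"lang": "en"}]})  # nested logical filter

validate_ids(["doc-1", "doc-2"])
print(maybe_cast_one_to_many("a single document"))  # -> ["a single document"]

try:
    validate_where({"score": {"$gte": "high"}})  # $gte requires a numeric operand
except ValueError as err:
    print(err)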
from abc import abstractmethod


class OceanError(Exception):
    def code(self):
        """Return an appropriate HTTP response code for this error"""
        return 400  # Bad Request

    def message(self):
        return ", ".join(self.args)

    @classmethod
    @abstractmethod
    def name(cls):
        """Return the error name"""
        pass


class NoDatapointsException(OceanError):
    @classmethod
    def name(cls):
        return "NoDatapoints"


class NoIndexException(OceanError):
    @classmethod
    def name(cls):
        return "NoIndex"


class InvalidDimensionException(OceanError):
    @classmethod
    def name(cls):
        return "InvalidDimension"


class NotEnoughElementsException(OceanError):
    @classmethod
    def name(cls):
        return "NotEnoughElements"


class IDAlreadyExistsError(OceanError):
    def code(self):
        return 409  # Conflict

    @classmethod
    def name(cls):
        return "IDAlreadyExists"


class DuplicateIDError(OceanError):
    @classmethod
    def name(cls):
        return "DuplicateID"


class InvalidUUIDError(OceanError):
    @classmethod
    def name(cls):
        return "InvalidUUID"


error_types = {
    "NoDatapoints": NoDatapointsException,
    "NoIndex": NoIndexException,
    "InvalidDimension": InvalidDimensionException,
    "NotEnoughElements": NotEnoughElementsException,
    "IDAlreadyExists": IDAlreadyExistsError,
    "DuplicateID": DuplicateIDError,
    "InvalidUUID": InvalidUUIDError,
}
Pegasus-master
pegasus/errors.py
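A sketch of how a serving layer might surface these errors; the handler shape and payload fields are illustrative, not part of the package.

# Sketch: map an OceanError subclass to an HTTP status and a serializable payload.
from pegasus.errors import DuplicateIDError, error_types

try:
    raise DuplicateIDError("Expected IDs to be unique, found duplicates for: {'doc-1'}")
except DuplicateIDError as err:
    status = err.code()  # 400 by default; IDAlreadyExistsError overrides this to 409
    payload = {"error": err.name(), "message": err.message()}
    print(status, payload)

# error_types lets a client reconstruct the exception class from a serialized name.
assert error_types["DuplicateID"] is DuplicateIDError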
import logging
from concurrent.futures import ProcessPoolExecutor, as_completed

import numpy as np

from pegasus.embedding_functions import MultiModalEmbeddingFunction

# logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)


def optimized_embedding_function(modality, data):
    # creates and applies a MultiModalEmbeddingFunction for a specific modality
    """
    Inputs:
        modality: A string representing the modality in lower case: 'text', 'vision', 'audio'
        data: A numpy array representing the data

    Returns:
        The embeddings generated by MultiModalEmbeddingFunction
    """
    try:
        return MultiModalEmbeddingFunction(modality)(data)
    except Exception as e:
        logger.error(f"Failed to generate embeddings: {str(e)}")
        raise


class Pegasus:
    """
    Pegasus is the main multi-modal embedding class.

    Inputs:
        modality: A string representing the modality: 'text', 'audio', 'vision', 'sensor', 'heatmap'
        multi_process: A boolean indicating whether multiprocessing is enabled
        n_processes: An integer indicating the number of processes to use
    """

    def __init__(
        self,
        modality,
        multi_process=False,
        n_processes=1,
        hosted=False
    ):
        if not isinstance(modality, str) or modality not in {"text", "audio", "vision", "sensor", "heatmap"}:
            logger.error(f"Invalid modality: {modality}")
            raise ValueError("Invalid modality")

        if not isinstance(multi_process, bool):
            logger.error(f"Invalid multi_process value: {multi_process}")
            raise ValueError("multi_process should be a boolean")

        if not isinstance(n_processes, int) or n_processes < 1:
            logger.error(f"Invalid n_processes value: {n_processes}")
            raise ValueError("n_processes should be a positive integer")

        self.modality = modality
        self.multi_process = multi_process and n_processes > 1
        self.n_processes = n_processes
        self.hosted = hosted

    def _embed_data(self, data):
        """
        Embeds the data using MultiModalEmbeddingFunction.

        Args:
            data: A numpy array representing the data

        Returns:
            The embeddings generated by MultiModalEmbeddingFunction
        """
        if self.modality not in {"text", "audio", "vision", "sensor", "heatmap"}:
            raise ValueError("Invalid modality")
        return optimized_embedding_function(self.modality, data)

    def embed_data(self, data):
        """
        Embeds the data using MultiModalEmbeddingFunction.
        If multiprocessing is enabled, the data is split and processed in parallel.

        Inputs:
            data: A numpy array or a list representing the data

        Returns:
            The embeddings generated by MultiModalEmbeddingFunction
        """
        if not isinstance(data, np.ndarray):
            try:
                data = np.array(data)
            except Exception as e:
                logger.error(f"Failed to convert data to numpy array: {str(e)}")
                raise

        if not self.multi_process:
            return self._embed_data(data)

        try:
            with ProcessPoolExecutor(max_workers=self.n_processes) as executor:
                future_to_data = {executor.submit(self._embed_data, d): d for d in data}
                return {
                    future_to_data[future]: future.result()
                    for future in as_completed(future_to_data)
                }
        except Exception as e:
            logger.error(f"Failed to embed data in parallel: {str(e)}")
            raise
Pegasus-master
pegasus/main.py
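A minimal usage sketch for the Pegasus wrapper above; the input strings and audio paths are placeholders, and the audio example assumes an ImageBind checkpoint is available locally.

# Sketch: single-process text embedding, then multi-process audio embedding.
from pegasus.main import Pegasus

pegasus_text = Pegasus(modality="text")
text_embeddings = pegasus_text.embed_data(["the quick brown fox", "a jazz trumpet solo"])

# With multiprocessing enabled, embed_data returns a dict keyed by input item.
pegasus_audio = Pegasus(modality="audio", multi_process=True, n_processes=2)
audio_embeddings = pegasus_audio.embed_data(["clips/rain.wav", "clips/train.wav"])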
Pegasus-master
pegasus/ImageBind/__init__.py
#!/usr/bin/env python3 # Portions Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torchaudio import logging # from ..multimodal_preprocessors import SimpleTokenizer from .models.multimodal_preprocessors import SimpleTokenizer from PIL import Image from pytorchvideo import transforms as pv_transforms from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler from pytorchvideo.data.encoded_video import EncodedVideo from torchvision import transforms from torchvision.transforms._transforms_video import NormalizeVideo DEFAULT_AUDIO_FRAME_SHIFT_MS = 10 # in milliseconds BPE_PATH = "./pegasus/ImageBind/bpe_simple_vocab_16e6.txt.gz" def waveform2melspec(waveform, sample_rate, num_mel_bins, target_length): # Based on https://github.com/YuanGongND/ast/blob/d7d8b4b8e06cdaeb6c843cdb38794c1c7692234c/src/dataloader.py#L102 waveform -= waveform.mean() fbank = torchaudio.compliance.kaldi.fbank( waveform, htk_compat=True, sample_frequency=sample_rate, use_energy=False, window_type="hanning", num_mel_bins=num_mel_bins, dither=0.0, frame_length=25, frame_shift=DEFAULT_AUDIO_FRAME_SHIFT_MS, ) # Convert to [mel_bins, num_frames] shape fbank = fbank.transpose(0, 1) # Pad to target_length n_frames = fbank.size(1) p = target_length - n_frames # if p is too large (say >20%), flash a warning if abs(p) / n_frames > 0.2: logging.warning( "Large gap between audio n_frames(%d) and " "target_length (%d). Is the audio_target_length " "setting correct?", n_frames, target_length, ) # cut and pad if p > 0: fbank = torch.nn.functional.pad(fbank, (0, p), mode="constant", value=0) elif p < 0: fbank = fbank[:, 0:target_length] # Convert to [1, mel_bins, num_frames] shape, essentially like a 1 # channel image fbank = fbank.unsqueeze(0) return fbank def get_clip_timepoints(clip_sampler, duration): # Read out all clips in this video all_clips_timepoints = [] is_last_clip = False end = 0.0 while not is_last_clip: start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None) all_clips_timepoints.append((start, end)) return all_clips_timepoints def load_and_transform_vision_data(image_paths, device): if image_paths is None: return None image_ouputs = [] for image_path in image_paths: data_transform = transforms.Compose( [ transforms.Resize( 224, interpolation=transforms.InterpolationMode.BICUBIC ), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize( mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711), ), ] ) with open(image_path, "rb") as fopen: image = Image.open(fopen).convert("RGB") image = data_transform(image).to(device) image_ouputs.append(image) return torch.stack(image_ouputs, dim=0) def load_and_transform_text(text, device): if text is None: return None tokenizer = SimpleTokenizer(bpe_path=BPE_PATH) tokens = [tokenizer(t).unsqueeze(0).to(device) for t in text] tokens = torch.cat(tokens, dim=0) return tokens def load_and_transform_audio_data( audio_paths, device, num_mel_bins=128, target_length=204, sample_rate=16000, clip_duration=2, clips_per_video=3, mean=-4.268, std=9.138, ): if audio_paths is None: return None audio_outputs = [] clip_sampler = ConstantClipsPerVideoSampler( clip_duration=clip_duration, clips_per_video=clips_per_video ) for audio_path in audio_paths: waveform, sr = torchaudio.load(audio_path) if sample_rate != sr: waveform = 
torchaudio.functional.resample( waveform, orig_freq=sr, new_freq=sample_rate ) all_clips_timepoints = get_clip_timepoints( clip_sampler, waveform.size(1) / sample_rate ) all_clips = [] for clip_timepoints in all_clips_timepoints: waveform_clip = waveform[ :, int(clip_timepoints[0] * sample_rate) : int( clip_timepoints[1] * sample_rate ), ] waveform_melspec = waveform2melspec( waveform_clip, sample_rate, num_mel_bins, target_length ) all_clips.append(waveform_melspec) normalize = transforms.Normalize(mean=mean, std=std) all_clips = [normalize(ac).to(device) for ac in all_clips] all_clips = torch.stack(all_clips, dim=0) audio_outputs.append(all_clips) return torch.stack(audio_outputs, dim=0) def get_clip_timepoints(clip_sampler, duration): # Read out all clips in this video all_clips_timepoints = [] is_last_clip = False end = 0.0 while not is_last_clip: start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None) all_clips_timepoints.append((start, end)) return all_clips_timepoints def crop_boxes(boxes, x_offset, y_offset): """ Peform crop on the bounding boxes given the offsets. Args: boxes (ndarray or None): bounding boxes to peform crop. The dimension is `num boxes` x 4. x_offset (int): cropping offset in the x axis. y_offset (int): cropping offset in the y axis. Returns: cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4. """ cropped_boxes = boxes.copy() cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset return cropped_boxes def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None): """ Perform uniform spatial sampling on the images and corresponding boxes. Args: images (tensor): images to perform uniform crop. The dimension is `num frames` x `channel` x `height` x `width`. size (int): size of height and weight to crop the images. spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width is larger than height. Or 0, 1, or 2 for top, center, and bottom crop if height is larger than width. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. scale_size (int): optinal. If not None, resize the images to scale_size before performing any crop. Returns: cropped (tensor): images with dimension of `num frames` x `channel` x `size` x `size`. cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4. """ assert spatial_idx in [0, 1, 2] ndim = len(images.shape) if ndim == 3: images = images.unsqueeze(0) height = images.shape[2] width = images.shape[3] if scale_size is not None: if width <= height: width, height = scale_size, int(height / width * scale_size) else: width, height = int(width / height * scale_size), scale_size images = torch.nn.functional.interpolate( images, size=(height, width), mode="bilinear", align_corners=False, ) y_offset = int(math.ceil((height - size) / 2)) x_offset = int(math.ceil((width - size) / 2)) if height > width: if spatial_idx == 0: y_offset = 0 elif spatial_idx == 2: y_offset = height - size else: if spatial_idx == 0: x_offset = 0 elif spatial_idx == 2: x_offset = width - size cropped = images[:, :, y_offset : y_offset + size, x_offset : x_offset + size] cropped_boxes = crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None if ndim == 3: cropped = cropped.squeeze(0) return cropped, cropped_boxes class SpatialCrop(nn.Module): """ Convert the video into 3 smaller clips spatially. 
Must be used after the temporal crops to get spatial crops, and should be used with -2 in the spatial crop at the slowfast augmentation stage (so full frames are passed in here). Will return a larger list with the 3x spatial crops as well. """ def __init__(self, crop_size: int = 224, num_crops: int = 3): super().__init__() self.crop_size = crop_size if num_crops == 3: self.crops_to_ext = [0, 1, 2] self.flipped_crops_to_ext = [] elif num_crops == 1: self.crops_to_ext = [1] self.flipped_crops_to_ext = [] else: raise NotImplementedError("Nothing else supported yet") def forward(self, videos): """ Args: videos: A list of C, T, H, W videos. Returns: videos: A list with 3x the number of elements. Each video converted to C, T, H', W' by spatial cropping. """ assert isinstance(videos, list), "Must be a list of videos after temporal crops" assert all([video.ndim == 4 for video in videos]), "Must be (C,T,H,W)" res = [] for video in videos: for spatial_idx in self.crops_to_ext: res.append(uniform_crop(video, self.crop_size, spatial_idx)[0]) if not self.flipped_crops_to_ext: continue flipped_video = transforms.functional.hflip(video) for spatial_idx in self.flipped_crops_to_ext: res.append(uniform_crop(flipped_video, self.crop_size, spatial_idx)[0]) return res def load_and_transform_video_data( video_paths, device, clip_duration=2, clips_per_video=5, sample_rate=16000, ): if video_paths is None: return None video_outputs = [] video_transform = transforms.Compose( [ pv_transforms.ShortSideScale(224), NormalizeVideo( mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711), ), ] ) clip_sampler = ConstantClipsPerVideoSampler( clip_duration=clip_duration, clips_per_video=clips_per_video ) frame_sampler = pv_transforms.UniformTemporalSubsample(num_samples=clip_duration) for video_path in video_paths: video = EncodedVideo.from_path( video_path, decoder="decord", decode_audio=False, **{"sample_rate": sample_rate}, ) all_clips_timepoints = get_clip_timepoints(clip_sampler, video.duration) all_video = [] for clip_timepoints in all_clips_timepoints: # Read the clip, get frames clip = video.get_clip(clip_timepoints[0], clip_timepoints[1]) if clip is None: raise ValueError("No clip found") video_clip = frame_sampler(clip["video"]) video_clip = video_clip / 255.0 # since this is float, need 0-1 all_video.append(video_clip) all_video = [video_transform(clip) for clip in all_video] all_video = SpatialCrop(224, num_crops=3)(all_video) all_video = torch.stack(all_video, dim=0) video_outputs.append(all_video) return torch.stack(video_outputs, dim=0).to(device)
Pegasus-master
pegasus/ImageBind/data.py
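A hedged end-to-end sketch tying the loaders above to an ImageBind model: preprocess each modality, run one forward pass, and compare embeddings across modalities. The imagebind_model import path follows pegasus/embedding_functions.py, and the media file names are placeholders.

# Sketch: multimodal preprocessing + a single ImageBind forward pass.
import torch
from pegasus.ImageBind import data
from pegasus.ImageBind.models.imagebind_model import ModalityType, imagebind_model

device = "cuda:0" if torch.cuda.is_available() else "cpu"
model = imagebind_model.imagebind_huge(pretrained=True).eval().to(device)

inputs = {
    ModalityType.TEXT: data.load_and_transform_text(["a dog", "a car"], device),
    ModalityType.VISION: data.load_and_transform_vision_data(["dog.jpg"], device),
    ModalityType.AUDIO: data.load_and_transform_audio_data(["dog.wav"], device),
}

with torch.no_grad():
    embeddings = model(inputs)

# Cross-modal similarity between vision and text embeddings.
sims = torch.softmax(
    embeddings[ModalityType.VISION] @ embeddings[ModalityType.TEXT].T, dim=-1
)
print(sims)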