Columns: python_code (string, 0-992k chars) | repo_name (string, 8-46 chars) | file_path (string, 5-162 chars)
from math import ceil

import torch
import torch.nn.functional as F
from einops import pack, rearrange, unpack
from torch import nn


def exists(val):
    return val is not None


def eval_decorator(fn):
    def inner(self, *args, **kwargs):
        was_training = self.training
        self.eval()
        out = fn(self, *args, **kwargs)
        self.train(was_training)
        return out
    return inner


# nucleus

def top_p(logits, thres = 0.9):
    sorted_logits, sorted_indices = torch.sort(logits, descending=True)
    cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

    sorted_indices_to_remove = cum_probs > (1 - thres)
    sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
    sorted_indices_to_remove[:, 0] = 0

    sorted_logits[sorted_indices_to_remove] = float('-inf')
    return sorted_logits.scatter(1, sorted_indices, sorted_logits)


# topk

def top_k(logits, thres = 0.9):
    k = ceil((1 - thres) * logits.shape[-1])
    val, ind = torch.topk(logits, k)
    probs = torch.full_like(logits, float('-inf'))
    probs.scatter_(1, ind, val)
    return probs


# top_a

def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02):
    probs = F.softmax(logits, dim=-1)
    limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
    logits[probs < limit] = float('-inf')
    logits[probs >= limit] = 1
    return logits


# autoregressive wrapper class

class AutoregressiveWrapper(nn.Module):
    def __init__(
        self,
        net,
        ignore_index = -100,
        pad_value = 0,
        mask_prob = 0.
    ):
        super().__init__()
        self.pad_value = pad_value
        self.ignore_index = ignore_index

        self.net = net
        self.max_seq_len = net.max_seq_len

        # paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432
        assert mask_prob < 1.
        self.mask_prob = mask_prob

    @torch.no_grad()
    @eval_decorator
    def generate(
        self,
        start_tokens,
        seq_len,
        eos_token = None,
        temperature = 1.,
        filter_logits_fn = top_k,
        filter_thres = 0.9,
        min_p_pow = 2.0,
        min_p_ratio = 0.02,
        **kwargs
    ):
        start_tokens, ps = pack([start_tokens], '* n')

        b, t = start_tokens.shape

        out = start_tokens

        for _ in range(seq_len):
            x = out[:, -self.max_seq_len:]

            logits = self.net(x, **kwargs)[:, -1]

            if filter_logits_fn in {top_k, top_p}:
                filtered_logits = filter_logits_fn(logits, thres = filter_thres)
                probs = F.softmax(filtered_logits / temperature, dim=-1)

            elif filter_logits_fn is top_a:
                filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio = min_p_ratio)
                probs = F.softmax(filtered_logits / temperature, dim=-1)

            sample = torch.multinomial(probs, 1)

            out = torch.cat((out, sample), dim=-1)

            if exists(eos_token):
                is_eos_tokens = (out == eos_token)

                if is_eos_tokens.any(dim = -1).all():
                    # mask out everything after the eos tokens
                    shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
                    mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
                    out = out.masked_fill(mask, self.pad_value)
                    break

        out = out[:, t:]

        out, = unpack(out, ps, '* n')

        return out

    def forward(self, x, return_loss=True, **kwargs):
        seq, ignore_index = x.shape[1], self.ignore_index

        inp, target = x[:, :-1], x[:, 1:]

        if self.mask_prob > 0.:
            rand = torch.randn(inp.shape, device = x.device)
            rand[:, 0] = -torch.finfo(rand.dtype).max  # first token should not be masked out
            num_mask = min(int(seq * self.mask_prob), seq - 1)
            indices = rand.topk(num_mask, dim = -1).indices
            mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool()
            kwargs.update(self_attn_context_mask = mask)

        logits = self.net(inp, **kwargs)

        loss = F.cross_entropy(
            rearrange(logits, 'b n c -> b c n'),
            target,
            ignore_index = ignore_index
        )

        if return_loss:
            return logits, loss

        return logits
SayCan-main
saycan/autoregressive.py
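A minimal usage sketch for the AutoregressiveWrapper defined in saycan/autoregressive.py above. The DummyNet module here is hypothetical (not part of the repo) and stands in for any network that exposes max_seq_len and maps (batch, seq) token ids to (batch, seq, num_tokens) logits; the default top_k filter is assumed.

import torch
from torch import nn

class DummyNet(nn.Module):
    # hypothetical stand-in for the repo's Transformer: needs .max_seq_len and (b, n) -> (b, n, num_tokens)
    def __init__(self, num_tokens=256, dim=64, max_seq_len=32):
        super().__init__()
        self.max_seq_len = max_seq_len
        self.emb = nn.Embedding(num_tokens, dim)
        self.to_logits = nn.Linear(dim, num_tokens)

    def forward(self, x, **kwargs):
        return self.to_logits(self.emb(x))  # (batch, seq, num_tokens)

wrapper = AutoregressiveWrapper(DummyNet())

# training step: forward returns (logits, loss) when return_loss=True (the default)
logits, loss = wrapper(torch.randint(0, 256, (1, 16)))

# sampling: generate appends seq_len tokens after the prompt, filtered by top_k by default
prompt = torch.randint(0, 256, (1, 8))
sampled = wrapper.generate(prompt, seq_len=16, filter_thres=0.9)  # (1, 16) continuation tokens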
from saycan.model import SayCan
SayCan-main
saycan/__init__.py
from torch.nn import Module
from transformers import AutoTokenizer

from saycan.transformer import (
    Decoder,
    Transformer,
    ViTransformerWrapper,
    Encoder
)
from saycan.autoregressive import AutoregressiveWrapper


class SayCanTokenizer:
    def __init__(self):
        self.tokenizer = AutoTokenizer.from_pretrained(
            "EleutherAI/gpt-neox-20b",
            eos_token="<eos>",
            pad_token="<pad>",
            extra_ids=0,
            model_max_length=8192
        )

    def tokenize_texts(self, texts):
        return self.tokenizer(texts, return_tensors='pt', padding=True, truncation=True).input_ids

    def decode(self, texts):
        return self.tokenizer.decode(texts)

    def __len__(self):
        num_tokens = len(self.tokenizer)
        return num_tokens


class SayCan(Module):
    """
    SayCan is a transformer-based model architecture. It initializes a Transformer
    wrapped in an AutoregressiveWrapper with default or user-specified parameters.
    """
    def __init__(
        self,
        num_tokens=50432,
        max_seq_len=8192,
        dim=2560,
        depth=32,
        dim_head=128,
        heads=24,
        use_abs_pos_emb=False,
        alibi_pos_bias=True,
        alibi_num_heads=12,
        rotary_xpos=True,
        attn_flash=True,
        attn_kv_heads=2,
        qk_norm=True,
        attn_qk_norm=True,
        attn_qk_norm_dim_scale=True,
    ):
        """
        Initialize the model with specified or default parameters.

        Args:
        - num_tokens: Number of tokens in the vocabulary
        - max_seq_len: Maximum sequence length
        - dim: Dimension of the model
        - depth: Depth of the model
        - dim_head: Dimension of each attention head
        - heads: Number of attention heads
        - use_abs_pos_emb: Whether to use absolute position embedding
        - alibi_pos_bias: Whether to use ALiBi positional bias
        - alibi_num_heads: Number of ALiBi heads
        - rotary_xpos: Whether to use rotary xpos embeddings
        - attn_flash: Whether to use flash attention
        - attn_kv_heads: Number of key/value heads (grouped-query attention)
        - qk_norm: Query-key normalization
        - attn_qk_norm: Attention query-key normalization
        - attn_qk_norm_dim_scale: Attention query-key normalization dimension scale
        """
        super().__init__()

        try:
            self.SayCan = Transformer(
                num_tokens=num_tokens,
                max_seq_len=max_seq_len,
                use_abs_pos_emb=use_abs_pos_emb,
                attn_layers=Decoder(
                    dim=dim,
                    depth=depth,
                    dim_head=dim_head,
                    heads=heads,
                    alibi_pos_bias=alibi_pos_bias,
                    alibi_num_heads=alibi_num_heads,
                    rotary_xpos=rotary_xpos,
                    attn_flash=attn_flash,
                    attn_kv_heads=attn_kv_heads,
                    qk_norm=qk_norm,
                    attn_qk_norm=attn_qk_norm,
                    attn_qk_norm_dim_scale=attn_qk_norm_dim_scale
                )
            )

            self.decoder = AutoregressiveWrapper(self.SayCan)

        except Exception as e:
            print("Failed to initialize SayCan: ", e)
            raise

    def forward(self, text_tokens, **kwargs):
        """
        Forward pass through the model. It expects the input text_tokens.

        Args:
        - text_tokens: Input tokens
        - kwargs: Other arguments

        Returns:
        - output from the decoder
        """
        try:
            return self.decoder(text_tokens, **kwargs)
        except Exception as e:
            print("Failed in forward method: ", e)
            raise


class SayCanMultiModal(Module):
    def __init__(
        self,
        image_size=256,
        patch_size=32,
        encoder_dim=512,
        encoder_depth=6,
        encoder_heads=8,
        num_tokens=20000,
        max_seq_len=1024,
        decoder_dim=512,
        decoder_depth=6,
        decoder_heads=8,
        alibi_num_heads=4,
        use_abs_pos_emb=False,
        cross_attend=True,
        alibi_pos_bias=True,
        rotary_xpos=True,
        attn_flash=True,
        qk_norm=True
    ):
        super(SayCanMultiModal, self).__init__()

        self.encoder = ViTransformerWrapper(
            image_size=image_size,
            patch_size=patch_size,
            attn_layers=Encoder(
                dim=encoder_dim,
                depth=encoder_depth,
                heads=encoder_heads
            )
        )

        self.decoder = Transformer(
            num_tokens=num_tokens,
            max_seq_len=max_seq_len,
            use_abs_pos_emb=use_abs_pos_emb,
            attn_layers=Decoder(
                dim=decoder_dim,
                depth=decoder_depth,
                heads=decoder_heads,
                cross_attend=cross_attend,
                alibi_pos_bias=alibi_pos_bias,
                alibi_num_heads=alibi_num_heads,
                rotary_xpos=rotary_xpos,
                attn_flash=attn_flash,
                qk_norm=qk_norm,
            )
        )

    def forward(self, img, text):
        try:
            encoded = self.encoder(img, return_embeddings=True)
            return self.decoder(text, context=encoded)
        except Exception as error:
            print(f"Failed in forward method: {error}")
            raise
SayCan-main
saycan/model.py
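A minimal forward-pass sketch for the SayCanMultiModal class in saycan/model.py above, using its default hyperparameters (image_size=256, num_tokens=20000, max_seq_len=1024). The shapes below follow from those defaults; this mirrors the NeoCortex usage in example.py further down.

import torch

model = SayCanMultiModal()

img = torch.randn(1, 3, 256, 256)          # (batch, channels, image_size, image_size)
text = torch.randint(0, 20000, (1, 1024))  # (batch, max_seq_len) token ids

logits = model(img, text)                  # (1, 1024, 20000) next-token logits over the vocabulary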
from collections import namedtuple from dataclasses import dataclass from functools import partial, wraps from typing import Optional import torch import torch.nn.functional as F from einops import rearrange, repeat from packaging import version from torch import Tensor, einsum, nn # constants EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient']) @dataclass class Intermediates: qk_similarities: Optional[Tensor] = None pre_softmax_attn: Optional[Tensor] = None post_softmax_attn: Optional[Tensor] = None def to_tuple(self): return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn) # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def compact(arr): return [*filter(exists, arr)] def once(fn): called = False @wraps(fn) def inner(x): nonlocal called if called: return called = True return fn(x) return inner print_once = once(print) # functions for creating causal mask # need a special one for onnx cpu (no support for .triu) def create_causal_mask(i, j, device): return torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1) def onnx_create_causal_mask(i, j, device): r = torch.arange(i, device = device) causal_mask = rearrange(r, 'i -> i 1') < rearrange(r, 'j -> 1 j') causal_mask = F.pad(causal_mask, (j - i, 0), value = False) return causal_mask # main class class Attend(nn.Module): def __init__( self, *, dropout = 0., causal = False, heads = None, talking_heads = False, sparse_topk = None, scale = None, qk_norm = False, flash = False, add_zero_kv = False, onnxable = False ): super().__init__() self.scale = scale self.qk_norm = qk_norm self.causal = causal self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax self.dropout = dropout self.attn_dropout = nn.Dropout(dropout) # talking heads assert not (flash and talking_heads), 'talking heads not compatible with flash attention' self.talking_heads = talking_heads if talking_heads: self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False) self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False) # sparse topk assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention' self.sparse_topk = sparse_topk # add a key / value token composed of zeros # in case this helps controlling outliers, proposed by https://www.evanmiller.org/attention-is-off-by-one.html self.add_zero_kv = add_zero_kv # flash attention self.flash = flash assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above' # determine efficient attention configs for cuda and cpu self.cpu_config = EfficientAttentionConfig(True, True, True) self.cuda_config = None if not torch.cuda.is_available() or not flash: return device_properties = torch.cuda.get_device_properties(torch.device('cuda')) if device_properties.major == 8 and device_properties.minor == 0: print_once('A100 GPU detected, using flash attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(True, False, False) else: print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(False, True, True) def flash_attn( self, q, k, v, mask = None, attn_bias = None ): batch, heads, q_len, _, k_len, is_cuda, device = 
*q.shape, k.shape[-2], q.is_cuda, q.device # Recommended for multi-query single-key-value attention by Tri Dao # kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64]) if k.ndim == 3: k = rearrange(k, 'b ... -> b 1 ...').expand_as(q) if v.ndim == 3: v = rearrange(v, 'b ... -> b 1 ...').expand_as(q) # handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention if self.qk_norm: default_scale = q.shape[-1] ** -0.5 q = q * (default_scale / self.scale) # Check if mask exists and expand to compatible shape # The mask is B L, so it would have to be expanded to B H N L causal = self.causal if exists(mask): assert mask.ndim == 4 mask = mask.expand(batch, heads, q_len, k_len) # manually handle causal mask, if another mask was given if causal: causal_mask = self.create_causal_mask(q_len, k_len, device = device) mask = mask & ~causal_mask causal = False # handle alibi positional bias # convert from bool to float if exists(attn_bias): attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1) # if mask given, the mask would already contain the causal mask from above logic # otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number mask_value = -torch.finfo(q.dtype).max if exists(mask): attn_bias = attn_bias.masked_fill(~mask, mask_value // 2) elif causal: causal_mask = self.create_causal_mask(q_len, k_len, device = device) attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2) causal = False # scaled_dot_product_attention handles attn_mask either as bool or additive bias # make it an additive bias here mask = attn_bias # Check if there is a compatible device for flash attention config = self.cuda_config if is_cuda else self.cpu_config # pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale with torch.backends.cuda.sdp_kernel(**config._asdict()): out = F.scaled_dot_product_attention( q, k, v, attn_mask = mask, dropout_p = self.dropout if self.training else 0., is_causal = causal ) return out, Intermediates() def forward( self, q, k, v, mask = None, attn_bias = None, prev_attn = None ): """ einstein notation b - batch h - heads n, i, j - sequence length (base sequence length, source, target) d - feature dimension """ n, heads, kv_heads, device = q.shape[-2], q.shape[1], k.shape[1], q.device scale = default(self.scale, q.shape[-1] ** -0.5) # handle grouped multi-query attention if kv_heads == 1: k, v = map(lambda t: rearrange(t, 'b 1 n d -> b n d'), (k, v)) elif kv_heads < heads: k, v = map(lambda t: repeat(t, 'b kvh n d -> b (r kvh) n d', r = heads // kv_heads), (k, v)) # handle zero kv, as means for allowing network to attend to nothing if self.add_zero_kv: k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value = 0.), (k, v)) if exists(mask): mask = F.pad(mask, (1, 0), value = True) if exists(attn_bias): attn_bias = F.pad(attn_bias, (1, 0), value = 0.) 
if self.flash: assert not exists(prev_attn), 'residual attention not compatible with flash attention' return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias) kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d' dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale if exists(prev_attn): dots = dots + prev_attn qk_similarities = dots.clone() if self.talking_heads: dots = self.pre_softmax_talking_heads(dots) if exists(attn_bias): dots = dots + attn_bias i, j, dtype = *dots.shape[-2:], dots.dtype mask_value = -torch.finfo(dots.dtype).max if exists(self.sparse_topk) and self.sparse_topk < j: top_values, _ = dots.topk(self.sparse_topk, dim = -1) sparse_topk_mask = dots < top_values[..., -1:] mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask if exists(mask): dots = dots.masked_fill(~mask, mask_value) if self.causal: causal_mask = self.create_causal_mask(i, j, device = device) dots = dots.masked_fill(causal_mask, mask_value) pre_softmax_attn = dots.clone() attn = self.attn_fn(dots, dim = -1) attn = attn.type(dtype) post_softmax_attn = attn.clone() attn = self.attn_dropout(attn) if self.talking_heads: attn = self.post_softmax_talking_heads(attn) out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v) intermediates = Intermediates( qk_similarities = qk_similarities, pre_softmax_attn = pre_softmax_attn, post_softmax_attn = post_softmax_attn ) return out, intermediates # cascading heads logic def to_single_heads(t, dim = 1): heads = t.unbind(dim = dim) return tuple(head.unsqueeze(dim) for head in heads) class CascadingHeads(nn.Module): def __init__(self, attend: Attend): super().__init__() self.attend = attend def forward( self, q, k, v, mask = None, attn_bias = None, prev_attn = None ): assert q.shape[-1] == v.shape[-1], 'cascading heads can only be done if query / key and value head dimensions are the same' # split inputs into per-head inputs heads = q.shape[1] queries = to_single_heads(q) keys = to_single_heads(k) if k.ndim == 4 else ((k,) * heads) values = to_single_heads(v) if v.ndim == 4 else ((v,) * heads) mask = (mask,) * heads attn_bias = to_single_heads(attn_bias, dim = 0) if exists(attn_bias) else ((None,) * heads) prev_attn = to_single_heads(prev_attn) if exists(prev_attn) else ((None,) * heads) # now loop through each head, without output of previous head summed with the next head # thus cascading all_outs = [] all_intermediates = [] prev_head_out = None for h_q, h_k, h_v, h_mask, h_attn_bias, h_prev_attn in zip(queries, keys, values, mask, attn_bias, prev_attn): if exists(prev_head_out): h_q = h_q + prev_head_out out, intermediates = self.attend( h_q, h_k, h_v, mask = h_mask, attn_bias = h_attn_bias, prev_attn = h_prev_attn ) prev_head_out = out all_outs.append(out) all_intermediates.append(intermediates) # cat all output heads all_outs = torch.cat(all_outs, dim = 1) # cat all intermediates, if they exist qk_similarities, pre_softmax_attn, post_softmax_attn = zip(*map(lambda i: i.to_tuple(), all_intermediates)) qk_similarities, pre_softmax_attn, post_softmax_attn = map(compact, (qk_similarities, pre_softmax_attn, post_softmax_attn)) aggregated_intermediates = Intermediates( qk_similarities = torch.cat(qk_similarities, dim = 1) if len(qk_similarities) > 0 else None, pre_softmax_attn = torch.cat(pre_softmax_attn, dim = 1) if len(pre_softmax_attn) > 0 else None, post_softmax_attn = torch.cat(post_softmax_attn, dim = 1) if len(post_softmax_attn) > 0 else None ) return all_outs, aggregated_intermediates
SayCan-main
saycan/attend.py
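A minimal sketch of calling the Attend module from saycan/attend.py directly, outside the full Attention layer. The head count and dim_head here are arbitrary choices, and flash is left off so the plain einsum path runs on any PyTorch version.

import torch

attend = Attend(causal=True, dropout=0., flash=False)

q = torch.randn(1, 2, 8, 16)  # (batch, heads, seq, dim_head)
k = torch.randn(1, 2, 8, 16)
v = torch.randn(1, 2, 8, 16)

out, intermediates = attend(q, k, v)
# out: (1, 2, 8, 16); intermediates carries qk_similarities and pre/post-softmax attention maps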
import math from dataclasses import dataclass from functools import partial, wraps from inspect import isfunction from random import random from typing import Callable, List, Optional import torch import torch.nn.functional as F from einops import rearrange, reduce, repeat from torch import Tensor, einsum, nn from saycan.attend import Attend, Intermediates DEFAULT_DIM_HEAD = 64 @dataclass class LayerIntermediates: hiddens: Optional[List[Tensor]] = None attn_intermediates: Optional[List[Intermediates]] = None layer_hiddens: Optional[List[Tensor]] = None attn_z_loss: Optional[Tensor] = None # helpers def exists(val): return val is not None def default(val, d): if exists(val): return val return d() if isfunction(d) else d def cast_tuple(val, depth): return val if isinstance(val, tuple) else (val,) * depth def divisible_by(num, den): return (num % den) == 0 def maybe(fn): @wraps(fn) def inner(x, *args, **kwargs): if not exists(x): return x return fn(x, *args, **kwargs) return inner class always(): def __init__(self, val): self.val = val def __call__(self, *args, **kwargs): return self.val class not_equals(): def __init__(self, val): self.val = val def __call__(self, x, *args, **kwargs): return x != self.val class equals(): def __init__(self, val): self.val = val def __call__(self, x, *args, **kwargs): return x == self.val def Sequential(*modules): return nn.Sequential(*filter(exists, modules)) # tensor helpers def max_neg_value(tensor): return -torch.finfo(tensor.dtype).max def l2norm(t, groups = 1): t = rearrange(t, '... (g d) -> ... g d', g = groups) t = F.normalize(t, p = 2, dim = -1) return rearrange(t, '... g d -> ... (g d)') def pad_at_dim(t, pad, dim = -1, value = 0.): dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1) zeros = ((0, 0) * dims_from_right) return F.pad(t, (*zeros, *pad), value = value) def or_reduce(masks): head, *body = masks for rest in body: head = head | rest return head # auxiliary loss helpers def calc_z_loss( pre_softmax_attns: List[Tensor], mask = None, weight = 1. ): # the same loss applied to the mixture of experts router logits in https://arxiv.org/abs/2202.08906 # in the paper, in a tiny footnote, they mention using it on attention logits with stabilizing effects # also used in PaLM as one of the measures lse = 0. for attn in pre_softmax_attns: lse = lse + attn.logsumexp(dim = -1) loss = torch.square(lse) loss = reduce(loss, 'b h n -> b n', 'sum') if not exists(mask): return loss.mean() * weight loss = loss[mask].sum() / mask.sum().clamp(min = 1e-5) return loss * weight # init helpers def init_zero_(layer): nn.init.constant_(layer.weight, 0.) if exists(layer.bias): nn.init.constant_(layer.bias, 0.) 
# keyword argument helpers def pick_and_pop(keys, d): values = list(map(lambda key: d.pop(key), keys)) return dict(zip(keys, values)) def group_dict_by_key(cond, d): return_val = [dict(),dict()] for key in d.keys(): match = bool(cond(key)) ind = int(not match) return_val[ind][key] = d[key] return (*return_val,) def string_begins_with(prefix, str): return str.startswith(prefix) def group_by_key_prefix(prefix, d): return group_dict_by_key(partial(string_begins_with, prefix), d) def groupby_prefix_and_trim(prefix, d): kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) return kwargs_without_prefix, kwargs # initializations def deepnorm_init( transformer, beta, module_name_match_list = ['.ff.', '.to_v', '.to_out'] ): for name, module in transformer.named_modules(): if type(module) != nn.Linear: continue needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list)) gain = beta if needs_beta_gain else 1 nn.init.xavier_normal_(module.weight.data, gain = gain) if exists(module.bias): nn.init.constant_(module.bias.data, 0) # structured dropout, more effective than traditional attention dropouts def dropout_seq(seq, mask, dropout): b, n, *_, device = *seq.shape, seq.device logits = torch.randn(b, n, device = device) if exists(mask): mask_value = max_neg_value(logits) logits = logits.masked_fill(~mask, mask_value) keep_prob = 1. - dropout num_keep = max(1, int(keep_prob * n)) keep_indices = logits.topk(num_keep, dim = 1).indices batch_indices = torch.arange(b, device = device) batch_indices = rearrange(batch_indices, 'b -> b 1') seq = seq[batch_indices, keep_indices] if exists(mask): seq_counts = mask.sum(dim = -1) seq_keep_counts = torch.ceil(seq_counts * keep_prob).int() keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1') mask = mask[batch_indices, keep_indices] & keep_mask return seq, mask # activations class ReluSquared(nn.Module): def forward(self, x): return F.relu(x) ** 2 # embedding class TokenEmbedding(nn.Module): def __init__(self, dim, num_tokens, l2norm_embed = False): super().__init__() self.l2norm_embed = l2norm_embed self.emb = nn.Embedding(num_tokens, dim) def forward(self, x): token_emb = self.emb(x) return l2norm(token_emb) if self.l2norm_embed else token_emb # positional embeddings class AbsolutePositionalEmbedding(nn.Module): def __init__(self, dim, max_seq_len, l2norm_embed = False): super().__init__() self.scale = dim ** -0.5 if not l2norm_embed else 1. 
self.max_seq_len = max_seq_len self.l2norm_embed = l2norm_embed self.emb = nn.Embedding(max_seq_len, dim) def forward(self, x, pos = None): seq_len, device = x.shape[1], x.device assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}' if not exists(pos): pos = torch.arange(seq_len, device = device) pos_emb = self.emb(pos) pos_emb = pos_emb * self.scale return l2norm(pos_emb) if self.l2norm_embed else pos_emb class ScaledSinusoidalEmbedding(nn.Module): def __init__(self, dim, theta = 10000): super().__init__() assert divisible_by(dim, 2) self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5) half_dim = dim // 2 freq_seq = torch.arange(half_dim).float() / half_dim inv_freq = theta ** -freq_seq self.register_buffer('inv_freq', inv_freq, persistent = False) def forward(self, x, pos = None): seq_len, device = x.shape[1], x.device if not exists(pos): pos = torch.arange(seq_len, device = device) emb = einsum('i, j -> i j', pos, self.inv_freq) emb = torch.cat((emb.sin(), emb.cos()), dim = -1) return emb * self.scale class RelativePositionBias(nn.Module): def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8): super().__init__() self.scale = scale self.causal = causal self.num_buckets = num_buckets self.max_distance = max_distance self.relative_attention_bias = nn.Embedding(num_buckets, heads) @staticmethod def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128): ret = 0 n = -relative_position if not causal: num_buckets //= 2 ret += (n < 0).long() * num_buckets n = torch.abs(n) else: n = torch.max(n, torch.zeros_like(n)) max_exact = num_buckets // 2 is_small = n < max_exact val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).long() val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret @property def device(self): return next(self.parameters()).device def forward(self, i, j): device = self.device q_pos = torch.arange(j - i, j, dtype = torch.long, device = device) k_pos = torch.arange(j, dtype = torch.long, device = device) rel_pos = k_pos[None, :] - q_pos[:, None] rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance) values = self.relative_attention_bias(rp_bucket) bias = rearrange(values, 'i j h -> h i j') return bias * self.scale class DynamicPositionBias(nn.Module): def __init__(self, dim, *, heads, depth, log_distance = False, norm = False): super().__init__() assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1' self.log_distance = log_distance self.mlp = nn.ModuleList([]) self.mlp.append(Sequential( nn.Linear(1, dim), nn.LayerNorm(dim) if norm else None, nn.SiLU() )) for _ in range(depth - 1): self.mlp.append(Sequential( nn.Linear(dim, dim), nn.LayerNorm(dim) if norm else None, nn.SiLU() )) self.mlp.append(nn.Linear(dim, heads)) @property def device(self): return next(self.parameters()).device def forward(self, i, j): assert i == j n, device = j, self.device # get the (n x n) matrix of distances seq_arange = torch.arange(n, device = device) context_arange = torch.arange(n, device = device) indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j') indices += (n - 1) # input to continuous 
positions MLP pos = torch.arange(-n + 1, n, device = device).float() pos = rearrange(pos, '... -> ... 1') if self.log_distance: pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1) for layer in self.mlp: pos = layer(pos) # get position biases bias = pos[indices] bias = rearrange(bias, 'i j h -> h i j') return bias class AlibiPositionalBias(nn.Module): def __init__(self, heads, total_heads, **kwargs): super().__init__() self.heads = heads self.total_heads = total_heads slopes = Tensor(self._get_slopes(heads)) slopes = rearrange(slopes, 'h -> h 1 1') self.register_buffer('slopes', slopes, persistent = False) self.register_buffer('bias', None, persistent = False) def get_bias(self, i, j, device): i_arange = torch.arange(j - i, j, device = device) j_arange = torch.arange(j, device = device) bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1')) return bias @staticmethod def _get_slopes(heads): def get_slopes_power_of_2(n): start = (2**(-2**-(math.log2(n)-3))) ratio = start return [start*ratio**i for i in range(n)] if math.log2(heads).is_integer(): return get_slopes_power_of_2(heads) closest_power_of_2 = 2 ** math.floor(math.log2(heads)) return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2] @property def device(self): return next(self.buffers()).device def forward(self, i, j): h, device = self.total_heads, self.device if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i: return self.bias[..., :i, :j] bias = self.get_bias(i, j, device) bias = bias * self.slopes num_heads_unalibied = h - bias.shape[0] bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0) self.register_buffer('bias', bias, persistent = False) return self.bias class RotaryEmbedding(nn.Module): def __init__( self, dim, use_xpos = False, scale_base = 512, interpolation_factor = 1., base = 10000, base_rescale_factor = 1. ): super().__init__() # proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning # has some connection to NTK literature # https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/ base *= base_rescale_factor ** (dim / (dim - 2)) inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim)) self.register_buffer('inv_freq', inv_freq) assert interpolation_factor >= 1. self.interpolation_factor = interpolation_factor if not use_xpos: self.register_buffer('scale', None) return scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim) self.scale_base = scale_base self.register_buffer('scale', scale) def forward(self, seq_len, device): t = torch.arange(seq_len, device = device).type_as(self.inv_freq) t = t / self.interpolation_factor freqs = torch.einsum('i , j -> i j', t, self.inv_freq) freqs = torch.cat((freqs, freqs), dim = -1) if not exists(self.scale): return freqs, 1. power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base scale = self.scale ** rearrange(power, 'n -> n 1') scale = torch.cat((scale, scale), dim = -1) return freqs, scale def rotate_half(x): x = rearrange(x, '... (j d) -> ... 
j d', j = 2) x1, x2 = x.unbind(dim = -2) return torch.cat((-x2, x1), dim = -1) def apply_rotary_pos_emb(t, freqs, scale = 1): seq_len = t.shape[-2] freqs = freqs[-seq_len:, :] return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale) # norms class Scale(nn.Module): def __init__(self, value, fn): super().__init__() self.value = value self.fn = fn def forward(self, x, **kwargs): out = self.fn(x, **kwargs) def scale_fn(t): return t * self.value if not isinstance(out, tuple): return scale_fn(out) return (scale_fn(out[0]), *out[1:]) class ScaleNorm(nn.Module): def __init__(self, dim, eps = 1e-5): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5)) def forward(self, x): norm = torch.norm(x, dim = -1, keepdim = True) return x / norm.clamp(min = self.eps) * self.g class RMSNorm(nn.Module): def __init__(self, dim): super().__init__() self.scale = dim ** 0.5 self.g = nn.Parameter(torch.ones(dim)) def forward(self, x): return F.normalize(x, dim = -1) * self.scale * self.g class SimpleRMSNorm(nn.Module): def __init__(self, dim): super().__init__() self.scale = dim ** 0.5 def forward(self, x): return F.normalize(x, dim = -1) * self.scale # residual and residual gates class Residual(nn.Module): def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.): super().__init__() self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None self.scale_residual_constant = scale_residual_constant def forward(self, x, residual): if exists(self.residual_scale): residual = residual * self.residual_scale if self.scale_residual_constant != 1: residual = residual * self.scale_residual_constant return x + residual class GRUGating(nn.Module): def __init__(self, dim, scale_residual = False, **kwargs): super().__init__() self.gru = nn.GRUCell(dim, dim) self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None def forward(self, x, residual): if exists(self.residual_scale): residual = residual * self.residual_scale gated_output = self.gru( rearrange(x, 'b n d -> (b n) d'), rearrange(residual, 'b n d -> (b n) d') ) return gated_output.reshape_as(x) # token shifting def shift(t, amount, mask = None): if amount == 0: return t else: amount = min(amount, t.shape[1]) if exists(mask): t = t.masked_fill(~mask[..., None], 0.) return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.) class ShiftTokens(nn.Module): def __init__(self, shifts, fn): super().__init__() self.fn = fn self.shifts = tuple(shifts) def forward(self, x, **kwargs): mask = kwargs.get('mask', None) shifts = self.shifts segments = len(shifts) feats_per_shift = x.shape[-1] // segments splitted = x.split(feats_per_shift, dim = -1) segments_to_shift, rest = splitted[:segments], splitted[segments:] segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts))) x = torch.cat((*segments_to_shift, *rest), dim = -1) return self.fn(x, **kwargs) # feedforward class GLU(nn.Module): def __init__( self, dim_in, dim_out, activation: Callable, mult_bias = False ): super().__init__() self.act = activation self.proj = nn.Linear(dim_in, dim_out * 2) self.mult_bias = nn.Parameter(torch.ones(dim_out)) if mult_bias else 1. 
def forward(self, x): x, gate = self.proj(x).chunk(2, dim = -1) return x * self.act(gate) * self.mult_bias class FeedForward(nn.Module): def __init__( self, dim, dim_out = None, mult = 4, glu = False, glu_mult_bias = False, swish = False, relu_squared = False, post_act_ln = False, dropout = 0., no_bias = False, zero_init_output = False ): super().__init__() inner_dim = int(dim * mult) dim_out = default(dim_out, dim) if relu_squared: activation = ReluSquared() elif swish: activation = nn.SiLU() else: activation = nn.GELU() if glu: project_in = GLU(dim, inner_dim, activation, mult_bias = glu_mult_bias) else: project_in = nn.Sequential( nn.Linear(dim, inner_dim, bias = not no_bias), activation ) self.ff = Sequential( project_in, nn.LayerNorm(inner_dim) if post_act_ln else None, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out, bias = not no_bias) ) # init last linear layer to 0 if zero_init_output: init_zero_(self.ff[-1]) def forward(self, x): return self.ff(x) # attention. it is all we need class Attention(nn.Module): def __init__( self, dim, dim_head = DEFAULT_DIM_HEAD, heads = 8, causal = False, flash = False, talking_heads = False, head_scale = False, sparse_topk = None, num_mem_kv = 0, dropout = 0., on_attn = False, gate_values = False, zero_init_output = False, max_attend_past = None, qk_norm = False, qk_norm_groups = 1, qk_norm_scale = 10, qk_norm_dim_scale = False, one_kv_head = False, kv_heads = None, shared_kv = False, value_dim_head = None, tensor_product = False, # https://arxiv.org/abs/2208.06061 cascading_heads = False, add_zero_kv = False, # same as add_zero_attn in pytorch onnxable = False ): super().__init__() self.scale = dim_head ** -0.5 self.heads = heads self.causal = causal self.max_attend_past = max_attend_past assert not (exists(kv_heads) and one_kv_head), 'either attn_one_kv_head is set to True (in which case kv_heads is set to 1), or attn_kv_heads is set, but not both' value_dim_head = default(value_dim_head, dim_head) kv_heads = default(kv_heads, heads) kv_heads = 1 if one_kv_head else kv_heads assert divisible_by(heads, kv_heads) self.kv_heads = kv_heads q_dim = dim_head * heads k_dim = dim_head * kv_heads v_dim = value_dim_head * kv_heads out_dim = value_dim_head * heads self.to_q = nn.Linear(dim, q_dim, bias = False) self.to_k = nn.Linear(dim, k_dim, bias = False) # shared key / values, for further memory savings during inference assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values' self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None # relations projection from tp-attention self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None # add GLU gating for aggregated values, from alphafold2 self.to_v_gate = None if gate_values: self.to_v_gate = nn.Linear(dim, out_dim) nn.init.constant_(self.to_v_gate.weight, 0) nn.init.constant_(self.to_v_gate.bias, 1) # cosine sim attention self.qk_norm = qk_norm self.qk_norm_groups = qk_norm_groups self.qk_norm_scale = qk_norm_scale # whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442 self.qk_norm_dim_scale = qk_norm_dim_scale self.qk_norm_q_scale = self.qk_norm_k_scale = 1 if qk_norm and qk_norm_dim_scale: self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head)) self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head)) assert (not qk_norm) or divisible_by(dim_head, qk_norm_groups), 'dimension per attention head must be divisible by the qk norm 
groups' assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)' # attend class - includes core attention algorithm + talking heads self.attend = Attend( heads = heads, causal = causal, talking_heads = talking_heads, dropout = dropout, sparse_topk = sparse_topk, qk_norm = qk_norm, scale = qk_norm_scale if qk_norm else self.scale, add_zero_kv = add_zero_kv, flash = flash, onnxable = onnxable ) # head scaling self.head_scale = head_scale if head_scale: self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1)) # explicit topk sparse attention self.sparse_topk = sparse_topk # add memory key / values self.num_mem_kv = num_mem_kv if num_mem_kv > 0: self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) # attention on attention self.attn_on_attn = on_attn self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False) # init output projection 0 if zero_init_output: init_zero_(self.to_out) def forward( self, x, context = None, mask = None, context_mask = None, attn_mask = None, rel_pos = None, rotary_pos_emb = None, prev_attn = None, mem = None ): b, n, _, h, kv_h, head_scale, device, has_context = *x.shape, self.heads, self.kv_heads, self.head_scale, x.device, exists(context) kv_input = default(context, x) q_input = x k_input = kv_input v_input = kv_input r_input = x if exists(mem): k_input = torch.cat((mem, k_input), dim = -2) v_input = torch.cat((mem, v_input), dim = -2) q = self.to_q(q_input) k = self.to_k(k_input) v = self.to_v(v_input) if exists(self.to_v) else k r = self.to_r(r_input) if exists(self.to_r) else None q = rearrange(q, 'b n (h d) -> b h n d', h = h) k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = kv_h), (k, v, r)) if self.qk_norm: qk_l2norm = partial(l2norm, groups = self.qk_norm_groups) q, k = map(qk_l2norm, (q, k)) q = q * self.qk_norm_q_scale k = k * self.qk_norm_k_scale if exists(rotary_pos_emb) and not has_context: freqs, xpos_scale = rotary_pos_emb l = freqs.shape[-1] q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.) 
(ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v)) ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale))) q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr))) input_mask = context_mask if has_context else mask if self.num_mem_kv > 0: mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v)) if self.qk_norm: mem_k = l2norm(mem_k) mem_k = mem_k * self.qk_norm_k_scale k = torch.cat((mem_k, k), dim = -2) v = torch.cat((mem_v, v), dim = -2) if exists(input_mask): input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True) i, j = map(lambda t: t.shape[-2], (q, k)) # determine masking max_neg_value(q) masks = [] final_attn_mask = None if exists(input_mask): input_mask = rearrange(input_mask, 'b j -> b 1 1 j') masks.append(~input_mask) if exists(attn_mask): assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4' if attn_mask.ndim == 2: attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j') elif attn_mask.ndim == 3: attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j') masks.append(~attn_mask) if exists(self.max_attend_past): range_q = torch.arange(j - i, j, device = device) range_k = torch.arange(j, device = device) dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j') max_attend_past_mask = dist > self.max_attend_past masks.append(max_attend_past_mask) if len(masks) > 0: final_attn_mask = ~or_reduce(masks) # prepare relative positional bias, if needed attn_bias = None if exists(rel_pos): attn_bias = rel_pos(i, j) # attention is all we need out, intermediates = self.attend( q, k, v, mask = final_attn_mask, attn_bias = attn_bias, prev_attn = prev_attn ) # https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients if exists(r): out = out * r + out # normformer scaling of heads if head_scale: out = out * self.head_scale_params # merge heads out = rearrange(out, 'b h n d -> b n (h d)') # alphafold2 styled gating of the values if exists(self.to_v_gate): gates = self.to_v_gate(x) out = out * gates.sigmoid() # combine the heads out = self.to_out(out) if exists(mask): mask = rearrange(mask, 'b n -> b n 1') out = out.masked_fill(~mask, 0.) 
return out, intermediates class AttentionLayers(nn.Module): def __init__( self, dim, depth, heads = 8, causal = False, cross_attend = False, only_cross = False, use_scalenorm = False, use_rmsnorm = False, use_simple_rmsnorm = False, alibi_pos_bias = False, alibi_num_heads = None, rel_pos_bias = False, rel_pos_num_buckets = 32, rel_pos_max_distance = 128, dynamic_pos_bias = False, dynamic_pos_bias_log_distance = False, dynamic_pos_bias_mlp_depth = 2, dynamic_pos_bias_norm = False, rotary_pos_emb = False, rotary_emb_dim = None, rotary_xpos = False, rotary_interpolation_factor = 1., rotary_xpos_scale_base = 512, rotary_base_rescale_factor = 1., custom_layers = None, sandwich_coef = None, par_ratio = None, residual_attn = False, cross_residual_attn = False, macaron = False, pre_norm = True, pre_norm_has_final_norm = True, gate_residual = False, scale_residual = False, scale_residual_constant = 1., deepnorm = False, shift_tokens = 0, sandwich_norm = False, resi_dual = False, resi_dual_scale = 1., zero_init_branch_output = False, layer_dropout = 0., cross_attn_tokens_dropout = 0., **kwargs ): super().__init__() rotary_pos_emb = rotary_pos_emb or rotary_xpos ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs) dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) self.dim = dim self.depth = depth self.layers = nn.ModuleList([]) self.has_pos_emb = rel_pos_bias or rotary_pos_emb rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32) assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention' self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base, interpolation_factor = rotary_interpolation_factor, base_rescale_factor = rotary_base_rescale_factor) if rotary_pos_emb else None assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both' assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' # relative positional bias flash_attn = attn_kwargs.get('flash', False) assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias' self.rel_pos = None if rel_pos_bias: assert not flash_attn, 'flash attention not compatible with t5 relative positional bias' self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance) elif dynamic_pos_bias: assert not flash_attn, 'flash attention not compatible with dynamic positional bias' self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm) elif alibi_pos_bias: alibi_num_heads = default(alibi_num_heads, heads) assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads' self.rel_pos = AlibiPositionalBias(heads = alibi_num_heads, total_heads = heads) # determine deepnorm and residual scale if deepnorm: assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings' pre_norm = sandwich_norm = resi_dual = False scale_residual = True scale_residual_constant = (2 * depth) ** 0.25 assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or 
resiDual is selected, but not both' assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm' if resi_dual: pre_norm = False self.pre_norm = pre_norm self.sandwich_norm = sandwich_norm self.resi_dual = resi_dual assert 0 < resi_dual_scale <= 1., 'resiDual prenorm residual must be scaled by a factor greater than 0 and less than or equal to 1.' self.resi_dual_scale = resi_dual_scale self.residual_attn = residual_attn self.cross_residual_attn = cross_residual_attn assert not (flash_attn and (residual_attn or cross_residual_attn)), 'flash attention is not compatible with residual attention' self.cross_attend = cross_attend assert (int(use_scalenorm) + int(use_rmsnorm) + int(use_simple_rmsnorm)) <= 1, 'you can only use either scalenorm, rmsnorm, or simple rmsnorm' if use_scalenorm: norm_class = ScaleNorm elif use_rmsnorm: norm_class = RMSNorm elif use_simple_rmsnorm: norm_class = SimpleRMSNorm else: norm_class = nn.LayerNorm norm_fn = partial(norm_class, dim) if cross_attend and not only_cross: default_block = ('a', 'c', 'f') elif cross_attend and only_cross: default_block = ('c', 'f') else: default_block = ('a', 'f') if macaron: default_block = ('f',) + default_block # zero init if zero_init_branch_output: attn_kwargs = {**attn_kwargs, 'zero_init_output': True} ff_kwargs = {**ff_kwargs, 'zero_init_output': True} # calculate layer block order if exists(custom_layers): layer_types = custom_layers elif exists(par_ratio): par_depth = depth * len(default_block) assert 1 < par_ratio <= par_depth, 'par ratio out of range' default_block = tuple(filter(not_equals('f'), default_block)) par_attn = par_depth // par_ratio depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper par_width = (depth_cut + depth_cut // par_attn) // par_attn assert len(default_block) <= par_width, 'default block is too large for par_ratio' par_block = default_block + ('f',) * (par_width - len(default_block)) par_head = par_block * par_attn layer_types = par_head + ('f',) * (par_depth - len(par_head)) elif exists(sandwich_coef): assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef else: layer_types = default_block * depth self.layer_types = layer_types self.num_attn_layers = len(list(filter(equals('a'), layer_types))) # stochastic depth self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types)) # structured dropout for cross attending self.cross_attn_tokens_dropout = cross_attn_tokens_dropout # calculate token shifting shift_tokens = cast_tuple(shift_tokens, len(layer_types)) # whether it has post norm self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity() # iterate and construct layers for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)): ind == (len(self.layer_types) - 1) if layer_type == 'a': layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs) elif layer_type == 'c': layer = Attention(dim, heads = heads, **attn_kwargs) elif layer_type == 'f': layer = FeedForward(dim, **ff_kwargs) layer = layer if not macaron else Scale(0.5, layer) else: raise Exception(f'invalid layer type {layer_type}') if layer_shift_tokens > 0: shift_range_upper = layer_shift_tokens + 1 shift_range_lower = -layer_shift_tokens if not causal else 0 layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer) residual_fn = GRUGating if 
gate_residual else Residual residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant) pre_branch_norm = norm_fn() if pre_norm else None post_branch_norm = norm_fn() if sandwich_norm else None post_main_norm = norm_fn() if not pre_norm else None norms = nn.ModuleList([ pre_branch_norm, post_branch_norm, post_main_norm ]) self.layers.append(nn.ModuleList([ norms, layer, residual ])) if deepnorm: init_gain = (8 * depth) ** -0.25 deepnorm_init(self, init_gain) def forward( self, x, context = None, mask = None, context_mask = None, attn_mask = None, self_attn_context_mask = None, mems = None, return_hiddens = False ): assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True' hiddens = [] layer_hiddens = [] intermediates = [] prev_attn = None prev_cross_attn = None mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers rotary_pos_emb = None if exists(self.rotary_pos_emb): max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems))) rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device) outer_residual = x * self.resi_dual_scale for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)): ind == (len(self.layers) - 1) if self.training and layer_dropout > 0. and random() < layer_dropout: continue if layer_type == 'a': if return_hiddens: hiddens.append(x) layer_mem = mems.pop(0) if mems else None if layer_type == 'c': if self.training and self.cross_attn_tokens_dropout > 0.: context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout) inner_residual = x if return_hiddens: layer_hiddens.append(x) pre_norm, post_branch_norm, post_main_norm = norm if exists(pre_norm): x = pre_norm(x) if layer_type == 'a': out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem) elif layer_type == 'c': out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn) elif layer_type == 'f': out = block(x) if self.resi_dual: outer_residual = outer_residual + out * self.resi_dual_scale if exists(post_branch_norm): out = post_branch_norm(out) x = residual_fn(out, inner_residual) if layer_type in ('a', 'c') and return_hiddens: intermediates.append(inter) if layer_type == 'a' and self.residual_attn: prev_attn = inter.pre_softmax_attn elif layer_type == 'c' and self.cross_residual_attn: prev_cross_attn = inter.pre_softmax_attn if exists(post_main_norm): x = post_main_norm(x) if return_hiddens: layer_hiddens.append(x) if self.resi_dual: x = x + self.final_norm(outer_residual) else: x = self.final_norm(x) if return_hiddens: intermediates = LayerIntermediates( hiddens = hiddens, attn_intermediates = intermediates, layer_hiddens = layer_hiddens ) return x, intermediates return x class Encoder(AttentionLayers): def __init__(self, **kwargs): assert 'causal' not in kwargs, 'cannot set causality on encoder' super().__init__(causal = False, **kwargs) class Decoder(AttentionLayers): def __init__(self, **kwargs): assert 'causal' not in kwargs, 'cannot set causality on decoder' super().__init__(causal = True, **kwargs) class CrossAttender(AttentionLayers): def __init__(self, **kwargs): super().__init__(cross_attend = True, only_cross = True, **kwargs) class 
ViTransformerWrapper(nn.Module): def __init__( self, *, image_size, patch_size, attn_layers, channels = 3, num_classes = None, post_emb_norm = False, emb_dropout = 0. ): super().__init__() assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder' assert divisible_by(image_size, patch_size), 'image dimensions must be divisible by the patch size' dim = attn_layers.dim num_patches = (image_size // patch_size) ** 2 patch_dim = channels * patch_size ** 2 self.patch_size = patch_size self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim)) self.patch_to_embedding = nn.Sequential( nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim) ) self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity() self.dropout = nn.Dropout(emb_dropout) self.attn_layers = attn_layers self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity() def forward( self, img, return_embeddings = False ): p = self.patch_size x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p) x = self.patch_to_embedding(x) n = x.shape[1] x = x + self.pos_embedding[:, :n] x = self.post_emb_norm(x) x = self.dropout(x) x = self.attn_layers(x) if not exists(self.mlp_head) or return_embeddings: return x x = x.mean(dim = -2) return self.mlp_head(x) class Transformer(nn.Module): def __init__( self, *, num_tokens, max_seq_len, attn_layers, emb_dim = None, max_mem_len = 0, shift_mem_down = 0, emb_dropout = 0., post_emb_norm = False, num_memory_tokens = None, tie_embedding = False, logits_dim = None, use_abs_pos_emb = True, scaled_sinu_pos_emb = False, l2norm_embed = False, emb_frac_gradient = 1., # GLM-130B and Cogview successfully used this, set at 0.1 attn_z_loss_weight = 1e-4 ): super().__init__() assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' dim = attn_layers.dim emb_dim = default(emb_dim, dim) self.emb_dim = emb_dim self.num_tokens = num_tokens self.max_seq_len = max_seq_len self.max_mem_len = max_mem_len self.shift_mem_down = shift_mem_down self.l2norm_embed = l2norm_embed self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed) if not (use_abs_pos_emb and not attn_layers.has_pos_emb): self.pos_emb = always(0) elif scaled_sinu_pos_emb: self.pos_emb = ScaledSinusoidalEmbedding(emb_dim) else: self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed) self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290 self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity() self.emb_dropout = nn.Dropout(emb_dropout) self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() self.attn_layers = attn_layers self.init_() logits_dim = default(logits_dim, num_tokens) self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t() # memory tokens (like [cls]) from Memory Transformers paper num_memory_tokens = default(num_memory_tokens, 0) self.num_memory_tokens = num_memory_tokens if num_memory_tokens > 0: self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) def init_(self): if self.l2norm_embed: nn.init.normal_(self.token_emb.emb.weight, std = 1e-5) if not isinstance(self.pos_emb, always): nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5) return nn.init.kaiming_normal_(self.token_emb.emb.weight) def forward( self, x, return_embeddings = False, 
return_logits_and_embeddings = False, return_intermediates = False, mask = None, return_mems = False, return_attn = False, mems = None, pos = None, prepend_embeds = None, sum_embeds = None, return_attn_z_loss = False, attn_z_loss_weight = 1e-4, **kwargs ): b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient return_hiddens = return_mems | return_attn | return_intermediates | return_attn_z_loss # absolute positional embedding external_pos_emb = exists(pos) and pos.dtype != torch.long pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos x = self.token_emb(x) + pos_emb # for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training if exists(sum_embeds): x = x + sum_embeds # post embedding norm, purportedly leads to greater stabilization x = self.post_emb_norm(x) # whether to append embeds, as in PaLI, for image embeddings if exists(prepend_embeds): prepend_seq, prepend_dim = prepend_embeds.shape[1:] assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions' x = torch.cat((prepend_embeds, x), dim = -2) # whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model if emb_frac_gradient < 1: assert emb_frac_gradient > 0 x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient) # embedding dropout x = self.emb_dropout(x) x = self.project_emb(x) if num_mem > 0: mem = repeat(self.memory_tokens, 'n d -> b n d', b = b) x = torch.cat((mem, x), dim = 1) # auto-handle masking after appending memory tokens if exists(mask): mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True) if self.shift_mem_down and exists(mems): mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:] mems = [*mems_r, *mems_l] if return_hiddens: x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs) else: x = self.attn_layers(x, mask = mask, mems = mems, **kwargs) mem, x = x[:, :num_mem], x[:, num_mem:] if return_logits_and_embeddings: out = (self.to_logits(x), x) elif return_embeddings: out = x else: out = self.to_logits(x) if return_attn_z_loss: pre_softmax_attns = list(map(lambda t: t.pre_softmax_attn, intermediates.attn_intermediates)) intermediates.attn_z_loss = calc_z_loss(pre_softmax_attns, weight = attn_z_loss_weight) return_intermediates = True if return_intermediates: return out, intermediates if return_mems: hiddens = intermediates.hiddens new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems)) return out, new_mems if return_attn: attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) return out, attn_maps return out
SayCan-main
saycan/transformer.py
import torch

from neox.model import NeoCortex

# usage
img = torch.randn(1, 3, 256, 256)
caption = torch.randint(0, 20000, (1, 1024))

model = NeoCortex()

output = model(img, caption)
print(output.shape)  # (1, 1024, 20000)
NeoCortex-main
example.py
from neox.model import NeoCortex
NeoCortex-main
neox/__init__.py
import torch import torch.nn as nn from transformers import AutoTokenizer, CLIPProcessor from neox.transformer import ( Decoder, Encoder, Transformer, ViTransformerWrapper, ) class NeoCortexTokenizer: def __init__(self): try: self.processor = CLIPProcessor.from_pretrained("laion/CLIP-ViT-L-14-laion2B-s32B-b82K") self.tokenizer = AutoTokenizer.from_pretrained( "EleutherAI/gpt-neox-20b", additional_special_tokens=["<image>", "</image>"], eos_token ="<eos>", pad_token="<pad>", extra_ids=0, model_max_length=8192 ) self.im_idx, self.im_end_idx = self.tokenizer.convert_tokens_to_ids(["<image>", "</image>"]) except Exception as e: print(f"Error init tokenizer: {e}") def tokenize_texts(self, texts): try: texts = self.tokenizer(texts, return_tensors="pt", padding=True, truncation=True).input_ids image_tokens = torch.tensor([[self.im_idx, self.im_end_idx]] * texts.shape[0]) return torch.cat([texts[:, 0:1], image_tokens, texts[:, 1:]], dim=1), texts except Exception as e: print(f"Error tokenizing texts: {e}") def tokenize_images(self, images): try: tokenized_images = self.processor(images=images, return_tensors="pt").pixel_values print(f"Tokenized image: {tokenized_images.shape}") return tokenized_images except Exception as e: print(f"Error tokenizing texts: {e}") def tokenize(self, sample): try: text_tokens, only_text_tokens = self.tokenize_texts(sample["target_text"]) attention_mask = text_tokens != self.tokenizer.pad_token_id dummy_image_features = torch.ones((text_tokens.shape[0], 64)) attention_mask = torch.cat([dummy_image_features, attention_mask], dim=1) return { "text_tokens": text_tokens, "images": self.tokenize_images(sample["image"]), "labels": only_text_tokens, "attention_mask": attention_mask, } except Exception as e: print(f"Error during tokenization {e}") class NeoCortex(torch.nn.Module): def __init__(self, image_size=256, patch_size=32, encoder_dim=512, encoder_depth=6, encoder_heads=8, num_tokens=20000, max_seq_len=1024, decoder_dim=512, decoder_depth=6, decoder_heads=8, alibi_num_heads=4, use_abs_pos_emb=False, cross_attend=True, alibi_pos_bias=True, rotary_xpos=True, attn_flash=True, qk_norm=True): super(NeoCortex, self).__init__() self.encoder = ViTransformerWrapper( image_size=image_size, patch_size=patch_size, attn_layers=Encoder( dim=encoder_dim, depth=encoder_depth, heads=encoder_heads ) ) self.decoder = Transformer( num_tokens=num_tokens, max_seq_len=max_seq_len, use_abs_pos_emb=use_abs_pos_emb, attn_layers=Decoder( dim=decoder_dim, depth=decoder_depth, heads=decoder_heads, cross_attend=cross_attend, alibi_pos_bias=alibi_pos_bias, alibi_num_heads=alibi_num_heads, rotary_xpos=rotary_xpos, attn_flash=attn_flash, qk_norm=qk_norm, ) ) def forward(self, img, text): try: encoded = self.encoder(img, return_embeddings=True) return self.decoder(text, context=encoded) except Exception as error: print(f"Failed in forward method: {error}") raise
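# Hypothetical usage sketch, not part of the original file: it exercises the
# tokenizer above on plain strings and assumes network access to download the
# CLIP processor and GPT-NeoX tokenizer referenced in __init__.
if __name__ == "__main__":
    tokenizer = NeoCortexTokenizer()

    tokens_with_image_markers, text_only_tokens = tokenizer.tokenize_texts(
        ["pick up the red block", "open the drawer"]
    )

    # the <image> ... </image> ids are spliced in right after the first text token
    print(tokens_with_image_markers.shape, text_only_tokens.shape)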
NeoCortex-main
neox/model.py
from collections import namedtuple from dataclasses import dataclass from functools import partial, wraps from typing import Optional import torch import torch.nn.functional as F from einops import rearrange from packaging import version from torch import Tensor, einsum, nn # constants EfficientAttentionConfig = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient']) @dataclass class Intermediates: qk_similarities: Optional[Tensor] = None pre_softmax_attn: Optional[Tensor] = None post_softmax_attn: Optional[Tensor] = None def to_tuple(self): return (self.qk_similarities, self.pre_softmax_attn, self.post_softmax_attn) # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def compact(arr): return [*filter(exists, arr)] def once(fn): called = False @wraps(fn) def inner(x): nonlocal called if called: return called = True return fn(x) return inner print_once = once(print) # functions for creating causal mask # need a special one for onnx cpu (no support for .triu) def create_causal_mask(i, j, device): return torch.ones((i, j), device = device, dtype = torch.bool).triu(j - i + 1) def onnx_create_causal_mask(i, j, device): r = torch.arange(i, device = device) causal_mask = rearrange(r, 'i -> i 1') < rearrange(r, 'j -> 1 j') causal_mask = F.pad(causal_mask, (j - i, 0), value = False) return causal_mask # main class class Attend(nn.Module): def __init__( self, *, dropout = 0., causal = False, heads = None, talking_heads = False, sparse_topk = None, scale = None, qk_norm = False, flash = False, add_zero_kv = False, onnxable = False ): super().__init__() self.scale = scale self.qk_norm = qk_norm self.causal = causal self.create_causal_mask = onnx_create_causal_mask if onnxable else create_causal_mask self.attn_fn = partial(F.softmax, dtype = torch.float32) if not qk_norm else F.softmax self.dropout = dropout self.attn_dropout = nn.Dropout(dropout) # talking heads assert not (flash and talking_heads), 'talking heads not compatible with flash attention' self.talking_heads = talking_heads if talking_heads: self.pre_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False) self.post_softmax_talking_heads = nn.Conv2d(heads, heads, 1, bias = False) # sparse topk assert not (flash and sparse_topk), 'sparse topk not compatible with flash attention' self.sparse_topk = sparse_topk # add a key / value token composed of zeros # in case this helps controlling outliers, proposed by https://www.evanmiller.org/attention-is-off-by-one.html self.add_zero_kv = add_zero_kv # flash attention self.flash = flash assert not (flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above' # determine efficient attention configs for cuda and cpu self.cpu_config = EfficientAttentionConfig(True, True, True) self.cuda_config = None if not torch.cuda.is_available() or not flash: return device_properties = torch.cuda.get_device_properties(torch.device('cuda')) if device_properties.major == 8 and device_properties.minor == 0: print_once('A100 GPU detected, using flash attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(True, False, False) else: print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda') self.cuda_config = EfficientAttentionConfig(False, True, True) def flash_attn( self, q, k, v, mask = None, attn_bias = None ): batch, heads, q_len, _, k_len, is_cuda, device = *q.shape, 
k.shape[-2], q.is_cuda, q.device # Recommended for multi-query single-key-value attention by Tri Dao # kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64]) if k.ndim == 3: k = rearrange(k, 'b ... -> b 1 ...').expand_as(q) if v.ndim == 3: v = rearrange(v, 'b ... -> b 1 ...').expand_as(q) # handle scale - by default they scale by dim_head ** -0.5, but need to take care if using cosine sim attention if self.qk_norm: default_scale = q.shape[-1] ** -0.5 q = q * (default_scale / self.scale) # Check if mask exists and expand to compatible shape # The mask is B L, so it would have to be expanded to B H N L causal = self.causal if exists(mask): assert mask.ndim == 4 mask = mask.expand(batch, heads, q_len, k_len) # manually handle causal mask, if another mask was given if causal: causal_mask = self.create_causal_mask(q_len, k_len, device = device) mask = mask & ~causal_mask causal = False # handle alibi positional bias # convert from bool to float if exists(attn_bias): attn_bias = rearrange(attn_bias, 'h i j -> 1 h i j').expand(batch, heads, -1, -1) # if mask given, the mask would already contain the causal mask from above logic # otherwise, if no mask given but still causal, mask out alibi positional bias to a large negative number mask_value = -torch.finfo(q.dtype).max if exists(mask): attn_bias = attn_bias.masked_fill(~mask, mask_value // 2) elif causal: causal_mask = self.create_causal_mask(q_len, k_len, device = device) attn_bias = attn_bias.masked_fill(causal_mask, mask_value // 2) causal = False # scaled_dot_product_attention handles attn_mask either as bool or additive bias # make it an additive bias here mask = attn_bias # Check if there is a compatible device for flash attention config = self.cuda_config if is_cuda else self.cpu_config # pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale with torch.backends.cuda.sdp_kernel(**config._asdict()): out = F.scaled_dot_product_attention( q, k, v, attn_mask = mask, dropout_p = self.dropout if self.training else 0., is_causal = causal ) return out, Intermediates() def forward( self, q, k, v, mask = None, attn_bias = None, prev_attn = None ): """ einstein notation b - batch h - heads n, i, j - sequence length (base sequence length, source, target) d - feature dimension """ n, device = q.shape[-2], q.device scale = default(self.scale, q.shape[-1] ** -0.5) if self.add_zero_kv: k, v = map(lambda t: F.pad(t, (0, 0, 1, 0), value = 0.), (k, v)) if exists(mask): mask = F.pad(mask, (1, 0), value = True) if exists(attn_bias): attn_bias = F.pad(attn_bias, (1, 0), value = 0.) 
if self.flash: assert not exists(prev_attn), 'residual attention not compatible with flash attention' return self.flash_attn(q, k, v, mask = mask, attn_bias = attn_bias) kv_einsum_eq = 'b j d' if k.ndim == 3 else 'b h j d' dots = einsum(f'b h i d, {kv_einsum_eq} -> b h i j', q, k) * scale if exists(prev_attn): dots = dots + prev_attn qk_similarities = dots.clone() if self.talking_heads: dots = self.pre_softmax_talking_heads(dots) if exists(attn_bias): dots = dots + attn_bias i, j, dtype = *dots.shape[-2:], dots.dtype mask_value = -torch.finfo(dots.dtype).max if exists(self.sparse_topk) and self.sparse_topk < j: top_values, _ = dots.topk(self.sparse_topk, dim = -1) sparse_topk_mask = dots < top_values[..., -1:] mask = (mask & sparse_topk_mask) if exists(mask) else sparse_topk_mask if exists(mask): dots = dots.masked_fill(~mask, mask_value) if self.causal: causal_mask = self.create_causal_mask(i, j, device = device) dots = dots.masked_fill(causal_mask, mask_value) pre_softmax_attn = dots.clone() attn = self.attn_fn(dots, dim = -1) attn = attn.type(dtype) post_softmax_attn = attn.clone() attn = self.attn_dropout(attn) if self.talking_heads: attn = self.post_softmax_talking_heads(attn) out = einsum(f'b h i j, {kv_einsum_eq} -> b h i d', attn, v) intermediates = Intermediates( qk_similarities = qk_similarities, pre_softmax_attn = pre_softmax_attn, post_softmax_attn = post_softmax_attn ) return out, intermediates # cascading heads logic def to_single_heads(t, dim = 1): heads = t.unbind(dim = dim) return tuple(head.unsqueeze(dim) for head in heads) class CascadingHeads(nn.Module): def __init__(self, attend: Attend): super().__init__() self.attend = attend def forward( self, q, k, v, mask = None, attn_bias = None, prev_attn = None ): assert q.shape[-1] == v.shape[-1], 'cascading heads can only be done if query / key and value head dimensions are the same' # split inputs into per-head inputs heads = q.shape[1] queries = to_single_heads(q) keys = to_single_heads(k) if k.ndim == 4 else ((k,) * heads) values = to_single_heads(v) if v.ndim == 4 else ((v,) * heads) mask = (mask,) * heads attn_bias = to_single_heads(attn_bias, dim = 0) if exists(attn_bias) else ((None,) * heads) prev_attn = to_single_heads(prev_attn) if exists(prev_attn) else ((None,) * heads) # now loop through each head, without output of previous head summed with the next head # thus cascading all_outs = [] all_intermediates = [] prev_head_out = None for h_q, h_k, h_v, h_mask, h_attn_bias, h_prev_attn in zip(queries, keys, values, mask, attn_bias, prev_attn): if exists(prev_head_out): h_q = h_q + prev_head_out out, intermediates = self.attend( h_q, h_k, h_v, mask = h_mask, attn_bias = h_attn_bias, prev_attn = h_prev_attn ) prev_head_out = out all_outs.append(out) all_intermediates.append(intermediates) # cat all output heads all_outs = torch.cat(all_outs, dim = 1) # cat all intermediates, if they exist qk_similarities, pre_softmax_attn, post_softmax_attn = zip(*map(lambda i: i.to_tuple(), all_intermediates)) qk_similarities, pre_softmax_attn, post_softmax_attn = map(compact, (qk_similarities, pre_softmax_attn, post_softmax_attn)) aggregated_intermediates = Intermediates( qk_similarities = torch.cat(qk_similarities, dim = 1) if len(qk_similarities) > 0 else None, pre_softmax_attn = torch.cat(pre_softmax_attn, dim = 1) if len(pre_softmax_attn) > 0 else None, post_softmax_attn = torch.cat(post_softmax_attn, dim = 1) if len(post_softmax_attn) > 0 else None ) return all_outs, aggregated_intermediates
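# Hypothetical usage sketch, not part of the original file: it runs the Attend
# module above on random (batch, heads, seq, dim_head) tensors through the
# default non-flash einsum path.
if __name__ == "__main__":
    import torch

    attend = Attend(causal = True, dropout = 0.1)

    q = torch.randn(2, 8, 128, 64)
    k = torch.randn(2, 8, 128, 64)
    v = torch.randn(2, 8, 128, 64)

    out, intermediates = attend(q, k, v)
    print(out.shape)                              # expected: torch.Size([2, 8, 128, 64])
    print(intermediates.post_softmax_attn.shape)  # expected: torch.Size([2, 8, 128, 128])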
NeoCortex-main
neox/attend.py
import math from dataclasses import dataclass from functools import partial, wraps from inspect import isfunction # constants from math import ceil from random import random from typing import Callable, List, Optional import torch import torch.nn.functional as F from einops import pack, rearrange, reduce, repeat, unpack from torch import Tensor, einsum, nn from neox.attend import Attend, Intermediates def exists(val): return val is not None def eval_decorator(fn): def inner(self, *args, **kwargs): was_training = self.training self.eval() out = fn(self, *args, **kwargs) self.train(was_training) return out return inner # nucleus def top_p(logits, thres = 0.9): sorted_logits, sorted_indices = torch.sort(logits, descending=True) cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1) sorted_indices_to_remove = cum_probs > (1 - thres) sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone() sorted_indices_to_remove[:, 0] = 0 sorted_logits[sorted_indices_to_remove] = float('-inf') return sorted_logits.scatter(1, sorted_indices, sorted_logits) # topk def top_k(logits, thres = 0.9): k = ceil((1 - thres) * logits.shape[-1]) val, ind = torch.topk(logits, k) probs = torch.full_like(logits, float('-inf')) probs.scatter_(1, ind, val) return probs # top_a def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02): probs = F.softmax(logits, dim=-1) limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio logits[probs < limit] = float('-inf') logits[probs >= limit] = 1 return logits # autoregressive wrapper class class AutoregressiveWrapper(nn.Module): def __init__( self, net, ignore_index = -100, pad_value = 0, mask_prob = 0. ): super().__init__() self.pad_value = pad_value self.ignore_index = ignore_index self.net = net self.max_seq_len = net.max_seq_len # paper shows masking (MLM) in conjunction with autoregressive decoder-only training leads to big improvements https://arxiv.org/abs/2210.13432 assert mask_prob < 1. 
self.mask_prob = mask_prob @torch.no_grad() @eval_decorator def generate( self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, min_p_pow = 2.0, min_p_ratio = 0.02, **kwargs ): start_tokens, ps = pack([start_tokens], '* n') b, t = start_tokens.shape out = start_tokens for _ in range(seq_len): x = out[:, -self.max_seq_len:] logits = self.net(x, **kwargs)[:, -1] if filter_logits_fn in {top_k, top_p}: filtered_logits = filter_logits_fn(logits, thres = filter_thres) probs = F.softmax(filtered_logits / temperature, dim=-1) elif filter_logits_fn is top_a: filtered_logits = filter_logits_fn(logits, min_p_pow = min_p_pow, min_p_ratio= min_p_ratio) probs = F.softmax(filtered_logits / temperature, dim=-1) sample = torch.multinomial(probs, 1) out = torch.cat((out, sample), dim=-1) if exists(eos_token): is_eos_tokens = (out == eos_token) if is_eos_tokens.any(dim = -1).all(): # mask out everything after the eos tokens shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1)) mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1 out = out.masked_fill(mask, self.pad_value) break out = out[:, t:] out, = unpack(out, ps, '* n') return out def forward(self, x, return_loss=True, **kwargs): seq, ignore_index = x.shape[1], self.ignore_index inp, target = x[:, :-1], x[:, 1:] if self.mask_prob > 0.: rand = torch.randn(inp.shape, device = x.device) rand[:, 0] = -torch.finfo(rand.dtype).max # first token should not be masked out num_mask = min(int(seq * self.mask_prob), seq - 1) indices = rand.topk(num_mask, dim = -1).indices mask = ~torch.zeros_like(inp).scatter(1, indices, 1.).bool() kwargs.update(self_attn_context_mask = mask) logits = self.net(inp, **kwargs) loss = F.cross_entropy( rearrange(logits, 'b n c -> b c n'), target, ignore_index = ignore_index ) if return_loss: return logits, loss return logits DEFAULT_DIM_HEAD = 64 @dataclass class LayerIntermediates: hiddens: Optional[List[Tensor]] = None attn_intermediates: Optional[List[Intermediates]] = None layer_hiddens: Optional[List[Tensor]] = None attn_z_loss: Optional[Tensor] = None # helpers def exists(val): return val is not None def default(val, d): if exists(val): return val return d() if isfunction(d) else d def cast_tuple(val, depth): return val if isinstance(val, tuple) else (val,) * depth def maybe(fn): @wraps(fn) def inner(x, *args, **kwargs): if not exists(x): return x return fn(x, *args, **kwargs) return inner class always(): def __init__(self, val): self.val = val def __call__(self, *args, **kwargs): return self.val class not_equals(): def __init__(self, val): self.val = val def __call__(self, x, *args, **kwargs): return x != self.val class equals(): def __init__(self, val): self.val = val def __call__(self, x, *args, **kwargs): return x == self.val def Sequential(*modules): return nn.Sequential(*filter(exists, modules)) # tensor helpers def max_neg_value(tensor): return -torch.finfo(tensor.dtype).max def l2norm(t, groups = 1): t = rearrange(t, '... (g d) -> ... g d', g = groups) t = F.normalize(t, p = 2, dim = -1) return rearrange(t, '... g d -> ... (g d)') def pad_at_dim(t, pad, dim = -1, value = 0.): dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1) zeros = ((0, 0) * dims_from_right) return F.pad(t, (*zeros, *pad), value = value) def or_reduce(masks): head, *body = masks for rest in body: head = head | rest return head # auxiliary loss helpers def calc_z_loss( pre_softmax_attns: List[Tensor], mask = None, weight = 1. 
): # the same loss applied to the mixture of experts router logits in https://arxiv.org/abs/2202.08906 # in the paper, in a tiny footnote, they mention using it on attention logits with stabilizing effects # also used in PaLM as one of the measures lse = 0. for attn in pre_softmax_attns: lse = lse + attn.logsumexp(dim = -1) loss = torch.square(lse) loss = reduce(loss, 'b h n -> b n', 'sum') if not exists(mask): return loss.mean() * weight loss = loss[mask].sum() / mask.sum().clamp(min = 1e-5) return loss * weight # init helpers def init_zero_(layer): nn.init.constant_(layer.weight, 0.) if exists(layer.bias): nn.init.constant_(layer.bias, 0.) # keyword argument helpers def pick_and_pop(keys, d): values = list(map(lambda key: d.pop(key), keys)) return dict(zip(keys, values)) def group_dict_by_key(cond, d): return_val = [dict(),dict()] for key in d.keys(): match = bool(cond(key)) ind = int(not match) return_val[ind][key] = d[key] return (*return_val,) def string_begins_with(prefix, str): return str.startswith(prefix) def group_by_key_prefix(prefix, d): return group_dict_by_key(partial(string_begins_with, prefix), d) def groupby_prefix_and_trim(prefix, d): kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) return kwargs_without_prefix, kwargs # initializations def deepnorm_init( transformer, beta, module_name_match_list = ['.ff.', '.to_v', '.to_out'] ): for name, module in transformer.named_modules(): if type(module) != nn.Linear: continue needs_beta_gain = any(map(lambda substr: substr in name, module_name_match_list)) gain = beta if needs_beta_gain else 1 nn.init.xavier_normal_(module.weight.data, gain = gain) if exists(module.bias): nn.init.constant_(module.bias.data, 0) # structured dropout, more effective than traditional attention dropouts def dropout_seq(seq, mask, dropout): b, n, *_, device = *seq.shape, seq.device logits = torch.randn(b, n, device = device) if exists(mask): mask_value = max_neg_value(logits) logits = logits.masked_fill(~mask, mask_value) keep_prob = 1. - dropout num_keep = max(1, int(keep_prob * n)) keep_indices = logits.topk(num_keep, dim = 1).indices batch_indices = torch.arange(b, device = device) batch_indices = rearrange(batch_indices, 'b -> b 1') seq = seq[batch_indices, keep_indices] if exists(mask): seq_counts = mask.sum(dim = -1) seq_keep_counts = torch.ceil(seq_counts * keep_prob).int() keep_mask = torch.arange(num_keep, device = device) < rearrange(seq_keep_counts, 'b -> b 1') mask = mask[batch_indices, keep_indices] & keep_mask return seq, mask # activations class ReluSquared(nn.Module): def forward(self, x): return F.relu(x) ** 2 # embedding class TokenEmbedding(nn.Module): def __init__(self, dim, num_tokens, l2norm_embed = False): super().__init__() self.l2norm_embed = l2norm_embed self.emb = nn.Embedding(num_tokens, dim) def forward(self, x): token_emb = self.emb(x) return l2norm(token_emb) if self.l2norm_embed else token_emb # positional embeddings class AbsolutePositionalEmbedding(nn.Module): def __init__(self, dim, max_seq_len, l2norm_embed = False): super().__init__() self.scale = dim ** -0.5 if not l2norm_embed else 1. 
self.max_seq_len = max_seq_len self.l2norm_embed = l2norm_embed self.emb = nn.Embedding(max_seq_len, dim) def forward(self, x, pos = None): seq_len, device = x.shape[1], x.device assert seq_len <= self.max_seq_len, f'you are passing in a sequence length of {seq_len} but your absolute positional embedding has a max sequence length of {self.max_seq_len}' if not exists(pos): pos = torch.arange(seq_len, device = device) pos_emb = self.emb(pos) pos_emb = pos_emb * self.scale return l2norm(pos_emb) if self.l2norm_embed else pos_emb class ScaledSinusoidalEmbedding(nn.Module): def __init__(self, dim, theta = 10000): super().__init__() assert (dim % 2) == 0 self.scale = nn.Parameter(torch.ones(1) * dim ** -0.5) half_dim = dim // 2 freq_seq = torch.arange(half_dim).float() / half_dim inv_freq = theta ** -freq_seq self.register_buffer('inv_freq', inv_freq, persistent = False) def forward(self, x, pos = None): seq_len, device = x.shape[1], x.device if not exists(pos): pos = torch.arange(seq_len, device = device) emb = einsum('i, j -> i j', pos, self.inv_freq) emb = torch.cat((emb.sin(), emb.cos()), dim = -1) return emb * self.scale class RelativePositionBias(nn.Module): def __init__(self, scale, causal = False, num_buckets = 32, max_distance = 128, heads = 8): super().__init__() self.scale = scale self.causal = causal self.num_buckets = num_buckets self.max_distance = max_distance self.relative_attention_bias = nn.Embedding(num_buckets, heads) @staticmethod def _relative_position_bucket(relative_position, causal = True, num_buckets = 32, max_distance = 128): ret = 0 n = -relative_position if not causal: num_buckets //= 2 ret += (n < 0).long() * num_buckets n = torch.abs(n) else: n = torch.max(n, torch.zeros_like(n)) max_exact = num_buckets // 2 is_small = n < max_exact val_if_large = max_exact + ( torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).long() val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1)) ret += torch.where(is_small, n, val_if_large) return ret @property def device(self): return next(self.parameters()).device def forward(self, i, j): device = self.device q_pos = torch.arange(j - i, j, dtype = torch.long, device = device) k_pos = torch.arange(j, dtype = torch.long, device = device) rel_pos = k_pos[None, :] - q_pos[:, None] rp_bucket = self._relative_position_bucket(rel_pos, causal = self.causal, num_buckets = self.num_buckets, max_distance = self.max_distance) values = self.relative_attention_bias(rp_bucket) bias = rearrange(values, 'i j h -> h i j') return bias * self.scale class DynamicPositionBias(nn.Module): def __init__(self, dim, *, heads, depth, log_distance = False, norm = False): super().__init__() assert depth >= 1, 'depth for dynamic position bias MLP must be greater or equal to 1' self.log_distance = log_distance self.mlp = nn.ModuleList([]) self.mlp.append(Sequential( nn.Linear(1, dim), nn.LayerNorm(dim) if norm else None, nn.SiLU() )) for _ in range(depth - 1): self.mlp.append(Sequential( nn.Linear(dim, dim), nn.LayerNorm(dim) if norm else None, nn.SiLU() )) self.mlp.append(nn.Linear(dim, heads)) @property def device(self): return next(self.parameters()).device def forward(self, i, j): assert i == j n, device = j, self.device # get the (n x n) matrix of distances seq_arange = torch.arange(n, device = device) context_arange = torch.arange(n, device = device) indices = rearrange(seq_arange, 'i -> i 1') - rearrange(context_arange, 'j -> 1 j') indices += (n - 1) # input to continuous positions 
MLP pos = torch.arange(-n + 1, n, device = device).float() pos = rearrange(pos, '... -> ... 1') if self.log_distance: pos = torch.sign(pos) * torch.log(pos.abs() + 1) # log of distance is sign(rel_pos) * log(abs(rel_pos) + 1) for layer in self.mlp: pos = layer(pos) # get position biases bias = pos[indices] bias = rearrange(bias, 'i j h -> h i j') return bias class AlibiPositionalBias(nn.Module): def __init__(self, heads, total_heads, **kwargs): super().__init__() self.heads = heads self.total_heads = total_heads slopes = Tensor(self._get_slopes(heads)) slopes = rearrange(slopes, 'h -> h 1 1') self.register_buffer('slopes', slopes, persistent = False) self.register_buffer('bias', None, persistent = False) def get_bias(self, i, j, device): i_arange = torch.arange(j - i, j, device = device) j_arange = torch.arange(j, device = device) bias = -torch.abs(rearrange(j_arange, 'j -> 1 1 j') - rearrange(i_arange, 'i -> 1 i 1')) return bias @staticmethod def _get_slopes(heads): def get_slopes_power_of_2(n): start = (2**(-2**-(math.log2(n)-3))) ratio = start return [start*ratio**i for i in range(n)] if math.log2(heads).is_integer(): return get_slopes_power_of_2(heads) closest_power_of_2 = 2 ** math.floor(math.log2(heads)) return get_slopes_power_of_2(closest_power_of_2) + get_slopes_power_of_2(2 * closest_power_of_2)[0::2][:heads-closest_power_of_2] @property def device(self): return next(self.buffers()).device def forward(self, i, j): h, device = self.total_heads, self.device if exists(self.bias) and self.bias.shape[-1] >= j and self.bias.shape[-2] >= i: return self.bias[..., :i, :j] bias = self.get_bias(i, j, device) bias = bias * self.slopes num_heads_unalibied = h - bias.shape[0] bias = pad_at_dim(bias, (0, num_heads_unalibied), dim = 0) self.register_buffer('bias', bias, persistent = False) return self.bias class RotaryEmbedding(nn.Module): def __init__( self, dim, use_xpos = False, scale_base = 512, interpolation_factor = 1., base = 10000, base_rescale_factor = 1. ): super().__init__() # proposed by reddit user bloc97, to rescale rotary embeddings to longer sequence length without fine-tuning # has some connection to NTK literature # https://www.reddit.com/r/LocalLLaMA/comments/14lz7j5/ntkaware_scaled_rope_allows_llama_models_to_have/ base *= base_rescale_factor ** (dim / (dim - 2)) inv_freq = 1. / (base ** (torch.arange(0, dim, 2).float() / dim)) self.register_buffer('inv_freq', inv_freq) assert interpolation_factor >= 1. self.interpolation_factor = interpolation_factor if not use_xpos: self.register_buffer('scale', None) return scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim) self.scale_base = scale_base self.register_buffer('scale', scale) def forward(self, seq_len, device): t = torch.arange(seq_len, device = device).type_as(self.inv_freq) t = t / self.interpolation_factor freqs = torch.einsum('i , j -> i j', t, self.inv_freq) freqs = torch.cat((freqs, freqs), dim = -1) if not exists(self.scale): return freqs, 1. power = (torch.arange(seq_len, device = device) - (seq_len // 2)) / self.scale_base scale = self.scale ** rearrange(power, 'n -> n 1') scale = torch.cat((scale, scale), dim = -1) return freqs, scale def rotate_half(x): x = rearrange(x, '... (j d) -> ... 
j d', j = 2) x1, x2 = x.unbind(dim = -2) return torch.cat((-x2, x1), dim = -1) def apply_rotary_pos_emb(t, freqs, scale = 1): seq_len = t.shape[-2] freqs = freqs[-seq_len:, :] return (t * freqs.cos() * scale) + (rotate_half(t) * freqs.sin() * scale) # norms class Scale(nn.Module): def __init__(self, value, fn): super().__init__() self.value = value self.fn = fn def forward(self, x, **kwargs): out = self.fn(x, **kwargs) scale_fn = lambda t: t * self.value if not isinstance(out, tuple): return scale_fn(out) return (scale_fn(out[0]), *out[1:]) class ScaleNorm(nn.Module): def __init__(self, dim, eps = 1e-5): super().__init__() self.eps = eps self.g = nn.Parameter(torch.ones(1) * (dim ** -0.5)) def forward(self, x): norm = torch.norm(x, dim = -1, keepdim = True) return x / norm.clamp(min = self.eps) * self.g class RMSNorm(nn.Module): def __init__(self, dim): super().__init__() self.scale = dim ** 0.5 self.g = nn.Parameter(torch.ones(dim)) def forward(self, x): return F.normalize(x, dim = -1) * self.scale * self.g class SimpleRMSNorm(nn.Module): def __init__(self, dim): super().__init__() self.scale = dim ** 0.5 def forward(self, x): return F.normalize(x, dim = -1) * self.scale # residual and residual gates class Residual(nn.Module): def __init__(self, dim, scale_residual = False, scale_residual_constant = 1.): super().__init__() self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None self.scale_residual_constant = scale_residual_constant def forward(self, x, residual): if exists(self.residual_scale): residual = residual * self.residual_scale if self.scale_residual_constant != 1: residual = residual * self.scale_residual_constant return x + residual class GRUGating(nn.Module): def __init__(self, dim, scale_residual = False, **kwargs): super().__init__() self.gru = nn.GRUCell(dim, dim) self.residual_scale = nn.Parameter(torch.ones(dim)) if scale_residual else None def forward(self, x, residual): if exists(self.residual_scale): residual = residual * self.residual_scale gated_output = self.gru( rearrange(x, 'b n d -> (b n) d'), rearrange(residual, 'b n d -> (b n) d') ) return gated_output.reshape_as(x) # token shifting def shift(t, amount, mask = None): if amount == 0: return t else: amount = min(amount, t.shape[1]) if exists(mask): t = t.masked_fill(~mask[..., None], 0.) return pad_at_dim(t, (amount, -amount), dim = - 2, value = 0.) class ShiftTokens(nn.Module): def __init__(self, shifts, fn): super().__init__() self.fn = fn self.shifts = tuple(shifts) def forward(self, x, **kwargs): mask = kwargs.get('mask', None) shifts = self.shifts segments = len(shifts) feats_per_shift = x.shape[-1] // segments splitted = x.split(feats_per_shift, dim = -1) segments_to_shift, rest = splitted[:segments], splitted[segments:] segments_to_shift = list(map(lambda args: shift(*args, mask = mask), zip(segments_to_shift, shifts))) x = torch.cat((*segments_to_shift, *rest), dim = -1) return self.fn(x, **kwargs) # feedforward class GLU(nn.Module): def __init__( self, dim_in, dim_out, activation: Callable, mult_bias = False ): super().__init__() self.act = activation self.proj = nn.Linear(dim_in, dim_out * 2) self.mult_bias = nn.Parameter(torch.ones(dim_out)) if mult_bias else 1. 
def forward(self, x): x, gate = self.proj(x).chunk(2, dim = -1) return x * self.act(gate) * self.mult_bias class FeedForward(nn.Module): def __init__( self, dim, dim_out = None, mult = 4, glu = False, glu_mult_bias = False, swish = False, relu_squared = False, post_act_ln = False, dropout = 0., no_bias = False, zero_init_output = False ): super().__init__() inner_dim = int(dim * mult) dim_out = default(dim_out, dim) if relu_squared: activation = ReluSquared() elif swish: activation = nn.SiLU() else: activation = nn.GELU() if glu: project_in = GLU(dim, inner_dim, activation, mult_bias = glu_mult_bias) else: project_in = nn.Sequential( nn.Linear(dim, inner_dim, bias = not no_bias), activation ) self.ff = Sequential( project_in, nn.LayerNorm(inner_dim) if post_act_ln else None, nn.Dropout(dropout), nn.Linear(inner_dim, dim_out, bias = not no_bias) ) # init last linear layer to 0 if zero_init_output: init_zero_(self.ff[-1]) def forward(self, x): return self.ff(x) # attention. it is all we need class Attention(nn.Module): def __init__( self, dim, dim_head = DEFAULT_DIM_HEAD, heads = 8, causal = False, flash = False, talking_heads = False, head_scale = False, sparse_topk = None, num_mem_kv = 0, dropout = 0., on_attn = False, gate_values = False, zero_init_output = False, max_attend_past = None, qk_norm = False, qk_norm_groups = 1, qk_norm_scale = 10, qk_norm_dim_scale = False, one_kv_head = False, shared_kv = False, value_dim_head = None, tensor_product = False, # https://arxiv.org/abs/2208.06061 cascading_heads = False, add_zero_kv = False, # same as add_zero_attn in pytorch onnxable = False ): super().__init__() self.scale = dim_head ** -0.5 self.heads = heads self.causal = causal self.max_attend_past = max_attend_past value_dim_head = default(value_dim_head, dim_head) q_dim = k_dim = dim_head * heads v_dim = out_dim = value_dim_head * heads self.one_kv_head = one_kv_head if one_kv_head: k_dim = dim_head v_dim = value_dim_head out_dim = v_dim * heads self.to_q = nn.Linear(dim, q_dim, bias = False) self.to_k = nn.Linear(dim, k_dim, bias = False) # shared key / values, for further memory savings during inference assert not (shared_kv and value_dim_head != dim_head), 'key and value head dimensions must be equal for shared key / values' self.to_v = nn.Linear(dim, v_dim, bias = False) if not shared_kv else None # relations projection from tp-attention self.to_r = nn.Linear(dim, v_dim, bias = False) if tensor_product else None # add GLU gating for aggregated values, from alphafold2 self.to_v_gate = None if gate_values: self.to_v_gate = nn.Linear(dim, out_dim) nn.init.constant_(self.to_v_gate.weight, 0) nn.init.constant_(self.to_v_gate.bias, 1) # cosine sim attention self.qk_norm = qk_norm self.qk_norm_groups = qk_norm_groups self.qk_norm_scale = qk_norm_scale # whether to use the rmsnorm (equivalent to cosine sim attention when scale is equal to 1) - https://arxiv.org/abs/2302.05442 self.qk_norm_dim_scale = qk_norm_dim_scale self.qk_norm_q_scale = self.qk_norm_k_scale = 1 if qk_norm and qk_norm_dim_scale: self.qk_norm_q_scale = nn.Parameter(torch.ones(dim_head)) self.qk_norm_k_scale = nn.Parameter(torch.ones(dim_head)) assert (not qk_norm) or (dim_head % qk_norm_groups) == 0, 'dimension per attention head must be divisible by the qk norm groups' assert not (qk_norm and (dim_head // qk_norm_groups) <= 2), 'the group dimension may be too small (2 was too small in my tests, but 4 still works, surprisingly)' # attend class - includes core attention algorithm + talking heads self.attend = Attend( heads = 
heads, causal = causal, talking_heads = talking_heads, dropout = dropout, sparse_topk = sparse_topk, qk_norm = qk_norm, scale = qk_norm_scale if qk_norm else self.scale, add_zero_kv = add_zero_kv, flash = flash, onnxable = onnxable ) # head scaling self.head_scale = head_scale if head_scale: self.head_scale_params = nn.Parameter(torch.ones(1, heads, 1, 1)) # explicit topk sparse attention self.sparse_topk = sparse_topk # add memory key / values self.num_mem_kv = num_mem_kv if num_mem_kv > 0: self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) # attention on attention self.attn_on_attn = on_attn self.to_out = nn.Sequential(nn.Linear(out_dim, dim * 2, bias = False), nn.GLU()) if on_attn else nn.Linear(out_dim, dim, bias = False) # init output projection 0 if zero_init_output: init_zero_(self.to_out) def forward( self, x, context = None, mask = None, context_mask = None, attn_mask = None, rel_pos = None, rotary_pos_emb = None, prev_attn = None, mem = None ): b, n, _, h, head_scale, device, has_context = *x.shape, self.heads, self.head_scale, x.device, exists(context) kv_input = default(context, x) q_input = x k_input = kv_input v_input = kv_input r_input = x if exists(mem): k_input = torch.cat((mem, k_input), dim = -2) v_input = torch.cat((mem, v_input), dim = -2) q = self.to_q(q_input) k = self.to_k(k_input) v = self.to_v(v_input) if exists(self.to_v) else k r = self.to_r(r_input) if exists(self.to_r) else None q = rearrange(q, 'b n (h d) -> b h n d', h = h) if not self.one_kv_head: k, v, r = map(lambda t: maybe(rearrange)(t, 'b n (h d) -> b h n d', h = h), (k, v, r)) if self.qk_norm: qk_l2norm = partial(l2norm, groups = self.qk_norm_groups) q, k = map(qk_l2norm, (q, k)) scale = self.qk_norm_scale q = q * self.qk_norm_q_scale k = k * self.qk_norm_k_scale if exists(rotary_pos_emb) and not has_context: freqs, xpos_scale = rotary_pos_emb l = freqs.shape[-1] q_xpos_scale, k_xpos_scale = (xpos_scale, xpos_scale ** -1.) if exists(xpos_scale) else (1., 1.) 
(ql, qr), (kl, kr), (vl, vr) = map(lambda t: (t[..., :l], t[..., l:]), (q, k, v)) ql, kl, vl = map(lambda arg: apply_rotary_pos_emb(arg[0], freqs, arg[1]), ((ql, q_xpos_scale), (kl, k_xpos_scale), (vl, k_xpos_scale))) q, k, v = map(lambda t: torch.cat(t, dim = -1), ((ql, qr), (kl, kr), (vl, vr))) input_mask = context_mask if has_context else mask if self.num_mem_kv > 0: mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b = b), (self.mem_k, self.mem_v)) if self.qk_norm: mem_k = l2norm(mem_k) mem_k = mem_k * self.qk_norm_k_scale k = torch.cat((mem_k, k), dim = -2) v = torch.cat((mem_v, v), dim = -2) if exists(input_mask): input_mask = pad_at_dim(input_mask, (self.num_mem_kv, 0), dim = -1, value = True) i, j = map(lambda t: t.shape[-2], (q, k)) # determine masking mask_value = max_neg_value(q) masks = [] final_attn_mask = None if exists(input_mask): input_mask = rearrange(input_mask, 'b j -> b 1 1 j') masks.append(~input_mask) if exists(attn_mask): assert 2 <= attn_mask.ndim <= 4, 'attention mask must have greater than 2 dimensions but less than or equal to 4' if attn_mask.ndim == 2: attn_mask = rearrange(attn_mask, 'i j -> 1 1 i j') elif attn_mask.ndim == 3: attn_mask = rearrange(attn_mask, 'h i j -> 1 h i j') masks.append(~attn_mask) if exists(self.max_attend_past): range_q = torch.arange(j - i, j, device = device) range_k = torch.arange(j, device = device) dist = rearrange(range_q, 'i -> 1 1 i 1') - rearrange(range_k, 'j -> 1 1 1 j') max_attend_past_mask = dist > self.max_attend_past masks.append(max_attend_past_mask) if len(masks) > 0: final_attn_mask = ~or_reduce(masks) # prepare relative positional bias, if needed attn_bias = None if exists(rel_pos): attn_bias = rel_pos(i, j) # attention is all we need out, intermediates = self.attend( q, k, v, mask = final_attn_mask, attn_bias = attn_bias, prev_attn = prev_attn ) # https://arxiv.org/abs/2208.06061 proposes to add a residual for better gradients if exists(r): out = out * r + out # normformer scaling of heads if head_scale: out = out * self.head_scale_params # merge heads out = rearrange(out, 'b h n d -> b n (h d)') # alphafold2 styled gating of the values if exists(self.to_v_gate): gates = self.to_v_gate(x) out = out * gates.sigmoid() # combine the heads out = self.to_out(out) if exists(mask): mask = rearrange(mask, 'b n -> b n 1') out = out.masked_fill(~mask, 0.) 
return out, intermediates class AttentionLayers(nn.Module): def __init__( self, dim, depth, heads = 8, causal = False, cross_attend = False, only_cross = False, use_scalenorm = False, use_rmsnorm = False, use_simple_rmsnorm = False, alibi_pos_bias = False, alibi_num_heads = None, rel_pos_bias = False, rel_pos_num_buckets = 32, rel_pos_max_distance = 128, dynamic_pos_bias = False, dynamic_pos_bias_log_distance = False, dynamic_pos_bias_mlp_depth = 2, dynamic_pos_bias_norm = False, rotary_pos_emb = False, rotary_emb_dim = None, rotary_xpos = False, rotary_interpolation_factor = 1., rotary_xpos_scale_base = 512, rotary_base_rescale_factor = 1., custom_layers = None, sandwich_coef = None, par_ratio = None, residual_attn = False, cross_residual_attn = False, macaron = False, pre_norm = True, pre_norm_has_final_norm = True, gate_residual = False, scale_residual = False, scale_residual_constant = 1., deepnorm = False, shift_tokens = 0, sandwich_norm = False, resi_dual = False, resi_dual_scale = 1., zero_init_branch_output = False, layer_dropout = 0., cross_attn_tokens_dropout = 0., **kwargs ): super().__init__() rotary_pos_emb = rotary_pos_emb or rotary_xpos ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) attn_kwargs, kwargs = groupby_prefix_and_trim('attn_', kwargs) dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) self.dim = dim self.depth = depth self.layers = nn.ModuleList([]) self.has_pos_emb = rel_pos_bias or rotary_pos_emb rotary_emb_dim = max(default(rotary_emb_dim, dim_head // 2), 32) assert not (rotary_xpos and not causal), 'rotary xpos is not compatible with bidirectional attention' self.rotary_pos_emb = RotaryEmbedding(rotary_emb_dim, use_xpos = rotary_xpos, scale_base = rotary_xpos_scale_base, interpolation_factor = rotary_interpolation_factor, base_rescale_factor = rotary_base_rescale_factor) if rotary_pos_emb else None assert not (alibi_pos_bias and rel_pos_bias), 'you can only choose Alibi positional bias or T5 relative positional bias, not both' assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' # relative positional bias flash_attn = attn_kwargs.get('flash', False) assert (int(rel_pos_bias) + int(dynamic_pos_bias) + int(alibi_pos_bias)) <= 1, 'you can only choose up to one of t5, alibi, or dynamic positional bias' self.rel_pos = None if rel_pos_bias: assert not flash_attn, 'flash attention not compatible with t5 relative positional bias' self.rel_pos = RelativePositionBias(scale = dim_head ** 0.5, causal = causal, heads = heads, num_buckets = rel_pos_num_buckets, max_distance = rel_pos_max_distance) elif dynamic_pos_bias: assert not flash_attn, 'flash attention not compatible with dynamic positional bias' self.rel_pos = DynamicPositionBias(dim = dim // 4, heads = heads, log_distance = dynamic_pos_bias_log_distance, depth = dynamic_pos_bias_mlp_depth, norm = dynamic_pos_bias_norm) elif alibi_pos_bias: alibi_num_heads = default(alibi_num_heads, heads) assert alibi_num_heads <= heads, 'number of ALiBi heads must be less than the total number of heads' self.rel_pos = AlibiPositionalBias(heads = alibi_num_heads, total_heads = heads) # determine deepnorm and residual scale if deepnorm: assert scale_residual_constant == 1, 'scale residual constant is being overridden by deep norm settings' pre_norm = sandwich_norm = resi_dual = False scale_residual = True scale_residual_constant = (2 * depth) ** 0.25 assert (int(sandwich_norm) + int(resi_dual)) <= 1, 'either sandwich norm or 
resiDual is selected, but not both' assert not (not pre_norm and sandwich_norm), 'sandwich norm cannot be used when not using prenorm' if resi_dual: pre_norm = False self.pre_norm = pre_norm self.sandwich_norm = sandwich_norm self.resi_dual = resi_dual assert 0 < resi_dual_scale <= 1., 'resiDual prenorm residual must be scaled by a factor greater than 0 and less than or equal to 1.' self.resi_dual_scale = resi_dual_scale self.residual_attn = residual_attn self.cross_residual_attn = cross_residual_attn assert not (flash_attn and (residual_attn or cross_residual_attn)), 'flash attention is not compatible with residual attention' self.cross_attend = cross_attend assert (int(use_scalenorm) + int(use_rmsnorm) + int(use_simple_rmsnorm)) <= 1, 'you can only use either scalenorm, rmsnorm, or simple rmsnorm' if use_scalenorm: norm_class = ScaleNorm elif use_rmsnorm: norm_class = RMSNorm elif use_simple_rmsnorm: norm_class = SimpleRMSNorm else: norm_class = nn.LayerNorm norm_fn = partial(norm_class, dim) if cross_attend and not only_cross: default_block = ('a', 'c', 'f') elif cross_attend and only_cross: default_block = ('c', 'f') else: default_block = ('a', 'f') if macaron: default_block = ('f',) + default_block # zero init if zero_init_branch_output: attn_kwargs = {**attn_kwargs, 'zero_init_output': True} ff_kwargs = {**ff_kwargs, 'zero_init_output': True} # calculate layer block order if exists(custom_layers): layer_types = custom_layers elif exists(par_ratio): par_depth = depth * len(default_block) assert 1 < par_ratio <= par_depth, 'par ratio out of range' default_block = tuple(filter(not_equals('f'), default_block)) par_attn = par_depth // par_ratio depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper par_width = (depth_cut + depth_cut // par_attn) // par_attn assert len(default_block) <= par_width, 'default block is too large for par_ratio' par_block = default_block + ('f',) * (par_width - len(default_block)) par_head = par_block * par_attn layer_types = par_head + ('f',) * (par_depth - len(par_head)) elif exists(sandwich_coef): assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef else: layer_types = default_block * depth self.layer_types = layer_types self.num_attn_layers = len(list(filter(equals('a'), layer_types))) # stochastic depth self.layer_dropouts = cast_tuple(layer_dropout, len(layer_types)) # structured dropout for cross attending self.cross_attn_tokens_dropout = cross_attn_tokens_dropout # calculate token shifting shift_tokens = cast_tuple(shift_tokens, len(layer_types)) # whether it has post norm self.final_norm = norm_fn() if pre_norm or resi_dual else nn.Identity() # iterate and construct layers for ind, (layer_type, layer_shift_tokens) in enumerate(zip(self.layer_types, shift_tokens)): is_last_layer = ind == (len(self.layer_types) - 1) if layer_type == 'a': layer = Attention(dim, heads = heads, causal = causal, **attn_kwargs) elif layer_type == 'c': layer = Attention(dim, heads = heads, **attn_kwargs) elif layer_type == 'f': layer = FeedForward(dim, **ff_kwargs) layer = layer if not macaron else Scale(0.5, layer) else: raise Exception(f'invalid layer type {layer_type}') if layer_shift_tokens > 0: shift_range_upper = layer_shift_tokens + 1 shift_range_lower = -layer_shift_tokens if not causal else 0 layer = ShiftTokens(range(shift_range_lower, shift_range_upper), layer) residual_fn = 
GRUGating if gate_residual else Residual residual = residual_fn(dim, scale_residual = scale_residual, scale_residual_constant = scale_residual_constant) pre_branch_norm = norm_fn() if pre_norm else None post_branch_norm = norm_fn() if sandwich_norm else None post_main_norm = norm_fn() if not pre_norm else None norms = nn.ModuleList([ pre_branch_norm, post_branch_norm, post_main_norm ]) self.layers.append(nn.ModuleList([ norms, layer, residual ])) if deepnorm: init_gain = (8 * depth) ** -0.25 deepnorm_init(self, init_gain) def forward( self, x, context = None, mask = None, context_mask = None, attn_mask = None, self_attn_context_mask = None, mems = None, return_hiddens = False ): assert not (self.cross_attend ^ exists(context)), 'context must be passed in if cross_attend is set to True' hiddens = [] layer_hiddens = [] intermediates = [] prev_attn = None prev_cross_attn = None mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers rotary_pos_emb = None if exists(self.rotary_pos_emb): max_rotary_emb_length = max(list(map(lambda m: (m.shape[1] if exists(m) else 0) + x.shape[1], mems))) rotary_pos_emb = self.rotary_pos_emb(max_rotary_emb_length, x.device) outer_residual = x * self.resi_dual_scale for ind, (layer_type, (norm, block, residual_fn), layer_dropout) in enumerate(zip(self.layer_types, self.layers, self.layer_dropouts)): is_last = ind == (len(self.layers) - 1) if self.training and layer_dropout > 0. and random() < layer_dropout: continue if layer_type == 'a': if return_hiddens: hiddens.append(x) layer_mem = mems.pop(0) if mems else None if layer_type == 'c': if self.training and self.cross_attn_tokens_dropout > 0.: context, context_mask = dropout_seq(context, context_mask, self.cross_attn_tokens_dropout) inner_residual = x if return_hiddens: layer_hiddens.append(x) pre_norm, post_branch_norm, post_main_norm = norm if exists(pre_norm): x = pre_norm(x) if layer_type == 'a': out, inter = block(x, mask = mask, context_mask = self_attn_context_mask, attn_mask = attn_mask, rel_pos = self.rel_pos, rotary_pos_emb = rotary_pos_emb, prev_attn = prev_attn, mem = layer_mem) elif layer_type == 'c': out, inter = block(x, context = context, mask = mask, context_mask = context_mask, prev_attn = prev_cross_attn) elif layer_type == 'f': out = block(x) if self.resi_dual: outer_residual = outer_residual + out * self.resi_dual_scale if exists(post_branch_norm): out = post_branch_norm(out) x = residual_fn(out, inner_residual) if layer_type in ('a', 'c') and return_hiddens: intermediates.append(inter) if layer_type == 'a' and self.residual_attn: prev_attn = inter.pre_softmax_attn elif layer_type == 'c' and self.cross_residual_attn: prev_cross_attn = inter.pre_softmax_attn if exists(post_main_norm): x = post_main_norm(x) if return_hiddens: layer_hiddens.append(x) if self.resi_dual: x = x + self.final_norm(outer_residual) else: x = self.final_norm(x) if return_hiddens: intermediates = LayerIntermediates( hiddens = hiddens, attn_intermediates = intermediates, layer_hiddens = layer_hiddens ) return x, intermediates return x class Encoder(AttentionLayers): def __init__(self, **kwargs): assert 'causal' not in kwargs, 'cannot set causality on encoder' super().__init__(causal = False, **kwargs) class Decoder(AttentionLayers): def __init__(self, **kwargs): assert 'causal' not in kwargs, 'cannot set causality on decoder' super().__init__(causal = True, **kwargs) class CrossAttender(AttentionLayers): def __init__(self, **kwargs): super().__init__(cross_attend = True, only_cross = True, **kwargs) class 
ViTransformerWrapper(nn.Module): def __init__( self, *, image_size, patch_size, attn_layers, channels = 3, num_classes = None, post_emb_norm = False, emb_dropout = 0. ): super().__init__() assert isinstance(attn_layers, Encoder), 'attention layers must be an Encoder' assert image_size % patch_size == 0, 'image dimensions must be divisible by the patch size' dim = attn_layers.dim num_patches = (image_size // patch_size) ** 2 patch_dim = channels * patch_size ** 2 self.patch_size = patch_size self.pos_embedding = nn.Parameter(torch.randn(1, num_patches, dim)) self.patch_to_embedding = nn.Sequential( nn.LayerNorm(patch_dim), nn.Linear(patch_dim, dim), nn.LayerNorm(dim) ) self.post_emb_norm = nn.LayerNorm(dim) if post_emb_norm else nn.Identity() self.dropout = nn.Dropout(emb_dropout) self.attn_layers = attn_layers self.mlp_head = nn.Linear(dim, num_classes) if exists(num_classes) else nn.Identity() def forward( self, img, return_embeddings = False ): p = self.patch_size x = rearrange(img, 'b c (h p1) (w p2) -> b (h w) (p1 p2 c)', p1 = p, p2 = p) x = self.patch_to_embedding(x) n = x.shape[1] x = x + self.pos_embedding[:, :n] x = self.post_emb_norm(x) x = self.dropout(x) x = self.attn_layers(x) if not exists(self.mlp_head) or return_embeddings: return x x = x.mean(dim = -2) return self.mlp_head(x) class Transformer(nn.Module): def __init__( self, *, num_tokens, max_seq_len, attn_layers, emb_dim = None, max_mem_len = 0, shift_mem_down = 0, emb_dropout = 0., post_emb_norm = False, num_memory_tokens = None, tie_embedding = False, logits_dim = None, use_abs_pos_emb = True, scaled_sinu_pos_emb = False, l2norm_embed = False, emb_frac_gradient = 1., # GLM-130B and Cogview successfully used this, set at 0.1 attn_z_loss_weight = 1e-4 ): super().__init__() assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' dim = attn_layers.dim emb_dim = default(emb_dim, dim) self.emb_dim = emb_dim self.num_tokens = num_tokens self.max_seq_len = max_seq_len self.max_mem_len = max_mem_len self.shift_mem_down = shift_mem_down self.l2norm_embed = l2norm_embed self.token_emb = TokenEmbedding(emb_dim, num_tokens, l2norm_embed = l2norm_embed) if not (use_abs_pos_emb and not attn_layers.has_pos_emb): self.pos_emb = always(0) elif scaled_sinu_pos_emb: self.pos_emb = ScaledSinusoidalEmbedding(emb_dim) else: self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len, l2norm_embed = l2norm_embed) self.emb_frac_gradient = emb_frac_gradient # fraction of the gradient that should go to the embedding, https://arxiv.org/abs/2105.13290 self.post_emb_norm = nn.LayerNorm(emb_dim) if post_emb_norm else nn.Identity() self.emb_dropout = nn.Dropout(emb_dropout) self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() self.attn_layers = attn_layers self.init_() logits_dim = default(logits_dim, num_tokens) self.to_logits = nn.Linear(dim, logits_dim) if not tie_embedding else lambda t: t @ self.token_emb.emb.weight.t() # memory tokens (like [cls]) from Memory Transformers paper num_memory_tokens = default(num_memory_tokens, 0) self.num_memory_tokens = num_memory_tokens if num_memory_tokens > 0: self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) def init_(self): if self.l2norm_embed: nn.init.normal_(self.token_emb.emb.weight, std = 1e-5) if not isinstance(self.pos_emb, always): nn.init.normal_(self.pos_emb.emb.weight, std = 1e-5) return nn.init.kaiming_normal_(self.token_emb.emb.weight) def forward( self, x, return_embeddings = False, 
return_logits_and_embeddings = False, return_intermediates = False, mask = None, return_mems = False, return_attn = False, mems = None, pos = None, prepend_embeds = None, sum_embeds = None, return_attn_z_loss = False, attn_z_loss_weight = 1e-4, **kwargs ): b, n, device, num_mem, emb_frac_gradient = *x.shape, x.device, self.num_memory_tokens, self.emb_frac_gradient return_hiddens = return_mems | return_attn | return_intermediates | return_attn_z_loss # absolute positional embedding external_pos_emb = exists(pos) and pos.dtype != torch.long pos_emb = self.pos_emb(x, pos = pos) if not external_pos_emb else pos x = self.token_emb(x) + pos_emb # for summing embeddings passed externally - needs this for self-conditioning in non-autoregressive training if exists(sum_embeds): x = x + sum_embeds # post embedding norm, purportedly leads to greater stabilization x = self.post_emb_norm(x) # whether to append embeds, as in PaLI, for image embeddings if exists(prepend_embeds): prepend_seq, prepend_dim = prepend_embeds.shape[1:] assert prepend_dim == x.shape[-1], 'prepended embeddings need to have same dimensions as text model dimensions' x = torch.cat((prepend_embeds, x), dim = -2) # whether to reduce the gradient going to the embedding, from cogview paper, corroborated by GLM-130B model if emb_frac_gradient < 1: assert emb_frac_gradient > 0 x = x * emb_frac_gradient + x.detach() * (1 - emb_frac_gradient) # embedding dropout x = self.emb_dropout(x) x = self.project_emb(x) if num_mem > 0: mem = repeat(self.memory_tokens, 'n d -> b n d', b = b) x = torch.cat((mem, x), dim = 1) # auto-handle masking after appending memory tokens if exists(mask): mask = pad_at_dim(mask, (num_mem, 0), dim = -1, value = True) if self.shift_mem_down and exists(mems): mems_l, mems_r = mems[:self.shift_mem_down], mems[self.shift_mem_down:] mems = [*mems_r, *mems_l] if return_hiddens: x, intermediates = self.attn_layers(x, mask = mask, mems = mems, return_hiddens = True, **kwargs) else: x = self.attn_layers(x, mask = mask, mems = mems, **kwargs) mem, x = x[:, :num_mem], x[:, num_mem:] if return_logits_and_embeddings: out = (self.to_logits(x), x) elif return_embeddings: out = x else: out = self.to_logits(x) if return_attn_z_loss: pre_softmax_attns = list(map(lambda t: t.pre_softmax_attn, intermediates.attn_intermediates)) intermediates.attn_z_loss = calc_z_loss(pre_softmax_attns, weight = attn_z_loss_weight) return_intermediates = True if return_intermediates: return out, intermediates if return_mems: hiddens = intermediates.hiddens new_mems = list(map(lambda pair: torch.cat(pair, dim = -2), zip(mems, hiddens))) if exists(mems) else hiddens new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems)) return out, new_mems if return_attn: attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) return out, attn_maps return out
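# Hypothetical usage sketch, not part of the original file: it wires the
# Transformer and Decoder classes above into a tiny decoder-only language model
# through the AutoregressiveWrapper defined at the top of this module.
if __name__ == "__main__":
    import torch

    model = Transformer(
        num_tokens = 256,
        max_seq_len = 128,
        attn_layers = Decoder(dim = 128, depth = 2, heads = 4)
    )
    wrapper = AutoregressiveWrapper(model)

    seq = torch.randint(0, 256, (2, 64))
    logits, loss = wrapper(seq)               # teacher-forced next-token objective
    print(logits.shape, loss.item())          # logits: torch.Size([2, 63, 256])

    prompt = torch.randint(0, 256, (2, 8))
    sampled = wrapper.generate(prompt, seq_len = 16)
    print(sampled.shape)                      # expected: torch.Size([2, 16])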
NeoCortex-main
neox/transformer.py
from robocat.model import ImageDataGenerator

model = ImageDataGenerator()
img = model.generate(prompt="A robot looking at a soda can in first person")
print(img)
RoboCAT-master
image_example.py
from setuptools import setup, find_packages

setup(
    name = 'robocat',
    packages = find_packages(exclude=[]),
    version = '0.0.4',
    license = 'MIT',
    description = 'Robo CAT - Pytorch',
    author = 'Kye Gomez',
    author_email = '[email protected]',
    long_description_content_type = 'text/markdown',
    url = 'https://github.com/kyegomez/RoboCAT',
    keywords = [
        'artificial intelligence',
        'deep learning',
        'transformers',
        'attention mechanism',
        'robotics'
    ],
    install_requires=[
        'transformers',
        'torch>=1.6',
        'einops>=0.3.2',
        'beartype',
        'palme',
        'palm-rlhf-pytorch',
        'tokenizers',
        'wandb',
        'classifier-free-guidance-pytorch',
        'axial_positional_embedding',
        'DALL-E',
        'ftfy',
        'packaging',
        'pillow',
        'regex',
        'rotary-embedding-torch',
        'taming-transformers-rom1504',
        'torchvision',
        'tqdm',
        'youtokentome',
        'WebDataset'
    ],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.6',
    ],
)
RoboCAT-master
setup.py
import torch
from robocat.model import RoboCat

model = RoboCat()

video = torch.randn(2, 3, 6, 224, 224)
instructions = [
    'bring me that apple sitting on the table',
    'please pass the butter'
]

result = model.forward(video, instructions)
print(result)
RoboCAT-master
example.py
from robocat.model import VideoDataGenerator

model = VideoDataGenerator()
model.generate(prompt="4 legged robot walking to counter")
RoboCAT-master
video_example.py
from robocat.model import ImageDataGenerator, VideoDataGenerator, RoboCat
RoboCAT-master
robocat/__init__.py
import torch import torch.nn.functional as F from torch import nn, einsum from typing import List, Optional, Callable, Tuple from beartype import beartype from einops import pack, unpack, repeat, reduce, rearrange from einops.layers.torch import Rearrange, Reduce from functools import partial from classifier_free_guidance_pytorch import TextConditioner, AttentionTextConditioner, classifier_free_guidance # helpers def exists(val): return val is not None def default(val, d): return val if exists(val) else d def cast_tuple(val, length = 1): return val if isinstance(val, tuple) else ((val,) * length) def pack_one(x, pattern): return pack([x], pattern) def unpack_one(x, ps, pattern): return unpack(x, ps, pattern)[0] # sinusoidal positions def posemb_sincos_1d(seq, dim, temperature = 10000, device = None, dtype = torch.float32): n = torch.arange(seq, device = device) omega = torch.arange(dim // 2, device = device) / (dim // 2 - 1) omega = 1. / (temperature ** omega) n = n[:, None] * omega[None, :] pos_emb = torch.cat((n.sin(), n.cos()), dim = 1) return pos_emb.type(dtype) # helper classes class Residual(nn.Module): def __init__(self, fn): super().__init__() self.fn = fn def forward(self, x): return self.fn(x) + x class LayerNorm(nn.Module): def __init__(self, dim): super().__init__() self.gamma = nn.Parameter(torch.ones(dim)) self.register_buffer("beta", torch.zeros(dim)) def forward(self, x): return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta) class FeedForward(nn.Module): def __init__(self, dim, mult = 4, dropout = 0.): super().__init__() inner_dim = int(dim * mult) self.norm = LayerNorm(dim) self.net = nn.Sequential( nn.Linear(dim, inner_dim), nn.GELU(), nn.Dropout(dropout), nn.Linear(inner_dim, dim), nn.Dropout(dropout) ) def forward(self, x, cond_fn = None): x = self.norm(x) if exists(cond_fn): # adaptive layernorm x = cond_fn(x) return self.net(x) # MBConv class SqueezeExcitation(nn.Module): def __init__(self, dim, shrinkage_rate = 0.25): super().__init__() hidden_dim = int(dim * shrinkage_rate) self.gate = nn.Sequential( Reduce('b c h w -> b c', 'mean'), nn.Linear(dim, hidden_dim, bias = False), nn.SiLU(), nn.Linear(hidden_dim, dim, bias = False), nn.Sigmoid(), Rearrange('b c -> b c 1 1') ) def forward(self, x): return x * self.gate(x) class MBConvResidual(nn.Module): def __init__(self, fn, dropout = 0.): super().__init__() self.fn = fn self.dropsample = Dropsample(dropout) def forward(self, x): out = self.fn(x) out = self.dropsample(out) return out + x class Dropsample(nn.Module): def __init__(self, prob = 0): super().__init__() self.prob = prob def forward(self, x): device = x.device if self.prob == 0. or (not self.training): return x keep_mask = torch.FloatTensor((x.shape[0], 1, 1, 1), device = device).uniform_() > self.prob return x * keep_mask / (1 - self.prob) def MBConv( dim_in, dim_out, *, downsample, expansion_rate = 4, shrinkage_rate = 0.25, dropout = 0. 
): hidden_dim = int(expansion_rate * dim_out) stride = 2 if downsample else 1 net = nn.Sequential( nn.Conv2d(dim_in, hidden_dim, 1), nn.BatchNorm2d(hidden_dim), nn.GELU(), nn.Conv2d(hidden_dim, hidden_dim, 3, stride = stride, padding = 1, groups = hidden_dim), nn.BatchNorm2d(hidden_dim), nn.GELU(), SqueezeExcitation(hidden_dim, shrinkage_rate = shrinkage_rate), nn.Conv2d(hidden_dim, dim_out, 1), nn.BatchNorm2d(dim_out) ) if dim_in == dim_out and not downsample: net = MBConvResidual(net, dropout = dropout) return net # attention related classes class Attention(nn.Module): def __init__( self, dim, dim_head = 32, dropout = 0., window_size = 7 ): super().__init__() assert (dim % dim_head) == 0, 'dimension should be divisible by dimension per head' self.norm = LayerNorm(dim) self.heads = dim // dim_head self.scale = dim_head ** -0.5 self.to_qkv = nn.Linear(dim, dim * 3, bias = False) self.attend = nn.Sequential( nn.Softmax(dim = -1), nn.Dropout(dropout) ) self.to_out = nn.Sequential( nn.Linear(dim, dim, bias = False), nn.Dropout(dropout) ) # relative positional bias self.rel_pos_bias = nn.Embedding((2 * window_size - 1) ** 2, self.heads) pos = torch.arange(window_size) grid = torch.stack(torch.meshgrid(pos, pos, indexing = 'ij')) grid = rearrange(grid, 'c i j -> (i j) c') rel_pos = rearrange(grid, 'i ... -> i 1 ...') - rearrange(grid, 'j ... -> 1 j ...') rel_pos += window_size - 1 rel_pos_indices = (rel_pos * torch.tensor([2 * window_size - 1, 1])).sum(dim = -1) self.register_buffer('rel_pos_indices', rel_pos_indices, persistent = False) def forward(self, x): batch, height, width, window_height, window_width, _, device, h = *x.shape, x.device, self.heads x = self.norm(x) # flatten x = rearrange(x, 'b x y w1 w2 d -> (b x y) (w1 w2) d') # project for queries, keys, values q, k, v = self.to_qkv(x).chunk(3, dim = -1) # split heads q, k, v = map(lambda t: rearrange(t, 'b n (h d ) -> b h n d', h = h), (q, k, v)) # scale q = q * self.scale # sim sim = einsum('b h i d, b h j d -> b h i j', q, k) # add positional bias bias = self.rel_pos_bias(self.rel_pos_indices) sim = sim + rearrange(bias, 'i j h -> h i j') # attention attn = self.attend(sim) # aggregate out = einsum('b h i j, b h j d -> b h i d', attn, v) # merge heads out = rearrange(out, 'b h (w1 w2) d -> b w1 w2 (h d)', w1 = window_height, w2 = window_width) # combine heads out out = self.to_out(out) return rearrange(out, '(b x y) ... 
-> b x y ...', x = height, y = width) class MaxViT(nn.Module): def __init__( self, *, num_classes, dim, depth, dim_head = 32, dim_conv_stem = None, window_size = 7, mbconv_expansion_rate = 4, mbconv_shrinkage_rate = 0.25, dropout = 0.1, channels = 3 ): super().__init__() assert isinstance(depth, tuple), 'depth needs to be tuple if integers indicating number of transformer blocks at that stage' # convolutional stem dim_conv_stem = default(dim_conv_stem, dim) self.conv_stem = nn.Sequential( nn.Conv2d(channels, dim_conv_stem, 3, stride = 2, padding = 1), nn.Conv2d(dim_conv_stem, dim_conv_stem, 3, padding = 1) ) # variables num_stages = len(depth) dims = tuple(map(lambda i: (2 ** i) * dim, range(num_stages))) dims = (dim_conv_stem, *dims) dim_pairs = tuple(zip(dims[:-1], dims[1:])) self.layers = nn.ModuleList([]) # shorthand for window size for efficient block - grid like attention w = window_size # iterate through stages cond_hidden_dims = [] for ind, ((layer_dim_in, layer_dim), layer_depth) in enumerate(zip(dim_pairs, depth)): for stage_ind in range(layer_depth): is_first = stage_ind == 0 stage_dim_in = layer_dim_in if is_first else layer_dim cond_hidden_dims.append(stage_dim_in) block = nn.Sequential( MBConv( stage_dim_in, layer_dim, downsample = is_first, expansion_rate = mbconv_expansion_rate, shrinkage_rate = mbconv_shrinkage_rate ), Rearrange('b d (x w1) (y w2) -> b x y w1 w2 d', w1 = w, w2 = w), # block-like attention Residual(Attention(dim = layer_dim, dim_head = dim_head, dropout = dropout, window_size = w)), Residual(FeedForward(dim = layer_dim, dropout = dropout)), Rearrange('b x y w1 w2 d -> b d (x w1) (y w2)'), Rearrange('b d (w1 x) (w2 y) -> b x y w1 w2 d', w1 = w, w2 = w), # grid-like attention Residual(Attention(dim = layer_dim, dim_head = dim_head, dropout = dropout, window_size = w)), Residual(FeedForward(dim = layer_dim, dropout = dropout)), Rearrange('b x y w1 w2 d -> b d (w1 x) (w2 y)'), ) self.layers.append(block) embed_dim = dims[-1] self.embed_dim = dims[-1] self.cond_hidden_dims = cond_hidden_dims # mlp head out self.mlp_head = nn.Sequential( Reduce('b d h w -> b d', 'mean'), LayerNorm(embed_dim), nn.Linear(embed_dim, num_classes) ) @beartype def forward( self, x, texts: Optional[List[str]] = None, cond_fns: Optional[Tuple[Callable, ...]] = None, cond_drop_prob = 0., return_embeddings = False ): x = self.conv_stem(x) if not exists(cond_fns): cond_fns = (None,) * len(self.layers) for stage, cond_fn in zip(self.layers, cond_fns): if exists(cond_fn): x = cond_fn(x) x = stage(x) if return_embeddings: return x return self.mlp_head(x) # attention class TransformerAttention(nn.Module): def __init__( self, dim, causal = False, dim_head = 64, dim_context = None, heads = 8, norm_context = False, dropout = 0.1 ): super().__init__() self.heads = heads self.scale = dim_head ** -0.5 self.causal = causal inner_dim = dim_head * heads dim_context = default(dim_context, dim) self.norm = LayerNorm(dim) self.context_norm = LayerNorm(dim_context) if norm_context else nn.Identity() self.attn_dropout = nn.Dropout(dropout) self.to_q = nn.Linear(dim, inner_dim, bias = False) self.to_kv = nn.Linear(dim_context, dim_head * 2, bias = False) self.to_out = nn.Sequential( nn.Linear(inner_dim, dim, bias = False), nn.Dropout(dropout) ) def forward( self, x, context = None, mask = None, attn_bias = None, attn_mask = None, cond_fn: Optional[Callable] = None ): b = x.shape[0] if exists(context): context = self.context_norm(context) kv_input = default(context, x) x = self.norm(x) if exists(cond_fn): # 
adaptive layer-norm x = cond_fn(x) q, k, v = self.to_q(x), *self.to_kv(kv_input).chunk(2, dim = -1) q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads) q = q * self.scale sim = einsum('b h i d, b j d -> b h i j', q, k) if exists(attn_bias): sim = sim + attn_bias if exists(attn_mask): sim = sim.masked_fill(~attn_mask, -torch.finfo(sim.dtype).max) if exists(mask): mask = rearrange(mask, 'b j -> b 1 1 j') sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max) if self.causal: i, j = sim.shape[-2:] causal_mask = torch.ones((i, j), dtype = torch.bool, device = x.device).triu(j - i + 1) sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max) attn = sim.softmax(dim = -1) attn = self.attn_dropout(attn) out = einsum('b h i j, b j d -> b h i d', attn, v) out = rearrange(out, 'b h n d -> b n (h d)') return self.to_out(out) @beartype class Transformer(nn.Module): def __init__( self, dim, dim_head = 64, heads = 8, depth = 6, attn_dropout = 0., ff_dropout = 0. ): super().__init__() self.layers = nn.ModuleList([]) for _ in range(depth): self.layers.append(nn.ModuleList([ TransformerAttention(dim = dim, heads = heads, dropout = attn_dropout), FeedForward(dim = dim, dropout = ff_dropout) ])) def forward( self, x, cond_fns: Optional[Tuple[Callable, ...]] = None, attn_mask = None ): if not exists(cond_fns): cond_fns = (None,) * len(self.layers * 2) cond_fns = iter(cond_fns) for attn, ff in self.layers: x = attn(x, attn_mask = attn_mask, cond_fn = next(cond_fns)) + x x = ff(x, cond_fn = next(cond_fns)) + x return x # token learner module class TokenLearner(nn.Module): """ https://arxiv.org/abs/2106.11297 using the 1.1 version with the MLP (2 dense layers with gelu) for generating attention map """ def __init__( self, *, dim, ff_mult = 2, num_output_tokens = 8, num_layers = 2 ): super().__init__() inner_dim = dim * ff_mult * num_output_tokens self.num_output_tokens = num_output_tokens self.net = nn.Sequential( nn.Conv2d(dim * num_output_tokens, inner_dim, 1, groups = num_output_tokens), nn.GELU(), nn.Conv2d(inner_dim, num_output_tokens, 1, groups = num_output_tokens), ) def forward(self, x): x, ps = pack_one(x, '* c h w') x = repeat(x, 'b c h w -> b (g c) h w', g = self.num_output_tokens) attn = self.net(x) attn = rearrange(attn, 'b g h w -> b 1 g h w') x = rearrange(x, 'b (g c) h w -> b c g h w', g = self.num_output_tokens) x = reduce(x * attn, 'b c g h w -> b c g', 'mean') x = unpack_one(x, ps, '* c n') return x # data generator using stable instead of VQGAN class ImageDataGenerator: def __init__( self, model_id: str = "stabilityai/stable-diffusion-2", prompt: str = None, save_path: str = "generated_images" ): super().__init__() try: from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler except Exception as error: print(f"Cannot import diffusers, please download with pip install diffusers, {error}") self.scheduler = EulerDiscreteScheduler.from_pretrained( model_id, subfolder="scheduler" ) self.pipe = StableDiffusionPipeline.from_pretrained( model_id, scheduler=self.scheduler, torch_dtype=torch.float16 ) self.pipe = self.pipe.to("cuda") self.save_path = save_path def generate(self, prompt): image = self.pipe(prompt).images[0] # Assuming the output is a PIL image image.save(f"{self.save_path}.jpg") return image class VideoDataGenerator: def __init__( self, model_id="cerspense/zeroscope_v2_576w", prompt: str = None, num_inference_steps=40, height=320, width=576, num_frames=24 ): super().__init__() try: import torch from diffusers import DiffusionPipeline, 
DPMSolverMultistepScheduler from diffusers.utils import export_to_video self.export_to_video = export_to_video except Exception as error: print(f"Please download the torch and diffusers library pip3 install torch diffusers: {error}") self.model_id = model_id self.prompt = prompt self.num_inference_steps = num_inference_steps self.height = height self.width = width self.num_frames = num_frames self.pipe = DiffusionPipeline.from_pretrained( model_id, torch_dtype=torch.float16 ) self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(self.pipe.scheduler.config) self.pipe.enable_model_cpu_offload() def generate(self, prompt=None): prompt = self.prompt or prompt video_frames = self.pipe( prompt, num_inference_steps=self.num_inference_steps, height=self.height, width=self.width, num_frames=self.num_frames ).frames video_path = self.export_to_video(video_frames) # Robotic Transformer @beartype class Gato(nn.Module): def __init__( self, *, vit: MaxViT, num_actions = 11, action_bins = 256, depth = 6, heads = 8, dim_head = 64, token_learner_ff_mult = 2, token_learner_num_layers = 2, token_learner_num_output_tokens = 8, cond_drop_prob = 0.2, use_attn_conditioner = False, conditioner_kwargs: dict = dict() ): super().__init__() self.vit = vit self.num_vit_stages = len(vit.cond_hidden_dims) conditioner_klass = AttentionTextConditioner if use_attn_conditioner else TextConditioner self.conditioner = conditioner_klass( hidden_dims = (*tuple(vit.cond_hidden_dims), *((vit.embed_dim,) * depth * 2)), hiddens_channel_first = (*((True,) * self.num_vit_stages), *((False,) * depth * 2)), cond_drop_prob = cond_drop_prob, **conditioner_kwargs ) self.token_learner = TokenLearner( dim = vit.embed_dim, ff_mult = token_learner_ff_mult, num_output_tokens = token_learner_num_output_tokens, num_layers = token_learner_num_layers ) self.num_learned_tokens = token_learner_num_output_tokens self.transformer_depth = depth self.transformer = Transformer( dim = vit.embed_dim, dim_head = dim_head, heads = heads, depth = depth ) self.cond_drop_prob = cond_drop_prob self.to_logits = nn.Sequential( LayerNorm(vit.embed_dim), nn.Linear(vit.embed_dim, num_actions * action_bins), Rearrange('... (a b) -> ... a b', b = action_bins) ) @classifier_free_guidance def forward( self, video, texts: Optional[List[str]] = None, cond_drop_prob = 0. 
): depth = self.transformer_depth cond_drop_prob = default(cond_drop_prob, self.cond_drop_prob) frames, device = video.shape[2], video.device cond_fns = self.conditioner( texts, cond_drop_prob = cond_drop_prob, repeat_batch = (*((frames,) * self.num_vit_stages), *((1,) * self.transformer_depth * 2)) ) vit_cond_fns, transformer_cond_fns = cond_fns[:-(depth * 2)], cond_fns[-(depth * 2):] video = rearrange(video, 'b c f h w -> b f c h w') images, packed_shape = pack_one(video, '* c h w') tokens = self.vit( images, texts = texts, cond_fns = vit_cond_fns, cond_drop_prob = cond_drop_prob, return_embeddings = True ) tokens = unpack_one(tokens, packed_shape, '* c h w') learned_tokens = self.token_learner(tokens) learned_tokens = rearrange(learned_tokens, 'b f c n -> b (f n) c') # causal attention mask attn_mask = torch.ones((frames, frames), dtype = torch.bool, device = device).triu(1) attn_mask = repeat(attn_mask, 'i j -> (i r1) (j r2)', r1 = self.num_learned_tokens, r2 = self.num_learned_tokens) # sinusoidal positional embedding pos_emb = posemb_sincos_1d(frames, learned_tokens.shape[-1], dtype = learned_tokens.dtype, device = learned_tokens.device) learned_tokens = learned_tokens + repeat(pos_emb, 'n d -> (n r) d', r = self.num_learned_tokens) # attention attended_tokens = self.transformer(learned_tokens, cond_fns = transformer_cond_fns, attn_mask = ~attn_mask) pooled = reduce(attended_tokens, 'b (f n) d -> b f d', 'mean', f = frames) logits = self.to_logits(pooled) return logits class RoboCat: def __init__( self, num_classes = 1000, dim_conv_stem = 64, dim = 96, dim_head = 32, depth = (2, 2, 5, 2), window_size = 7, mbconv_expansion_rate = 4, mbconv_shrinkage_rate = 0.25, dropout = 0.1, num_actions=11, model_depth=6, heads=8, model_dim_head=64, cond_drop_prob=0.2 ): super().__init__() self.vit = MaxViT( num_classes=num_classes, dim_conv_stem=dim_conv_stem, dim=dim, dim_head=dim_head, depth=depth, window_size=window_size, mbconv_expansion_rate=mbconv_expansion_rate, mbconv_shrinkage_rate=mbconv_shrinkage_rate, dropout=dropout ) self.model = Gato( vit=self.vit, num_actions=num_actions, depth=model_depth, heads=heads, dim_head=model_dim_head, cond_drop_prob=cond_drop_prob ) def forward( self, video, instructions ): return self.model(video, instructions) # eval = self.model.eval() # eval_logits = self.model(video, text, cond_scale=3.)
RoboCAT-master
robocat/model.py
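A hedged sketch of how the pieces in robocat/model.py compose, based only on what is visible above: MaxViT encodes each frame, TokenLearner compresses the per-frame feature maps, and Gato attends over the resulting token sequence to produce per-frame action-bin logits. The argument values mirror the RoboCat defaults; the output shape in the comment is inferred from to_logits, not verified.

import torch
from robocat.model import MaxViT, Gato

vit = MaxViT(
    num_classes = 1000,
    dim = 96,
    dim_conv_stem = 64,
    dim_head = 32,
    depth = (2, 2, 5, 2),
    window_size = 7
)

agent = Gato(
    vit = vit,
    num_actions = 11,
    action_bins = 256,
    depth = 6,
    heads = 8,
    dim_head = 64
)

video = torch.randn(2, 3, 6, 224, 224)   # (batch, channels, frames, height, width)
instructions = ['pick up the red block', 'open the drawer']

logits = agent(video, instructions)       # expected shape: (2, 6, 11, 256) -> (batch, frames, actions, bins)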
import math import multiprocessing import os from datetime import timedelta from functools import partial from itertools import chain import torch from torch.distributed.fsdp import ( FullyShardedDataParallel, MixedPrecision, BackwardPrefetch, ShardingStrategy, ) from accelerate import Accelerator from accelerate.utils import (DummyOptim, InitProcessGroupKwargs) from datasets import load_dataset from lion_pytorch import Lion from torch.nn import LayerNorm from torch.nn import LayerNorm from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import ( CheckpointImpl, apply_activation_checkpointing, checkpoint_wrapper) from torch.distributed.fsdp.wrap import ( transformer_auto_wrap_policy, ) from torch.optim import AdamW from torch.utils.data import DataLoader from tqdm import tqdm from transformers import (AutoTokenizer, default_data_collator, get_cosine_schedule_with_warmup, get_linear_schedule_with_warmup, set_seed) # from utils.stable_adamw import StableAdamWUnfused from accelerate.logging import get_logger from robocat.model import RoboCAT from robocat.utils.stabe_adam import StableAdamWUnfused ########### SETUP CONFIG import torch.distributed as dist from accelerate.state import AcceleratorState # state = AcceleratorState() logger = get_logger(__name__, log_level="INFO") class CFG: BATCH_SIZE = 1 GRADIENT_ACCUMULATE_EVERY: int = 1 SEED: int = 42 LEARNING_RATE: float = 1e-4 #3e-4 # 1e-4 for lion WEIGHT_DECAY: float = 0.1 SEQ_LEN: int = 8192 NUM_CPU: int = multiprocessing.cpu_count() USE_DEEPSPEED: bool = True USE_FSDP: bool = True USE_PRETOKENIZED: bool = True USE_ACTIVATION_CHECKPOINTING: bool = True RESUME_FROM_CHECKPOINT: str = False CHECKPOINTING_STEPS: int = 1000 OUTPUT_DIR: str = 'checkpoints/' # Folder ENTITY_NAME: str = "Andromeda" LOGGING_STEPS: int = 100 # helpers def print_num_params(model, accelerator: Accelerator): # n_params = sum(p.numel() for p in model.parameters() if p.requires_grad) n_params = sum(p.numel() for p in model.parameters() if p.requires_grad) accelerator.print(f"Number of parameters in model: {n_params}") # activation checkpointing def activation_checkpointing( model: torch.nn.Module, offload_to_cpu: bool = False, accelerator: Accelerator = None, ): """ Apply activation checkpointing to a model. Args: model (Module): The model to which to apply activation checkpointing. offload_to_cpu (bool, optional): Whether to offload the activations to CPU. Defaults to False. accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None. """ if accelerator is not None: accelerator.print("Using activation checkpointing") def check_fn(submodule): return isinstance(submodule, RoboCAT) non_reentrant_wrapper = partial( checkpoint_wrapper, offload_to_cpu=offload_to_cpu, checkpoint_impl=CheckpointImpl.NO_REENTRANT, ) apply_activation_checkpointing( model, checkpoint_wrapper_fn=non_reentrant_wrapper, check_fn=check_fn ) # FSDP def fsdp( model: torch.nn.Module, auto_wrap: bool = False, mp: str = "fp32", shard_strat: str = "NO_SHARD", ): """ This function wraps a given PyTorch model with the FullyShardedDataParallel (FSDP) wrapper to enable efficient data parallelism and model sharding. Args: model (torch.nn.Module): The original PyTorch model to be wrapped with FSDP. auto_wrap (bool, optional): If True, it enables automatic wrapping of the model's layers according to the transformer_auto_wrap_policy. Default is False. mp (str, optional): The mixed precision mode to be used. 
Can be 'bf16' for BFloat16, 'fp16' for Float16 or 'fp32' for Float32 precision. Default is 'fp32'. shard_strat (str, optional): The sharding strategy to be used. Can be 'SHARD_GRAD' for sharding at gradient computation, 'FULL_SHARD' for full model sharding or 'NO_SHARD' for no sharding. Default is 'NO_SHARD'. Raises: ValueError: If the provided mp (mixed precision mode) is not 'bf16', 'fp16' or 'fp32'. ValueError: If the provided shard_strat (sharding strategy) is not 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD'. Returns: torch.nn.Module: The input model wrapped with FSDP. """ if auto_wrap: Andromeda_auto_wrap_policy = partial( transformer_auto_wrap_policy, transformer_layer_cls={ RoboCAT, }, ) else: Andromeda_auto_wrap_policy = None if mp == "bf16": mp_fsdp = MixedPrecision( param_dtype=torch.bfloat16, # Gradient communication precision. reduce_dtype=torch.bfloat16, # Buffer precision. buffer_dtype=torch.bfloat16, ) elif mp == "fp16": mp_fsdp = MixedPrecision( param_dtype=torch.float16, # Gradient communication precision. reduce_dtype=torch.float16, # Buffer precision. buffer_dtype=torch.float16, ) elif mp == "fp32": mp_fsdp = MixedPrecision( param_dtype=torch.float32, # Gradient communication precision. reduce_dtype=torch.float32, # Buffer precision. buffer_dtype=torch.float32, ) else: raise ValueError( "Invalid scheduler_type. Expected 'bf16', 'fp16' or 'fp32', got: {}".format( mp ) ) if shard_strat == "SHARD_GRAD": sharding_strat_fsdp = ShardingStrategy.SHARD_GRAD_OP elif shard_strat == "FULL_SHARD": sharding_strat_fsdp = ShardingStrategy.FULL_SHARD elif shard_strat == "NO_SHARD": sharding_strat_fsdp = ShardingStrategy.NO_SHARD else: raise ValueError( "Invalid scheduler_type. Expected 'SHARD_GRAD', 'FULL_SHARD' or 'NO_SHARD', got: {}".format( shard_strat ) ) model = FullyShardedDataParallel( model, auto_wrap_policy=Andromeda_auto_wrap_policy, mixed_precision=mp_fsdp, backward_prefetch=BackwardPrefetch.BACKWARD_PRE, sharding_strategy=sharding_strat_fsdp, forward_prefetch=True, use_orig_params=True, ) return model # learning rate scheduler def get_lr_scheduler_with_warmup( optimizer: torch.optim.Optimizer, scheduler_type: str, num_warmup_steps: int, max_train_steps: int, grad_accumulate_every: int = 1, accelerator: Accelerator = None, ): """ Get a learning rate scheduler with warmup. Args: optimizer (Optimizer): The optimizer for which to create the learning rate scheduler. scheduler_type (str): The type of learning rate scheduler to create, either "linear" or "cosine". num_warmup_steps (int): The number of warmup steps for the learning rate scheduler. max_train_steps (int): The maximum number of training steps. grad_accumulate_every (int, optional): The gradient accumulation factor. Defaults to 1. accelerator (Accelerator, optional): The Accelerate library accelerator. Defaults to None. Returns: The learning rate scheduler with warmup. Raises: ValueError: If scheduler_type is not "linear" or "cosine". 
""" NUM_WARMUP_STEPS = num_warmup_steps GRADIENT_ACCUMULATE_EVERY = grad_accumulate_every if accelerator is not None: accelerator.print(f"Using {scheduler_type} lr scheduler") if scheduler_type == "linear": return get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY, num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY, ) elif scheduler_type == "cosine": return get_cosine_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=NUM_WARMUP_STEPS * GRADIENT_ACCUMULATE_EVERY, num_training_steps=max_train_steps * GRADIENT_ACCUMULATE_EVERY, ) else: raise ValueError( "Invalid scheduler_type. Expected 'linear' or 'cosine', got: {}".format( scheduler_type ) ) # optimizers def decoupled_optimizer( model: torch.nn.Module, learning_rate: float, weight_decay: float, beta_1: float, beta_2: float, optimizer_type: str, use_fsdp: bool = True, accelerator: Accelerator = None, ): """ Decouples the optimizer from the training process. This function sets up the optimizer for the model by creating two groups of parameters: one for weight decay and one without weight decay. Then, it initializes the optimizer with these two groups of parameters. Args: model (Module): The model whose parameters are optimized. learning_rate (float): The learning rate for the optimizer. weight_decay (float): The weight decay for the optimizer. beta_1 (float): The exponential decay rate for the 1st moment estimates. beta_2 (float): The exponential decay rate for the 2nd moment estimates. optimizer_type (str): The type of the optimizer. Can be 'lion', 'adamw', or 'stable_adamw'. use_fsdp (bool, optional): If True, the optimizer will work with fully sharded data parallelism. Defaults to True. accelerator (Accelerator, optional): The accelerator from HuggingFace's Accelerate library. Defaults to None. Returns: Optimizer: The initialized optimizer. Raises: ValueError: If the optimizer type is not 'lion', 'adamw' or 'stable_adamw'. """ accelerator.print(f"Using {optimizer_type} optimizer") # Create an empty dictionary called param_dict to store the model's named parameters. param_dict = {} # Iterate over the model's named parameters and populate the param_dict with key-value pairs. for param_name, param in model.named_parameters(): param_dict[param_name] = param # Separate the model's named modules into two groups: decay and no_decay. # Create an empty list to store the names of the LayerNorm and Embedding layer weights with no weight decay. no_decay = [] if use_fsdp: exclude_module = "_fsdp_wrapped_module.token_emb" else: exclude_module = "token_emb" # Iterate through the named modules of the model. for module_name, module in model.named_modules(): # Check if the current module is an instance of any of the desired types (LayerNorm or torch.nn.Embedding). for ndim in [LayerNorm, torch.nn.Embedding]: if isinstance(module, ndim): # If torch.nn.Embedding, append its name with a ".weight" suffix to the no_decay list. if module_name == exclude_module: no_decay.append(f"{module_name}.weight") else: # If the module is an instance of LayerNorm no_decay.append(f"{module_name}.gamma") # Exit the inner loop since the desired module has been found. break # Create an empty list to store the names of the Linear layer weights with weight decay. decay = [] # Iterate through the named modules of the model. for module_name, module in model.named_modules(): # Check if the current module is an instance of the desired type (torch.nn.Linear). 
for ndim in [torch.nn.Linear]: if isinstance(module, ndim): # If the module is an instance of torch.nn.Linear, append its name with a ".weight" suffix to the decay list. decay.append(f"{module_name}.weight") # Exit the inner loop since the desired module has been found. break # Create two separate lists of model parameters: decay_param and no_decay_param. # The decay_param list contains the parameters that should have weight decay applied. # The no_decay_param list contains the parameters that should not have weight decay applied, excluding the 'to_logits.weight' parameter. # Create an empty list called decay_param to store the parameters with weight decay. decay_param = [] if use_fsdp: exclude_param = "_fsdp_wrapped_module.to_logits.weight" else: exclude_param = "to_logits.weight" # Iterate over the decay list, which contains the names of the parameters with weight decay. for param in decay: # Check if the current parameter is not 'to_logits.weight'. # Append the corresponding parameter from param_dict to the decay_param list. if param != exclude_param: decay_param.append(param_dict[param]) # Create an empty list called no_decay_param to store the parameters without weight decay. no_decay_param = [] # Iterate over the no_decay list, which contains the names of the parameters without weight decay. for param in no_decay: try: # Append the corresponding parameter from param_dict to the no_decay_param list. no_decay_param.append(param_dict[param]) except KeyError: # print(f"Parameter {param_name} does not exist in the model") pass # Create a list called grouped_params that contains two dictionaries. # The first dictionary has the decay_param list and the corresponding weight_decay value. # The second dictionary has the no_decay_param list and a weight_decay value of 0.0. grouped_params = [ {"params": decay_param, "weight_decay": weight_decay}, {"params": no_decay_param, "weight_decay": 0.0}, ] # Create a variable called optimizer that stores an instance of the optimizer. if optimizer_type == "lion": optimizer = Lion(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),) elif optimizer_type == "adamw": optimizer = AdamW(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),) elif optimizer_type == "deepspeed": optimizer = DummyOptim(grouped_params, lr=learning_rate, betas=(beta_1, beta_2),) elif optimizer_type == "stable_adamw": optimizer = StableAdamWUnfused( grouped_params, lr=learning_rate, betas=(beta_1, beta_2), ) # elif optimizer_type=="Adam8bit": # optimizer = bnb.optim.Adam8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2)) # elif optimizer_type=="Lion8Bit": # optimizer = bnb.optim.Lion8bit(grouped_params, lr=learning_rate, betas=(beta_1, beta_2)) else: raise ValueError( "Invalid optimizer_type. Expected 'lion', 'adamw', 'deepspeed' or 'stable_adamw', got: {}".format( optimizer_type ) ) # Return the optimizer. return optimizer # dataloaders def build_dataloaders(): """ Build data loaders for training. This function performs the following steps: 1. Load the tokenizer from the pretrained "EleutherAI/gpt-neox-20b" model. 2. Load the "openwebtext" dataset. 3. Tokenize the dataset, adding the end-of-sentence token to each text. 4. Process the tokenized dataset into chunks of a specified block size. Returns: Dataset: The processed dataset ready for training. 
""" tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b") dataset = load_dataset("openwebtext", split="train") tokenized_dataset = dataset.map( lambda example: tokenizer([t + tokenizer.eos_token for t in example["text"]]), batched=True, num_proc=CFG.NUM_CPU, remove_columns=["text"], ) block_size = CFG.SEQ_LEN # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. if total_length >= block_size: total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } return result train_dataset = tokenized_dataset.map( group_texts, batched=True, num_proc=CFG.NUM_CPU, ) return train_dataset #switch to falconwebdataset def build_pre_tokenized(): d0 = load_dataset("conceptofmind/c4_0-to-20_neox_with_eos_8k", split="train[:10]") # d1 = load_dataset("conceptofmind/c4_21-to-40_neox_with_eos_8k", split="train") # d2 = load_dataset("conceptofmind/c4_41-to-60_neox_with_eos_8k", split="train") # d3 = load_dataset("conceptofmind/c4_61-to-80_neox_with_eos_8k", split="train") # d4 = load_dataset("conceptofmind/c4_81-to-100_neox_with_eos_8k", split="train") # train_dataset = concatenate_datasets([d0, d1, d2, d3, d4]) return d0 def Train(): # accelerator timeout = InitProcessGroupKwargs(timeout=timedelta(seconds=1_000_000)) accelerator = Accelerator( gradient_accumulation_steps=CFG.GRADIENT_ACCUMULATE_EVERY, mixed_precision="fp16", log_with="wandb", kwargs_handlers=[timeout], ) state = AcceleratorState() state.deepspeed_plugin.deepspeed_config['train_micro_batch_size_per_gpu'] = CFG.BATCH_SIZE #?????? 
accelerator.init_trackers( project_name="Andromeda", config={ "batch_size": CFG.BATCH_SIZE, "gradient_accumulate_every": CFG.GRADIENT_ACCUMULATE_EVERY, "learning_rate": CFG.LEARNING_RATE, "seq_len": CFG.SEQ_LEN, }, # init_kwargs={"wandb": {"entity": CFG.ENTITY_NAME}}, ) accelerator.print(f"Total GPUS: {accelerator.num_processes}") # set seed set_seed(CFG.SEED) model = RoboCAT() print_num_params(model, accelerator) if CFG.USE_FSDP: model = fsdp( model, mp="fp16", shard_strat="SHARD_GRAD" ) if CFG.USE_ACTIVATION_CHECKPOINTING: activation_checkpointing(model, accelerator) model = accelerator.prepare(model) # dataloaders if CFG.USE_PRETOKENIZED: train_dataset = build_pre_tokenized() else: train_dataset = build_dataloaders() train_loader = DataLoader( train_dataset, batch_size=CFG.BATCH_SIZE, collate_fn=default_data_collator, ) # optimizer optim = decoupled_optimizer( model=model, learning_rate=CFG.LEARNING_RATE, weight_decay=CFG.WEIGHT_DECAY, beta_1=0.90, beta_2=0.95, optimizer_type='lion', use_fsdp=True, accelerator=accelerator ) # Determine number of training steps max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY) accelerator.print(f"Max train steps: {max_train_steps}") # lr scheduler NUM_WARMUP_STEPS = int(max_train_steps * 0.01) accelerator.print(f"Num warmup steps: {NUM_WARMUP_STEPS}") # if False: # if CFG.USE_DEEPSPEED: # lr_scheduler = DummyScheduler( # optim, # total_num_steps=max_train_steps * accelerator.num_processes, # warmup_num_steps=NUM_WARMUP_STEPS # ) # else: lr_scheduler = get_lr_scheduler_with_warmup( optimizer=optim, scheduler_type="cosine", num_warmup_steps=NUM_WARMUP_STEPS, max_train_steps=max_train_steps, grad_accumulate_every=CFG.GRADIENT_ACCUMULATE_EVERY, ) # prepare optim, train_loader, lr_scheduler = accelerator.prepare( optim, train_loader, lr_scheduler ) # checkpoint scheduler accelerator.register_for_checkpointing(lr_scheduler) # I do not know why Huggingface recommends recalculation of max_train_steps max_train_steps = math.ceil(len(train_loader) / CFG.GRADIENT_ACCUMULATE_EVERY) accelerator.print(f"Max train steps recalculated: {max_train_steps}") # Total batch size for logging total_batch_size = ( CFG.BATCH_SIZE * accelerator.num_processes * CFG.GRADIENT_ACCUMULATE_EVERY ) accelerator.print(f"Total batch size: {total_batch_size}") # resume training progress_bar = tqdm( range(max_train_steps), disable=not accelerator.is_local_main_process ) completed_steps = 0 if CFG.RESUME_FROM_CHECKPOINT: if CFG.RESUME_FROM_CHECKPOINT is not None or CFG.RESUME_FROM_CHECKPOINT != "": accelerator.print(f"Resuming from checkpoint {CFG.RESUME_FROM_CHECKPOINT}") accelerator.load_state(CFG.RESUME_FROM_CHECKPOINT) path = os.path.basename(CFG.RESUME_FROM_CHECKPOINT) training_difference = os.path.splitext(path)[0] # need to multiply `gradient_accumulation_steps` to reflect real steps resume_step = ( int(training_difference.replace("step_", "")) * CFG.GRADIENT_ACCUMULATE_EVERY ) if CFG.RESUME_FROM_CHECKPOINT and resume_step is not None: train_loader = accelerator.skip_first_batches(train_loader, resume_step) completed_steps += resume_step progress_bar.update(resume_step) # training model.train() for step, batch in enumerate(train_loader): with accelerator.accumulate(model): inputs = batch["input_ids"].to(accelerator.device) loss = model(inputs, return_loss=True) accelerator.backward(loss) accelerator.log({"loss": loss.item()}, step=step) if accelerator.sync_gradients: accelerator.clip_grad_norm_(model.parameters(), 1.0) optim.step() lr_scheduler.step() 
optim.zero_grad() if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 if isinstance(CFG.CHECKPOINTING_STEPS, int): if completed_steps % CFG.CHECKPOINTING_STEPS == 0: output_dir = f"step_{completed_steps }" if CFG.OUTPUT_DIR is not None: output_dir = os.path.join(CFG.OUTPUT_DIR, output_dir) accelerator.save_state(output_dir) if completed_steps >= max_train_steps: break #logging every CFG.LOGGING STEPS if CFG.LOGGING_STEPS > 0 and step % CFG.LOGGING_STEPS == 0: logger.info( f"Step: {completed_steps}/{max_train_steps}, Loss: {loss.item():.5f}" ) # end training # accelerator.print(f"Training Finished") accelerator.end_training() # save final model # accelerator.print(f"Saving model to {CFG.OUTPUT_DIR}") if CFG.OUTPUT_DIR is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) with accelerator.main_process_first(): accelerator.save( unwrapped_model.state_dict(), f"{CFG.OUTPUT_DIR}/final/final_model.pt" ) def main(): os.environ['MASTER_ADDR'] #'localhost' os.environ['MASTER_PORT'] #= '9994' # # [CRITICAL] Pay attention to this when scaling to multiple GPUs and clusters # # Pay attention to this, use "accelerate config" os.environ['RANK'] #= str(0) # Number of nodes (servers) os.environ['WORLD_SIZE'] # = str(torch.cuda.device_count()) dist.init_process_group(backend='nccl') #init_method="env://") Train() if __name__ == '__main__': main()
RoboCAT-master
robocat/train.py
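A hedged sketch of the weight-decay grouping that decoupled_optimizer above is aiming for, shown on a toy model: linear weights get decay, norms and embeddings do not. The toy module and the AdamW stand-in are illustrative assumptions, not part of the training script.

import torch
from torch import nn

# Toy model standing in for the wrapped RoboCAT; purely illustrative.
model = nn.Sequential(nn.Embedding(100, 32), nn.Linear(32, 32), nn.LayerNorm(32))

decay, no_decay = [], []
for module in model.modules():
    if isinstance(module, nn.Linear):
        decay.append(module.weight)        # weight decay on linear weights
    elif isinstance(module, (nn.LayerNorm, nn.Embedding)):
        no_decay.append(module.weight)     # no decay on norms and embeddings

optimizer = torch.optim.AdamW(
    [
        {"params": decay, "weight_decay": 0.1},
        {"params": no_decay, "weight_decay": 0.0},
    ],
    lr = 1e-4,
    betas = (0.9, 0.95),
)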
RoboCAT-master
robocat/utils/__init__.py
import torch # This is the unfused version of StableAdamW. It is slower than the fused version (coming). class StableAdamWUnfused(torch.optim.Optimizer): def __init__( self, params, lr=0.002, weight_decay=0.2, betas=(0.9, 0.99), eps=1e-8, clip_thresh=1.0, precision="amp_bfloat16", custom_scalar=65536, ): beta1, beta2 = betas[0], betas[1] defaults = dict(lr=lr, weight_decay=weight_decay, beta1=beta1, beta2=beta2) super(StableAdamWUnfused, self).__init__(params, defaults) self.eps = eps self.d = clip_thresh # Set precision to "custom_fp16" if you want to use a fixed loss scalar, custom_scalar, which is divided out in the update step. # If you do this, call (custom_scalar * loss).backward() instead of loss.backward(). self.precision = precision self.custom_scaler = custom_scalar for group in self.param_groups: group["step"] = 1.0 print("Using StableAdamWUnfused-v1") def __setstate__(self, state): super(StableAdamWUnfused, self).__setstate__(state) def step(self, closure=None): if closure is not None: closure() for group in self.param_groups: lr = group["lr"] weight_decay = group["weight_decay"] beta1 = group["beta1"] beta2 = group["beta2"] step = group["step"] for p in group["params"]: if p.grad is None: continue theta = p.data param_state = self.state[p] if self.precision == "custom_fp16": g = p.grad.data / self.custom_scaler if torch.any(torch.isnan(g) | torch.isinf(g)): continue else: g = p.grad.data if "exp_avg" not in param_state: v = param_state["exp_avg"] = torch.zeros_like(theta) u = param_state["exp_avg_sq"] = torch.zeros_like(theta) else: v = param_state["exp_avg"] u = param_state["exp_avg_sq"] beta1hat = beta1 * (1 - beta1 ** (step - 1)) / (1 - beta1**step) beta2hat = beta2 * (1 - beta2 ** (step - 1)) / (1 - beta2**step) v = v.mul_(beta1hat).add_(g, alpha=1.0 - beta1hat) u = u.mul_(beta2hat).addcmul_(g, g, value=1.0 - beta2hat) denominator = u.sqrt().add_(self.eps) # StableAdamW = AdamW + update clipping (https://arxiv.org/abs/1804.04235) applied tensor-wise. rms = ( torch.div( g.pow(2), torch.maximum(u, (self.eps**2) * torch.ones_like(u)) ) .mean() .sqrt() .item() ) theta = theta.mul_(1.0 - lr * weight_decay).addcdiv_( v, denominator, value=-lr * (1.0 / max(1.0, rms / self.d)) ) # save current params param_state["exp_avg"] = v param_state["exp_avg_sq"] = u group["step"] = step + 1
RoboCAT-master
robocat/utils/stabe_adam.py
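A minimal usage sketch for StableAdamWUnfused above, using a toy linear model and random data as placeholders; the import path follows the repo's file layout.

import torch
from robocat.utils.stabe_adam import StableAdamWUnfused

model = torch.nn.Linear(16, 4)
optimizer = StableAdamWUnfused(model.parameters(), lr=2e-3, weight_decay=0.2)

x = torch.randn(8, 16)
target = torch.randint(0, 4, (8,))

for _ in range(10):
    optimizer.zero_grad()
    loss = torch.nn.functional.cross_entropy(model(x), target)
    loss.backward()
    optimizer.step()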
from torch.utils.data import DataLoader
from torch.optim import AdamW
from datasets import load_dataset
from robocat import RoboCat
import torch

# Step 1: Load the dataset
dataset = load_dataset("your_dataset_name")

# Step 2: Preprocess the dataset
def preprocess(example):
    video = torch.tensor(example["video"])  # assuming a "video" key in the dataset
    instructions = example["instructions"]  # assuming an "instructions" key in the dataset
    # further preprocessing steps here...
    return video, instructions

dataset = dataset.map(preprocess)

# Step 3: Create a DataLoader
dataloader = DataLoader(dataset, batch_size=32, shuffle=True)

# Initialize the model, optimizer and loss
robo_cat = RoboCat()
optimizer = AdamW(robo_cat.model.parameters(), lr=1e-4)
criterion = torch.nn.CrossEntropyLoss()

epochs = 10  # placeholder; set for your task

# Step 4: Write the training loop
for epoch in range(epochs):
    for video, instructions in dataloader:
        # Forward pass
        logits = robo_cat.forward(video, instructions)

        # Compute the loss
        loss = criterion(logits, targets)  # ... compute the loss based on your task; "targets" must come from your dataset

        # Backpropagation
        loss.backward()

        # Update weights
        optimizer.step()

        # Zero gradients
        optimizer.zero_grad()

# Step 5: Validation loop...
RoboCAT-master
robocat/utils/training.py
Model-Infra-Template-main
example.py
Model-Infra-Template-main
model/__init__.py
class ModelAPI:
    def __init__(self):
        pass

    def forward(self):
        pass
Model-Infra-Template-main
model/app.py
Model-Infra-Template-main
model/subfolder/__init__.py
Model-Infra-Template-main
model/subfolder/main.py
""" Apply the delta weights on top of a base model. Adapted from: https://github.com/lm-sys/FastChat/blob/main/fastchat/model/apply_delta.py. """ import argparse import torch from tqdm import tqdm from transformers import AutoTokenizer, AutoModelForCausalLM def apply_delta(base_model_path, target_model_path, delta_path): print(f"Loading the base model from {base_model_path}") base = AutoModelForCausalLM.from_pretrained( base_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) print(f"Loading the delta from {delta_path}") delta = AutoModelForCausalLM.from_pretrained(delta_path, torch_dtype=torch.float16, low_cpu_mem_usage=True) delta_tokenizer = AutoTokenizer.from_pretrained(delta_path, use_fast=False) DEFAULT_PAD_TOKEN = "[PAD]" base_tokenizer = AutoTokenizer.from_pretrained(base_model_path, use_fast=False) num_new_tokens = base_tokenizer.add_special_tokens(dict(pad_token=DEFAULT_PAD_TOKEN)) base.resize_token_embeddings(len(base_tokenizer)) input_embeddings = base.get_input_embeddings().weight.data output_embeddings = base.get_output_embeddings().weight.data input_embeddings[-num_new_tokens:] = 0 output_embeddings[-num_new_tokens:] = 0 print("Applying the delta") for name, param in tqdm(base.state_dict().items(), desc="Applying delta"): assert name in delta.state_dict() param.data += delta.state_dict()[name] print(f"Saving the target model to {target_model_path}") base.save_pretrained(target_model_path) delta_tokenizer.save_pretrained(target_model_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--base-model-path", type=str, required=True) parser.add_argument("--target-model-path", type=str, required=True) parser.add_argument("--delta-path", type=str, required=True) args = parser.parse_args() apply_delta(args.base_model_path, args.target_model_path, args.delta_path)
MovieChat-main
apply_delta.py
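A hedged example of invoking apply_delta from Python rather than through the CLI shown above; the module name follows the file path, and all paths are placeholders.

from apply_delta import apply_delta

# Placeholder paths for illustration only.
apply_delta(
    base_model_path="path/to/base-model",
    target_model_path="path/to/output-model",
    delta_path="path/to/delta-weights",
)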
""" Adapted from: https://github.com/Vision-CAIR/MiniGPT-4/blob/main/demo.py """ import argparse import os import random import numpy as np import torch import torch.backends.cudnn as cudnn from MovieChat.common.config import Config from MovieChat.common.dist_utils import get_rank from MovieChat.common.registry import registry from MovieChat.conversation.conversation_video import Chat, Conversation, default_conversation,SeparatorStyle import decord import cv2 import time import subprocess from moviepy.editor import VideoFileClip from decord import VideoReader decord.bridge.set_bridge('torch') #%% # imports modules for registration from MovieChat.datasets.builders import * from MovieChat.models import * from MovieChat.processors import * from MovieChat.runners import * from MovieChat.tasks import * from moviepy.editor import* import random as rnd from transformers import StoppingCriteria, StoppingCriteriaList from PIL import Image import GPUtil import gradio as gr MAX_INT = 8 N_SAMPLES = 32 SHORT_MEMORY_Length = 10 #%% def parse_args(): parser = argparse.ArgumentParser(description="Demo") parser.add_argument("--cfg-path", required=True, help="path to configuration file.") parser.add_argument("--gpu-id", type=int, default=0, help="specify the gpu to load the model.") parser.add_argument("--num-beams", type=int, default=1) parser.add_argument("--temperature", type=float, default=1.0) parser.add_argument("--text-query", required=True, help="question the video") parser.add_argument("--video-path", required=True, help="path to video file.") parser.add_argument("--fragment-video-path", required=True, help="path to video fragment file.") parser.add_argument("--cur-sec", type=int, default=2, help="current minute") parser.add_argument("--cur-min", type=int, default=15, help="current second") parser.add_argument("--middle-video", type=bool, default=False, help="current second") parser.add_argument( "--options", nargs="+", help="override some settings in the used config, the key-value pair " "in xxx=yyy format will be merged into config file (deprecate), " "change to --cfg-options instead.", ) args = parser.parse_args() return args def setup_seeds(config_seed): seed = config_seed + get_rank() random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) cudnn.benchmark = False cudnn.deterministic = True class StoppingCriteriaSub(StoppingCriteria): def __init__(self, stops=[], encounters=1): super().__init__() self.stops = stops def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor): for stop in self.stops: if torch.all((stop == input_ids[0][-len(stop):])).item(): return True return False def video_duration(filename): result = subprocess.run(["ffprobe", "-v", "error", "-show_entries", "format=duration", "-of", "default=noprint_wrappers=1:nokey=1", filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) return float(result.stdout) def capture_video(video_path, fragment_video_path, per_video_length, n_stage): start_time = n_stage * per_video_length end_time = (n_stage+1) * per_video_length video =CompositeVideoClip([VideoFileClip(video_path).subclip(start_time,end_time)]) video.write_videofile(fragment_video_path) def load_video(video_path, n_frms=MAX_INT, height=-1, width=-1, sampling="uniform", return_msg = False): decord.bridge.set_bridge("torch") vr = VideoReader(uri=video_path, height=height, width=width) vlen = len(vr) start, end = 0, vlen n_frms = min(n_frms, vlen) if sampling == "uniform": indices = np.arange(start, end, vlen / n_frms).astype(int).tolist() elif sampling 
== "headtail": indices_h = sorted(rnd.sample(range(vlen // 2), n_frms // 2)) indices_t = sorted(rnd.sample(range(vlen // 2, vlen), n_frms // 2)) indices = indices_h + indices_t else: raise NotImplementedError # get_batch -> T, H, W, C temp_frms = vr.get_batch(indices) tensor_frms = torch.from_numpy(temp_frms) if type(temp_frms) is not torch.Tensor else temp_frms frms = tensor_frms.permute(3, 0, 1, 2).float() # (C, T, H, W) if not return_msg: return frms fps = float(vr.get_avg_fps()) sec = ", ".join([str(round(f / fps, 1)) for f in indices]) # " " should be added in the start and end msg = f"The video contains {len(indices)} frames sampled at {sec} seconds. " return frms, msg def parse_video_fragment(video_path, video_length, n_stage = 0, n_samples = N_SAMPLES): decord.bridge.set_bridge("torch") per_video_length = video_length / n_samples # cut video from per_video_length(n_stage-1, n_stage) fragment_video_path = "src/video_fragment/output.mp4" capture_video(video_path, fragment_video_path, per_video_length, n_stage) return fragment_video_path class Chat: def __init__(self, model, vis_processor, device='cuda:0'): self.device = device self.output_text = " " self.model = model self.vis_processor = vis_processor self.image_vis_processor = Blip2ImageEvalProcessor() stop_words_ids = [torch.tensor([835]).to(self.device), torch.tensor([2277, 29937]).to(self.device)] # '###' can be encoded in two different ways. self.stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)]) def get_context_emb(self, input_text, msg, img_list): prompt_1 = "You are able to understand the visual content that the user provides.Follow the instructions carefully and explain your answers in detail.###Human: <Video><ImageHere></Video>" prompt_2 = input_text prompt_3 = "###Assistant:" prompt = prompt_1 + " " + prompt_2 + prompt_3 prompt_segs = prompt.split('<ImageHere>') assert len(prompt_segs) == len(img_list) + 1, "Unmatched numbers of image placeholders and images." seg_tokens = [ self.model.llama_tokenizer( seg, return_tensors="pt", add_special_tokens=i == 0).to(self.device).input_ids # only add bos to the first seg for i, seg in enumerate(prompt_segs) ] seg_embs = [self.model.llama_model.model.embed_tokens(seg_t) for seg_t in seg_tokens] mixed_embs = [emb for pair in zip(seg_embs[:-1], img_list) for emb in pair] + [seg_embs[-1]] mixed_embs = torch.cat(mixed_embs, dim=1) return mixed_embs def gradio_answer(self,chatbot, chat_state): # chatbot[-1][1] = llm_message # print(chat_state.get_prompt()) print(chat_state) import pdb;pdb.set_trace() return gr.update(value=self.output_text, interactive=False),None def answer(self, img_list, input_text, msg, max_new_tokens=300, num_beams=1, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1, temperature=1.0, max_length=2000): embs = self.get_context_emb(input_text, msg, img_list) current_max_len = embs.shape[1] + max_new_tokens if current_max_len - max_length > 0: print('Warning: The number of tokens in current conversation exceeds the max length. 
' 'The model will not see the contexts outside the range.') begin_idx = max(0, current_max_len - max_length) embs = embs[:, begin_idx:] outputs = self.model.llama_model.generate( inputs_embeds=embs, max_new_tokens=max_new_tokens, stopping_criteria=self.stopping_criteria, num_beams=num_beams, do_sample=True, min_length=min_length, top_p=top_p, repetition_penalty=repetition_penalty, length_penalty=length_penalty, temperature=temperature, ) output_token = outputs[0] if output_token[0] == 0: # the model might output a unknow token <unk> at the beginning. remove it output_token = output_token[1:] if output_token[0] == 1: # some users find that there is a start token <s> at the beginning. remove it output_token = output_token[1:] output_text = self.model.llama_tokenizer.decode(output_token, add_special_tokens=False) output_text = output_text.split('###')[0] # remove the stop sign '###' output_text = output_text.split('Assistant:')[-1].strip() return output_text, output_token.cpu().numpy() def cal_frame(self, video_length, cur_min, cur_sec, middle_video): per_frag_second = video_length / N_SAMPLES if middle_video: cur_seconds = cur_min * 60 + cur_sec num_frames = int(cur_seconds / per_frag_second) per_frame_second = per_frag_second/SHORT_MEMORY_Length cur_frame = int((cur_seconds-per_frag_second*num_frames)/per_frame_second) return num_frames, cur_frame else: cur_frame = 0 num_frames = int(video_length / per_frag_second) return num_frames, cur_frame def upload_video_without_audio(self, video_path, fragment_video_path, cur_min, cur_sec, cur_image, img_list, middle_video): msg = "" if isinstance(video_path, str): # is a video path ext = os.path.splitext(video_path)[-1].lower() print(video_path) video_length = video_duration(video_path) num_frames, cur_frame = self.cal_frame(video_length, cur_min, cur_sec, middle_video) if num_frames == 0: video_fragment = parse_video_fragment(video_path=video_path, video_length=video_length, n_stage=0, n_samples= N_SAMPLES) video_fragment, msg = load_video( video_path=fragment_video_path, n_frms=MAX_INT, height=224, width=224, sampling ="uniform", return_msg = True ) video_fragment = self.vis_processor.transform(video_fragment) video_fragment = video_fragment.unsqueeze(0).to(self.device) self.model.encode_short_memory_frame(video_fragment, cur_frame) else: for i in range(num_frames): print(i) video_fragment = parse_video_fragment(video_path=video_path, video_length=video_length, n_stage=i, n_samples= N_SAMPLES) video_fragment, msg = load_video( video_path=fragment_video_path, n_frms=MAX_INT, height=224, width=224, sampling ="uniform", return_msg = True ) video_fragment = self.vis_processor.transform(video_fragment) video_fragment = video_fragment.unsqueeze(0).to(self.device) if middle_video: self.model.encode_short_memory_frame(video_fragment, cur_frame) else: self.model.encode_short_memory_frame(video_fragment) else: raise NotImplementedError video_emb, _ = self.model.encode_long_video(cur_image, middle_video) img_list.append(video_emb) return msg def gener_infer(self, video_path, text_input, num_beams, temperature, libraries, minute, second): print("here") fragment_video_path = "src/video_fragment/output.mp4" cur_min = minute if minute is not None else int(0) cur_sec = second if second is not None else int(0) if libraries is not None: cap = cv2.VideoCapture(video_path) if libraries[0] == "Breakpoint mode": fps_video = cap.get(cv2.CAP_PROP_FPS) self.model.middle_video = True self.model.question_minute = minute self.model.question_second = second cur_fps = 
fps_video * (60*minute + second) else: cur_fps = 0 self.model.middle_video = False cap.set(cv2.CAP_PROP_POS_FRAMES, cur_fps) ret, frame = cap.read() temp_frame_path = 'src/output_frame/snapshot.jpg' cv2.imwrite(temp_frame_path, frame) raw_image = Image.open(temp_frame_path).convert('RGB') image = self.image_vis_processor(raw_image).unsqueeze(0).unsqueeze(2).to(self.device) # [1,3,1,224,224] cur_image = self.model.encode_image(image) img_list = [] msg = self.upload_video_without_audio( video_path=video_path, fragment_video_path=fragment_video_path, cur_min=cur_min, cur_sec=cur_sec, cur_image = cur_image, img_list=img_list, middle_video = self.model.middle_video, ) llm_message = self.answer(img_list=img_list, input_text=text_input, msg = msg, num_beams=num_beams, temperature=temperature, max_new_tokens=300, max_length=2000)[0] self.output_text = llm_message print(self.output_text) if __name__ =='__main__': config_seed = 42 setup_seeds(config_seed) print('Initializing Chat') args = parse_args() cfg = Config(args) model_config = cfg.model_cfg model_config.device_8bit = args.gpu_id model_cls = registry.get_model_class(model_config.arch) model = model_cls.from_config(model_config).to('cuda:{}'.format(args.gpu_id)) vis_processor_cfg = cfg.datasets_cfg.webvid.vis_processor.train vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg) chat = Chat(model, vis_processor, device='cuda:{}'.format(args.gpu_id)) print('Initialization Finished') video_path = args.video_path fragment_video_path = args.fragment_video_path cur_min = args.cur_min cur_sec = args.cur_sec middle_video = args.middle_video cap = cv2.VideoCapture(video_path) fps_video = cap.get(cv2.CAP_PROP_FPS) cur_fps = fps_video * (60*cur_min + cur_sec) cap = cv2.VideoCapture(video_path) cap.set(cv2.CAP_PROP_POS_FRAMES, cur_fps) ret, frame = cap.read() temp_frame_path = 'src/output_frame/snapshot.jpg' cv2.imwrite(temp_frame_path, frame) raw_image = Image.open(temp_frame_path).convert('RGB') image = chat.image_vis_processor(raw_image).unsqueeze(0).unsqueeze(2).to(chat.device) # [1,3,1,224,224] cur_image = chat.model.encode_image(image) if middle_video == 1: middle_video = True else: middle_video = False img_list = [] middle_video = True msg = chat.upload_video_without_audio( video_path=video_path, fragment_video_path=fragment_video_path, cur_min=cur_min, cur_sec=cur_sec, cur_image = cur_image, img_list=img_list, middle_video = middle_video, ) text_input = args.text_query num_beams = args.num_beams temperature = args.temperature llm_message = chat.answer(img_list=img_list, input_text=text_input, msg = msg, num_beams=num_beams, temperature=temperature, max_new_tokens=300, max_length=2000)[0] print(llm_message)
MovieChat-main
inference.py
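# --- Illustrative sketch (not part of the MovieChat source). ---
# A self-contained rework of the arithmetic in Chat.cal_frame above: the video
# is split into N_SAMPLES equal fragments, and in breakpoint mode the question
# timestamp (minute, second) is mapped to a fragment index plus a frame index
# inside that fragment. N_SAMPLES and SHORT_MEMORY_LENGTH are assumed values
# chosen only for this example.

N_SAMPLES = 64            # assumed number of equal-length video fragments
SHORT_MEMORY_LENGTH = 18  # assumed number of frames kept per fragment


def locate_breakpoint(video_length_s: float, minute: int, second: int):
    """Return (fragment_index, frame_index_within_fragment) for a timestamp."""
    per_fragment_s = video_length_s / N_SAMPLES
    cur_seconds = minute * 60 + second
    fragment_idx = int(cur_seconds / per_fragment_s)
    per_frame_s = per_fragment_s / SHORT_MEMORY_LENGTH
    frame_idx = int((cur_seconds - per_fragment_s * fragment_idx) / per_frame_s)
    return fragment_idx, frame_idx


print(locate_breakpoint(7200.0, 37, 30))  # a 2-hour film, question at 00:37:30 -> (20, 0)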
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os import sys from omegaconf import OmegaConf from MovieChat.common.registry import registry from MovieChat.datasets.builders import * from MovieChat.models import * from MovieChat.processors import * from MovieChat.tasks import * root_dir = os.path.dirname(os.path.abspath(__file__)) default_cfg = OmegaConf.load(os.path.join(root_dir, "configs/default.yaml")) registry.register_path("library_root", root_dir) repo_root = os.path.join(root_dir, "..") registry.register_path("repo_root", repo_root) cache_root = os.path.join(repo_root, default_cfg.env.cache_root) registry.register_path("cache_root", cache_root) registry.register("MAX_INT", sys.maxsize) registry.register("SPLIT_NAMES", ["train", "val", "test"])
MovieChat-main
MovieChat/__init__.py
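# --- Illustrative sketch (not the actual MovieChat.common.registry). ---
# The package __init__ above registers filesystem paths and constants on a
# process-wide registry. This minimal dict-backed stand-in shows the
# register_path / get_path / register / get contract that the rest of the
# code base relies on; MiniRegistry and mini_registry are invented names.

class MiniRegistry:
    def __init__(self):
        self._paths = {}
        self._state = {}

    def register_path(self, name, path):
        assert isinstance(path, str), "All paths must be str."
        self._paths[name] = path

    def get_path(self, name):
        return self._paths[name]

    def register(self, name, obj):
        self._state[name] = obj

    def get(self, name, default=None):
        return self._state.get(name, default)


mini_registry = MiniRegistry()
mini_registry.register_path("cache_root", "/tmp/moviechat_cache")
mini_registry.register("MAX_INT", 2**63 - 1)
print(mini_registry.get_path("cache_root"), mini_registry.get("MAX_INT"))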
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import os import torch import torch.distributed as dist from MovieChat.common.dist_utils import get_rank, get_world_size, is_main_process, is_dist_avail_and_initialized from MovieChat.common.logger import MetricLogger, SmoothedValue from MovieChat.common.registry import registry from MovieChat.datasets.data_utils import prepare_sample class BaseTask: def __init__(self, **kwargs): super().__init__() self.inst_id_key = "instance_id" @classmethod def setup_task(cls, **kwargs): return cls() def build_model(self, cfg): model_config = cfg.model_cfg model_cls = registry.get_model_class(model_config.arch) return model_cls.from_config(model_config) def build_datasets(self, cfg): """ Build a dictionary of datasets, keyed by split 'train', 'valid', 'test'. Download dataset and annotations automatically if not exist. """ datasets = dict() datasets_config = cfg.datasets_cfg assert len(datasets_config) > 0, "At least one dataset has to be specified." for name in datasets_config: dataset_config = datasets_config[name] builder = registry.get_builder_class(name)(dataset_config) dataset = builder.build_datasets() dataset['train'].name = name if 'sample_ratio' in dataset_config: dataset['train'].sample_ratio = dataset_config.sample_ratio datasets[name] = dataset return datasets def train_step(self, model, samples): loss = model(samples)["loss"] return loss def valid_step(self, model, samples): raise NotImplementedError def before_evaluation(self, model, dataset, **kwargs): model.before_evaluation(dataset=dataset, task_type=type(self)) def after_evaluation(self, **kwargs): pass def inference_step(self): raise NotImplementedError def evaluation(self, model, data_loader, cuda_enabled=True): metric_logger = MetricLogger(delimiter=" ") header = "Evaluation" # TODO make it configurable print_freq = 10 results = [] for samples in metric_logger.log_every(data_loader, print_freq, header): samples = prepare_sample(samples, cuda_enabled=cuda_enabled) eval_output = self.valid_step(model=model, samples=samples) results.extend(eval_output) if is_dist_avail_and_initialized(): dist.barrier() return results def train_epoch( self, epoch, model, data_loader, optimizer, lr_scheduler, scaler=None, cuda_enabled=False, log_freq=50, accum_grad_iters=1, ): return self._train_inner_loop( epoch=epoch, iters_per_epoch=lr_scheduler.iters_per_epoch, model=model, data_loader=data_loader, optimizer=optimizer, scaler=scaler, lr_scheduler=lr_scheduler, log_freq=log_freq, cuda_enabled=cuda_enabled, accum_grad_iters=accum_grad_iters, ) def train_iters( self, epoch, start_iters, iters_per_inner_epoch, model, data_loader, optimizer, lr_scheduler, scaler=None, cuda_enabled=False, log_freq=50, accum_grad_iters=1, ): return self._train_inner_loop( epoch=epoch, start_iters=start_iters, iters_per_epoch=iters_per_inner_epoch, model=model, data_loader=data_loader, optimizer=optimizer, scaler=scaler, lr_scheduler=lr_scheduler, log_freq=log_freq, cuda_enabled=cuda_enabled, accum_grad_iters=accum_grad_iters, ) def _train_inner_loop( self, epoch, iters_per_epoch, model, data_loader, optimizer, lr_scheduler, scaler=None, start_iters=None, log_freq=50, cuda_enabled=False, accum_grad_iters=1, ): """ An inner training loop compatible with both epoch-based and iter-based training. 
When using epoch-based, training stops after one epoch; when using iter-based, training stops after #iters_per_epoch iterations. """ use_amp = scaler is not None if not hasattr(data_loader, "__next__"): # convert to iterator if not already data_loader = iter(data_loader) metric_logger = MetricLogger(delimiter=" ") metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}")) metric_logger.add_meter("loss", SmoothedValue(window_size=1, fmt="{value:.4f}")) # if iter-based runner, schedule lr based on inner epoch. logging.info( "Start training epoch {}, {} iters per inner epoch.".format( epoch, iters_per_epoch ) ) header = "Train: data epoch: [{}]".format(epoch) if start_iters is None: # epoch-based runner inner_epoch = epoch else: # In iter-based runner, we schedule the learning rate based on iterations. inner_epoch = start_iters // iters_per_epoch header = header + "; inner epoch [{}]".format(inner_epoch) for i in metric_logger.log_every(range(iters_per_epoch), log_freq, header): # if using iter-based runner, we stop after iters_per_epoch iterations. if i >= iters_per_epoch: break samples = next(data_loader) samples = prepare_sample(samples, cuda_enabled=cuda_enabled) samples.update( { "epoch": inner_epoch, "num_iters_per_epoch": iters_per_epoch, "iters": i, } ) lr_scheduler.step(cur_epoch=inner_epoch, cur_step=i) with torch.cuda.amp.autocast(enabled=use_amp): loss = self.train_step(model=model, samples=samples) # after_train_step() if use_amp: scaler.scale(loss).backward() else: loss.backward() # update gradients every accum_grad_iters iterations if (i + 1) % accum_grad_iters == 0: if use_amp: scaler.step(optimizer) scaler.update() else: optimizer.step() optimizer.zero_grad() metric_logger.update(loss=loss.item()) metric_logger.update(lr=optimizer.param_groups[0]["lr"]) # after train_epoch() # gather the stats from all processes metric_logger.synchronize_between_processes() logging.info("Averaged stats: " + str(metric_logger.global_avg())) return { k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items() } @staticmethod def save_result(result, result_dir, filename, remove_duplicate=""): import json result_file = os.path.join( result_dir, "%s_rank%d.json" % (filename, get_rank()) ) final_result_file = os.path.join(result_dir, "%s.json" % filename) json.dump(result, open(result_file, "w")) if is_dist_avail_and_initialized(): dist.barrier() if is_main_process(): logging.warning("rank %d starts merging results." % get_rank()) # combine results from all processes result = [] for rank in range(get_world_size()): result_file = os.path.join( result_dir, "%s_rank%d.json" % (filename, rank) ) res = json.load(open(result_file, "r")) result += res if remove_duplicate: result_new = [] id_list = [] for res in result: if res[remove_duplicate] not in id_list: id_list.append(res[remove_duplicate]) result_new.append(res) result = result_new json.dump(result, open(final_result_file, "w")) print("result file saved to %s" % final_result_file) return final_result_file
MovieChat-main
MovieChat/tasks/base_task.py
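# --- Illustrative sketch (not part of the MovieChat source). ---
# BaseTask._train_inner_loop above backpropagates every iteration but only
# steps the optimizer (and clears gradients) every `accum_grad_iters`
# iterations. The toy model and data below are placeholders for the example;
# dividing the loss by accum_grad_iters is a common refinement shown here,
# whereas the loop above accumulates the unscaled loss.

import torch
import torch.nn as nn

model = nn.Linear(16, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)
accum_grad_iters = 4

for i in range(16):
    x, y = torch.randn(8, 16), torch.randn(8, 1)
    loss = nn.functional.mse_loss(model(x), y) / accum_grad_iters
    loss.backward()  # gradients accumulate across iterations
    if (i + 1) % accum_grad_iters == 0:
        optimizer.step()
        optimizer.zero_grad()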
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from MovieChat.common.registry import registry from MovieChat.tasks.base_task import BaseTask from MovieChat.tasks.image_text_pretrain import ImageTextPretrainTask from MovieChat.tasks.video_text_pretrain import VideoTextPretrainTask def setup_task(cfg): assert "task" in cfg.run_cfg, "Task name must be provided." task_name = cfg.run_cfg.task task = registry.get_task_class(task_name).setup_task(cfg=cfg) assert task is not None, "Task {} not properly registered.".format(task_name) return task __all__ = [ "BaseTask", "ImageTextPretrainTask", "VideoTextPretrainTask" ]
MovieChat-main
MovieChat/tasks/__init__.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from MovieChat.common.registry import registry from MovieChat.tasks.base_task import BaseTask @registry.register_task("video_text_pretrain") class VideoTextPretrainTask(BaseTask): def __init__(self): super().__init__() def evaluation(self, model, data_loader, cuda_enabled=True): pass
MovieChat-main
MovieChat/tasks/video_text_pretrain.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from MovieChat.common.registry import registry from MovieChat.tasks.base_task import BaseTask @registry.register_task("image_text_pretrain") class ImageTextPretrainTask(BaseTask): def __init__(self): super().__init__() def evaluation(self, model, data_loader, cuda_enabled=True): pass
MovieChat-main
MovieChat/tasks/image_text_pretrain.py
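# --- Illustrative sketch (not the actual MovieChat registry). ---
# Both pretraining tasks above are trivial BaseTask subclasses whose only job
# is to be discoverable by name through @registry.register_task, which
# setup_task then resolves. This minimal decorator-based registry shows that
# lookup pattern; MiniTask, mini_task_registry and the class names here are
# invented for the example.

mini_task_registry = {}


def register_task(name):
    def wrap(cls):
        mini_task_registry[name] = cls
        return cls
    return wrap


class MiniTask:
    @classmethod
    def setup_task(cls, **kwargs):
        return cls()


@register_task("video_text_pretrain")
class MiniVideoTextPretrainTask(MiniTask):
    pass


task = mini_task_registry["video_text_pretrain"].setup_task()
print(type(task).__name__)  # MiniVideoTextPretrainTask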
MovieChat-main
MovieChat/datasets/__init__.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import gzip import logging import os import random as rnd import tarfile import zipfile import random from typing import List from tqdm import tqdm import decord from decord import VideoReader import webdataset as wds import numpy as np import torch from torch.utils.data.dataset import IterableDataset from MovieChat.common.registry import registry from MovieChat.datasets.datasets.base_dataset import ConcatDataset decord.bridge.set_bridge("torch") MAX_INT = registry.get("MAX_INT") class ChainDataset(wds.DataPipeline): r"""Dataset for chaining multiple :class:`DataPipeline` s. This class is useful to assemble different existing dataset streams. The chaining operation is done on-the-fly, so concatenating large-scale datasets with this class will be efficient. Args: datasets (iterable of IterableDataset): datasets to be chained together """ def __init__(self, datasets: List[wds.DataPipeline]) -> None: super().__init__() self.datasets = datasets self.prob = [] self.names = [] for dataset in self.datasets: if hasattr(dataset, 'name'): self.names.append(dataset.name) else: self.names.append('Unknown') if hasattr(dataset, 'sample_ratio'): self.prob.append(dataset.sample_ratio) else: self.prob.append(1) logging.info("One of the datapipeline doesn't define ratio and set to 1 automatically.") def __iter__(self): datastreams = [iter(dataset) for dataset in self.datasets] while True: select_datastream = random.choices(datastreams, weights=self.prob, k=1)[0] yield next(select_datastream) def apply_to_sample(f, sample): if len(sample) == 0: return {} def _apply(x): if torch.is_tensor(x): return f(x) elif isinstance(x, dict): return {key: _apply(value) for key, value in x.items()} elif isinstance(x, list): return [_apply(x) for x in x] else: return x return _apply(sample) def move_to_cuda(sample): def _move_to_cuda(tensor): return tensor.cuda() return apply_to_sample(_move_to_cuda, sample) def prepare_sample(samples, cuda_enabled=True): if cuda_enabled: samples = move_to_cuda(samples) # TODO fp16 support return samples def reorg_datasets_by_split(datasets): """ Organizes datasets by split. Args: datasets: dict of torch.utils.data.Dataset objects by name. Returns: Dict of datasets by split {split_name: List[Datasets]}. """ # if len(datasets) == 1: # return datasets[list(datasets.keys())[0]] # else: reorg_datasets = dict() # reorganize by split for _, dataset in datasets.items(): for split_name, dataset_split in dataset.items(): if split_name not in reorg_datasets: reorg_datasets[split_name] = [dataset_split] else: reorg_datasets[split_name].append(dataset_split) return reorg_datasets def concat_datasets(datasets): """ Concatenates multiple datasets into a single dataset. It supports may-style datasets and DataPipeline from WebDataset. Currently, does not support generic IterableDataset because it requires creating separate samplers. Now only supports conctenating training datasets and assuming validation and testing have only a single dataset. This is because metrics should not be computed on the concatenated datasets. Args: datasets: dict of torch.utils.data.Dataset objects by split. Returns: Dict of concatenated datasets by split, "train" is the concatenation of multiple datasets, "val" and "test" remain the same. 
If the input training datasets contain both map-style and DataPipeline datasets, returns a tuple, where the first element is a concatenated map-style dataset and the second element is a chained DataPipeline dataset. """ # concatenate datasets in the same split for split_name in datasets: if split_name != "train": assert ( len(datasets[split_name]) == 1 ), "Do not support multiple {} datasets.".format(split_name) datasets[split_name] = datasets[split_name][0] else: iterable_datasets, map_datasets = [], [] for dataset in datasets[split_name]: if isinstance(dataset, wds.DataPipeline): logging.info( "Dataset {} is IterableDataset, can't be concatenated.".format( dataset ) ) iterable_datasets.append(dataset) elif isinstance(dataset, IterableDataset): raise NotImplementedError( "Do not support concatenation of generic IterableDataset." ) else: map_datasets.append(dataset) # if len(iterable_datasets) > 0: # concatenate map-style datasets and iterable-style datasets separately if len(iterable_datasets) > 1: chained_datasets = ( ChainDataset(iterable_datasets) ) elif len(iterable_datasets) == 1: chained_datasets = iterable_datasets[0] else: chained_datasets = None concat_datasets = ( ConcatDataset(map_datasets) if len(map_datasets) > 0 else None ) train_datasets = concat_datasets, chained_datasets train_datasets = tuple([x for x in train_datasets if x is not None]) train_datasets = ( train_datasets[0] if len(train_datasets) == 1 else train_datasets ) datasets[split_name] = train_datasets return datasets
MovieChat-main
MovieChat/datasets/data_utils.py
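# --- Illustrative sketch (not part of the MovieChat source). ---
# ChainDataset above interleaves several data pipelines by repeatedly picking
# one stream with probability proportional to its sample_ratio and yielding
# its next item. The same idea with plain Python iterators; itertools.cycle
# stands in for infinite webdataset pipelines.

import itertools
import random


def chain_streams(streams, weights):
    iters = [iter(s) for s in streams]
    while True:
        chosen = random.choices(iters, weights=weights, k=1)[0]
        yield next(chosen)


webvid_like = itertools.cycle(["webvid_sample"])
cc_like = itertools.cycle(["cc_sbu_sample"])

mixed = chain_streams([webvid_like, cc_like], weights=[3, 1])
print([next(mixed) for _ in range(6)])  # roughly a 3:1 mix of the two streams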
""" This file is from Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import os import shutil import warnings from omegaconf import OmegaConf import torch.distributed as dist from torchvision.datasets.utils import download_url import MovieChat.common.utils as utils from MovieChat.common.dist_utils import is_dist_avail_and_initialized, is_main_process from MovieChat.common.registry import registry from MovieChat.processors.base_processor import BaseProcessor class BaseDatasetBuilder: train_dataset_cls, eval_dataset_cls = None, None def __init__(self, cfg=None): super().__init__() if cfg is None: # help to create datasets from default config. self.config = load_dataset_config(self.default_config_path()) elif isinstance(cfg, str): self.config = load_dataset_config(cfg) else: # when called from task.build_dataset() self.config = cfg self.data_type = self.config.data_type self.vis_processors = {"train": BaseProcessor(), "eval": BaseProcessor()} self.text_processors = {"train": BaseProcessor(), "eval": BaseProcessor()} def build_datasets(self): # download, split, etc... # only called on 1 GPU/TPU in distributed if is_main_process(): self._download_data() if is_dist_avail_and_initialized(): dist.barrier() # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") datasets = self.build() # dataset['train'/'val'/'test'] return datasets def build_processors(self): vis_proc_cfg = self.config.get("vis_processor") txt_proc_cfg = self.config.get("text_processor") if vis_proc_cfg is not None: vis_train_cfg = vis_proc_cfg.get("train") vis_eval_cfg = vis_proc_cfg.get("eval") self.vis_processors["train"] = self._build_proc_from_cfg(vis_train_cfg) self.vis_processors["eval"] = self._build_proc_from_cfg(vis_eval_cfg) if txt_proc_cfg is not None: txt_train_cfg = txt_proc_cfg.get("train") txt_eval_cfg = txt_proc_cfg.get("eval") self.text_processors["train"] = self._build_proc_from_cfg(txt_train_cfg) self.text_processors["eval"] = self._build_proc_from_cfg(txt_eval_cfg) @staticmethod def _build_proc_from_cfg(cfg): return ( registry.get_processor_class(cfg.name).from_config(cfg) if cfg is not None else None ) @classmethod def default_config_path(cls, type="default"): return utils.get_abs_path(cls.DATASET_CONFIG_DICT[type]) def _download_data(self): self._download_ann() self._download_vis() def _download_ann(self): """ Download annotation files if necessary. All the vision-language datasets should have annotations of unified format. storage_path can be: (1) relative/absolute: will be prefixed with env.cache_root to make full path if relative. (2) basename/dirname: will be suffixed with base name of URL if dirname is provided. Local annotation paths should be relative. """ anns = self.config.build_info.annotations splits = anns.keys() cache_root = registry.get_path("cache_root") for split in splits: info = anns[split] urls, storage_paths = info.get("url", None), info.storage if isinstance(urls, str): urls = [urls] if isinstance(storage_paths, str): storage_paths = [storage_paths] assert len(urls) == len(storage_paths) for url_or_filename, storage_path in zip(urls, storage_paths): # if storage_path is relative, make it full by prefixing with cache_root. 
if not os.path.isabs(storage_path): storage_path = os.path.join(cache_root, storage_path) dirname = os.path.dirname(storage_path) if not os.path.exists(dirname): os.makedirs(dirname) if os.path.isfile(url_or_filename): src, dst = url_or_filename, storage_path if not os.path.exists(dst): shutil.copyfile(src=src, dst=dst) else: logging.info("Using existing file {}.".format(dst)) else: if os.path.isdir(storage_path): # if only dirname is provided, suffix with basename of URL. raise ValueError( "Expecting storage_path to be a file path, got directory {}".format( storage_path ) ) else: filename = os.path.basename(storage_path) download_url(url=url_or_filename, root=dirname, filename=filename) def _download_vis(self): storage_path = self.config.build_info.get(self.data_type).storage storage_path = utils.get_cache_path(storage_path) if not os.path.exists(storage_path): warnings.warn( f""" The specified path {storage_path} for visual inputs does not exist. Please provide a correct path to the visual inputs or refer to datasets/download_scripts/README.md for downloading instructions. """ ) def build(self): """ Create by split datasets inheriting torch.utils.data.Datasets. # build() can be dataset-specific. Overwrite to customize. """ self.build_processors() build_info = self.config.build_info ann_info = build_info.annotations vis_info = build_info.get(self.data_type) datasets = dict() for split in ann_info.keys(): if split not in ["train", "val", "test"]: continue is_train = split == "train" # processors vis_processor = ( self.vis_processors["train"] if is_train else self.vis_processors["eval"] ) text_processor = ( self.text_processors["train"] if is_train else self.text_processors["eval"] ) # annotation path ann_paths = ann_info.get(split).storage if isinstance(ann_paths, str): ann_paths = [ann_paths] abs_ann_paths = [] for ann_path in ann_paths: if not os.path.isabs(ann_path): ann_path = utils.get_cache_path(ann_path) abs_ann_paths.append(ann_path) ann_paths = abs_ann_paths # visual data storage path vis_path = os.path.join(vis_info.storage, split) if not os.path.isabs(vis_path): # vis_path = os.path.join(utils.get_cache_path(), vis_path) vis_path = utils.get_cache_path(vis_path) if not os.path.exists(vis_path): warnings.warn("storage path {} does not exist.".format(vis_path)) # create datasets dataset_cls = self.train_dataset_cls if is_train else self.eval_dataset_cls datasets[split] = dataset_cls( vis_processor=vis_processor, text_processor=text_processor, ann_paths=ann_paths, vis_root=vis_path, ) return datasets def load_dataset_config(cfg_path): cfg = OmegaConf.load(cfg_path).datasets cfg = cfg[list(cfg.keys())[0]] return cfg
MovieChat-main
MovieChat/datasets/builders/base_dataset_builder.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from MovieChat.datasets.builders.base_dataset_builder import load_dataset_config from MovieChat.datasets.builders.image_text_pair_builder import ( CCSBUBuilder, LaionBuilder, CCSBUAlignBuilder ) from MovieChat.datasets.builders.video_caption_builder import WebvidBuilder from MovieChat.common.registry import registry from MovieChat.datasets.builders.instruct_builder import WebvidInstruct_Builder,LlavaInstruct_Builder __all__ = [ "CCSBUBuilder", "LaionBuilder", "CCSBUAlignBuilder", "WebvidBuilder", "LlavaInstruct_Builder", "WebvidInstruct_Builder" ] def load_dataset(name, cfg_path=None, vis_path=None, data_type=None): """ Example >>> dataset = load_dataset("coco_caption", cfg=None) >>> splits = dataset.keys() >>> print([len(dataset[split]) for split in splits]) """ if cfg_path is None: cfg = None else: cfg = load_dataset_config(cfg_path) try: builder = registry.get_builder_class(name)(cfg) except TypeError: print( f"Dataset {name} not found. Available datasets:\n" + ", ".join([str(k) for k in dataset_zoo.get_names()]) ) exit(1) if vis_path is not None: if data_type is None: # use default data type in the config data_type = builder.config.data_type assert ( data_type in builder.config.build_info ), f"Invalid data_type {data_type} for {name}." builder.config.build_info.get(data_type).storage = vis_path dataset = builder.build_datasets() return dataset class DatasetZoo: def __init__(self) -> None: self.dataset_zoo = { k: list(v.DATASET_CONFIG_DICT.keys()) for k, v in sorted(registry.mapping["builder_name_mapping"].items()) } def get_names(self): return list(self.dataset_zoo.keys()) dataset_zoo = DatasetZoo()
MovieChat-main
MovieChat/datasets/builders/__init__.py
import os import logging import warnings from MovieChat.common.registry import registry from MovieChat.datasets.builders.base_dataset_builder import BaseDatasetBuilder from MovieChat.datasets.datasets.laion_dataset import LaionDataset from MovieChat.datasets.datasets.llava_instruct_dataset import Instruct_Dataset from MovieChat.datasets.datasets.video_instruct_dataset import Video_Instruct_Dataset @registry.register_builder("instruct") class Instruct_Builder(BaseDatasetBuilder): train_dataset_cls = Instruct_Dataset DATASET_CONFIG_DICT = {"default": "configs/datasets/instruct/defaults.yaml"} def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() datasets = dict() split = "train" build_info = self.config.build_info dataset_cls = self.train_dataset_cls if self.config.num_video_query_token: num_video_query_token = self.config.num_video_query_token else: num_video_query_token = 32 if self.config.tokenizer_name: tokenizer_name = self.config.tokenizer_name else: tokenizer_name = '/mnt/workspace/ckpt/vicuna-13b/' datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], vis_root=build_info.videos_dir, ann_root=build_info.anno_dir, num_video_query_token = num_video_query_token, tokenizer_name = tokenizer_name, data_type = self.config.data_type ) return datasets @registry.register_builder("webvid_instruct") class WebvidInstruct_Builder(Instruct_Builder): train_dataset_cls = Video_Instruct_Dataset DATASET_CONFIG_DICT = { "default": "configs/datasets/instruct/webvid_instruct.yaml", } @registry.register_builder("webvid_instruct_zh") class WebvidInstruct_zh_Builder(Instruct_Builder): train_dataset_cls = Video_Instruct_Dataset DATASET_CONFIG_DICT = { "default": "configs/datasets/instruct/webvid_instruct.yaml", } @registry.register_builder("llava_instruct") class LlavaInstruct_Builder(Instruct_Builder): train_dataset_cls = Instruct_Dataset DATASET_CONFIG_DICT = { "default": "configs/datasets/instruct/llava_instruct.yaml", }
MovieChat-main
MovieChat/datasets/builders/instruct_builder.py
import os import logging import warnings from MovieChat.common.registry import registry from MovieChat.datasets.builders.base_dataset_builder import BaseDatasetBuilder from MovieChat.datasets.datasets.laion_dataset import LaionDataset from MovieChat.datasets.datasets.cc_sbu_dataset import CCSBUDataset, CCSBUAlignDataset @registry.register_builder("cc_sbu") class CCSBUBuilder(BaseDatasetBuilder): train_dataset_cls = CCSBUDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/cc_sbu/defaults.yaml"} def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() build_info = self.config.build_info datasets = dict() split = "train" # create datasets # [NOTE] return inner_datasets (wds.DataPipeline) dataset_cls = self.train_dataset_cls datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], location=build_info.storage, ).inner_dataset return datasets @registry.register_builder("laion") class LaionBuilder(BaseDatasetBuilder): train_dataset_cls = LaionDataset DATASET_CONFIG_DICT = {"default": "configs/datasets/laion/defaults.yaml"} def _download_ann(self): pass def _download_vis(self): pass def build(self): self.build_processors() build_info = self.config.build_info datasets = dict() split = "train" # create datasets # [NOTE] return inner_datasets (wds.DataPipeline) dataset_cls = self.train_dataset_cls datasets[split] = dataset_cls( vis_processor=self.vis_processors[split], text_processor=self.text_processors[split], location=build_info.storage, ).inner_dataset return datasets @registry.register_builder("cc_sbu_align") class CCSBUAlignBuilder(BaseDatasetBuilder): train_dataset_cls = CCSBUAlignDataset DATASET_CONFIG_DICT = { "default": "configs/datasets/cc_sbu/align.yaml", } def build_datasets(self): # at this point, all the annotations and image/videos should be all downloaded to the specified locations. logging.info("Building datasets...") self.build_processors() build_info = self.config.build_info storage_path = build_info.storage datasets = dict() if not os.path.exists(storage_path): warnings.warn("storage path {} does not exist.".format(storage_path)) # create datasets dataset_cls = self.train_dataset_cls datasets['train'] = dataset_cls( vis_processor=self.vis_processors["train"], text_processor=self.text_processors["train"], ann_paths=[os.path.join(storage_path, 'filter_cap.json')], vis_root=os.path.join(storage_path, 'image'), ) return datasets
MovieChat-main
MovieChat/datasets/builders/image_text_pair_builder.py
import os
import logging
import warnings

from MovieChat.common.registry import registry
from MovieChat.datasets.builders.base_dataset_builder import BaseDatasetBuilder
from MovieChat.datasets.datasets.webvid_datasets import WebvidDataset


@registry.register_builder("webvid")
class WebvidBuilder(BaseDatasetBuilder):
    train_dataset_cls = WebvidDataset
    DATASET_CONFIG_DICT = {"default": "configs/datasets/webvid/defaults.yaml"}

    def _download_ann(self):
        pass

    def _download_vis(self):
        pass

    def build(self):
        self.build_processors()
        datasets = dict()
        split = "train"

        build_info = self.config.build_info
        dataset_cls = self.train_dataset_cls
        datasets[split] = dataset_cls(
            vis_processor=self.vis_processors[split],
            text_processor=self.text_processors[split],
            vis_root=build_info.videos_dir,
            ann_root=build_info.anno_dir,
        )

        return datasets
MovieChat-main
MovieChat/datasets/builders/video_caption_builder.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import webdataset as wds from MovieChat.datasets.datasets.base_dataset import BaseDataset class LaionDataset(BaseDataset): def __init__(self, vis_processor, text_processor, location): super().__init__(vis_processor=vis_processor, text_processor=text_processor) self.inner_dataset = wds.DataPipeline( wds.ResampledShards(location), wds.tarfile_to_samples(handler=wds.warn_and_continue), wds.shuffle(1000, handler=wds.warn_and_continue), wds.decode("pilrgb", handler=wds.warn_and_continue), wds.to_tuple("jpg", "json", handler=wds.warn_and_continue), wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue), wds.map(self.to_dict, handler=wds.warn_and_continue), ) def to_dict(self, sample): return { "image": sample[0], "text_input": self.text_processor(sample[1]["caption"]), }
MovieChat-main
MovieChat/datasets/datasets/laion_dataset.py
import os from MovieChat.datasets.datasets.base_dataset import BaseDataset from MovieChat.datasets.datasets.caption_datasets import CaptionDataset import pandas as pd import decord from decord import VideoReader import random import torch from torch.utils.data.dataloader import default_collate from PIL import Image from typing import Dict, Optional, Sequence import transformers import pathlib import json from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer import copy from MovieChat.processors import transforms_video,AlproVideoTrainProcessor from torchvision import transforms from MovieChat.processors.video_processor import ToTHWC,ToUint8,load_video from MovieChat.conversation.conversation_video import Conversation,SeparatorStyle DEFAULT_IMAGE_PATCH_TOKEN = '<ImageHere>' video_conversation = Conversation( system="", roles=("Human", "Assistant"), messages=[], offset=0, sep_style=SeparatorStyle.SINGLE, sep="###", ) IGNORE_INDEX = -100 class Video_Instruct_Dataset(BaseDataset): def __init__(self, vis_processor, text_processor, vis_root, ann_root,num_video_query_token=32,tokenizer_name = '/mnt/workspace/ckpt/vicuna-13b/',data_type = 'video'): """ vis_root (string): Root directory of Llava images (e.g. webvid_eval/video/) ann_root (string): Root directory of video (e.g. webvid_eval/annotations/) split (string): val or test """ super().__init__(vis_processor=vis_processor, text_processor=text_processor) data_path = pathlib.Path(ann_root) with data_path.open(encoding='utf-8') as f: self.annotation = json.load(f) self.num_video_query_token = num_video_query_token self.vis_root = vis_root self.resize_size = 224 self.num_frm = 8 self.tokenizer = LlamaTokenizer.from_pretrained(tokenizer_name, use_fast=False) self.tokenizer.pad_token = self.tokenizer.eos_token self.tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True) self.IMAGE_PATCH_TOKEN_ID = self.tokenizer.get_vocab()[DEFAULT_IMAGE_PATCH_TOKEN] self.transform = AlproVideoTrainProcessor( image_size=self.resize_size, n_frms = self.num_frm ).transform self.data_type = data_type def _get_video_path(self, sample): rel_video_fp = sample['video'] full_video_fp = os.path.join(self.vis_root, rel_video_fp) return full_video_fp def __getitem__(self, index): num_retries = 10 # skip error videos for _ in range(num_retries): try: sample = self.annotation[index] video_path = self._get_video_path(sample) conversation_list = sample['QA'] video, msg = load_video( video_path=video_path, n_frms=self.num_frm, height=self.resize_size, width=self.resize_size, sampling ="uniform", return_msg = True ) video = self.transform(video) if 'cn' in self.data_type: msg = "" # 添加视频<DEFAULT_IMAGE_PATCH_TOKEN>,以及msg到convsation list 0 sources = preprocess_multimodal(copy.deepcopy(conversation_list), None, cur_token_len=self.num_video_query_token,msg = msg) new_sources = convert_source_vicuna_format(sources) data_dict = preprocess( new_sources, self.tokenizer) data_dict = dict(input_ids=data_dict["input_ids"][0], labels=data_dict["labels"][0]) # image exist in the data data_dict['image'] = video except: print(f"Failed to load examples with video: {video_path}. 
" f"Will randomly sample an example as a replacement.") index = random.randint(0, len(self) - 1) continue break else: raise RuntimeError(f"Failed to fetch video after {num_retries} retries.") # "image_id" is kept to stay compatible with the COCO evaluation format return { "image": video, "text_input": data_dict["input_ids"], "labels": data_dict["labels"], "type":'video', } def __len__(self): return len(self.annotation) def collater(self, instances): input_ids, labels = tuple([instance[key] for instance in instances] for key in ("text_input", "labels")) input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id) labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX) batch = dict( input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(self.tokenizer.pad_token_id), ) if 'image' in instances[0]: images = [instance['image'] for instance in instances] if all(x is not None and x.shape == images[0].shape for x in images): batch['images'] = torch.stack(images) else: batch['images'] = images batch['conv_type'] = 'multi' return batch def convert_source_vicuna_format(sources): new_sources = [] for source in sources: new_source = [] for i, sentence in enumerate(source): role_0_msg = sentence['q'] role_1_msg = sentence['a'] new_source.append({ 'from':'human', 'value': role_0_msg, }) new_source.append({ 'from':'gpt', 'value': role_1_msg, }) new_sources.append(new_source) return new_sources def preprocess_multimodal( conversation_list: Sequence[str], multimodal_cfg: dict, cur_token_len: int, msg='' ) -> Dict: # 将conversational list中 is_multimodal = True # image_token_len = multimodal_cfg['image_token_len'] image_token_len = cur_token_len conversation_list[0]["q"] = "<Video>"+DEFAULT_IMAGE_PATCH_TOKEN * image_token_len +"</Video> " + msg + conversation_list[0]["q"] return [conversation_list] def _add_speaker_and_signal(header, source, get_conversation=True): """Add speaker and start/end signal on each round.""" BEGIN_SIGNAL = "###" END_SIGNAL = "\n" conversation = header for sentence in source: from_str = sentence["from"] if from_str.lower() == "human": from_str = video_conversation.roles[0] elif from_str.lower() == "gpt": from_str = video_conversation.roles[1] else: from_str = 'unknown' sentence["value"] = (BEGIN_SIGNAL + from_str + ": " + sentence["value"] + END_SIGNAL) if get_conversation: conversation += sentence["value"] conversation += BEGIN_SIGNAL return conversation def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict: """Tokenize a list of strings.""" tokenized_list = [ tokenizer( text, return_tensors="pt", padding="longest", max_length=512, truncation=True, ) for text in strings ] input_ids = labels = [ tokenized.input_ids[0] for tokenized in tokenized_list ] input_ids_lens = labels_lens = [ tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list ] return dict( input_ids=input_ids, labels=labels, input_ids_lens=input_ids_lens, labels_lens=labels_lens, ) def preprocess( sources: Sequence[str], tokenizer: transformers.PreTrainedTokenizer, ) -> Dict: """ Given a list of sources, each is a conversation list. This transform: 1. Add signal '### ' at the beginning each sentence, with end signal '\n'; 2. Concatenate conversations together; 3. Tokenize the concatenated conversation; 4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX. 
""" # add end signal and concatenate together conversations = [] for source in sources: header = f"{video_conversation.system}\n\n" conversation = _add_speaker_and_signal(header, source) conversations.append(conversation) # tokenize conversations conversations_tokenized = _tokenize_fn(conversations, tokenizer) input_ids = conversations_tokenized["input_ids"] targets = copy.deepcopy(input_ids) for target, source in zip(targets, sources): tokenized_lens = _tokenize_fn([header] + [s["value"] for s in source], tokenizer)["input_ids_lens"] speakers = [sentence["from"] for sentence in source] _mask_targets(target, tokenized_lens, speakers) return dict(input_ids=input_ids, labels=targets) def _mask_targets(target, tokenized_lens, speakers): # cur_idx = 0 cur_idx = tokenized_lens[0] tokenized_lens = tokenized_lens[1:] target[:cur_idx] = IGNORE_INDEX for tokenized_len, speaker in zip(tokenized_lens, speakers): if speaker == "human": target[cur_idx+2:cur_idx + tokenized_len] = IGNORE_INDEX cur_idx += tokenized_len
MovieChat-main
MovieChat/datasets/datasets/video_instruct_dataset.py
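# --- Illustrative sketch (not part of the MovieChat source). ---
# The label-masking step used by preprocess()/_mask_targets above: the header
# and the body of every human turn are overwritten with IGNORE_INDEX so the
# language-model loss is computed only on assistant tokens. Token ids and
# segment lengths below are made up for the example.

import copy
import torch

IGNORE_INDEX = -100

input_ids = torch.arange(1, 21)          # 20 fake token ids
labels = copy.deepcopy(input_ids)

# segment lengths: [header, human turn, assistant turn, human turn, assistant turn]
tokenized_lens = [4, 5, 4, 4, 3]
speakers = ["human", "gpt", "human", "gpt"]

cur_idx = tokenized_lens[0]
labels[:cur_idx] = IGNORE_INDEX          # mask the system header
for seg_len, speaker in zip(tokenized_lens[1:], speakers):
    if speaker == "human":
        # the first two tokens of the turn are left unmasked, mirroring the
        # cur_idx + 2 slice in _mask_targets; the rest of the turn is ignored
        labels[cur_idx + 2:cur_idx + seg_len] = IGNORE_INDEX
    cur_idx += seg_len

print(labels.tolist())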
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import json from typing import Iterable from torch.utils.data import Dataset, ConcatDataset from torch.utils.data.dataloader import default_collate class BaseDataset(Dataset): def __init__( self, vis_processor=None, text_processor=None, vis_root=None, ann_paths=[] ): """ vis_root (string): Root directory of images (e.g. coco/images/) ann_root (string): directory to store the annotation file """ self.vis_root = vis_root self.annotation = [] for ann_path in ann_paths: self.annotation.extend(json.load(open(ann_path, "r"))['annotations']) self.vis_processor = vis_processor self.text_processor = text_processor self._add_instance_ids() def __len__(self): return len(self.annotation) def collater(self, samples): return default_collate(samples) def set_processors(self, vis_processor, text_processor): self.vis_processor = vis_processor self.text_processor = text_processor def _add_instance_ids(self, key="instance_id"): for idx, ann in enumerate(self.annotation): ann[key] = str(idx) class ConcatDataset(ConcatDataset): def __init__(self, datasets: Iterable[Dataset]) -> None: super().__init__(datasets) def collater(self, samples): # TODO For now only supports datasets with same underlying collater implementations all_keys = set() for s in samples: all_keys.update(s) shared_keys = all_keys for s in samples: shared_keys = shared_keys & set(s.keys()) samples_shared_keys = [] for s in samples: samples_shared_keys.append({k: s[k] for k in s.keys() if k in shared_keys}) return self.datasets[0].collater(samples_shared_keys)
MovieChat-main
MovieChat/datasets/datasets/base_dataset.py
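# --- Illustrative sketch (not part of the MovieChat source). ---
# ConcatDataset.collater above only collates the keys shared by every sample
# in the batch, which matters when image and video samples are mixed. The set
# intersection on its own, with plain dicts standing in for samples:

samples = [
    {"image": "img_tensor", "text_input": "a photo", "type": "image"},
    {"image": "vid_tensor", "text_input": "a clip", "type": "video", "labels": [1, 2]},
]

shared_keys = set(samples[0])
for s in samples[1:]:
    shared_keys &= set(s)

batch_ready = [{k: s[k] for k in shared_keys} for s in samples]
print(sorted(shared_keys))  # ['image', 'text_input', 'type']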
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import time import random import torch from MovieChat.datasets.data_utils import move_to_cuda from torch.utils.data import DataLoader class MultiIterLoader: """ A simple wrapper for iterating over multiple iterators. Args: loaders (List[Loader]): List of Iterator loaders. ratios (List[float]): List of ratios to sample from each loader. If None, all loaders are sampled uniformly. """ def __init__(self, loaders, ratios=None): # assert all loaders has __next__ method for loader in loaders: assert hasattr( loader, "__next__" ), "Loader {} has no __next__ method.".format(loader) if ratios is None: ratios = [1.0] * len(loaders) else: assert len(ratios) == len(loaders) ratios = [float(ratio) / sum(ratios) for ratio in ratios] self.loaders = loaders self.ratios = ratios def __next__(self): # random sample from each loader by ratio loader_idx = random.choices(range(len(self.loaders)), self.ratios, k=1)[0] return next(self.loaders[loader_idx]) class PrefetchLoader(object): """ Modified from https://github.com/ChenRocks/UNITER. overlap compute and cuda data transfer (copied and then modified from nvidia apex) """ def __init__(self, loader): self.loader = loader self.stream = torch.cuda.Stream() def __iter__(self): loader_it = iter(self.loader) self.preload(loader_it) batch = self.next(loader_it) while batch is not None: is_tuple = isinstance(batch, tuple) if is_tuple: task, batch = batch if is_tuple: yield task, batch else: yield batch batch = self.next(loader_it) def __len__(self): return len(self.loader) def preload(self, it): try: self.batch = next(it) except StopIteration: self.batch = None return # if record_stream() doesn't work, another option is to make sure # device inputs are created on the main stream. # self.next_input_gpu = torch.empty_like(self.next_input, # device='cuda') # self.next_target_gpu = torch.empty_like(self.next_target, # device='cuda') # Need to make sure the memory allocated for next_* is not still in use # by the main stream at the time we start copying to next_*: # self.stream.wait_stream(torch.cuda.current_stream()) with torch.cuda.stream(self.stream): self.batch = move_to_cuda(self.batch) # more code for the alternative if record_stream() doesn't work: # copy_ will record the use of the pinned source tensor in this # side stream. # self.next_input_gpu.copy_(self.next_input, non_blocking=True) # self.next_target_gpu.copy_(self.next_target, non_blocking=True) # self.next_input = self.next_input_gpu # self.next_target = self.next_target_gpu def next(self, it): torch.cuda.current_stream().wait_stream(self.stream) batch = self.batch if batch is not None: record_cuda_stream(batch) self.preload(it) return batch def __getattr__(self, name): method = self.loader.__getattribute__(name) return method def record_cuda_stream(batch): if isinstance(batch, torch.Tensor): batch.record_stream(torch.cuda.current_stream()) elif isinstance(batch, list) or isinstance(batch, tuple): for t in batch: record_cuda_stream(t) elif isinstance(batch, dict): for t in batch.values(): record_cuda_stream(t) else: pass class IterLoader: """ A wrapper to convert DataLoader as an infinite iterator. 
Modified from: https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/iter_based_runner.py """ def __init__(self, dataloader: DataLoader, use_distributed: bool = False): self._dataloader = dataloader self.iter_loader = iter(self._dataloader) self._use_distributed = use_distributed self._epoch = 0 @property def epoch(self) -> int: return self._epoch def __next__(self): try: data = next(self.iter_loader) except StopIteration: self._epoch += 1 if hasattr(self._dataloader.sampler, "set_epoch") and self._use_distributed: self._dataloader.sampler.set_epoch(self._epoch) time.sleep(2) # Prevent possible deadlock during epoch transition self.iter_loader = iter(self._dataloader) data = next(self.iter_loader) return data def __iter__(self): return self def __len__(self): return len(self._dataloader)
MovieChat-main
MovieChat/datasets/datasets/dataloader_utils.py
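# --- Illustrative sketch (not part of the MovieChat source). ---
# IterLoader above turns a finite DataLoader into an endless iterator that
# silently restarts on StopIteration, and MultiIterLoader samples between
# several such iterators by ratio. A compact stand-in over toy tensors;
# EndlessLoader is an invented name for the example.

import random
import torch
from torch.utils.data import DataLoader, TensorDataset


class EndlessLoader:
    def __init__(self, dataloader):
        self._dataloader = dataloader
        self._it = iter(dataloader)

    def __next__(self):
        try:
            return next(self._it)
        except StopIteration:
            self._it = iter(self._dataloader)  # start a new epoch
            return next(self._it)


loader_a = EndlessLoader(DataLoader(TensorDataset(torch.zeros(4, 2)), batch_size=2))
loader_b = EndlessLoader(DataLoader(TensorDataset(torch.ones(4, 2)), batch_size=2))
ratios = [0.75, 0.25]

for _ in range(5):
    chosen = random.choices([loader_a, loader_b], weights=ratios, k=1)[0]
    batch = next(chosen)  # batches keep coming even after a loader's epoch ends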
import os from MovieChat.datasets.datasets.base_dataset import BaseDataset from MovieChat.datasets.datasets.caption_datasets import CaptionDataset import pandas as pd import decord from decord import VideoReader import random import torch from torch.utils.data.dataloader import default_collate from PIL import Image from typing import Dict, Optional, Sequence import transformers import pathlib import json from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer from MovieChat.conversation.conversation_video import Conversation,SeparatorStyle DEFAULT_IMAGE_PATCH_TOKEN = '<ImageHere>' DEFAULT_IMAGE_TOKEN = "<image>" import copy from MovieChat.processors import transforms_video,AlproVideoTrainProcessor IGNORE_INDEX = -100 image_conversation = Conversation( system="", roles=("Human", "Assistant"), messages=[], offset=0, sep_style=SeparatorStyle.SINGLE, sep="###", ) IGNORE_INDEX = -100 class Instruct_Dataset(BaseDataset): def __init__(self, vis_processor, text_processor, vis_root, ann_root,num_video_query_token=32,tokenizer_name = '/mnt/workspace/ckpt/vicuna-13b/',data_type = 'image'): """ vis_root (string): Root directory of Llava images (e.g. webvid_eval/video/) ann_root (string): Root directory of video (e.g. webvid_eval/annotations/) split (string): val or test """ super().__init__(vis_processor=vis_processor, text_processor=text_processor) data_path = pathlib.Path(ann_root) with data_path.open(encoding='utf-8') as f: self.annotation = json.load(f) self.vis_root = vis_root self.resize_size = 224 self.num_frm = 8 self.tokenizer = LlamaTokenizer.from_pretrained(tokenizer_name, use_fast=False) self.tokenizer.pad_token = self.tokenizer.eos_token self.tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True) self.num_video_query_token = num_video_query_token self.IMAGE_PATCH_TOKEN_ID = self.tokenizer.get_vocab()[DEFAULT_IMAGE_PATCH_TOKEN] self.transform = AlproVideoTrainProcessor( image_size=self.resize_size, n_frms = self.num_frm ).transform self.data_type = data_type def _get_image_path(self, sample): rel_video_fp ='COCO_train2014_' + sample['image'] full_video_fp = os.path.join(self.vis_root, rel_video_fp) return full_video_fp def __getitem__(self, index): num_retries = 10 # skip error videos for _ in range(num_retries): try: sample = self.annotation[index] image_path = self._get_image_path(sample) conversation_list = sample['conversations'] image = Image.open(image_path).convert("RGB") image = self.vis_processor(image) # text = self.text_processor(text) sources = preprocess_multimodal(copy.deepcopy(conversation_list), None, cur_token_len=self.num_video_query_token) data_dict = preprocess( sources, self.tokenizer) data_dict = dict(input_ids=data_dict["input_ids"][0], labels=data_dict["labels"][0]) # image exist in the data data_dict['image'] = image except: print(f"Failed to load examples with image: {image_path}. 
" f"Will randomly sample an example as a replacement.") index = random.randint(0, len(self) - 1) continue break else: raise RuntimeError(f"Failed to fetch image after {num_retries} retries.") # "image_id" is kept to stay compatible with the COCO evaluation format return { "image": image, "text_input": data_dict["input_ids"], "labels": data_dict["labels"], "type":'image', } def __len__(self): return len(self.annotation) def collater(self, instances): input_ids, labels = tuple([instance[key] for instance in instances] for key in ("text_input", "labels")) input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id) labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX) batch = dict( input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(self.tokenizer.pad_token_id), ) if 'image' in instances[0]: images = [instance['image'] for instance in instances] if all(x is not None and x.shape == images[0].shape for x in images): batch['images'] = torch.stack(images) else: batch['images'] = images batch['conv_type'] = 'multi' return batch def preprocess_multimodal( conversation_list: Sequence[str], multimodal_cfg: dict, cur_token_len: int, ) -> Dict: # 将conversational list中 is_multimodal = True # image_token_len = multimodal_cfg['image_token_len'] image_token_len = cur_token_len for sentence in conversation_list: replace_token = '<Image>'+DEFAULT_IMAGE_PATCH_TOKEN * image_token_len+'/<Image>' sentence["value"] = sentence["value"].replace(DEFAULT_IMAGE_TOKEN, replace_token) return [conversation_list] def _add_speaker_and_signal(header, source, get_conversation=True): """Add speaker and start/end signal on each round.""" BEGIN_SIGNAL = "###" END_SIGNAL = "\n" conversation = header for sentence in source: from_str = sentence["from"] if from_str.lower() == "human": from_str = image_conversation.roles[0] elif from_str.lower() == "gpt": from_str = image_conversation.roles[1] else: from_str = 'unknown' sentence["value"] = (BEGIN_SIGNAL + from_str + ": " + sentence["value"] + END_SIGNAL) if get_conversation: conversation += sentence["value"] conversation += BEGIN_SIGNAL return conversation def _tokenize_fn(strings: Sequence[str], tokenizer: transformers.PreTrainedTokenizer) -> Dict: """Tokenize a list of strings.""" tokenized_list = [ tokenizer( text, return_tensors="pt", padding="longest", max_length=512, truncation=True, ) for text in strings ] input_ids = labels = [ tokenized.input_ids[0] for tokenized in tokenized_list ] input_ids_lens = labels_lens = [ tokenized.input_ids.ne(tokenizer.pad_token_id).sum().item() for tokenized in tokenized_list ] return dict( input_ids=input_ids, labels=labels, input_ids_lens=input_ids_lens, labels_lens=labels_lens, ) def preprocess( sources: Sequence[str], tokenizer: transformers.PreTrainedTokenizer, ) -> Dict: """ Given a list of sources, each is a conversation list. This transform: 1. Add signal '### ' at the beginning each sentence, with end signal '\n'; 2. Concatenate conversations together; 3. Tokenize the concatenated conversation; 4. Make a deepcopy as the target. Mask human words with IGNORE_INDEX. 
""" # add end signal and concatenate together conversations = [] for source in sources: header = f"{image_conversation.system}\n\n" conversation = _add_speaker_and_signal(header, source) conversations.append(conversation) # tokenize conversations conversations_tokenized = _tokenize_fn(conversations, tokenizer) input_ids = conversations_tokenized["input_ids"] targets = copy.deepcopy(input_ids) for target, source in zip(targets, sources): tokenized_lens = _tokenize_fn([header] + [s["value"] for s in source], tokenizer)["input_ids_lens"] speakers = [sentence["from"] for sentence in source] _mask_targets(target, tokenized_lens, speakers) return dict(input_ids=input_ids, labels=targets) def _mask_targets(target, tokenized_lens, speakers): # cur_idx = 0 cur_idx = tokenized_lens[0] tokenized_lens = tokenized_lens[1:] target[:cur_idx] = IGNORE_INDEX for tokenized_len, speaker in zip(tokenized_lens, speakers): if speaker == "human": target[cur_idx+2:cur_idx + tokenized_len] = IGNORE_INDEX cur_idx += tokenized_len
MovieChat-main
MovieChat/datasets/datasets/llava_instruct_dataset.py
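# --- Illustrative sketch (not part of the MovieChat source). ---
# preprocess_multimodal above swaps the "<image>" placeholder in each turn for
# a run of patch-token placeholders, one per visual query token, wrapped in
# image tags. The string manipulation on its own, with made-up values; the
# closing tag is written as </Image> here as an assumption.

DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IMAGE_PATCH_TOKEN = "<ImageHere>"
num_query_tokens = 4  # assumed; the dataset above defaults to 32

turn = "What is shown in this <image>?"
replace_token = "<Image>" + DEFAULT_IMAGE_PATCH_TOKEN * num_query_tokens + "</Image>"
print(turn.replace(DEFAULT_IMAGE_TOKEN, replace_token))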
MovieChat-main
MovieChat/datasets/datasets/__init__.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os from MovieChat.datasets.datasets.base_dataset import BaseDataset from MovieChat.datasets.datasets.caption_datasets import CaptionDataset import pandas as pd import decord from decord import VideoReader import random import torch from torch.utils.data.dataloader import default_collate class WebvidDataset(BaseDataset): def __init__(self, vis_processor, text_processor, vis_root, ann_root): """ vis_root (string): Root directory of video (e.g. webvid_eval/video/) ann_root (string): Root directory of video (e.g. webvid_eval/annotations/) split (string): val or test """ super().__init__(vis_processor=vis_processor, text_processor=text_processor) # 读取一个路径下所有的 ts_df = [] for file_name in os.listdir(ann_root): if file_name.endswith('.csv'): df = pd.read_csv(os.path.join(ann_root, file_name)) ts_df.append(df) merged_df = pd.concat(ts_df) self.annotation = merged_df self.vis_root = vis_root self.resize_size = 224 self.num_frm = 8 self.frm_sampling_strategy = 'headtail' def _get_video_path(self, sample): rel_video_fp = os.path.join(sample['page_dir'], str(sample['videoid']) + '.mp4') full_video_fp = os.path.join(self.vis_root, rel_video_fp) return full_video_fp def __getitem__(self, index): num_retries = 10 # skip error videos for _ in range(num_retries): sample = self.annotation.iloc[index] sample_dict = sample.to_dict() video_id = sample_dict['videoid'] if 'name' in sample_dict.keys(): text = sample_dict['name'].strip() else: raise NotImplementedError("Un-supported text annotation format.") # fetch video video_path = self._get_video_path(sample_dict) # if os.path.exists(video_path): try: video = self.vis_processor(video_path) except: print(f"Failed to load examples with video: {video_path}. " f"Will randomly sample an example as a replacement.") index = random.randint(0, len(self) - 1) continue caption = self.text_processor(text) # print(video.size()) if video is None or caption is None \ or video.size()!=torch.Size([3,self.vis_processor.n_frms,224,224]): print(f"Failed to load examples with video: {video_path}. " f"Will randomly sample an example as a replacement.") index = random.randint(0, len(self) - 1) continue else: break else: raise RuntimeError(f"Failed to fetch video after {num_retries} retries.") # "image_id" is kept to stay compatible with the COCO evaluation format return { "image": video, "text_input": caption, "type":'video', } def __len__(self): return len(self.annotation) # def collater(self, samples): # new_result = {} # new_result['image'] = default_collate( [sample["image"] for sample in samples]) # new_result['text_input'] = default_collate( [sample["text_input"] for sample in samples]) # return new_result class WebvidDatasetEvalDataset(BaseDataset): def __init__(self, vis_processor, text_processor, vis_root, ann_paths): """ vis_root (string): Root directory of images (e.g. coco/images/) ann_root (string): directory to store the annotation file split (string): val or test """ super().__init__(vis_processor, text_processor, vis_root, ann_paths) def __getitem__(self, index): ann = self.annotation[index] vname = ann["video"] video_path = os.path.join(self.vis_root, vname) video = self.vis_processor(video_path) return { "video": video, "image_id": ann["image_id"], "instance_id": ann["instance_id"], }
MovieChat-main
MovieChat/datasets/datasets/webvid_datasets.py
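# --- Illustrative sketch (not part of the MovieChat source). ---
# WebvidDataset.__getitem__ above retries up to num_retries times, resampling
# a random index whenever a video fails to decode, and only raises after the
# loop exhausts its attempts (the for/else idiom). The same skeleton in
# isolation, with a fake loader that fails at random:

import random


def load_or_none(index):
    # stand-in for decoding a video; pretend ~30% of items are corrupt
    return None if random.random() < 0.3 else f"sample_{index}"


def get_item(index, dataset_len=100, num_retries=10):
    for _ in range(num_retries):
        sample = load_or_none(index)
        if sample is None:
            index = random.randint(0, dataset_len - 1)  # resample a replacement
            continue
        break
    else:
        raise RuntimeError(f"Failed to fetch a sample after {num_retries} retries.")
    return sample


print(get_item(3))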
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import os from collections import OrderedDict from MovieChat.datasets.datasets.base_dataset import BaseDataset from PIL import Image class __DisplMixin: def displ_item(self, index): sample, ann = self.__getitem__(index), self.annotation[index] return OrderedDict( { "file": ann["image"], "caption": ann["caption"], "image": sample["image"], } ) class CaptionDataset(BaseDataset, __DisplMixin): def __init__(self, vis_processor, text_processor, vis_root, ann_paths): """ vis_root (string): Root directory of images (e.g. coco/images/) ann_root (string): directory to store the annotation file """ super().__init__(vis_processor, text_processor, vis_root, ann_paths) self.img_ids = {} n = 0 for ann in self.annotation: img_id = ann["image_id"] if img_id not in self.img_ids.keys(): self.img_ids[img_id] = n n += 1 def __getitem__(self, index): # TODO this assumes image input, not general enough ann = self.annotation[index] img_file = '{:0>12}.jpg'.format(ann["image_id"]) image_path = os.path.join(self.vis_root, img_file) image = Image.open(image_path).convert("RGB") image = self.vis_processor(image) caption = self.text_processor(ann["caption"]) return { "image": image, "text_input": caption, "image_id": self.img_ids[ann["image_id"]], } class CaptionEvalDataset(BaseDataset, __DisplMixin): def __init__(self, vis_processor, text_processor, vis_root, ann_paths): """ vis_root (string): Root directory of images (e.g. coco/images/) ann_root (string): directory to store the annotation file split (string): val or test """ super().__init__(vis_processor, text_processor, vis_root, ann_paths) def __getitem__(self, index): ann = self.annotation[index] image_path = os.path.join(self.vis_root, ann["image"]) image = Image.open(image_path).convert("RGB") image = self.vis_processor(image) return { "image": image, "image_id": ann["image_id"], "instance_id": ann["instance_id"], }
MovieChat-main
MovieChat/datasets/datasets/caption_datasets.py
import os from PIL import Image import webdataset as wds from MovieChat.datasets.datasets.base_dataset import BaseDataset from MovieChat.datasets.datasets.caption_datasets import CaptionDataset class CCSBUDataset(BaseDataset): def __init__(self, vis_processor, text_processor, location): super().__init__(vis_processor=vis_processor, text_processor=text_processor) self.inner_dataset = wds.DataPipeline( wds.ResampledShards(location), wds.tarfile_to_samples(handler=wds.warn_and_continue), wds.shuffle(1000, handler=wds.warn_and_continue), wds.decode("pilrgb", handler=wds.warn_and_continue), wds.to_tuple("jpg", "json", handler=wds.warn_and_continue), wds.map_tuple(self.vis_processor, handler=wds.warn_and_continue), wds.map(self.to_dict, handler=wds.warn_and_continue), ) def to_dict(self, sample): return { "image": sample[0], "text_input": self.text_processor(sample[1]["caption"]), "type":'image', } class CCSBUAlignDataset(CaptionDataset): def __getitem__(self, index): # TODO this assumes image input, not general enough ann = self.annotation[index] img_file = '{}.jpg'.format(ann["image_id"]) image_path = os.path.join(self.vis_root, img_file) image = Image.open(image_path).convert("RGB") image = self.vis_processor(image) caption = ann["caption"] return { "image": image, "text_input": caption, "image_id": self.img_ids[ann["image_id"]], "type":'image', }
MovieChat-main
MovieChat/datasets/datasets/cc_sbu_dataset.py
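CCSBUDataset wires a webdataset pipeline that decodes (jpg, json) pairs and converts each sample into the dict format the trainer consumes. Below is a minimal sketch of that to_dict conversion only, with stand-in objects (identity_text_processor and the placeholder image are hypothetical, not part of the repo):

# Minimal sketch of the sample -> dict conversion done by CCSBUDataset.to_dict.

def identity_text_processor(caption):
    # The real text_processor cleans/truncates captions; here we just pass through.
    return caption

def to_dict(sample, text_processor):
    image, meta = sample  # webdataset yields the decoded "jpg" and the parsed "json"
    return {
        "image": image,
        "text_input": text_processor(meta["caption"]),
        "type": "image",
    }

fake_image = object()  # placeholder for a decoded / preprocessed image tensor
sample = (fake_image, {"caption": "a dog running on the beach"})
print(to_dict(sample, identity_text_processor))
# {'image': <object object at 0x...>, 'text_input': 'a dog running on the beach', 'type': 'image'}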
import logging import random import torch from torch.cuda.amp import autocast as autocast import torch.nn as nn from MovieChat.common.registry import registry from MovieChat.models.blip2 import Blip2Base, disabled_train from MovieChat.models.modeling_llama import LlamaForCausalLM from transformers import LlamaTokenizer,BertConfig import einops import copy from MovieChat.models.Qformer import BertConfig, BertLMHeadModel import queue import numpy as np from scipy.spatial.distance import cosine from skimage import transform import cv2 from PIL import Image @registry.register_model("moviechat") class MovieChat(Blip2Base): """ BLIP2 GPT-LLAMA model. """ PRETRAINED_MODEL_CONFIG_DICT = { "pretrain_vicuna": "configs/models/moviechat.yaml", } @classmethod def init_video_Qformer(cls, num_query_token, vision_width,num_hidden_layers =2): encoder_config = BertConfig.from_pretrained("bert-base-uncased") encoder_config.num_hidden_layers = num_hidden_layers encoder_config.encoder_width = vision_width # insert cross-attention layer every other block encoder_config.add_cross_attention = True encoder_config.cross_attention_freq = 1 encoder_config.query_length = num_query_token Qformer = BertLMHeadModel(config=encoder_config) query_tokens = nn.Parameter( torch.zeros(1, num_query_token, encoder_config.hidden_size) ) query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range) return Qformer, query_tokens def __init__( self, vit_model="eva_clip_g", q_former_model="https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth", img_size=224, drop_path_rate=0, use_grad_checkpoint=False, vit_precision="fp16", freeze_vit=True, freeze_qformer=True, num_query_token=32, llama_model="", prompt_path="", prompt_template="", max_txt_len=32, end_sym='\n', low_resource=False, device_8bit=0, frozen_llama_proj=True, frozen_video_Qformer=True, llama_proj_model='', fusion_header_type= "seqTransf", max_frame_pos= 32, fusion_head_layers = 2, num_video_query_token = 32, short_memory_length = 18, long_memory_length = 64, short_memory_merge = 2, Qformer_input = 8 ): super().__init__() self.tokenizer = self.init_tokenizer() self.low_resource = low_resource print('Loading VIT') self.visual_encoder, self.ln_vision = self.init_vision_encoder( vit_model, img_size, drop_path_rate, use_grad_checkpoint, vit_precision ) if freeze_vit: for name, param in self.visual_encoder.named_parameters(): param.requires_grad = False self.visual_encoder = self.visual_encoder.eval() self.visual_encoder.train = disabled_train for name, param in self.ln_vision.named_parameters(): param.requires_grad = False self.ln_vision = self.ln_vision.eval() self.ln_vision.train = disabled_train logging.info("freeze vision encoder") print('Loading VIT Done') print('Loading Q-Former') self.Qformer, self.query_tokens = self.init_Qformer( num_query_token, self.visual_encoder.num_features ) self.Qformer.cls = None self.Qformer.bert.embeddings.word_embeddings = None self.Qformer.bert.embeddings.position_embeddings = None for layer in self.Qformer.bert.encoder.layer: layer.output = None layer.intermediate = None self.load_from_pretrained(url_or_filename=q_former_model) if freeze_qformer: for name, param in self.Qformer.named_parameters(): param.requires_grad = False self.Qformer = self.Qformer.eval() self.Qformer.train = disabled_train self.query_tokens.requires_grad = False logging.info("freeze Qformer") logging.info('Loading Q-Former Done') logging.info('Loading LLAMA Tokenizer') self.llama_tokenizer = 
LlamaTokenizer.from_pretrained(llama_model, use_fast=False) if self.llama_tokenizer.pad_token is None: self.llama_tokenizer.pad_token = self.llama_tokenizer.eos_token DEFAULT_IMAGE_PATCH_TOKEN = '<ImageHere>' DEFAULT_AUDIO_PATCH_TOKEN = '<AudioHere>' self.llama_tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True) self.llama_tokenizer.add_tokens([DEFAULT_AUDIO_PATCH_TOKEN], special_tokens=True) self.IMAGE_PATCH_TOKEN_ID = self.llama_tokenizer.get_vocab()[DEFAULT_IMAGE_PATCH_TOKEN] self.AUDIO_PATCH_TOKEN_ID = self.llama_tokenizer.get_vocab()[DEFAULT_AUDIO_PATCH_TOKEN] logging.info('Loading LLAMA Model') if self.low_resource: self.llama_model = LlamaForCausalLM.from_pretrained( llama_model, torch_dtype=torch.float16, load_in_8bit=True, device_map={'': device_8bit} ) else: self.llama_model = LlamaForCausalLM.from_pretrained( llama_model, torch_dtype=torch.float16, ) for name, param in self.llama_model.named_parameters(): param.requires_grad = False logging.info('Loading LLAMA Done') logging.info('Loading LLAMA proj') self.llama_proj = nn.Linear( self.Qformer.config.hidden_size, self.llama_model.config.hidden_size ) if llama_proj_model: print("load llama proj weight: {}".format(llama_proj_model)) llama_proj_weight = torch.load(llama_proj_model, map_location="cpu") msg = model.load_state_dict(llama_proj_weight['model'], strict=False) if frozen_llama_proj: # todo frozen llama_proj for name, param in self.llama_proj.named_parameters(): param.requires_grad = False logging.info('LLAMA proj is frozen') else: for name, param in self.llama_proj.named_parameters(): param.requires_grad = True logging.info('LLAMA proj is not frozen') logging.info('Loading llama_proj Done') self.max_txt_len = max_txt_len self.end_sym = end_sym if prompt_path: with open(prompt_path, 'r') as f: raw_prompts = f.read().splitlines() filted_prompts = [raw_prompt for raw_prompt in raw_prompts if "<ImageHere>" in raw_prompt] self.prompt_list = [prompt_template.format(p) for p in filted_prompts] print('Load {} training prompts'.format(len(self.prompt_list))) print('Prompt Example \n{}'.format(random.choice(self.prompt_list))) else: self.prompt_list = [] self.max_frame_pos = max_frame_pos self.video_frame_position_embedding = nn.Embedding(max_frame_pos, self.Qformer.config.hidden_size) #[32,768] [200] self.num_video_query_token = num_video_query_token self.video_Qformer,self.video_query_tokens = self.init_video_Qformer(num_query_token = num_video_query_token,\ vision_width=self.Qformer.config.hidden_size, num_hidden_layers =2) self.video_Qformer.cls = None self.video_Qformer.bert.embeddings.word_embeddings = None self.video_Qformer.bert.embeddings.position_embeddings = None for layer in self.video_Qformer.bert.encoder.layer: layer.output = None layer.intermediate = None if frozen_video_Qformer: # todo frozen llama_proj for name, param in self.video_Qformer.named_parameters(): param.requires_grad = False for name, param in self.video_frame_position_embedding.named_parameters(): param.requires_grad = False self.video_query_tokens.requires_grad = False logging.info('video_Qformer is frozen') else: for name, param in self.video_Qformer.named_parameters(): param.requires_grad = True for name, param in self.video_frame_position_embedding.named_parameters(): param.requires_grad = True self.video_query_tokens.requires_grad = True logging.info('video_Qformer is not frozen') self.Qformer_input = Qformer_input logging.info('create short-memory buffer') self.short_memory_length = short_memory_length self.short_memory_buffer = 
[] self.short_memory_merge = short_memory_merge self.temp_short_memory = [] logging.info('create long-memory buffer') self.long_memory_length = long_memory_length self.long_memory_buffer = [] logging.info('whether Question the whole video') self.middle_video =False self.question_minute = None self.question_second = None def vit_to_cpu(self): self.ln_vision.to("cpu") self.ln_vision.float() self.visual_encoder.to("cpu") self.visual_encoder.float() def encode_short_memory_frame(self, videofragment, n_frame:int = 16): device = videofragment.device # input shape b,c,t,h,w batch_size,_,time_length,_,_ = videofragment.size() # batch_size:1 time_length:8 videofragment = einops.rearrange(videofragment, 'b c t h w -> (b t) c h w') with self.maybe_autocast(): # embed image features with blip2, out: (b t) q h image_embeds = self.ln_vision(self.visual_encoder(videofragment)).to(device) image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_output = self.Qformer.bert( query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=True, ) # load short_memory_buffer cur_frame = 0 q_hidden_state = query_output.last_hidden_state for frame in q_hidden_state: if cur_frame < n_frame: if len(self.short_memory_buffer) == self.short_memory_length: self.short_memory_buffer.pop(0) self.short_memory_buffer.append(frame) cur_frame += 1 self.temp_short_memory = [] for i in self.short_memory_buffer: self.temp_short_memory.append(i) #merge short_memory_frames similar_list = [] for frame_i in range(len(self.short_memory_buffer) -1): frame_silimar = cosine(self.short_memory_buffer[frame_i].flatten().cpu(), self.short_memory_buffer[frame_i+1].flatten().cpu()) similar_list.append(frame_silimar) while len(self.short_memory_buffer) > self.short_memory_merge: max_value = max(similar_list) max_index = similar_list.index(max_value) new_frame_feature = (self.short_memory_buffer[max_index].cpu()+self.short_memory_buffer[max_index+1].cpu())/2 self.short_memory_buffer[max_index] = new_frame_feature.cuda() del(self.short_memory_buffer[max_index+1]) similar_list = [] for frame_i in range(len(self.short_memory_buffer)-1): frame_silimar = cosine(self.short_memory_buffer[frame_i].flatten().cpu(), self.short_memory_buffer[frame_i+1].flatten().cpu()) similar_list.append(frame_silimar) for frame in self.short_memory_buffer: self.long_memory_buffer.append(frame) def encode_long_video(self, cur_image, middle_video:False): device = 'cuda:0' # input shape b,c,t,h,w batch_size = 1 # batch_size:1 self.long_memory_buffer = [i.unsqueeze(0) for i in self.long_memory_buffer] # expand position embedding n_position = 8 position_ids = torch.arange(n_position).long().to(self.query_tokens.device) position_ids = position_ids.unsqueeze(0).expand(batch_size, -1) p = self.video_frame_position_embedding(position_ids).squeeze(0) frame_position_embeddings = p.unsqueeze(-2) u = [] alpha = 0.01 for p_i in p: u_i = (p_i-alpha * p[0])/(1-alpha) u.append(u_i) # calculate the position_embedding frame_position_embeddings = [] for i in range(n_position): for j in range(n_position): q_i = alpha * u[i] + (1-alpha) * u[j] q_i = q_i.unsqueeze(0) frame_position_embeddings.append(q_i) frame_position_embeddings = torch.cat(frame_position_embeddings, dim = 0) if middle_video: cur_long_length = len(self.long_memory_buffer) cur_short_length = len(self.temp_short_memory) while (cur_long_length+cur_short_length+1) > self.max_frame_pos: 
self.temp_short_memory.pop(0) if len(self.long_memory_buffer) == 0: self.temp_short_memory = [i.unsqueeze(0) for i in self.temp_short_memory] cur_short = torch.cat(self.temp_short_memory, dim = 0) video_features = torch.cat([video_features, cur_image], dim = 0) else: cur_video = torch.cat(self.long_memory_buffer,dim = 0) self.temp_short_memory = [i.unsqueeze(0) for i in self.temp_short_memory] cur_short = torch.cat(self.temp_short_memory, dim = 0) video_features = torch.cat([cur_video,cur_short], dim = 0) video_features = torch.cat([video_features, cur_image], dim = 0) cur_video = [] cur_pos = [] for i in range(len(video_features)): cur_pos.append(frame_position_embeddings[i]) cur_video.append(video_features[i]) cur_pos = [j.unsqueeze(0) for j in cur_pos] cur_video = [j.unsqueeze(0) for j in cur_video] cur_position_embeddings = torch.cat(cur_pos, dim=0) cur_position_embeddings = cur_position_embeddings.unsqueeze(-2) cur_position_embeddings = cur_position_embeddings.unsqueeze(0) frame_hidden_state = torch.cat(cur_video, dim=0) frame_hidden_state = einops.rearrange(frame_hidden_state, '(b t) q h -> b t q h', b=batch_size, t=len(video_features)) frame_hidden_state = cur_position_embeddings + frame_hidden_state # frame attention frame_hidden_state = einops.rearrange(frame_hidden_state, 'b t q h -> b (t q) h',b=batch_size,t=len(video_features)) frame_atts = torch.ones(frame_hidden_state.size()[:-1], dtype=torch.long).to(device) video_query_tokens = self.video_query_tokens.expand(frame_hidden_state.shape[0], -1, -1) # a video Q-former to aggregate frame-level representations video_query_output = self.video_Qformer.bert( query_embeds=video_query_tokens, encoder_hidden_states=frame_hidden_state, encoder_attention_mask=frame_atts, return_dict=True, ) video_hiddens=video_query_output.last_hidden_state # a linear layer to project the output video representations into the same dimension as the text embeddings of LLMs inputs_llama = self.llama_proj(video_hiddens) atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(device) return inputs_llama, atts_llama else: cur_video = [] cur_pos = [] for i in range(len(self.long_memory_buffer)): cur_pos.append(frame_position_embeddings[i]) cur_video.append(self.long_memory_buffer[i]) cur_pos = [j.unsqueeze(0) for j in cur_pos] cur_position_embeddings = torch.cat(cur_pos, dim=0) cur_position_embeddings = cur_position_embeddings.unsqueeze(-2) cur_position_embeddings = cur_position_embeddings.unsqueeze(0) frame_hidden_state = torch.cat(cur_video, dim=0) #[1,32,768] frame_hidden_state = einops.rearrange(frame_hidden_state, '(b t) q h -> b t q h', b=batch_size, t=len(self.long_memory_buffer)) #[64,32,768] frame_hidden_state = cur_position_embeddings + frame_hidden_state # frame attention frame_hidden_state = einops.rearrange(frame_hidden_state, 'b t q h -> b (t q) h',b=batch_size,t=len(self.long_memory_buffer)) frame_atts = torch.ones(frame_hidden_state.size()[:-1], dtype=torch.long).to(device) video_query_tokens = self.video_query_tokens.expand(frame_hidden_state.shape[0], -1, -1) # a video Q-former to aggregate frame-level representations video_query_output = self.video_Qformer.bert( query_embeds=video_query_tokens, encoder_hidden_states=frame_hidden_state, encoder_attention_mask=frame_atts, return_dict=True, ) video_hiddens=video_query_output.last_hidden_state # a linear layer to project the output video representations into the same dimension as the text embeddings of LLMs inputs_llama = self.llama_proj(video_hiddens) atts_llama = 
torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(device) return inputs_llama, atts_llama def encode_image(self, image): device = 'cuda:0' image = einops.rearrange(image, 'b c t h w -> (b t) c h w') with self.maybe_autocast(): # embed image features with blip2, out: (b t) q h image_embeds = self.ln_vision(self.visual_encoder(image)).to(device) image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_output = self.Qformer.bert( query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=True, ) q_hidden_state = query_output.last_hidden_state return q_hidden_state def encode_videoQformer_visual(self, image): device = image.device # input shape b,c,t,h,w batch_size,_,time_length,_,_ = image.size() # batch_size:1 time_length:8 image = einops.rearrange(image, 'b c t h w -> (b t) c h w') with self.maybe_autocast(): # embed image features with blip2, out: (b t) q h image_embeds = self.ln_vision(self.visual_encoder(image)).to(device) image_atts = torch.ones(image_embeds.size()[:-1], dtype=torch.long).to(device) query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1) query_output = self.Qformer.bert( query_embeds=query_tokens, encoder_hidden_states=image_embeds, encoder_attention_mask=image_atts, return_dict=True, ) q_hidden_state = query_output.last_hidden_state # merge after every frame added for frame in q_hidden_state: self.long_memory_buffer.append(frame) similar_list = [] for frame_i in range(self.long_memory_length): similar_list.append(cosine(self.long_memory_buffer[frame_i].flatten().cpu(), self.long_memory_buffer[frame_i+1].flatten().cpu())) while len(self.long_memory_buffer) > self.long_memory_length: max_value = max(similar_list) max_index = similar_list.index(max_value) new_frame_feature = (self.long_memory_buffer[max_index].cpu()+self.long_memory_buffer[max_index+1].cpu())/2 self.long_memory_buffer[max_index] = new_frame_feature.cuda() del(self.long_memory_buffer[max_index+1]) similar_list = [] for frame_i in range(len(self.long_memory_buffer)-1): similar_list.append(1-cosine(self.long_memory_buffer[frame_i].flatten().cpu(), self.long_memory_buffer[frame_i+1].flatten().cpu())) # a position embedding layer to inject temporal information into video frames if self.whole_video: # add frame_pos embedding self.long_memory_buffer = [i.unsqueeze(0) for i in self.long_memory_buffer] for i in self.long_memory_buffer: while len(i.shape) > 3: i = i.squeeze(0) import pdb;pdb.set_trace() frame_hidden_state = torch.cat(self.long_memory_buffer,dim = 0) position_ids = torch.arange(self.long_memory_length, dtype=torch.long, device=query_tokens.device) position_ids = position_ids.unsqueeze(0).expand(batch_size, -1) frame_position_embeddings = self.video_frame_position_embedding(position_ids) frame_position_embeddings = frame_position_embeddings.unsqueeze(-2) frame_hidden_state = einops.rearrange(frame_hidden_state, '(b t) q h -> b t q h',b=batch_size,t=self.long_memory_length) frame_hidden_state = frame_position_embeddings + frame_hidden_state # frame attention frame_hidden_state = einops.rearrange(frame_hidden_state, 'b t q h -> b (t q) h',b=batch_size,t=self.long_memory_length) frame_atts = torch.ones(frame_hidden_state.size()[:-1], dtype=torch.long).to(device) video_query_tokens = self.video_query_tokens.expand(frame_hidden_state.shape[0], -1, -1) # a video Q-former to aggregate frame-level representations video_query_output = 
self.video_Qformer.bert( query_embeds=video_query_tokens, encoder_hidden_states=frame_hidden_state, encoder_attention_mask=frame_atts, return_dict=True, ) video_hidden = video_query_output.last_hidden_state # a linear layer to project the output video representations into the same dimension as the text embeddings of LLMs inputs_llama = self.llama_proj(video_hidden) atts_llama = torch.ones(inputs_llama.size()[:-1], dtype=torch.long).to(image_embeds.device) return inputs_llama, atts_llama def prompt_wrap(self, img_embeds, atts_img, prompt): if prompt: batch_size = img_embeds.shape[0] p_before, p_after = prompt.split('<ImageHere>') p_before_tokens = self.llama_tokenizer( p_before, return_tensors="pt", add_special_tokens=False).to(img_embeds.device) p_after_tokens = self.llama_tokenizer( p_after, return_tensors="pt", add_special_tokens=False).to(img_embeds.device) p_before_embeds = self.llama_model.model.embed_tokens(p_before_tokens.input_ids).expand(batch_size, -1, -1) p_after_embeds = self.llama_model.model.embed_tokens(p_after_tokens.input_ids).expand(batch_size, -1, -1) wrapped_img_embeds = torch.cat([p_before_embeds, img_embeds, p_after_embeds], dim=1) wrapped_atts_img = atts_img[:, :1].expand(-1, wrapped_img_embeds.shape[1]) return wrapped_img_embeds, wrapped_atts_img else: return img_embeds, atts_img def forward(self, samples): import pdb;pdb.set_trace() if 'conv_type' in samples.keys() and samples['conv_type']=='multi': im_patch_token_id = self.IMAGE_PATCH_TOKEN_ID image = samples["images"] input_ids = samples['input_ids'] if len(image.size())==4: time = 1 image = einops.repeat(image, 'b c h w -> b c t h w',t = time) num_patch_tokens = self.num_video_query_token img_embeds, atts_img = self.encode_videoQformer_visual(image) temp_input_ids = copy.deepcopy(input_ids) # just copy input_ids temp_input_ids[temp_input_ids == im_patch_token_id] = 0 temp_input_embedding = self.llama_model.model.embed_tokens(temp_input_ids) new_input_embeds=[] cur_image_idx = 0 for cur_input_ids, cur_input_embeds in zip(input_ids, temp_input_embedding): cur_image_features = img_embeds[cur_image_idx] if (cur_input_ids == im_patch_token_id).sum() != num_patch_tokens: raise ValueError("The number of image patch tokens should be the same as the number of image patches.") masked_indices = torch.where(cur_input_ids == im_patch_token_id)[0] mask_index_start = masked_indices[0] if (masked_indices != torch.arange(mask_index_start, mask_index_start+num_patch_tokens, device=masked_indices.device, dtype=masked_indices.dtype)).any(): raise ValueError("The image patch tokens should be consecutive.") cur_new_input_embeds = torch.cat((cur_input_embeds[:mask_index_start], cur_image_features, cur_input_embeds[mask_index_start+num_patch_tokens:]), dim=0) new_input_embeds.append(cur_new_input_embeds) cur_image_idx+=1 inputs_embeds = torch.stack(new_input_embeds, dim=0) targets = samples['labels'] attention_mask = samples['attention_mask'] with self.maybe_autocast(): outputs = self.llama_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, return_dict=True, labels=targets, ) loss = outputs.loss return {"loss": loss} else: image = samples["image"] if len(image.size()) != 5: time = 1 image = einops.repeat(image, 'b c h w -> b c t h w',t = time) img_embeds, atts_img = self.encode_videoQformer_visual(image) if self.prompt_list: prompt = random.choice(self.prompt_list) img_embeds, atts_img = self.prompt_wrap(img_embeds, atts_img, prompt) self.llama_tokenizer.padding_side = "right" text = [t + self.end_sym for t in 
samples["text_input"]] to_regress_tokens = self.llama_tokenizer( text, return_tensors="pt", padding="longest", truncation=True, max_length=self.max_txt_len, add_special_tokens=False ).to(image.device) targets = to_regress_tokens.input_ids.masked_fill( to_regress_tokens.input_ids == self.llama_tokenizer.pad_token_id, -100 ) empty_targets = ( torch.ones([atts_img.shape[0], atts_img.shape[1]+1], dtype=torch.long).to(image.device).fill_(-100) # plus one for bos ) targets = torch.cat([empty_targets, targets], dim=1) batch_size = img_embeds.shape[0] bos = torch.ones([batch_size, 1], dtype=to_regress_tokens.input_ids.dtype, device=to_regress_tokens.input_ids.device) * self.llama_tokenizer.bos_token_id bos_embeds = self.llama_model.model.embed_tokens(bos) atts_bos = atts_img[:, :1] to_regress_embeds = self.llama_model.model.embed_tokens(to_regress_tokens.input_ids) inputs_embeds = torch.cat([bos_embeds, img_embeds, to_regress_embeds], dim=1) attention_mask = torch.cat([atts_bos, atts_img, to_regress_tokens.attention_mask], dim=1) with self.maybe_autocast(): outputs = self.llama_model( inputs_embeds=inputs_embeds, attention_mask=attention_mask, return_dict=True, labels=targets, ) loss = outputs.loss return {"loss": loss} @classmethod def from_config(cls, cfg): vit_model = cfg.get("vit_model", "eva_clip_g") q_former_model = cfg.get("q_former_model", "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/blip2_pretrained_flant5xxl.pth") img_size = cfg.get("image_size") num_query_token = cfg.get("num_query_token") llama_model = cfg.get("llama_model") drop_path_rate = cfg.get("drop_path_rate", 0) use_grad_checkpoint = cfg.get("use_grad_checkpoint", False) vit_precision = cfg.get("vit_precision", "fp16") freeze_vit = cfg.get("freeze_vit", True) freeze_qformer = cfg.get("freeze_qformer", True) low_resource = cfg.get("low_resource", False) device_8bit = cfg.get("device_8bit", 0) prompt_path = cfg.get("prompt_path", "") prompt_template = cfg.get("prompt_template", "") max_txt_len = cfg.get("max_txt_len", 32) end_sym = cfg.get("end_sym", '\n') frozen_llama_proj = cfg.get("frozen_llama_proj", True) frozen_video_Qformer = cfg.get("frozen_video_Qformer", True) llama_proj_model = cfg.get("llama_proj_model", '') fusion_header_type = cfg.get("fusion_header_type", 'seqTransf') max_frame_pos = cfg.get("max_frame_pos", 32) fusion_head_layers = cfg.get("fusion_head_layers", 2) num_video_query_token = cfg.get("num_video_query_token", 32) model = cls( vit_model=vit_model, q_former_model=q_former_model, img_size=img_size, drop_path_rate=drop_path_rate, use_grad_checkpoint=use_grad_checkpoint, vit_precision=vit_precision, freeze_vit=freeze_vit, freeze_qformer=freeze_qformer, num_query_token=num_query_token, llama_model=llama_model, prompt_path=prompt_path, prompt_template=prompt_template, max_txt_len=max_txt_len, end_sym=end_sym, low_resource=low_resource, device_8bit=device_8bit, fusion_header_type=fusion_header_type, max_frame_pos=max_frame_pos, fusion_head_layers=fusion_head_layers, frozen_llama_proj=frozen_llama_proj, frozen_video_Qformer=frozen_video_Qformer, num_video_query_token=num_video_query_token, ) ckpt_path = cfg.get("ckpt", "") # load weights of MiniGPT-4 if ckpt_path: print("Load first Checkpoint: {}".format(ckpt_path)) ckpt = torch.load(ckpt_path, map_location="cpu") msg = model.load_state_dict(ckpt['model'], strict=False) ckpt_path_2 = cfg.get("ckpt_2", "") if ckpt_path_2: print("Load second Checkpoint: {}".format(ckpt_path_2)) ckpt = torch.load(ckpt_path_2, map_location="cpu") 
            msg = model.load_state_dict(ckpt['model'], strict=False)

        return model
MovieChat-main
MovieChat/models/moviechat.py
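The heart of moviechat.py is memory consolidation: when the short- or long-term buffer exceeds its budget, the two adjacent frame features judged most redundant are averaged into one (note that the code above mixes scipy's cosine distance with similarity when scoring pairs). A standalone sketch of that merge loop, consistently merging the most similar adjacent pair, on random Q-Former-shaped features; buffer sizes here are illustrative only:

import torch
import torch.nn.functional as F

def merge_adjacent_frames(buffer, target_length):
    # Repeatedly average the most similar pair of adjacent frame features until
    # the buffer fits the target length, mirroring the merge loops in
    # encode_short_memory_frame / encode_videoQformer_visual.
    buffer = [f.clone() for f in buffer]
    while len(buffer) > target_length:
        flat = torch.stack([f.flatten() for f in buffer])        # (T, q*h)
        sims = F.cosine_similarity(flat[:-1], flat[1:], dim=-1)  # adjacent-pair similarity
        i = int(torch.argmax(sims))                              # most redundant pair
        buffer[i] = (buffer[i] + buffer[i + 1]) / 2              # average the pair
        del buffer[i + 1]
    return buffer

frames = [torch.randn(32, 768) for _ in range(18)]  # e.g. a short memory of 18 frames
merged = merge_adjacent_frames(frames, target_length=2)
print(len(merged), merged[0].shape)  # 2 torch.Size([32, 768])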
""" Adapted from salesforce@LAVIS. Below is the original copyright: Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from dataclasses import dataclass from typing import Optional import torch from transformers.modeling_outputs import ( ModelOutput, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, ) @dataclass class BlipSimilarity(ModelOutput): sim_i2t: torch.FloatTensor = None sim_t2i: torch.FloatTensor = None sim_i2t_m: Optional[torch.FloatTensor] = None sim_t2i_m: Optional[torch.FloatTensor] = None sim_i2t_targets: Optional[torch.FloatTensor] = None sim_t2i_targets: Optional[torch.FloatTensor] = None @dataclass class BlipIntermediateOutput(ModelOutput): """ Data class for intermediate outputs of BLIP models. """ image_embeds: torch.FloatTensor = None text_embeds: Optional[torch.FloatTensor] = None image_embeds_m: Optional[torch.FloatTensor] = None text_embeds_m: Optional[torch.FloatTensor] = None encoder_output: Optional[BaseModelOutputWithPoolingAndCrossAttentions] = None encoder_output_neg: Optional[BaseModelOutputWithPoolingAndCrossAttentions] = None itm_logits: Optional[torch.FloatTensor] = None itm_labels: Optional[torch.LongTensor] = None decoder_output: Optional[CausalLMOutputWithCrossAttentions] = None decoder_labels: Optional[torch.LongTensor] = None @dataclass class BlipOutput(ModelOutput): sims: Optional[BlipSimilarity] = None intermediate_output: BlipIntermediateOutput = None loss: Optional[torch.FloatTensor] = None loss_itc: Optional[torch.FloatTensor] = None loss_itm: Optional[torch.FloatTensor] = None loss_lm: Optional[torch.FloatTensor] = None @dataclass class BlipOutputFeatures(ModelOutput): """ Data class of features from BlipFeatureExtractor. """ image_embeds: Optional[torch.FloatTensor] = None image_embeds_proj: Optional[torch.FloatTensor] = None text_embeds: Optional[torch.FloatTensor] = None text_embeds_proj: Optional[torch.FloatTensor] = None multimodal_embeds: Optional[torch.FloatTensor] = None
MovieChat-main
MovieChat/models/blip2_outputs.py
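These are ordinary transformers ModelOutput dataclasses, so every field is optional and accessible by attribute. A tiny construction sketch with dummy tensors, assuming the MovieChat package is importable:

import torch
from MovieChat.models.blip2_outputs import BlipOutput, BlipSimilarity

# Dummy values, only to show how the dataclass is assembled and read back.
out = BlipOutput(
    loss=torch.tensor(2.5),
    loss_itc=torch.tensor(1.0),
    loss_itm=torch.tensor(0.7),
    loss_lm=torch.tensor(0.8),
    sims=BlipSimilarity(
        sim_i2t=torch.randn(4, 4),
        sim_t2i=torch.randn(4, 4),
    ),
)
print(out.loss, out.sims.sim_i2t.shape)  # tensor(2.5000) torch.Size([4, 4])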
# Based on EVA, BEIT, timm and DeiT code bases # https://github.com/baaivision/EVA # https://github.com/rwightman/pytorch-image-models/tree/master/timm # https://github.com/microsoft/unilm/tree/master/beit # https://github.com/facebookresearch/deit/ # https://github.com/facebookresearch/dino # --------------------------------------------------------' import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.models.layers import drop_path, to_2tuple, trunc_normal_ from timm.models.registry import register_model from MovieChat.common.dist_utils import download_cached_file def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) def extra_repr(self) -> str: return 'p={}'.format(self.drop_prob) class Mlp(nn.Module): def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): super().__init__() out_features = out_features or in_features hidden_features = hidden_features or in_features self.fc1 = nn.Linear(in_features, hidden_features) self.act = act_layer() self.fc2 = nn.Linear(hidden_features, out_features) self.drop = nn.Dropout(drop) def forward(self, x): x = self.fc1(x) x = self.act(x) # x = self.drop(x) # commit this for the orignal BERT implement x = self.fc2(x) x = self.drop(x) return x class Attention(nn.Module): def __init__( self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., window_size=None, attn_head_dim=None): super().__init__() self.num_heads = num_heads head_dim = dim // num_heads if attn_head_dim is not None: head_dim = attn_head_dim all_head_dim = head_dim * self.num_heads self.scale = qk_scale or head_dim ** -0.5 self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False) if qkv_bias: self.q_bias = nn.Parameter(torch.zeros(all_head_dim)) self.v_bias = nn.Parameter(torch.zeros(all_head_dim)) else: self.q_bias = None self.v_bias = None if window_size: self.window_size = window_size self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 self.relative_position_bias_table = nn.Parameter( torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH # cls to token & token 2 cls & cls to cls # get pair-wise relative position index for each token inside the window coords_h = torch.arange(window_size[0]) coords_w = torch.arange(window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += window_size[1] - 1 relative_coords[:, :, 0] *= 2 * window_size[1] - 1 relative_position_index = \ torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype) relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww relative_position_index[0, 0:] = 
self.num_relative_distance - 3 relative_position_index[0:, 0] = self.num_relative_distance - 2 relative_position_index[0, 0] = self.num_relative_distance - 1 self.register_buffer("relative_position_index", relative_position_index) else: self.window_size = None self.relative_position_bias_table = None self.relative_position_index = None self.attn_drop = nn.Dropout(attn_drop) self.proj = nn.Linear(all_head_dim, dim) self.proj_drop = nn.Dropout(proj_drop) def forward(self, x, rel_pos_bias=None): B, N, C = x.shape qkv_bias = None if self.q_bias is not None: qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias)) # qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias) qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4) q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) q = q * self.scale attn = (q @ k.transpose(-2, -1)) if self.relative_position_bias_table is not None: relative_position_bias = \ self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww attn = attn + relative_position_bias.unsqueeze(0) if rel_pos_bias is not None: attn = attn + rel_pos_bias attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, -1) x = self.proj(x) x = self.proj_drop(x) return x class Block(nn.Module): def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0., drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm, window_size=None, attn_head_dim=None): super().__init__() self.norm1 = norm_layer(dim) self.attn = Attention( dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim) # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() self.norm2 = norm_layer(dim) mlp_hidden_dim = int(dim * mlp_ratio) self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) if init_values is not None and init_values > 0: self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True) self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True) else: self.gamma_1, self.gamma_2 = None, None def forward(self, x, rel_pos_bias=None): if self.gamma_1 is None: x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias)) x = x + self.drop_path(self.mlp(self.norm2(x))) else: x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias)) x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x))) return x class PatchEmbed(nn.Module): """ Image to Patch Embedding """ def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): super().__init__() img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1]) self.img_size = img_size self.patch_size = patch_size self.num_patches = num_patches self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) def forward(self, x, **kwargs): B, C, H, W = x.shape # FIXME look at relaxing size constraints assert H == self.img_size[0] and W == self.img_size[1], \ f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})." x = self.proj(x).flatten(2).transpose(1, 2) return x class RelativePositionBias(nn.Module): def __init__(self, window_size, num_heads): super().__init__() self.window_size = window_size self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 self.relative_position_bias_table = nn.Parameter( torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH # cls to token & token 2 cls & cls to cls # get pair-wise relative position index for each token inside the window coords_h = torch.arange(window_size[0]) coords_w = torch.arange(window_size[1]) coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += window_size[1] - 1 relative_coords[:, :, 0] *= 2 * window_size[1] - 1 relative_position_index = \ torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype) relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww relative_position_index[0, 0:] = self.num_relative_distance - 3 relative_position_index[0:, 0] = self.num_relative_distance - 2 relative_position_index[0, 0] = self.num_relative_distance - 1 self.register_buffer("relative_position_index", relative_position_index) # trunc_normal_(self.relative_position_bias_table, std=.02) def forward(self): relative_position_bias = \ self.relative_position_bias_table[self.relative_position_index.view(-1)].view( self.window_size[0] * self.window_size[1] + 1, self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww class VisionTransformer(nn.Module): """ Vision Transformer with support 
for patch or hybrid CNN input stage """ def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_mean_pooling=True, init_scale=0.001, use_checkpoint=False): super().__init__() self.image_size = img_size self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_abs_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=drop_rate) if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) else: self.rel_pos_bias = None self.use_checkpoint = use_checkpoint dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.use_rel_pos_bias = use_rel_pos_bias self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None) for i in range(depth)]) # self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) # self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None # self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) # trunc_normal_(self.mask_token, std=.02) self.apply(self._init_weights) self.fix_init_weight() def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=''): self.num_classes = num_classes self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.patch_embed(x) batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for blk in self.blocks: if self.use_checkpoint: x = checkpoint.checkpoint(blk, x, rel_pos_bias) else: x = blk(x, rel_pos_bias) return x def forward(self, x): x = self.forward_features(x) return x def get_intermediate_layers(self, x): x = self.patch_embed(x) batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, 
-1) # stole cls_tokens impl from Phil Wang, thanks x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) features = [] rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for blk in self.blocks: x = blk(x, rel_pos_bias) features.append(x) return features def interpolate_pos_embed(model, checkpoint_model): if 'pos_embed' in checkpoint_model: pos_embed_checkpoint = checkpoint_model['pos_embed'].float() embedding_size = pos_embed_checkpoint.shape[-1] num_patches = model.patch_embed.num_patches num_extra_tokens = model.pos_embed.shape[-2] - num_patches # height (== width) for the checkpoint position embedding orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) # height (== width) for the new position embedding new_size = int(num_patches ** 0.5) # class_token and dist_token are kept unchanged if orig_size != new_size: print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size)) extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] # only the position tokens are interpolated pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) pos_tokens = torch.nn.functional.interpolate( pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False) pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) checkpoint_model['pos_embed'] = new_pos_embed def convert_weights_to_fp16(model: nn.Module): """Convert applicable model parameters to fp16""" def _convert_weights_to_fp16(l): if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)): l.weight.data = l.weight.data.half() if l.bias is not None: l.bias.data = l.bias.data.half() model.apply(_convert_weights_to_fp16) def create_eva_vit_g(img_size=224,drop_path_rate=0.4,use_checkpoint=False,precision="fp16"): model = VisionTransformer( img_size=img_size, patch_size=14, use_mean_pooling=False, embed_dim=1408, depth=39, num_heads=1408//88, mlp_ratio=4.3637, qkv_bias=True, drop_path_rate=drop_path_rate, norm_layer=partial(nn.LayerNorm, eps=1e-6), use_checkpoint=use_checkpoint, ) url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/eva_vit_g.pth" cached_file = download_cached_file( url, check_hash=False, progress=True ) state_dict = torch.load(cached_file, map_location="cpu") interpolate_pos_embed(model,state_dict) incompatible_keys = model.load_state_dict(state_dict, strict=False) if precision == "fp16": convert_weights_to_fp16(model) return model
MovieChat-main
MovieChat/models/eva_vit.py
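create_eva_vit_g uses 14x14 patches, so a 224x224 input gives a 16x16 grid of patch tokens plus one CLS token, and interpolate_pos_embed bicubically resizes a checkpoint's positional grid when the image size changes. A small sketch of that resizing arithmetic on random weights (no checkpoint download; sizes are illustrative):

import torch

# Position-embedding interpolation in the style of interpolate_pos_embed:
# keep the extra (CLS) tokens, bicubically resize the patch grid.
embed_dim, num_extra = 1408, 1
old_grid, new_grid = 16, 32            # 224/14 = 16 patches per side; 448/14 = 32

pos_embed = torch.randn(1, num_extra + old_grid * old_grid, embed_dim)
extra = pos_embed[:, :num_extra]
patch = pos_embed[:, num_extra:].reshape(1, old_grid, old_grid, embed_dim).permute(0, 3, 1, 2)
patch = torch.nn.functional.interpolate(
    patch, size=(new_grid, new_grid), mode="bicubic", align_corners=False
)
patch = patch.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra, patch), dim=1)
print(new_pos_embed.shape)  # torch.Size([1, 1025, 1408]) = 1 CLS + 32*32 patches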
""" Adapted from salesforce@LAVIS Vision-CAIR@MiniGPT-4. Below is the original copyright: Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import torch from omegaconf import OmegaConf from MovieChat.common.registry import registry from MovieChat.models.base_model import BaseModel from MovieChat.models.blip2 import Blip2Base from MovieChat.models.moviechat import MovieChat from MovieChat.processors.base_processor import BaseProcessor __all__ = [ "load_model", "BaseModel", "Blip2Base", "MovieChat" ] def load_model(name, model_type, is_eval=False, device="cpu", checkpoint=None): """ Load supported models. """ model = registry.get_model_class(name).from_pretrained(model_type=model_type) if checkpoint is not None: model.load_checkpoint(checkpoint) if is_eval: model.eval() if device == "cpu": model = model.float() return model.to(device) def load_preprocess(config): """ Load preprocessor configs and construct preprocessors. If no preprocessor is specified, return BaseProcessor, which does not do any preprocessing. """ def _build_proc_from_cfg(cfg): return ( registry.get_processor_class(cfg.name).from_config(cfg) if cfg is not None else BaseProcessor() ) vis_processors = dict() txt_processors = dict() vis_proc_cfg = config.get("vis_processor") txt_proc_cfg = config.get("text_processor") if vis_proc_cfg is not None: vis_train_cfg = vis_proc_cfg.get("train") vis_eval_cfg = vis_proc_cfg.get("eval") else: vis_train_cfg = None vis_eval_cfg = None vis_processors["train"] = _build_proc_from_cfg(vis_train_cfg) vis_processors["eval"] = _build_proc_from_cfg(vis_eval_cfg) if txt_proc_cfg is not None: txt_train_cfg = txt_proc_cfg.get("train") txt_eval_cfg = txt_proc_cfg.get("eval") else: txt_train_cfg = None txt_eval_cfg = None txt_processors["train"] = _build_proc_from_cfg(txt_train_cfg) txt_processors["eval"] = _build_proc_from_cfg(txt_eval_cfg) return vis_processors, txt_processors def load_model_and_preprocess(name, model_type, is_eval=False, device="cpu"): """ Load model and its related preprocessors. """ model_cls = registry.get_model_class(name) # load model model = model_cls.from_pretrained(model_type=model_type) if is_eval: model.eval() # load preprocess cfg = OmegaConf.load(model_cls.default_config_path(model_type)) if cfg is not None: preprocess_cfg = cfg.preprocess vis_processors, txt_processors = load_preprocess(preprocess_cfg) else: vis_processors, txt_processors = None, None logging.info( f"""No default preprocess for model {name} ({model_type}). This can happen if the model is not finetuned on downstream datasets, or it is not intended for direct use without finetuning. """ ) if device == "cpu" or device == torch.device("cpu"): model = model.float() return model.to(device), vis_processors, txt_processors class ModelZoo: """ A utility class to create string representation of available model architectures and types. 
""" def __init__(self) -> None: self.model_zoo = { k: list(v.PRETRAINED_MODEL_CONFIG_DICT.keys()) for k, v in registry.mapping["model_name_mapping"].items() } def __str__(self) -> str: return ( "=" * 50 + "\n" + f"{'Architectures':<30} {'Types'}\n" + "=" * 50 + "\n" + "\n".join( [ f"{name:<30} {', '.join(types)}" for name, types in self.model_zoo.items() ] ) ) def __iter__(self): return iter(self.model_zoo.items()) def __len__(self): return sum([len(v) for v in self.model_zoo.values()]) model_zoo = ModelZoo()
MovieChat-main
MovieChat/models/__init__.py
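The helpers above are the public entry points for building a registered model together with its processors. A hedged usage sketch: it assumes the MovieChat repo, its YAML configs, and the referenced Q-Former/LLaMA weights are all available locally, and that the registered name/type pair is "moviechat"/"pretrain_vicuna" as declared in PRETRAINED_MODEL_CONFIG_DICT:

from MovieChat.models import load_model_and_preprocess, model_zoo

# List registered architectures and their config types.
print(model_zoo)

# Build the model plus its train/eval processors in one call.
# Weight and config paths are resolved from the YAML referenced by
# PRETRAINED_MODEL_CONFIG_DICT["pretrain_vicuna"], so this is heavy to run.
model, vis_processors, txt_processors = load_model_and_preprocess(
    name="moviechat",
    model_type="pretrain_vicuna",
    is_eval=True,
    device="cpu",
)
print(type(model).__name__, sorted(vis_processors))  # MovieChat ['eval', 'train']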
""" Adapted from salesforce@LAVIS. Below is the original copyright: Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import os import numpy as np import torch import torch.nn as nn from MovieChat.common.dist_utils import download_cached_file, is_dist_avail_and_initialized from MovieChat.common.utils import get_abs_path, is_url from omegaconf import OmegaConf class BaseModel(nn.Module): """Base class for models.""" def __init__(self): super().__init__() @property def device(self): return list(self.parameters())[0].device def load_checkpoint(self, url_or_filename): """ Load from a finetuned checkpoint. This should expect no mismatch in the model keys and the checkpoint keys. """ if is_url(url_or_filename): cached_file = download_cached_file( url_or_filename, check_hash=False, progress=True ) checkpoint = torch.load(cached_file, map_location="cpu") elif os.path.isfile(url_or_filename): checkpoint = torch.load(url_or_filename, map_location="cpu") else: raise RuntimeError("checkpoint url or path is invalid") if "model" in checkpoint.keys(): state_dict = checkpoint["model"] else: state_dict = checkpoint msg = self.load_state_dict(state_dict, strict=False) logging.info("Missing keys {}".format(msg.missing_keys)) logging.info("load checkpoint from %s" % url_or_filename) return msg @classmethod def from_pretrained(cls, model_type): """ Build a pretrained model from default configuration file, specified by model_type. """ model_cfg = OmegaConf.load(cls.default_config_path(model_type)).model model = cls.from_config(model_cfg) return model @classmethod def default_config_path(cls, model_type): assert ( model_type in cls.PRETRAINED_MODEL_CONFIG_DICT ), "Unknown model type {}".format(model_type) return get_abs_path(cls.PRETRAINED_MODEL_CONFIG_DICT[model_type]) def load_checkpoint_from_config(self, cfg, **kwargs): """ Load checkpoint as specified in the config file. """ load_finetuned = cfg.get("load_finetuned", True) if load_finetuned: finetune_path = cfg.get("finetuned", None) assert ( finetune_path is not None ), "Found load_finetuned is True, but finetune_path is None." self.load_checkpoint(url_or_filename=finetune_path) else: # load pre-trained weights pretrain_path = cfg.get("pretrained", None) assert "Found load_finetuned is False, but pretrain_path is None." self.load_from_pretrained(url_or_filename=pretrain_path, **kwargs) def before_evaluation(self, **kwargs): pass def show_n_params(self, return_str=True): tot = 0 for p in self.parameters(): w = 1 for x in p.shape: w *= x tot += w if return_str: if tot >= 1e6: return "{:.1f}M".format(tot / 1e6) else: return "{:.1f}K".format(tot / 1e3) else: return tot class BaseEncoder(nn.Module): """ Base class for primitive encoders, such as ViT, TimeSformer, etc. 
""" def __init__(self): super().__init__() def forward_features(self, samples, **kwargs): raise NotImplementedError @property def device(self): return list(self.parameters())[0].device class SharedQueueMixin: @torch.no_grad() def _dequeue_and_enqueue(self, image_feat, text_feat, idxs=None): # gather keys before updating queue image_feats = concat_all_gather(image_feat) text_feats = concat_all_gather(text_feat) batch_size = image_feats.shape[0] ptr = int(self.queue_ptr) assert self.queue_size % batch_size == 0 # for simplicity self.image_queue[:, ptr : ptr + batch_size] = image_feats.T self.text_queue[:, ptr : ptr + batch_size] = text_feats.T if idxs is not None: idxs = concat_all_gather(idxs) self.idx_queue[:, ptr : ptr + batch_size] = idxs.T ptr = (ptr + batch_size) % self.queue_size self.queue_ptr[0] = ptr class MomentumDistilationMixin: @torch.no_grad() def copy_params(self): for model_pair in self.model_pairs: for param, param_m in zip( model_pair[0].parameters(), model_pair[1].parameters() ): param_m.data.copy_(param.data) param_m.requires_grad = False @torch.no_grad() def _momentum_update(self): for model_pair in self.model_pairs: for param, param_m in zip( model_pair[0].parameters(), model_pair[1].parameters() ): param_m.data = param_m.data * self.momentum + param.data * ( 1.0 - self.momentum ) class GatherLayer(torch.autograd.Function): """ Gather tensors from all workers with support for backward propagation: This implementation does not cut the gradients as torch.distributed.all_gather does. """ @staticmethod def forward(ctx, x): output = [ torch.zeros_like(x) for _ in range(torch.distributed.get_world_size()) ] torch.distributed.all_gather(output, x) return tuple(output) @staticmethod def backward(ctx, *grads): all_gradients = torch.stack(grads) torch.distributed.all_reduce(all_gradients) return all_gradients[torch.distributed.get_rank()] def all_gather_with_grad(tensors): """ Performs all_gather operation on the provided tensors. Graph remains connected for backward grad computation. """ world_size = torch.distributed.get_world_size() if world_size == 1: return tensors tensor_all = GatherLayer.apply(tensors) return torch.cat(tensor_all, dim=0) @torch.no_grad() def concat_all_gather(tensor): """ Performs all_gather operation on the provided tensors. """ if not is_dist_avail_and_initialized(): return tensor tensors_gather = [ torch.ones_like(tensor) for _ in range(torch.distributed.get_world_size()) ] torch.distributed.all_gather(tensors_gather, tensor, async_op=False) output = torch.cat(tensors_gather, dim=0) return output def tile(x, dim, n_tile): init_dim = x.size(dim) repeat_idx = [1] * x.dim() repeat_idx[dim] = n_tile x = x.repeat(*(repeat_idx)) order_index = torch.LongTensor( np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)]) ) return torch.index_select(x, dim, order_index.to(x.device))
MovieChat-main
MovieChat/models/base_model.py
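Besides the checkpoint plumbing, base_model.py ships small tensor utilities; tile repeats each slice of a tensor n_tile times in place along a dimension, which matches torch.repeat_interleave. A quick check, assuming the MovieChat package is importable:

import torch
from MovieChat.models.base_model import tile

x = torch.tensor([[1, 2, 3],
                  [4, 5, 6]])
print(tile(x, dim=0, n_tile=2))
# tensor([[1, 2, 3],
#         [1, 2, 3],
#         [4, 5, 6],
#         [4, 5, 6]])
# Each row is repeated n_tile times consecutively, i.e. the same result as
# torch.repeat_interleave(x, repeats=2, dim=0).
print(torch.equal(tile(x, 0, 2), torch.repeat_interleave(x, 2, dim=0)))  # True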
#!/usr/bin/env python3 # Portions Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import gzip import html import io import math from functools import lru_cache from typing import Callable, List, Optional, Tuple import ftfy import numpy as np import regex as re import torch import torch.nn as nn from iopath.common.file_io import g_pathmgr from timm.models.layers import trunc_normal_ from .helpers import VerboseNNModule, cast_if_src_dtype def get_sinusoid_encoding_table(n_position, d_hid): """Sinusoid position encoding table""" # TODO: make it with torch instead of numpy def get_position_angle_vec(position): return [ position / np.power(10000, 2 * (hid_j // 2) / d_hid) for hid_j in range(d_hid) ] sinusoid_table = np.array( [get_position_angle_vec(pos_i) for pos_i in range(n_position)] ) sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1 return torch.FloatTensor(sinusoid_table).unsqueeze(0) def interpolate_pos_encoding_2d(target_spatial_size, pos_embed): N = pos_embed.shape[1] if N == target_spatial_size: return pos_embed dim = pos_embed.shape[-1] # nn.functional.interpolate doesn't work with bfloat16 so we cast to float32 pos_embed, updated = cast_if_src_dtype(pos_embed, torch.bfloat16, torch.float32) pos_embed = nn.functional.interpolate( pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute( 0, 3, 1, 2 ), scale_factor=math.sqrt(target_spatial_size / N), mode="bicubic", ) if updated: pos_embed, _ = cast_if_src_dtype(pos_embed, torch.float32, torch.bfloat16) pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return pos_embed def interpolate_pos_encoding( npatch_per_img, pos_embed, patches_layout, input_shape=None, first_patch_idx=1, ): assert first_patch_idx == 0 or first_patch_idx == 1, "there is 1 CLS token or none" N = pos_embed.shape[1] - first_patch_idx # since it's 1 if cls_token exists if npatch_per_img == N: return pos_embed assert ( patches_layout[-1] == patches_layout[-2] ), "Interpolation of pos embed not supported for non-square layouts" class_emb = pos_embed[:, :first_patch_idx] pos_embed = pos_embed[:, first_patch_idx:] if input_shape is None or patches_layout[0] == 1: # simple 2D pos embedding, no temporal component pos_embed = interpolate_pos_encoding_2d(npatch_per_img, pos_embed) elif patches_layout[0] > 1: # pos embed has a temporal component assert len(input_shape) == 4, "temporal interpolation not supported" # we only support 2D interpolation in this case num_frames = patches_layout[0] num_spatial_tokens = patches_layout[1] * patches_layout[2] pos_embed = pos_embed.view(1, num_frames, num_spatial_tokens, -1) # interpolate embedding for zeroth frame pos_embed = interpolate_pos_encoding_2d( npatch_per_img, pos_embed[0, 0, ...].unsqueeze(0) ) else: raise ValueError("This type of interpolation isn't implemented") return torch.cat((class_emb, pos_embed), dim=1) def _get_pos_embedding( npatch_per_img, pos_embed, patches_layout, input_shape, first_patch_idx=1, ): pos_embed = interpolate_pos_encoding( npatch_per_img, pos_embed, patches_layout, input_shape=input_shape, first_patch_idx=first_patch_idx, ) return pos_embed class PatchEmbedGeneric(nn.Module): """ PatchEmbed from Hydra """ def __init__(self, proj_stem, norm_layer: Optional[nn.Module] = None): super().__init__() if len(proj_stem) > 1: self.proj = 
nn.Sequential(*proj_stem) else: # Special case to be able to load pre-trained models that were # trained with a standard stem self.proj = proj_stem[0] self.norm_layer = norm_layer def get_patch_layout(self, img_size): with torch.no_grad(): dummy_img = torch.zeros( [ 1, ] + img_size ) dummy_out = self.proj(dummy_img) embed_dim = dummy_out.shape[1] patches_layout = tuple(dummy_out.shape[2:]) num_patches = np.prod(patches_layout) return patches_layout, num_patches, embed_dim def forward(self, x): x = self.proj(x) # B C (T) H W -> B (T)HW C x = x.flatten(2).transpose(1, 2) if self.norm_layer is not None: x = self.norm_layer(x) return x class SpatioTemporalPosEmbeddingHelper(VerboseNNModule): def __init__( self, patches_layout: List, num_patches: int, num_cls_tokens: int, embed_dim: int, learnable: bool, ) -> None: super().__init__() self.num_cls_tokens = num_cls_tokens self.patches_layout = patches_layout self.num_patches = num_patches self.num_tokens = num_cls_tokens + num_patches self.learnable = learnable if self.learnable: self.pos_embed = nn.Parameter(torch.zeros(1, self.num_tokens, embed_dim)) trunc_normal_(self.pos_embed, std=0.02) else: self.register_buffer( "pos_embed", get_sinusoid_encoding_table(self.num_tokens, embed_dim) ) def get_pos_embedding(self, vision_input, all_vision_tokens): input_shape = vision_input.shape pos_embed = _get_pos_embedding( all_vision_tokens.size(1) - self.num_cls_tokens, pos_embed=self.pos_embed, patches_layout=self.patches_layout, input_shape=input_shape, first_patch_idx=self.num_cls_tokens, ) return pos_embed class RGBDTPreprocessor(VerboseNNModule): def __init__( self, rgbt_stem: PatchEmbedGeneric, depth_stem: Optional[PatchEmbedGeneric], img_size: Tuple = (3, 224, 224), num_cls_tokens: int = 1, pos_embed_fn: Optional[Callable] = None, use_type_embed: bool = False, init_param_style: str = "openclip", ) -> None: super().__init__() stem = rgbt_stem if rgbt_stem is not None else depth_stem ( self.patches_layout, self.num_patches, self.embed_dim, ) = stem.get_patch_layout(img_size) self.rgbt_stem = rgbt_stem self.depth_stem = depth_stem self.use_pos_embed = pos_embed_fn is not None self.use_type_embed = use_type_embed self.num_cls_tokens = num_cls_tokens if self.use_pos_embed: self.pos_embedding_helper = pos_embed_fn( patches_layout=self.patches_layout, num_cls_tokens=num_cls_tokens, num_patches=self.num_patches, embed_dim=self.embed_dim, ) if self.num_cls_tokens > 0: self.cls_token = nn.Parameter( torch.zeros(1, self.num_cls_tokens, self.embed_dim) ) if self.use_type_embed: self.type_embed = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) self.init_parameters(init_param_style) @torch.no_grad() def init_parameters(self, init_param_style): if init_param_style == "openclip": # OpenCLIP style initialization scale = self.embed_dim**-0.5 if self.use_pos_embed: nn.init.normal_(self.pos_embedding_helper.pos_embed) self.pos_embedding_helper.pos_embed *= scale if self.num_cls_tokens > 0: nn.init.normal_(self.cls_token) self.cls_token *= scale elif init_param_style == "vit": self.cls_token.data.fill_(0) else: raise ValueError(f"Unknown init {init_param_style}") if self.use_type_embed: nn.init.normal_(self.type_embed) def tokenize_input_and_cls_pos(self, input, stem, mask): # tokens is of shape B x L x D tokens = stem(input) assert tokens.ndim == 3 assert tokens.shape[2] == self.embed_dim B = tokens.shape[0] if self.num_cls_tokens > 0: class_tokens = self.cls_token.expand( B, -1, -1 ) # stole class_tokens impl from Phil Wang, thanks tokens = torch.cat((class_tokens, 
tokens), dim=1) if self.use_pos_embed: pos_embed = self.pos_embedding_helper.get_pos_embedding(input, tokens) tokens = tokens + pos_embed if self.use_type_embed: tokens = tokens + self.type_embed.expand(B, -1, -1) return tokens def forward(self, vision=None, depth=None, patch_mask=None): if patch_mask is not None: raise NotImplementedError() if vision is not None: vision_tokens = self.tokenize_input_and_cls_pos( vision, self.rgbt_stem, patch_mask ) if depth is not None: depth_tokens = self.tokenize_input_and_cls_pos( depth, self.depth_stem, patch_mask ) # aggregate tokens if vision is not None and depth is not None: final_tokens = vision_tokens + depth_tokens else: final_tokens = vision_tokens if vision is not None else depth_tokens return_dict = { "trunk": { "tokens": final_tokens, }, "head": {}, } return return_dict class AudioPreprocessor(RGBDTPreprocessor): def __init__(self, audio_stem: PatchEmbedGeneric, **kwargs) -> None: super().__init__(rgbt_stem=audio_stem, depth_stem=None, **kwargs) def forward(self, audio=None): return super().forward(vision=audio) class ThermalPreprocessor(RGBDTPreprocessor): def __init__(self, thermal_stem: PatchEmbedGeneric, **kwargs) -> None: super().__init__(rgbt_stem=thermal_stem, depth_stem=None, **kwargs) def forward(self, thermal=None): return super().forward(vision=thermal) def build_causal_attention_mask(context_length): # lazily create causal attention mask, with full attention between the vision tokens # pytorch uses additive attention mask; fill with -inf mask = torch.empty(context_length, context_length, requires_grad=False) mask.fill_(float("-inf")) mask.triu_(1) # zero out the lower diagonal return mask class TextPreprocessor(VerboseNNModule): def __init__( self, vocab_size: int, context_length: int, embed_dim: int, causal_masking: bool, supply_seq_len_to_head: bool = True, num_cls_tokens: int = 0, init_param_style: str = "openclip", ) -> None: super().__init__() self.vocab_size = vocab_size self.context_length = context_length self.token_embedding = nn.Embedding(vocab_size, embed_dim) self.pos_embed = nn.Parameter( torch.empty(1, self.context_length + num_cls_tokens, embed_dim) ) self.causal_masking = causal_masking if self.causal_masking: mask = build_causal_attention_mask(self.context_length) # register the mask as a buffer so it can be moved to the right device self.register_buffer("mask", mask) self.supply_seq_len_to_head = supply_seq_len_to_head self.num_cls_tokens = num_cls_tokens self.embed_dim = embed_dim if num_cls_tokens > 0: assert self.causal_masking is False, "Masking + CLS token isn't implemented" self.cls_token = nn.Parameter( torch.zeros(1, self.num_cls_tokens, embed_dim) ) self.init_parameters(init_param_style) @torch.no_grad() def init_parameters(self, init_param_style="openclip"): # OpenCLIP style initialization nn.init.normal_(self.token_embedding.weight, std=0.02) nn.init.normal_(self.pos_embed, std=0.01) if init_param_style == "openclip": # OpenCLIP style initialization scale = self.embed_dim**-0.5 if self.num_cls_tokens > 0: nn.init.normal_(self.cls_token) self.cls_token *= scale elif init_param_style == "vit": self.cls_token.data.fill_(0) else: raise ValueError(f"Unknown init {init_param_style}") def forward(self, text): # text tokens are of shape B x L x D text_tokens = self.token_embedding(text) # concat CLS tokens if any if self.num_cls_tokens > 0: B = text_tokens.shape[0] class_tokens = self.cls_token.expand( B, -1, -1 ) # stole class_tokens impl from Phil Wang, thanks text_tokens = torch.cat((class_tokens, 
text_tokens), dim=1) text_tokens = text_tokens + self.pos_embed return_dict = { "trunk": { "tokens": text_tokens, }, "head": {}, } # Compute sequence length after adding CLS tokens if self.supply_seq_len_to_head: text_lengths = text.argmax(dim=-1) return_dict["head"] = { "seq_len": text_lengths, } if self.causal_masking: return_dict["trunk"].update({"attn_mask": self.mask}) return return_dict class Im2Video(nn.Module): """Convert an image into a trivial video.""" def __init__(self, time_dim=2): super().__init__() self.time_dim = time_dim def forward(self, x): if x.ndim == 4: # B, C, H, W -> B, C, T, H, W return x.unsqueeze(self.time_dim) elif x.ndim == 5: return x else: raise ValueError(f"Dimension incorrect {x.shape}") class PadIm2Video(Im2Video): def __init__(self, ntimes, pad_type, time_dim=2): super().__init__(time_dim=time_dim) assert ntimes > 0 assert pad_type in ["zero", "repeat"] self.ntimes = ntimes self.pad_type = pad_type def forward(self, x): x = super().forward(x) if x.shape[self.time_dim] == 1: if self.pad_type == "repeat": new_shape = [1] * len(x.shape) new_shape[self.time_dim] = self.ntimes x = x.repeat(new_shape) elif self.pad_type == "zero": padarg = [0, 0] * len(x.shape) padarg[2 * self.time_dim + 1] = self.ntimes - x.shape[self.time_dim] x = nn.functional.pad(x, padarg) return x # Modified from github.com/openai/CLIP @lru_cache() def bytes_to_unicode(): """ Returns list of utf-8 byte and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a signficant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on. """ bs = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) def get_pairs(word): """Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). 
""" pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs def basic_clean(text): text = ftfy.fix_text(text) text = html.unescape(html.unescape(text)) return text.strip() def whitespace_clean(text): text = re.sub(r"\s+", " ", text) text = text.strip() return text class SimpleTokenizer(object): def __init__(self, bpe_path: str, context_length=77): self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} with g_pathmgr.open(bpe_path, "rb") as fh: bpe_bytes = io.BytesIO(fh.read()) merges: List[str] = gzip.open(bpe_bytes).read().decode("utf-8").split("\n") merges = merges[1 : 49152 - 256 - 2 + 1] merges: List[Tuple[str, ...]] = [tuple(merge.split()) for merge in merges] vocab = list(bytes_to_unicode().values()) vocab = vocab + [v + "</w>" for v in vocab] for merge in merges: vocab.append("".join(merge)) vocab.extend(["<|startoftext|>", "<|endoftext|>"]) self.encoder = dict(zip(vocab, range(len(vocab)))) self.decoder = {v: k for k, v in self.encoder.items()} self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = { "<|startoftext|>": "<|startoftext|>", "<|endoftext|>": "<|endoftext|>", } self.pat = re.compile( r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE, ) self.context_length = context_length def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token[:-1]) + (token[-1] + "</w>",) pairs = get_pairs(word) if not pairs: return token + "</w>" while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) new_word.extend(word[i:j]) i = j except: new_word.extend(word[i:]) break if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) self.cache[token] = word return word def encode(self, text): bpe_tokens = [] text = whitespace_clean(basic_clean(text)).lower() for token in re.findall(self.pat, text): token = "".join(self.byte_encoder[b] for b in token.encode("utf-8")) bpe_tokens.extend( self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ") ) return bpe_tokens def decode(self, tokens): text = "".join([self.decoder[token] for token in tokens]) text = ( bytearray([self.byte_decoder[c] for c in text]) .decode("utf-8", errors="replace") .replace("</w>", " ") ) return text def __call__(self, texts, context_length=None): if not context_length: context_length = self.context_length if isinstance(texts, str): texts = [texts] sot_token = self.encoder["<|startoftext|>"] eot_token = self.encoder["<|endoftext|>"] all_tokens = [[sot_token] + self.encode(text) + [eot_token] for text in texts] result = torch.zeros(len(all_tokens), context_length, dtype=torch.long) for i, tokens in enumerate(all_tokens): tokens = tokens[:context_length] result[i, : len(tokens)] = torch.tensor(tokens) if len(result) == 1: return result[0] return result class IMUPreprocessor(VerboseNNModule): def __init__( self, kernel_size: int, imu_stem: PatchEmbedGeneric, embed_dim: int, img_size: Tuple = (6, 2000), num_cls_tokens: int = 1, pos_embed_fn: Optional[Callable] = None, init_param_style: str = "openclip", ) -> None: super().__init__() self.imu_stem = 
imu_stem self.embed_dim = embed_dim self.use_pos_embed = pos_embed_fn is not None self.num_cls_tokens = num_cls_tokens self.kernel_size = kernel_size self.pos_embed = nn.Parameter( torch.empty(1, (img_size[1] // kernel_size) + num_cls_tokens, embed_dim) ) if self.num_cls_tokens > 0: self.cls_token = nn.Parameter( torch.zeros(1, self.num_cls_tokens, self.embed_dim) ) self.init_parameters(init_param_style) @torch.no_grad() def init_parameters(self, init_param_style): nn.init.normal_(self.pos_embed, std=0.01) if init_param_style == "openclip": # OpenCLIP style initialization scale = self.embed_dim**-0.5 if self.num_cls_tokens > 0: nn.init.normal_(self.cls_token) self.cls_token *= scale elif init_param_style == "vit": self.cls_token.data.fill_(0) else: raise ValueError(f"Unknown init {init_param_style}") def tokenize_input_and_cls_pos(self, input, stem): # tokens is of shape B x L x D tokens = stem.norm_layer(stem.proj(input)) assert tokens.ndim == 3 assert tokens.shape[2] == self.embed_dim B = tokens.shape[0] if self.num_cls_tokens > 0: class_tokens = self.cls_token.expand( B, -1, -1 ) # stole class_tokens impl from Phil Wang, thanks tokens = torch.cat((class_tokens, tokens), dim=1) if self.use_pos_embed: tokens = tokens + self.pos_embed return tokens def forward(self, imu): # Patchify imu = imu.unfold( -1, self.kernel_size, self.kernel_size, ).permute(0, 2, 1, 3) imu = imu.reshape(imu.size(0), imu.size(1), -1) imu_tokens = self.tokenize_input_and_cls_pos( imu, self.imu_stem, ) return_dict = { "trunk": { "tokens": imu_tokens, }, "head": {}, } return return_dict
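# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): it shows how a
# single-Conv2d patchify stem plugs into PatchEmbedGeneric and
# RGBDTPreprocessor for plain RGB input. The stem configuration below
# (16x16 patches, 768-dim embeddings) is a hypothetical example, not the
# setting used by the surrounding model.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from functools import partial

    # 224x224 RGB image -> 14x14 grid of 768-dim patch tokens
    rgb_stem = PatchEmbedGeneric(
        proj_stem=[nn.Conv2d(3, 768, kernel_size=16, stride=16, bias=False)]
    )
    preprocessor = RGBDTPreprocessor(
        rgbt_stem=rgb_stem,
        depth_stem=None,
        img_size=[3, 224, 224],  # a list, so get_patch_layout can prepend the batch dim
        num_cls_tokens=1,
        pos_embed_fn=partial(SpatioTemporalPosEmbeddingHelper, learnable=True),
    )
    dummy = torch.randn(2, 3, 224, 224)
    out = preprocessor(vision=dummy)
    print(out["trunk"]["tokens"].shape)  # 1 CLS + 196 patch tokens -> [2, 197, 768]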
MovieChat-main
MovieChat/models/multimodal_preprocessors.py
# This script is based on https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py """ PyTorch LLaMA model.""" import math from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.activations import ACT2FN from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast from transformers.modeling_utils import PreTrainedModel from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings from transformers.models.llama.configuration_llama import LlamaConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LlamaConfig" # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class LlamaRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ LlamaRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states): variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) # convert into half-precision if necessary if self.weight.dtype in [torch.float16, torch.bfloat16]: hidden_states = hidden_states.to(self.weight.dtype) return self.weight * hidden_states class LlamaRotaryEmbedding(torch.nn.Module): def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None): super().__init__() inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float().to(device) / dim)) self.register_buffer("inv_freq", inv_freq) # Build here to make `torch.jit.trace` work. 
self.max_seq_len_cached = max_position_embeddings t = torch.arange(self.max_seq_len_cached, device=self.inv_freq.device, dtype=self.inv_freq.dtype) freqs = torch.einsum("i,j->ij", t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1) self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False) self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False) def forward(self, x, seq_len=None): # x: [bs, num_attention_heads, seq_len, head_size] # This `if` block is unlikely to be run after we build sin/cos in `__init__`. Keep the logic here just in case. if seq_len > self.max_seq_len_cached: self.max_seq_len_cached = seq_len t = torch.arange(self.max_seq_len_cached, device=x.device, dtype=self.inv_freq.dtype) freqs = torch.einsum("i,j->ij", t, self.inv_freq) # Different from paper, but it uses a different permutation in order to obtain the same calculation emb = torch.cat((freqs, freqs), dim=-1).to(x.device) self.register_buffer("cos_cached", emb.cos()[None, None, :, :], persistent=False) self.register_buffer("sin_cached", emb.sin()[None, None, :, :], persistent=False) return ( self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype), ) def rotate_half(x): """Rotates half the hidden dims of the input.""" x1 = x[..., : x.shape[-1] // 2] x2 = x[..., x.shape[-1] // 2 :] return torch.cat((-x2, x1), dim=-1) def apply_rotary_pos_emb(q, k, cos, sin, position_ids): gather_indices = position_ids[:, None, :, None] # [bs, 1, seq_len, 1] gather_indices = gather_indices.repeat(1, cos.shape[1], 1, cos.shape[3]) cos = torch.gather(cos.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) sin = torch.gather(sin.repeat(gather_indices.shape[0], 1, 1, 1), 2, gather_indices) q_embed = (q * cos) + (rotate_half(q) * sin) k_embed = (k * cos) + (rotate_half(k) * sin) return q_embed, k_embed class LlamaMLP(nn.Module): def __init__( self, hidden_size: int, intermediate_size: int, hidden_act: str, ): super().__init__() self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False) self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False) self.act_fn = ACT2FN[hidden_act] def forward(self, x): return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) class LlamaAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config: LlamaConfig): super().__init__() self.config = config self.hidden_size = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.hidden_size // self.num_heads self.max_position_embeddings = config.max_position_embeddings if (self.head_dim * self.num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {self.num_heads})." 
) self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.k_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.v_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False) self.o_proj = nn.Linear(self.num_heads * self.head_dim, self.hidden_size, bias=False) self.rotary_emb = LlamaRotaryEmbedding(self.head_dim, max_position_embeddings=self.max_position_embeddings) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: bool = False, use_cache: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: bsz, q_len, _ = hidden_states.size() query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2) kv_seq_len = key_states.shape[-2] if past_key_value is not None: kv_seq_len += past_key_value[0].shape[-2] cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len) query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids) # [bsz, nh, t, hd] if past_key_value is not None: # reuse k, v, self_attention key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) past_key_value = (key_states, value_states) if use_cache else None attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, q_len, kv_seq_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, q_len, kv_seq_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights + attention_mask attn_weights = torch.max(attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min)) # upcast attention to fp32 attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) attn_output = torch.matmul(attn_weights, value_states) if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) attn_output = self.o_proj(attn_output) if not output_attentions: attn_weights = None return attn_output, attn_weights, past_key_value class LlamaDecoderLayer(nn.Module): def __init__(self, config: LlamaConfig): super().__init__() self.hidden_size = config.hidden_size self.self_attn = LlamaAttention(config=config) self.mlp = LlamaMLP( hidden_size=self.hidden_size, intermediate_size=config.intermediate_size, hidden_act=config.hidden_act, ) self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.post_attention_layernorm = 
LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ residual = hidden_states hidden_states = self.input_layernorm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = residual + hidden_states # Fully Connected residual = hidden_states hidden_states = self.post_attention_layernorm(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs LLAMA_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`LlamaConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
""" @add_start_docstrings( "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", LLAMA_START_DOCSTRING, ) class LlamaPreTrainedModel(PreTrainedModel): config_class = LlamaConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["LlamaDecoderLayer"] _keys_to_ignore_on_load_unexpected = [r"decoder\.version"] def _init_weights(self, module): std = self.config.initializer_range if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, LlamaModel): module.gradient_checkpointing = value LLAMA_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`). If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids) past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. 
This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare LLaMA Model outputting raw hidden-states without any specific head on top.", LLAMA_START_DOCSTRING, ) class LlamaModel(LlamaPreTrainedModel): """ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`] Args: config: LlamaConfig """ def __init__(self, config: LlamaConfig): super().__init__(config) self.padding_idx = config.pad_token_id self.vocab_size = config.vocab_size self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) self.layers = nn.ModuleList([LlamaDecoderLayer(config) for _ in range(config.num_hidden_layers)]) self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( inputs_embeds.device ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, query_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids 
and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: batch_size, seq_length = input_ids.shape elif inputs_embeds is not None: batch_size, seq_length, _ = inputs_embeds.shape else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) if query_embeds is not None: inputs_embeds = torch.cat([query_embeds, inputs_embeds], dim=1) batch_size, seq_length, _ = inputs_embeds.shape seq_length_with_past = seq_length past_key_values_length = 0 if past_key_values is not None: past_key_values_length = past_key_values[0][0].shape[2] seq_length_with_past = seq_length_with_past + past_key_values_length if position_ids is None: device = input_ids.device if input_ids is not None else inputs_embeds.device position_ids = torch.arange( past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).view(-1, seq_length) else: position_ids = position_ids.view(-1, seq_length).long() # embed positions if attention_mask is None: attention_mask = torch.ones( (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device ) attention_mask = self._prepare_decoder_attention_mask( attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length ) hidden_states = inputs_embeds if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = () if use_cache else None for idx, decoder_layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, output_attentions, None) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, attention_mask, position_ids, None, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, position_ids=position_ids, past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) hidden_states = self.norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) class LlamaForCausalLM(LlamaPreTrainedModel): def __init__(self, config): super().__init__(config) self.model = LlamaModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final 
processing self.post_init() def get_input_embeddings(self): return self.model.embed_tokens def set_input_embeddings(self, value): self.model.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model = decoder def get_decoder(self): return self.model @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, query_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: r""" Args: labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> from transformers import AutoTokenizer, LlamaForCausalLM >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS) >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER) >>> prompt = "Hey, are you consciours? Can you talk to me?" >>> inputs = tokenizer(prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(inputs.input_ids, max_length=30) >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "Hey, are you consciours? Can you talk to me?\nI'm not consciours, but I can talk to you." 
```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, query_embeds=query_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, query_embeds=None, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): if past_key_values: input_ids = input_ids[:, -1:] position_ids = kwargs.get("position_ids", None) if attention_mask is not None and position_ids is None: # create position_ids on the fly for batch generation position_ids = attention_mask.long().cumsum(-1) - 1 position_ids.masked_fill_(attention_mask == 0, 1) if past_key_values: position_ids = position_ids[:, -1].unsqueeze(-1) query_embeds = None # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "position_ids": position_ids, "query_embeds": query_embeds, "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, } ) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) return reordered_past
MovieChat-main
MovieChat/models/modeling_llama.py
# Based on ToMe, EVA, BEIT, timm and DeiT code bases # https://github.com/facebookresearch/ToMe # https://github.com/baaivision/EVA # https://github.com/rwightman/pytorch-image-models/tree/master/timm # https://github.com/microsoft/unilm/tree/master/beit # https://github.com/facebookresearch/deit/ # https://github.com/facebookresearch/dino # --------------------------------------------------------' import math from functools import partial from typing import Callable, Tuple, Union, List import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.models.layers import drop_path, to_2tuple, trunc_normal_ from timm.models.registry import register_model from MovieChat.models.eva_vit import Attention, Block, VisionTransformer, convert_weights_to_fp16, interpolate_pos_embed from MovieChat.common.dist_utils import download_cached_file def do_nothing(x, mode=None): return x def bipartite_soft_matching( metric: torch.Tensor, r: int, class_token: bool = False, distill_token: bool = False, ) -> Tuple[Callable, Callable]: """ Applies ToMe with a balanced matching set (50%, 50%). Input size is [batch, tokens, channels]. r indicates the number of tokens to remove (max 50% of tokens). Extra args: - class_token: Whether or not there's a class token. - distill_token: Whether or not there's also a distillation token. When enabled, the class token and distillation tokens won't get merged. """ protected = 0 if class_token: protected += 1 if distill_token: protected += 1 # We can only reduce by a maximum of 50% tokens t = metric.shape[1] r = min(r, (t - protected) // 2) if r <= 0: return do_nothing, do_nothing with torch.no_grad(): metric = metric / metric.norm(dim=-1, keepdim=True) a, b = metric[..., ::2, :], metric[..., 1::2, :] scores = a @ b.transpose(-1, -2) if class_token: scores[..., 0, :] = -math.inf if distill_token: scores[..., :, 0] = -math.inf node_max, node_idx = scores.max(dim=-1) edge_idx = node_max.argsort(dim=-1, descending=True)[..., None] unm_idx = edge_idx[..., r:, :] # Unmerged Tokens src_idx = edge_idx[..., :r, :] # Merged Tokens dst_idx = node_idx[..., None].gather(dim=-2, index=src_idx) if class_token: # Sort to ensure the class token is at the start unm_idx = unm_idx.sort(dim=1)[0] def merge(x: torch.Tensor, mode="mean") -> torch.Tensor: src, dst = x[..., ::2, :], x[..., 1::2, :] n, t1, c = src.shape unm = src.gather(dim=-2, index=unm_idx.expand(n, t1 - r, c)) src = src.gather(dim=-2, index=src_idx.expand(n, r, c)) dst = dst.scatter_reduce(-2, dst_idx.expand(n, r, c), src, reduce=mode) if distill_token: return torch.cat([unm[:, :1], dst[:, :1], unm[:, 1:], dst[:, 1:]], dim=1) else: return torch.cat([unm, dst], dim=1) def unmerge(x: torch.Tensor) -> torch.Tensor: unm_len = unm_idx.shape[1] unm, dst = x[..., :unm_len, :], x[..., unm_len:, :] n, _, c = unm.shape src = dst.gather(dim=-2, index=dst_idx.expand(n, r, c)) out = torch.zeros(n, metric.shape[1], c, device=x.device, dtype=x.dtype) out[..., 1::2, :] = dst out.scatter_(dim=-2, index=(2 * unm_idx).expand(n, unm_len, c), src=unm) out.scatter_(dim=-2, index=(2 * src_idx).expand(n, r, c), src=src) return out return merge, unmerge def kth_bipartite_soft_matching( metric: torch.Tensor, k: int ) -> Tuple[Callable, Callable]: """ Applies ToMe with the two sets as (every kth element, the rest). If n is the number of tokens, resulting number of tokens will be n // z. Input size is [batch, tokens, channels]. z indicates the stride for the first set. 
z = 2 is equivalent to regular bipartite_soft_matching with r = 0.5 * N """ if k <= 1: return do_nothing, do_nothing def split(x): t_rnd = (x.shape[1] // k) * k x = x[:, :t_rnd, :].view(x.shape[0], -1, k, x.shape[2]) a, b = ( x[:, :, : (k - 1), :].contiguous().view(x.shape[0], -1, x.shape[-1]), x[:, :, (k - 1), :], ) return a, b with torch.no_grad(): metric = metric / metric.norm(dim=-1, keepdim=True) a, b = split(metric) r = a.shape[1] scores = a @ b.transpose(-1, -2) _, dst_idx = scores.max(dim=-1) dst_idx = dst_idx[..., None] def merge(x: torch.Tensor, mode="mean") -> torch.Tensor: src, dst = split(x) n, _, c = src.shape dst = dst.scatter_reduce(-2, dst_idx.expand(n, r, c), src, reduce=mode) return dst def unmerge(x: torch.Tensor) -> torch.Tensor: n, _, c = x.shape dst = x src = dst.gather(dim=-2, index=dst_idx.expand(n, r, c)).to(x.dtype) src = src.view(n, -1, (k - 1), c) dst = dst.view(n, -1, 1, c) out = torch.cat([src, dst], dim=-2) out = out.contiguous().view(n, -1, c) return out return merge, unmerge def random_bipartite_soft_matching( metric: torch.Tensor, r: int ) -> Tuple[Callable, Callable]: """ Applies ToMe with the two sets as (r chosen randomly, the rest). Input size is [batch, tokens, channels]. This will reduce the number of tokens by r. """ if r <= 0: return do_nothing, do_nothing with torch.no_grad(): B, N, _ = metric.shape rand_idx = torch.rand(B, N, 1, device=metric.device).argsort(dim=1) a_idx = rand_idx[:, :r, :] b_idx = rand_idx[:, r:, :] def split(x): C = x.shape[-1] a = x.gather(dim=1, index=a_idx.expand(B, r, C)) b = x.gather(dim=1, index=b_idx.expand(B, N - r, C)) return a, b metric = metric / metric.norm(dim=-1, keepdim=True) a, b = split(metric) scores = a @ b.transpose(-1, -2) _, dst_idx = scores.max(dim=-1) dst_idx = dst_idx[..., None] def merge(x: torch.Tensor, mode="mean") -> torch.Tensor: src, dst = split(x) C = src.shape[-1] dst = dst.scatter_reduce(-2, dst_idx.expand(B, r, C), src, reduce=mode) return dst def unmerge(x: torch.Tensor) -> torch.Tensor: C = x.shape[-1] dst = x src = dst.gather(dim=-2, index=dst_idx.expand(B, r, C)) out = torch.zeros(B, N, C, device=x.device, dtype=x.dtype) out.scatter_(dim=-2, index=a_idx.expand(B, r, C), src=src) out.scatter_(dim=-2, index=b_idx.expand(B, N - r, C), src=dst) return out return merge, unmerge def merge_wavg( merge: Callable, x: torch.Tensor, size: torch.Tensor = None ) -> Tuple[torch.Tensor, torch.Tensor]: """ Applies the merge function by taking a weighted average based on token size. Returns the merged tensor and the new token sizes. """ if size is None: size = torch.ones_like(x[..., 0, None]) x = merge(x * size, mode="sum") size = merge(size, mode="sum") x = x / size return x, size def merge_source( merge: Callable, x: torch.Tensor, source: torch.Tensor = None ) -> torch.Tensor: """ For source tracking. Source is an adjacency matrix between the initial tokens and final merged groups. x is used to find out how many tokens there are in case the source is None. """ if source is None: n, t, _ = x.shape source = torch.eye(t, device=x.device)[None, ...].expand(n, t, t) source = merge(source, mode="amax") return source def parse_r(num_layers: int, r: Union[List[int], Tuple[int, float], int]) -> List[int]: """ Process a constant r or r schedule into a list for use internally. r can take the following forms: - int: A constant number of tokens per layer. - Tuple[int, float]: A pair of r, inflection. Inflection describes there the the reduction / layer should trend upward (+1), downward (-1), or stay constant (0). 
A value of (r, 0) is as providing a constant r. (r, -1) is what we describe in the paper as "decreasing schedule". Any value between -1 and +1 is accepted. - List[int]: A specific number of tokens per layer. For extreme granularity. """ inflect = 0 if isinstance(r, list): if len(r) < num_layers: r = r + [0] * (num_layers - len(r)) return list(r) elif isinstance(r, tuple): r, inflect = r min_val = int(r * (1.0 - inflect)) max_val = 2 * r - min_val step = (max_val - min_val) / (num_layers - 1) return [int(min_val + step * i) for i in range(num_layers)] class ToMeBlock(Block): """ Modifications: - Apply ToMe between the attention and mlp blocks - Compute and propogate token size and potentially the token sources. """ def _drop_path1(self, x): return self.drop_path1(x) if hasattr(self, "drop_path1") else self.drop_path(x) def _drop_path2(self, x): return self.drop_path2(x) if hasattr(self, "drop_path2") else self.drop_path(x) def forward(self, x: torch.Tensor, rel_pos_bias=None) -> torch.Tensor: # Note: this is copied from timm.models.vision_transformer.Block with modifications. attn_size = self._tome_info["size"] if self._tome_info["prop_attn"] else None x_attn, metric = self.attn(self.norm1(x), attn_size) x = x + self._drop_path1(x_attn) r = self._tome_info["r"].pop(0) if r > 0: # Apply ToMe here merge, _ = bipartite_soft_matching( metric, r, self._tome_info["class_token"], self._tome_info["distill_token"], ) if self._tome_info["trace_source"]: self._tome_info["source"] = merge_source( merge, x, self._tome_info["source"] ) x, self._tome_info["size"] = merge_wavg(merge, x, self._tome_info["size"]) x = x + self._drop_path2(self.mlp(self.norm2(x))) return x class ToMeAttention(Attention): """ Modifications: - Apply proportional attention - Return the mean of k over heads from attention """ def forward( self, x: torch.Tensor, size: torch.Tensor = None ) -> Tuple[torch.Tensor, torch.Tensor]: # Note: this is copied from timm.models.vision_transformer.Attention with modifications. B, N, C = x.shape qkv = ( self.qkv(x) .reshape(B, N, 3, self.num_heads, C // self.num_heads) .permute(2, 0, 3, 1, 4) ) q, k, v = ( qkv[0], qkv[1],#[1,16,257,88] qkv[2], ) # make torchscript happy (cannot use tensor as tuple) attn = (q @ k.transpose(-2, -1)) * self.scale # Apply proportional attention if size is not None: attn = attn + size.log()[:, None, None, :, 0] attn = attn.softmax(dim=-1) attn = self.attn_drop(attn) x = (attn @ v).transpose(1, 2).reshape(B, N, C) x = self.proj(x) x = self.proj_drop(x) return x, k.mean(1) def make_tome_class(transformer_class): class ToMeVisionTransformer(transformer_class): """ Modifications: - Initialize r, token size, and token sources. """ def forward(self, *args, **kwdargs) -> torch.Tensor: self._tome_info["r"] = parse_r(len(self.blocks), self.r) self._tome_info["size"] = None self._tome_info["source"] = None return super().forward(*args, **kwdargs) return ToMeVisionTransformer def apply_patch( model: VisionTransformer, trace_source: bool = False, prop_attn: bool = True ): """ Applies ToMe to this transformer. Afterward, set r using model.r. If you want to know the source of each token (e.g., for visualization), set trace_source = true. The sources will be available at model._tome_info["source"] afterward. For proportional attention, set prop_attn to True. This is only necessary when evaluating models off the shelf. For trianing and for evaluating MAE models off the self set this to be False. 
""" ToMeVisionTransformer = make_tome_class(model.__class__) model.__class__ = ToMeVisionTransformer model.r = 0 model._tome_info = { "r": model.r, "size": None, "source": None, "trace_source": trace_source, "prop_attn": prop_attn, "class_token": model.cls_token is not None, "distill_token": False, } if hasattr(model, "dist_token") and model.dist_token is not None: model._tome_info["distill_token"] = True for module in model.modules(): if isinstance(module, Block): module.__class__ = ToMeBlock module._tome_info = model._tome_info elif isinstance(module, Attention): module.__class__ = ToMeAttention def create_eva_vit_g_with_tome(img_size=224,drop_path_rate=0.4,use_checkpoint=False,precision="fp16"): model = VisionTransformer( img_size=img_size, patch_size=14, use_mean_pooling=False, embed_dim=1408, depth=39, num_heads=1408//88, mlp_ratio=4.3637, qkv_bias=True, drop_path_rate=drop_path_rate, norm_layer=partial(nn.LayerNorm, eps=1e-6), use_checkpoint=use_checkpoint, ) url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/models/BLIP2/eva_vit_g.pth" cached_file = download_cached_file( url, check_hash=False, progress=True ) state_dict = torch.load(cached_file, map_location="cpu") interpolate_pos_embed(model,state_dict) incompatible_keys = model.load_state_dict(state_dict, strict=False) # print(incompatible_keys) if precision == "fp16": # model.to("cuda") convert_weights_to_fp16(model) # apply tome apply_patch(model) num_tokens = (img_size // 14)**2 + 1 num_layers = 40 model.r = num_tokens // (num_layers - 1) print(f"Apply ToMe with r = {model.r}") print("# token: ", end="") for l in range(num_layers): print(num_tokens - model.r*l, end="->") print("end") return model
MovieChat-main
MovieChat/models/eva_vit_with_tome.py
#!/usr/bin/env python3 # Portions Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import logging import math import torch import torch.nn as nn import torchaudio from PIL import Image from pytorchvideo import transforms as pv_transforms from pytorchvideo.data.clip_sampling import ConstantClipsPerVideoSampler from pytorchvideo.data.encoded_video import EncodedVideo from torchvision import transforms from torchvision.transforms._transforms_video import NormalizeVideo from .multimodal_preprocessors import SimpleTokenizer DEFAULT_AUDIO_FRAME_SHIFT_MS = 10 # in milliseconds BPE_PATH = "bpe/bpe_simple_vocab_16e6.txt.gz" def waveform2melspec(waveform, sample_rate, num_mel_bins, target_length): # Based on https://github.com/YuanGongND/ast/blob/d7d8b4b8e06cdaeb6c843cdb38794c1c7692234c/src/dataloader.py#L102 waveform -= waveform.mean() fbank = torchaudio.compliance.kaldi.fbank( waveform, htk_compat=True, sample_frequency=sample_rate, use_energy=False, window_type="hanning", num_mel_bins=num_mel_bins, dither=0.0, frame_length=25, frame_shift=DEFAULT_AUDIO_FRAME_SHIFT_MS, ) # Convert to [mel_bins, num_frames] shape fbank = fbank.transpose(0, 1) # Pad to target_length n_frames = fbank.size(1) p = target_length - n_frames # if p is too large (say >20%), flash a warning if abs(p) / n_frames > 0.2: logging.warning( "Large gap between audio n_frames(%d) and " "target_length (%d). Is the audio_target_length " "setting correct?", n_frames, target_length, ) # cut and pad if p > 0: fbank = torch.nn.functional.pad(fbank, (0, p), mode="constant", value=0) elif p < 0: fbank = fbank[:, 0:target_length] # Convert to [1, mel_bins, num_frames] shape, essentially like a 1 # channel image fbank = fbank.unsqueeze(0) return fbank def get_clip_timepoints(clip_sampler, duration): # Read out all clips in this video all_clips_timepoints = [] is_last_clip = False end = 0.0 while not is_last_clip: start, end, _, _, is_last_clip = clip_sampler(end, duration, annotation=None) all_clips_timepoints.append((start, end)) return all_clips_timepoints def load_and_transform_vision_data(image_paths, device): if image_paths is None: return None image_ouputs = [] for image_path in image_paths: data_transform = transforms.Compose( [ transforms.Resize( 224, interpolation=transforms.InterpolationMode.BICUBIC ), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize( mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711), ), ] ) with open(image_path, "rb") as fopen: image = Image.open(fopen).convert("RGB") image = data_transform(image).to(device) image_ouputs.append(image) return torch.stack(image_ouputs, dim=0) def load_and_transform_text(text, device): if text is None: return None tokenizer = SimpleTokenizer(bpe_path=BPE_PATH) tokens = [tokenizer(t).unsqueeze(0).to(device) for t in text] tokens = torch.cat(tokens, dim=0) return tokens def load_and_transform_audio_data( audio_paths, device, num_mel_bins=128, target_length=204, sample_rate=16000, clip_duration=2, clips_per_video=3, mean=-4.268, std=9.138, ): if audio_paths is None: return None audio_outputs = [] clip_sampler = ConstantClipsPerVideoSampler( clip_duration=clip_duration, clips_per_video=clips_per_video ) for audio_path in audio_paths: waveform, sr = torchaudio.load(audio_path) if sample_rate != sr: waveform = torchaudio.functional.resample( waveform, orig_freq=sr, new_freq=sample_rate ) 
all_clips_timepoints = get_clip_timepoints( clip_sampler, waveform.size(1) / sample_rate ) all_clips = [] for clip_timepoints in all_clips_timepoints: waveform_clip = waveform[ :, int(clip_timepoints[0] * sample_rate) : int( clip_timepoints[1] * sample_rate ), ] waveform_melspec = waveform2melspec( waveform_clip, sample_rate, num_mel_bins, target_length ) all_clips.append(waveform_melspec) normalize = transforms.Normalize(mean=mean, std=std) all_clips = [normalize(ac).to(device) for ac in all_clips] all_clips = torch.stack(all_clips, dim=0) audio_outputs.append(all_clips) return torch.stack(audio_outputs, dim=0) def crop_boxes(boxes, x_offset, y_offset): """ Perform crop on the bounding boxes given the offsets. Args: boxes (ndarray or None): bounding boxes to perform crop. The dimension is `num boxes` x 4. x_offset (int): cropping offset in the x axis. y_offset (int): cropping offset in the y axis. Returns: cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4. """ cropped_boxes = boxes.copy() cropped_boxes[:, [0, 2]] = boxes[:, [0, 2]] - x_offset cropped_boxes[:, [1, 3]] = boxes[:, [1, 3]] - y_offset return cropped_boxes def uniform_crop(images, size, spatial_idx, boxes=None, scale_size=None): """ Perform uniform spatial sampling on the images and corresponding boxes. Args: images (tensor): images to perform uniform crop. The dimension is `num frames` x `channel` x `height` x `width`. size (int): size of height and weight to crop the images. spatial_idx (int): 0, 1, or 2 for left, center, and right crop if width is larger than height. Or 0, 1, or 2 for top, center, and bottom crop if height is larger than width. boxes (ndarray or None): optional. Corresponding boxes to images. Dimension is `num boxes` x 4. scale_size (int): optinal. If not None, resize the images to scale_size before performing any crop. Returns: cropped (tensor): images with dimension of `num frames` x `channel` x `size` x `size`. cropped_boxes (ndarray or None): the cropped boxes with dimension of `num boxes` x 4. """ assert spatial_idx in [0, 1, 2] ndim = len(images.shape) if ndim == 3: images = images.unsqueeze(0) height = images.shape[2] width = images.shape[3] if scale_size is not None: if width <= height: width, height = scale_size, int(height / width * scale_size) else: width, height = int(width / height * scale_size), scale_size images = torch.nn.functional.interpolate( images, size=(height, width), mode="bilinear", align_corners=False, ) y_offset = int(math.ceil((height - size) / 2)) x_offset = int(math.ceil((width - size) / 2)) if height > width: if spatial_idx == 0: y_offset = 0 elif spatial_idx == 2: y_offset = height - size else: if spatial_idx == 0: x_offset = 0 elif spatial_idx == 2: x_offset = width - size cropped = images[:, :, y_offset : y_offset + size, x_offset : x_offset + size] cropped_boxes = crop_boxes(boxes, x_offset, y_offset) if boxes is not None else None if ndim == 3: cropped = cropped.squeeze(0) return cropped, cropped_boxes class SpatialCrop(nn.Module): """ Convert the video into 3 smaller clips spatially. Must be used after the temporal crops to get spatial crops, and should be used with -2 in the spatial crop at the slowfast augmentation stage (so full frames are passed in here). Will return a larger list with the 3x spatial crops as well. 
""" def __init__(self, crop_size: int = 224, num_crops: int = 3): super().__init__() self.crop_size = crop_size if num_crops == 3: self.crops_to_ext = [0, 1, 2] self.flipped_crops_to_ext = [] elif num_crops == 1: self.crops_to_ext = [1] self.flipped_crops_to_ext = [] else: raise NotImplementedError("Nothing else supported yet") def forward(self, videos): """ Args: videos: A list of C, T, H, W videos. Returns: videos: A list with 3x the number of elements. Each video converted to C, T, H', W' by spatial cropping. """ assert isinstance(videos, list), "Must be a list of videos after temporal crops" assert all([video.ndim == 4 for video in videos]), "Must be (C,T,H,W)" res = [] for video in videos: for spatial_idx in self.crops_to_ext: res.append(uniform_crop(video, self.crop_size, spatial_idx)[0]) if not self.flipped_crops_to_ext: continue flipped_video = transforms.functional.hflip(video) for spatial_idx in self.flipped_crops_to_ext: res.append(uniform_crop(flipped_video, self.crop_size, spatial_idx)[0]) return res def load_and_transform_video_data( video_paths, device, clip_duration=2, clips_per_video=5, sample_rate=16000, ): if video_paths is None: return None video_outputs = [] video_transform = transforms.Compose( [ pv_transforms.ShortSideScale(224), NormalizeVideo( mean=(0.48145466, 0.4578275, 0.40821073), std=(0.26862954, 0.26130258, 0.27577711), ), ] ) clip_sampler = ConstantClipsPerVideoSampler( clip_duration=clip_duration, clips_per_video=clips_per_video ) frame_sampler = pv_transforms.UniformTemporalSubsample(num_samples=clip_duration) for video_path in video_paths: video = EncodedVideo.from_path( video_path, decoder="decord", decode_audio=False, **{"sample_rate": sample_rate}, ) all_clips_timepoints = get_clip_timepoints(clip_sampler, video.duration) all_video = [] for clip_timepoints in all_clips_timepoints: # Read the clip, get frames clip = video.get_clip(clip_timepoints[0], clip_timepoints[1]) if clip is None: raise ValueError("No clip found") video_clip = frame_sampler(clip["video"]) video_clip = video_clip / 255.0 # since this is float, need 0-1 all_video.append(video_clip) all_video = [video_transform(clip) for clip in all_video] all_video = SpatialCrop(224, num_crops=3)(all_video) all_video = torch.stack(all_video, dim=0) video_outputs.append(all_video) return torch.stack(video_outputs, dim=0).to(device)
MovieChat-main
MovieChat/models/process_video_data.py
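The pad-or-crop step inside waveform2melspec above is the easiest part of process_video_data.py to get wrong, so here is a minimal, self-contained sketch of just that logic on a synthetic filterbank tensor. The 128 mel bins and 204-frame target mirror the defaults of load_and_transform_audio_data; the helper name pad_or_crop_to_target is ours, introduced only for illustration.

import torch
import torch.nn.functional as F

def pad_or_crop_to_target(fbank: torch.Tensor, target_length: int) -> torch.Tensor:
    # fbank arrives as [mel_bins, num_frames]; mirror the pad/crop logic of waveform2melspec
    n_frames = fbank.size(1)
    p = target_length - n_frames
    if p > 0:
        # too short: zero-pad frames on the right
        fbank = F.pad(fbank, (0, p), mode="constant", value=0)
    elif p < 0:
        # too long: drop the trailing frames
        fbank = fbank[:, :target_length]
    # add a channel dim so the clip looks like a 1-channel image: [1, mel_bins, target_length]
    return fbank.unsqueeze(0)

fake_fbank = torch.randn(128, 180)            # 128 mel bins, 180 frames of a synthetic clip
out = pad_or_crop_to_target(fake_fbank, 204)  # pad up to the default target_length of 204
print(out.shape)                              # torch.Size([1, 128, 204])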
""" Adapted from salesforce@LAVIS. Below is the original copyright: * Copyright (c) 2023, salesforce.com, inc. * All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause * By Junnan Li * Based on huggingface code base * https://github.com/huggingface/transformers/blob/v4.15.0/src/transformers/models/bert """ import math import os import warnings from dataclasses import dataclass from typing import Optional, Tuple, Dict, Any import torch from torch import Tensor, device, dtype, nn import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss import torch.nn.functional as F from transformers.activations import ACT2FN from transformers.file_utils import ( ModelOutput, ) from transformers.modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from transformers.modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from transformers.utils import logging from transformers.models.bert.configuration_bert import BertConfig logger = logging.get_logger(__name__) class BertEmbeddings(nn.Module): """Construct the embeddings from word and position embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding( config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id ) self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size ) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)) ) self.position_embedding_type = getattr( config, "position_embedding_type", "absolute" ) self.config = config def forward( self, input_ids=None, position_ids=None, query_embeds=None, past_key_values_length=0, ): if input_ids is not None: seq_length = input_ids.size()[1] else: seq_length = 0 if position_ids is None: position_ids = self.position_ids[ :, past_key_values_length : seq_length + past_key_values_length ].clone() if input_ids is not None: embeddings = self.word_embeddings(input_ids) if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings = embeddings + position_embeddings if query_embeds is not None: embeddings = torch.cat((query_embeds, embeddings), dim=1) else: embeddings = query_embeds embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config, is_cross_attention): super().__init__() self.config = config if config.hidden_size % config.num_attention_heads != 0 and not hasattr( config, "embedding_size" ): raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = 
int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) if is_cross_attention: self.key = nn.Linear(config.encoder_width, self.all_head_size) self.value = nn.Linear(config.encoder_width, self.all_head_size) else: self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr( config, "position_embedding_type", "absolute" ) if ( self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query" ): self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding( 2 * config.max_position_embeddings - 1, self.attention_head_size ) self.save_attention = False def save_attn_gradients(self, attn_gradients): self.attn_gradients = attn_gradients def get_attn_gradients(self): return self.attn_gradients def save_attention_map(self, attention_map): self.attention_map = attention_map def get_attention_map(self): return self.attention_map def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + ( self.num_attention_heads, self.attention_head_size, ) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) mixed_query_layer = self.query(hidden_states) query_layer = self.transpose_for_scores(mixed_query_layer) past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if ( self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query" ): seq_length = hidden_states.size()[1] position_ids_l = torch.arange( seq_length, dtype=torch.long, device=hidden_states.device ).view(-1, 1) position_ids_r = torch.arange( seq_length, dtype=torch.long, device=hidden_states.device ).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding( distance + self.max_position_embeddings - 1 ) positional_embedding = positional_embedding.to( dtype=query_layer.dtype ) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum( "bhld,lrd->bhlr", query_layer, positional_embedding ) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum( "bhld,lrd->bhlr", query_layer, positional_embedding ) relative_position_scores_key = torch.einsum( "bhrd,lrd->bhlr", key_layer, positional_embedding ) attention_scores = ( attention_scores + relative_position_scores_query + relative_position_scores_key ) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) if is_cross_attention and self.save_attention: self.save_attention_map(attention_probs) attention_probs.register_hook(self.save_attn_gradients) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs_dropped = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs_dropped = attention_probs_dropped * head_mask context_layer = torch.matmul(attention_probs_dropped, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = ( (context_layer, attention_probs) if output_attentions else (context_layer,) ) outputs = outputs + (past_key_value,) return outputs class BertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config, is_cross_attention=False): super().__init__() self.self = BertSelfAttention(config, is_cross_attention) self.output = BertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads, ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = ( self.self.attention_head_size * self.self.num_attention_heads ) self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[ 1: ] # add attentions if we output them return outputs class BertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertLayer(nn.Module): def __init__(self, config, layer_num): super().__init__() 
self.config = config self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BertAttention(config) self.layer_num = layer_num if ( self.config.add_cross_attention and layer_num % self.config.cross_attention_freq == 0 ): self.crossattention = BertAttention( config, is_cross_attention=self.config.add_cross_attention ) self.has_cross_attention = True else: self.has_cross_attention = False self.intermediate = BertIntermediate(config) self.output = BertOutput(config) self.intermediate_query = BertIntermediate(config) self.output_query = BertOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, query_length=0, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = ( past_key_value[:2] if past_key_value is not None else None ) self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] if query_length > 0: query_attention_output = attention_output[:, :query_length, :] if self.has_cross_attention: assert ( encoder_hidden_states is not None ), "encoder_hidden_states must be given for cross-attention layers" cross_attention_outputs = self.crossattention( query_attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions=output_attentions, ) query_attention_output = cross_attention_outputs[0] outputs = ( outputs + cross_attention_outputs[1:-1] ) # add cross attentions if we output attention weights layer_output = apply_chunking_to_forward( self.feed_forward_chunk_query, self.chunk_size_feed_forward, self.seq_len_dim, query_attention_output, ) if attention_output.shape[1] > query_length: layer_output_text = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output[:, query_length:, :], ) layer_output = torch.cat([layer_output, layer_output_text], dim=1) else: layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output, ) outputs = (layer_output,) + outputs outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output def feed_forward_chunk_query(self, attention_output): intermediate_output = self.intermediate_query(attention_output) layer_output = self.output_query(intermediate_output, attention_output) return layer_output class BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList( [BertLayer(config, i) for i in range(config.num_hidden_layers)] ) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, query_length=0, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = ( () if output_attentions and self.config.add_cross_attention else None ) 
next_decoder_cache = () if use_cache else None for i in range(self.config.num_hidden_layers): layer_module = self.layer[i] if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warn( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module( *inputs, past_key_value, output_attentions, query_length ) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, query_length, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class BertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BertConfig base_model_prefix = "bert" _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() class BertModel(BertPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `Attention is all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an input to the forward pass. """ def __init__(self, config, add_pooling_layer=False): super().__init__(config) self.config = config self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def get_extended_attention_mask( self, attention_mask: Tensor, input_shape: Tuple[int], device: device, is_decoder: bool, has_query: bool = False, ) -> Tensor: """ Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (:obj:`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (:obj:`Tuple[int]`): The shape of the input to the model. device: (:obj:`torch.device`): The device of the input to the model. Returns: :obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`. """ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] if is_decoder: batch_size, seq_length = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = ( seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] ) # add a prefix ones mask to the causal mask # causal and attention masks must have same type with pytorch version < 1.3 causal_mask = causal_mask.to(attention_mask.dtype) if causal_mask.shape[1] < attention_mask.shape[1]: prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1] if has_query: # UniLM style attention mask causal_mask = torch.cat( [ torch.zeros( (batch_size, prefix_seq_len, seq_length), device=device, dtype=causal_mask.dtype, ), causal_mask, ], axis=1, ) causal_mask = torch.cat( [ torch.ones( (batch_size, causal_mask.shape[1], prefix_seq_len), device=device, dtype=causal_mask.dtype, ), causal_mask, ], axis=-1, ) extended_attention_mask = ( causal_mask[:, None, :, :] * attention_mask[:, None, None, :] ) else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( input_shape, attention_mask.shape ) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. extended_attention_mask = extended_attention_mask.to( dtype=self.dtype ) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward( self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, query_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, is_decoder=False, ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). """ output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) # use_cache = use_cache if use_cache is not None else self.config.use_cache if input_ids is None: assert ( query_embeds is not None ), "You have to specify query_embeds when input_ids is None" # past_key_values_length past_key_values_length = ( past_key_values[0][0].shape[2] - self.config.query_length if past_key_values is not None else 0 ) query_length = query_embeds.shape[1] if query_embeds is not None else 0 embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, query_embeds=query_embeds, past_key_values_length=past_key_values_length, ) input_shape = embedding_output.size()[:-1] batch_size, seq_length = input_shape device = embedding_output.device if attention_mask is None: attention_mask = torch.ones( ((batch_size, seq_length + past_key_values_length)), device=device ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
if is_decoder: extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_ids.shape, device, is_decoder, has_query=(query_embeds is not None), ) else: extended_attention_mask = self.get_extended_attention_mask( attention_mask, input_shape, device, is_decoder ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if encoder_hidden_states is not None: if type(encoder_hidden_states) == list: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states[ 0 ].size() else: ( encoder_batch_size, encoder_sequence_length, _, ) = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if type(encoder_attention_mask) == list: encoder_extended_attention_mask = [ self.invert_attention_mask(mask) for mask in encoder_attention_mask ] elif encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask( encoder_attention_mask ) else: encoder_extended_attention_mask = self.invert_attention_mask( encoder_attention_mask ) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, query_length=query_length, ) sequence_output = encoder_outputs[0] pooled_output = ( self.pooler(sequence_output) if self.pooler is not None else None ) if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class BertLMHeadModel(BertPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.bert = BertModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings def forward( self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, query_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, past_key_values=None, use_cache=True, output_attentions=None, output_hidden_states=None, return_dict=None, return_logits=False, is_decoder=True, reduction="mean", ): r""" encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of 
the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels n ``[0, ..., config.vocab_size]`` past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). Returns: Example:: >>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig >>> import torch >>> tokenizer = BertTokenizer.from_pretrained('bert-base-cased') >>> config = BertConfig.from_pretrained("bert-base-cased") >>> model = BertLMHeadModel.from_pretrained('bert-base-cased', config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits """ return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) if labels is not None: use_cache = False if past_key_values is not None: query_embeds = None outputs = self.bert( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, query_embeds=query_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, is_decoder=is_decoder, ) sequence_output = outputs[0] if query_embeds is not None: sequence_output = outputs[0][:, query_embeds.shape[1] :, :] prediction_scores = self.cls(sequence_output) if return_logits: return prediction_scores[:, :-1, :].contiguous() lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss(reduction=reduction, label_smoothing=0.1) lm_loss = loss_fct( shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1), ) if reduction == "none": lm_loss = lm_loss.view(prediction_scores.size(0), -1).sum(1) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else 
output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation( self, input_ids, query_embeds, past=None, attention_mask=None, **model_kwargs ): # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) query_mask = input_ids.new_ones(query_embeds.shape[:-1]) attention_mask = torch.cat([query_mask, attention_mask], dim=-1) # cut decoder_input_ids if past is used if past is not None: input_ids = input_ids[:, -1:] return { "input_ids": input_ids, "query_embeds": query_embeds, "attention_mask": attention_mask, "past_key_values": past, "encoder_hidden_states": model_kwargs.get("encoder_hidden_states", None), "encoder_attention_mask": model_kwargs.get("encoder_attention_mask", None), "is_decoder": True, } def _reorder_cache(self, past, beam_idx): reordered_past = () for layer_past in past: reordered_past += ( tuple( past_state.index_select(0, beam_idx) for past_state in layer_past ), ) return reordered_past class BertForMaskedLM(BertPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.bert = BertModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings def forward( self, input_ids=None, attention_mask=None, position_ids=None, head_mask=None, query_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, return_logits=False, is_decoder=False, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` """ return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) outputs = self.bert( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, query_embeds=query_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, is_decoder=is_decoder, ) if query_embeds is not None: sequence_output = outputs[0][:, query_embeds.shape[1] :, :] prediction_scores = self.cls(sequence_output) if return_logits: return prediction_scores masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct( prediction_scores.view(-1, self.config.vocab_size), labels.view(-1) ) if not return_dict: output = (prediction_scores,) + outputs[2:] return ( ((masked_lm_loss,) + output) if masked_lm_loss is not None else output ) return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
MovieChat-main
MovieChat/models/Qformer.py
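Qformer.py is meant to be driven through BertLMHeadModel with learned query embeddings cross-attending to frozen visual features (see init_Qformer in blip2.py further below). The following is a minimal sketch, not the project's training code: the tiny layer count, the 1408-dim image features, and the 257-token image sequence are illustrative assumptions, and the extra config fields (encoder_width, cross_attention_freq, query_length) are the custom attributes read by the classes above.

import torch
from transformers import BertConfig
from MovieChat.models.Qformer import BertLMHeadModel  # the class defined above, not the stock one

cfg = BertConfig(
    num_hidden_layers=2,        # tiny for the sketch; the real model uses bert-base depth
    encoder_width=1408,         # width of the frozen image-encoder features (custom field)
    add_cross_attention=True,
    cross_attention_freq=2,     # insert cross-attention every 2nd layer (custom field)
    query_length=32,            # number of learned query tokens (custom field)
)
qformer = BertLMHeadModel(config=cfg)

query_tokens = torch.zeros(1, 32, cfg.hidden_size)       # learned queries (normally an nn.Parameter)
image_embeds = torch.randn(1, 257, 1408)                 # hypothetical frozen-ViT output
image_atts = torch.ones(image_embeds.shape[:-1], dtype=torch.long)

out = qformer.bert(
    query_embeds=query_tokens,
    encoder_hidden_states=image_embeds,
    encoder_attention_mask=image_atts,
    return_dict=True,
)
print(out.last_hidden_state.shape)                       # torch.Size([1, 32, 768])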
#!/usr/bin/env python3 # Portions Copyright (c) Meta Platforms, Inc. and affiliates. # All rights reserved. # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import einops import numpy as np import torch import torch.nn as nn class Normalize(nn.Module): def __init__(self, dim: int) -> None: super().__init__() self.dim = dim def forward(self, x): return torch.nn.functional.normalize(x, dim=self.dim, p=2) class LearnableLogitScaling(nn.Module): def __init__( self, logit_scale_init: float = 1 / 0.07, learnable: bool = True, max_logit_scale: float = 100, ) -> None: super().__init__() self.max_logit_scale = max_logit_scale self.logit_scale_init = logit_scale_init self.learnable = learnable log_logit_scale = torch.ones([]) * np.log(self.logit_scale_init) if learnable: self.log_logit_scale = nn.Parameter(log_logit_scale) else: self.register_buffer("log_logit_scale", log_logit_scale) def forward(self, x): return torch.clip(self.log_logit_scale.exp(), max=self.max_logit_scale) * x def extra_repr(self): st = f"logit_scale_init={self.logit_scale_init},learnable={self.learnable}," \ f" max_logit_scale={self.max_logit_scale}" return st class EinOpsRearrange(nn.Module): def __init__(self, rearrange_expr: str, **kwargs) -> None: super().__init__() self.rearrange_expr = rearrange_expr self.kwargs = kwargs def forward(self, x): assert isinstance(x, torch.Tensor) return einops.rearrange(x, self.rearrange_expr, **self.kwargs) class VerboseNNModule(nn.Module): """ Wrapper around nn.Module that prints registered buffers and parameter names. """ @staticmethod def get_readable_tensor_repr(name: str, tensor: torch.Tensor) -> str: st = ( "(" + name + "): " + "tensor(" + str(tuple(tensor[1].shape)) + ", requires_grad=" + str(tensor[1].requires_grad) + ")\n" ) return st def extra_repr(self) -> str: named_modules = set() for p in self.named_modules(): named_modules.update([p[0]]) named_modules = list(named_modules) string_repr = "" for p in self.named_parameters(): name = p[0].split(".")[0] if name not in named_modules: string_repr += self.get_readable_tensor_repr(name, p) for p in self.named_buffers(): name = p[0].split(".")[0] string_repr += self.get_readable_tensor_repr(name, p) return string_repr def cast_if_src_dtype( tensor: torch.Tensor, src_dtype: torch.dtype, tgt_dtype: torch.dtype ): updated = False if tensor.dtype == src_dtype: tensor = tensor.to(dtype=tgt_dtype) updated = True return tensor, updated class QuickGELU(nn.Module): # From https://github.com/openai/CLIP/blob/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1/clip/model.py#L166 def forward(self, x: torch.Tensor): return x * torch.sigmoid(1.702 * x) class SelectElement(nn.Module): def __init__(self, index) -> None: super().__init__() self.index = index def forward(self, x): assert x.ndim >= 3 return x[:, self.index, ...] class SelectEOSAndProject(nn.Module): """ Text Pooling used in OpenCLIP """ def __init__(self, proj: nn.Module) -> None: super().__init__() self.proj = proj def forward(self, x, seq_len): assert x.ndim == 3 # x is of shape B x L x D # take features from the eot embedding (eot_token is the highest number in each sequence) x = x[torch.arange(x.shape[0]), seq_len] x = self.proj(x) return x
MovieChat-main
MovieChat/models/helpers.py
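A quick sketch of how Normalize and LearnableLogitScaling from helpers.py compose into a CLIP-style projection head: features are L2-normalized, then multiplied by exp(log_logit_scale), which is 1/0.07 (about 14.29) at initialization and clipped at max_logit_scale. The 512-dim feature size is just an illustrative choice.

import torch
from torch import nn
from MovieChat.models.helpers import Normalize, LearnableLogitScaling

head = nn.Sequential(
    Normalize(dim=-1),                        # unit-norm along the feature dimension
    LearnableLogitScaling(learnable=True),    # scale by exp(log_logit_scale), clipped at 100
)

feats = torch.randn(4, 512)
scaled = head(feats)
print(scaled.norm(dim=-1))                    # all ~14.29 = 1/0.07, the initial logit scale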
""" Adapted from salesforce@LAVIS. Below is the original copyright: Copyright (c) 2023, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import contextlib import logging import os import time import datetime import torch import torch.nn as nn import torch.distributed as dist import torch.nn.functional as F import MovieChat.common.dist_utils as dist_utils from MovieChat.common.dist_utils import download_cached_file from MovieChat.common.utils import is_url from MovieChat.common.logger import MetricLogger from MovieChat.models.base_model import BaseModel from MovieChat.models.Qformer import BertConfig, BertLMHeadModel from MovieChat.models.eva_vit import create_eva_vit_g from MovieChat.models.eva_vit_with_tome import create_eva_vit_g_with_tome from transformers import BertTokenizer class Blip2Base(BaseModel): @classmethod def init_tokenizer(cls): tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") tokenizer.add_special_tokens({"bos_token": "[DEC]"}) return tokenizer def maybe_autocast(self, dtype=torch.float16): # if on cpu, don't use autocast # if on gpu, use autocast with dtype if provided, otherwise use torch.float16 enable_autocast = self.device != torch.device("cpu") if enable_autocast: return torch.cuda.amp.autocast(dtype=dtype) else: return contextlib.nullcontext() @classmethod def init_Qformer(cls, num_query_token, vision_width, cross_attention_freq=2): encoder_config = BertConfig.from_pretrained("bert-base-uncased") encoder_config.encoder_width = vision_width # insert cross-attention layer every other block encoder_config.add_cross_attention = True encoder_config.cross_attention_freq = cross_attention_freq encoder_config.query_length = num_query_token Qformer = BertLMHeadModel(config=encoder_config) query_tokens = nn.Parameter( torch.zeros(1, num_query_token, encoder_config.hidden_size) ) query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range) return Qformer, query_tokens @classmethod def init_vision_encoder( cls, model_name, img_size, drop_path_rate, use_grad_checkpoint, precision ): assert model_name == "eva_clip_g", "vit model must be eva_clip_g for current version of MiniGPT-4" visual_encoder = create_eva_vit_g( img_size, drop_path_rate, use_grad_checkpoint, precision ) ln_vision = LayerNorm(visual_encoder.num_features) return visual_encoder, ln_vision def load_from_pretrained(self, url_or_filename): if is_url(url_or_filename): cached_file = download_cached_file( url_or_filename, check_hash=False, progress=True ) checkpoint = torch.load(cached_file, map_location="cpu") elif os.path.isfile(url_or_filename): checkpoint = torch.load(url_or_filename, map_location="cpu") else: raise RuntimeError("checkpoint url or path is invalid") state_dict = checkpoint["model"] msg = self.load_state_dict(state_dict, strict=False) logging.info("load checkpoint from %s" % url_or_filename) return msg def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self class LayerNorm(nn.LayerNorm): """Subclass torch's LayerNorm to handle fp16.""" def forward(self, x: torch.Tensor): orig_type = x.dtype ret = super().forward(x.type(torch.float32)) return ret.type(orig_type) def compute_sim_matrix(model, data_loader, **kwargs): k_test = kwargs.pop("k_test") metric_logger = MetricLogger(delimiter=" ") header = "Evaluation:" logging.info("Computing 
features for evaluation...") start_time = time.time() texts = data_loader.dataset.text num_text = len(texts) text_bs = 256 text_ids = [] text_embeds = [] text_atts = [] for i in range(0, num_text, text_bs): text = texts[i : min(num_text, i + text_bs)] text_input = model.tokenizer( text, padding="max_length", truncation=True, max_length=35, return_tensors="pt", ).to(model.device) text_feat = model.forward_text(text_input) text_embed = F.normalize(model.text_proj(text_feat)) text_embeds.append(text_embed) text_ids.append(text_input.input_ids) text_atts.append(text_input.attention_mask) text_embeds = torch.cat(text_embeds, dim=0) text_ids = torch.cat(text_ids, dim=0) text_atts = torch.cat(text_atts, dim=0) vit_feats = [] image_embeds = [] for samples in data_loader: image = samples["image"] image = image.to(model.device) image_feat, vit_feat = model.forward_image(image) image_embed = model.vision_proj(image_feat) image_embed = F.normalize(image_embed, dim=-1) vit_feats.append(vit_feat.cpu()) image_embeds.append(image_embed) vit_feats = torch.cat(vit_feats, dim=0) image_embeds = torch.cat(image_embeds, dim=0) sims_matrix = [] for image_embed in image_embeds: sim_q2t = image_embed @ text_embeds.t() sim_i2t, _ = sim_q2t.max(0) sims_matrix.append(sim_i2t) sims_matrix = torch.stack(sims_matrix, dim=0) score_matrix_i2t = torch.full( (len(data_loader.dataset.image), len(texts)), -100.0 ).to(model.device) num_tasks = dist_utils.get_world_size() rank = dist_utils.get_rank() step = sims_matrix.size(0) // num_tasks + 1 start = rank * step end = min(sims_matrix.size(0), start + step) for i, sims in enumerate( metric_logger.log_every(sims_matrix[start:end], 50, header) ): topk_sim, topk_idx = sims.topk(k=k_test, dim=0) image_inputs = vit_feats[start + i].repeat(k_test, 1, 1).to(model.device) score = model.compute_itm( image_inputs=image_inputs, text_ids=text_ids[topk_idx], text_atts=text_atts[topk_idx], ).float() score_matrix_i2t[start + i, topk_idx] = score + topk_sim sims_matrix = sims_matrix.t() score_matrix_t2i = torch.full( (len(texts), len(data_loader.dataset.image)), -100.0 ).to(model.device) step = sims_matrix.size(0) // num_tasks + 1 start = rank * step end = min(sims_matrix.size(0), start + step) for i, sims in enumerate( metric_logger.log_every(sims_matrix[start:end], 50, header) ): topk_sim, topk_idx = sims.topk(k=k_test, dim=0) image_inputs = vit_feats[topk_idx.cpu()].to(model.device) score = model.compute_itm( image_inputs=image_inputs, text_ids=text_ids[start + i].repeat(k_test, 1), text_atts=text_atts[start + i].repeat(k_test, 1), ).float() score_matrix_t2i[start + i, topk_idx] = score + topk_sim if dist_utils.is_dist_avail_and_initialized(): dist.barrier() torch.distributed.all_reduce( score_matrix_i2t, op=torch.distributed.ReduceOp.SUM ) torch.distributed.all_reduce( score_matrix_t2i, op=torch.distributed.ReduceOp.SUM ) total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) logging.info("Evaluation time {}".format(total_time_str)) return score_matrix_i2t.cpu().numpy(), score_matrix_t2i.cpu().numpy()
MovieChat-main
MovieChat/models/blip2.py
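The LayerNorm subclass in blip2.py exists so half-precision activations from the frozen vision tower are normalized in fp32 and handed back in their original dtype. A minimal sketch, assuming a 1408-wide feature (illustrative; it matches the EVA-ViT width used elsewhere in the repo):

import torch
from MovieChat.models.blip2 import LayerNorm   # the fp16-safe subclass defined above

ln = LayerNorm(1408)                  # weights stay in fp32
x = torch.randn(2, 257, 1408).half()  # half-precision visual tokens
y = ln(x)                             # normalized in fp32 internally, cast back to fp16
print(y.dtype)                        # torch.float16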
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import warnings import torch def _is_tensor_video_clip(clip): if not torch.is_tensor(clip): raise TypeError("clip should be Tensor. Got %s" % type(clip)) if not clip.ndimension() == 4: raise ValueError("clip should be 4D. Got %dD" % clip.dim()) return True def crop(clip, i, j, h, w): """ Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) """ if len(clip.size()) != 4: raise ValueError("clip should be a 4D tensor") return clip[..., i : i + h, j : j + w] def resize(clip, target_size, interpolation_mode): if len(target_size) != 2: raise ValueError( f"target size should be tuple (height, width), instead got {target_size}" ) return torch.nn.functional.interpolate( clip, size=target_size, mode=interpolation_mode, align_corners=False ) def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"): """ Do spatial cropping and resizing to the video clip Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) i (int): i in (i,j) i.e coordinates of the upper left corner. j (int): j in (i,j) i.e coordinates of the upper left corner. h (int): Height of the cropped region. w (int): Width of the cropped region. size (tuple(int, int)): height and width of resized clip Returns: clip (torch.tensor): Resized and cropped clip. Size is (C, T, H, W) """ if not _is_tensor_video_clip(clip): raise ValueError("clip should be a 4D torch.tensor") clip = crop(clip, i, j, h, w) clip = resize(clip, size, interpolation_mode) return clip def center_crop(clip, crop_size): if not _is_tensor_video_clip(clip): raise ValueError("clip should be a 4D torch.tensor") h, w = clip.size(-2), clip.size(-1) th, tw = crop_size if h < th or w < tw: raise ValueError("height and width must be no smaller than crop_size") i = int(round((h - th) / 2.0)) j = int(round((w - tw) / 2.0)) return crop(clip, i, j, th, tw) def to_tensor(clip): """ Convert tensor data type from uint8 to float, divide value by 255.0 and permute the dimensions of clip tensor Args: clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C) Return: clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W) """ _is_tensor_video_clip(clip) if not clip.dtype == torch.uint8: raise TypeError( "clip tensor should have data type uint8. Got %s" % str(clip.dtype) ) return clip.float().permute(3, 0, 1, 2) / 255.0 def normalize(clip, mean, std, inplace=False): """ Args: clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W) mean (tuple): pixel RGB mean. Size is (3) std (tuple): pixel standard deviation. Size is (3) Returns: normalized clip (torch.tensor): Size is (C, T, H, W) """ if not _is_tensor_video_clip(clip): raise ValueError("clip should be a 4D torch.tensor") if not inplace: clip = clip.clone() mean = torch.as_tensor(mean, dtype=clip.dtype, device=clip.device) std = torch.as_tensor(std, dtype=clip.dtype, device=clip.device) clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None]) return clip def hflip(clip): """ Args: clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W) Returns: flipped clip (torch.tensor): Size is (C, T, H, W) """ if not _is_tensor_video_clip(clip): raise ValueError("clip should be a 4D torch.tensor") return clip.flip(-1)
MovieChat-main
MovieChat/processors/functional_video.py
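The helpers in functional_video.py all operate on raw clip tensors, so they can be exercised without a real video. A small sketch chaining to_tensor, resize, normalize, and hflip on a synthetic uint8 clip; the mean/std values are the CLIP statistics used by the repo's processors, and the clip shape is arbitrary.

import torch
from MovieChat.processors import functional_video as F

clip_uint8 = torch.randint(0, 256, (8, 112, 112, 3), dtype=torch.uint8)  # (T, H, W, C) frames
clip = F.to_tensor(clip_uint8)                                           # -> (C, T, H, W), float in [0, 1]
clip = F.resize(clip, (224, 224), "bilinear")                            # spatially resize every frame
clip = F.normalize(clip, mean=(0.48145466, 0.4578275, 0.40821073),
                   std=(0.26862954, 0.26130258, 0.27577711))
clip = F.hflip(clip)                                                     # horizontal flip
print(clip.shape)                                                        # torch.Size([3, 8, 224, 224])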
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import re from MovieChat.common.registry import registry from MovieChat.processors.base_processor import BaseProcessor from MovieChat.processors.randaugment import RandomAugment from omegaconf import OmegaConf from torchvision import transforms from torchvision.transforms.functional import InterpolationMode class BlipImageBaseProcessor(BaseProcessor): def __init__(self, mean=None, std=None): if mean is None: mean = (0.48145466, 0.4578275, 0.40821073) if std is None: std = (0.26862954, 0.26130258, 0.27577711) self.normalize = transforms.Normalize(mean, std) @registry.register_processor("blip_caption") class BlipCaptionProcessor(BaseProcessor): def __init__(self, prompt="", max_words=50): self.prompt = prompt self.max_words = max_words def __call__(self, caption): caption = self.prompt + self.pre_caption(caption) return caption @classmethod def from_config(cls, cfg=None): if cfg is None: cfg = OmegaConf.create() prompt = cfg.get("prompt", "") max_words = cfg.get("max_words", 50) return cls(prompt=prompt, max_words=max_words) def pre_caption(self, caption): caption = re.sub( r"([.!\"()*#:;~])", " ", caption.lower(), ) caption = re.sub( r"\s{2,}", " ", caption, ) caption = caption.rstrip("\n") caption = caption.strip(" ") # truncate caption caption_words = caption.split(" ") if len(caption_words) > self.max_words: caption = " ".join(caption_words[: self.max_words]) return caption @registry.register_processor("blip2_image_train") class Blip2ImageTrainProcessor(BlipImageBaseProcessor): def __init__(self, image_size=224, mean=None, std=None, min_scale=0.5, max_scale=1.0): super().__init__(mean=mean, std=std) self.transform = transforms.Compose( [ transforms.RandomResizedCrop( image_size, scale=(min_scale, max_scale), interpolation=InterpolationMode.BICUBIC, ), transforms.ToTensor(), self.normalize, ] ) def __call__(self, item): return self.transform(item) @classmethod def from_config(cls, cfg=None): if cfg is None: cfg = OmegaConf.create() image_size = cfg.get("image_size", 224) mean = cfg.get("mean", None) std = cfg.get("std", None) min_scale = cfg.get("min_scale", 0.5) max_scale = cfg.get("max_scale", 1.0) return cls( image_size=image_size, mean=mean, std=std, min_scale=min_scale, max_scale=max_scale, ) @registry.register_processor("blip2_image_eval") class Blip2ImageEvalProcessor(BlipImageBaseProcessor): def __init__(self, image_size=224, mean=None, std=None): super().__init__(mean=mean, std=std) self.transform = transforms.Compose( [ transforms.Resize( (image_size, image_size), interpolation=InterpolationMode.BICUBIC ), transforms.ToTensor(), self.normalize, ] ) def __call__(self, item): return self.transform(item) @classmethod def from_config(cls, cfg=None): if cfg is None: cfg = OmegaConf.create() image_size = cfg.get("image_size", 224) mean = cfg.get("mean", None) std = cfg.get("std", None) return cls(image_size=image_size, mean=mean, std=std)
MovieChat-main
MovieChat/processors/blip_processors.py
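Both image processors above are callables over PIL images, and BlipCaptionProcessor normalizes raw captions. A short usage sketch; the grey placeholder image and the sample caption are just stand-ins.

from PIL import Image
from MovieChat.processors.blip_processors import (
    Blip2ImageEvalProcessor,
    BlipCaptionProcessor,
)

img_proc = Blip2ImageEvalProcessor(image_size=224)
image = Image.new("RGB", (640, 480), color=(127, 127, 127))   # placeholder image
tensor = img_proc(image)
print(tensor.shape)                                           # torch.Size([3, 224, 224])

cap_proc = BlipCaptionProcessor(prompt="a photo of ", max_words=50)
print(cap_proc("A DOG; running!!"))                           # "a photo of a dog running"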
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import torch from MovieChat.common.registry import registry from decord import VideoReader import decord import numpy as np from MovieChat.processors import transforms_video from MovieChat.processors.base_processor import BaseProcessor from MovieChat.processors.randaugment import VideoRandomAugment from MovieChat.processors import functional_video as F from omegaconf import OmegaConf from torchvision import transforms import random as rnd MAX_INT = registry.get("MAX_INT") decord.bridge.set_bridge("torch") def load_video(video_path, n_frms=MAX_INT, height=-1, width=-1, sampling="uniform", return_msg = False): decord.bridge.set_bridge("torch") vr = VideoReader(uri=video_path, height=height, width=width) vlen = len(vr) start, end = 0, vlen n_frms = min(n_frms, vlen) if sampling == "uniform": indices = np.arange(start, end, vlen / n_frms).astype(int).tolist() elif sampling == "headtail": indices_h = sorted(rnd.sample(range(vlen // 2), n_frms // 2)) indices_t = sorted(rnd.sample(range(vlen // 2, vlen), n_frms // 2)) indices = indices_h + indices_t else: raise NotImplementedError # get_batch -> T, H, W, C temp_frms = vr.get_batch(indices) tensor_frms = torch.from_numpy(temp_frms) if type(temp_frms) is not torch.Tensor else temp_frms frms = tensor_frms.permute(3, 0, 1, 2).float() # (C, T, H, W) if not return_msg: return frms fps = float(vr.get_avg_fps()) sec = ", ".join([str(round(f / fps, 1)) for f in indices]) # " " should be added in the start and end msg = f"The video contains {len(indices)} frames sampled at {sec} seconds. " return frms, msg class AlproVideoBaseProcessor(BaseProcessor): def __init__(self, mean=None, std=None, n_frms=MAX_INT): if mean is None: mean = (0.48145466, 0.4578275, 0.40821073) if std is None: std = (0.26862954, 0.26130258, 0.27577711) self.normalize = transforms_video.NormalizeVideo(mean, std) self.n_frms = n_frms class ToUint8(object): def __init__(self): pass def __call__(self, tensor): return tensor.to(torch.uint8) def __repr__(self): return self.__class__.__name__ class ToTHWC(object): """ Args: clip (torch.tensor, dtype=torch.uint8): Size is (C, T, H, W) Return: clip (torch.tensor, dtype=torch.float): Size is (T, H, W, C) """ def __init__(self): pass def __call__(self, tensor): return tensor.permute(1, 2, 3, 0) def __repr__(self): return self.__class__.__name__ class ResizeVideo(object): def __init__(self, target_size, interpolation_mode="bilinear"): self.target_size = target_size self.interpolation_mode = interpolation_mode def __call__(self, clip): """ Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) Returns: torch.tensor: central cropping of video clip. 
Size is (C, T, crop_size, crop_size) """ return F.resize(clip, self.target_size, self.interpolation_mode) def __repr__(self): return self.__class__.__name__ + "(resize_size={0})".format(self.target_size) @registry.register_processor("alpro_video_train") class AlproVideoTrainProcessor(AlproVideoBaseProcessor): def __init__( self, image_size=384, mean=None, std=None, min_scale=0.5, max_scale=1.0, n_frms=MAX_INT, ): super().__init__(mean=mean, std=std, n_frms=n_frms) self.image_size = image_size self.transform = transforms.Compose( [ # Video size is (C, T, H, W) transforms_video.RandomResizedCropVideo( image_size, scale=(min_scale, max_scale), interpolation_mode="bicubic", ), ToTHWC(), # C, T, H, W -> T, H, W, C ToUint8(), transforms_video.ToTensorVideo(), # T, H, W, C -> C, T, H, W self.normalize, ] ) def __call__(self, vpath): """ Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) Returns: torch.tensor: video clip after transforms. Size is (C, T, size, size). """ clip = load_video( video_path=vpath, n_frms=self.n_frms, height=self.image_size, width=self.image_size, sampling="headtail", ) return self.transform(clip) @classmethod def from_config(cls, cfg=None): if cfg is None: cfg = OmegaConf.create() image_size = cfg.get("image_size", 256) mean = cfg.get("mean", None) std = cfg.get("std", None) min_scale = cfg.get("min_scale", 0.5) max_scale = cfg.get("max_scale", 1.0) n_frms = cfg.get("n_frms", MAX_INT) return cls( image_size=image_size, mean=mean, std=std, min_scale=min_scale, max_scale=max_scale, n_frms=n_frms, ) @registry.register_processor("alpro_video_eval") class AlproVideoEvalProcessor(AlproVideoBaseProcessor): def __init__(self, image_size=256, mean=None, std=None, n_frms=MAX_INT): super().__init__(mean=mean, std=std, n_frms=n_frms) self.image_size = image_size # Input video size is (C, T, H, W) self.transform = transforms.Compose( [ # frames will be resized during decord loading. ToUint8(), # C, T, H, W ToTHWC(), # T, H, W, C transforms_video.ToTensorVideo(), # C, T, H, W self.normalize, # C, T, H, W ] ) def __call__(self, vpath): """ Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) Returns: torch.tensor: video clip after transforms. Size is (C, T, size, size). """ clip = load_video( video_path=vpath, n_frms=self.n_frms, height=self.image_size, width=self.image_size, ) return self.transform(clip) @classmethod def from_config(cls, cfg=None): if cfg is None: cfg = OmegaConf.create() image_size = cfg.get("image_size", 256) mean = cfg.get("mean", None) std = cfg.get("std", None) n_frms = cfg.get("n_frms", MAX_INT) return cls(image_size=image_size, mean=mean, std=std, n_frms=n_frms)
MovieChat-main
MovieChat/processors/video_processor.py
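A hedged sketch of the frame loader above; the video path is hypothetical and decord must be installed. Uniform sampling returns a float (C, T, H, W) tensor, optionally with a timing message:

from MovieChat.processors.video_processor import load_video

frames, msg = load_video(
    "example_clip.mp4",   # hypothetical local video file
    n_frms=8,
    height=224,
    width=224,
    sampling="uniform",
    return_msg=True,
)
print(frames.shape)  # typically torch.Size([3, 8, 224, 224]) for an 8-frame sample
print(msg)           # "The video contains 8 frames sampled at ... seconds. "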
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from MovieChat.processors.base_processor import BaseProcessor from MovieChat.processors.blip_processors import ( Blip2ImageTrainProcessor, Blip2ImageEvalProcessor, BlipCaptionProcessor, ) from MovieChat.processors.video_processor import ( AlproVideoTrainProcessor, AlproVideoEvalProcessor ) from MovieChat.common.registry import registry __all__ = [ "BaseProcessor", "Blip2ImageTrainProcessor", "Blip2ImageEvalProcessor", "BlipCaptionProcessor", "AlproVideoTrainProcessor", "AlproVideoEvalProcessor", ] def load_processor(name, cfg=None): """ Example >>> processor = load_processor("alpro_video_train", cfg=None) """ processor = registry.get_processor_class(name).from_config(cfg) return processor
MovieChat-main
MovieChat/processors/__init__.py
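The registry-backed factory exported here can be exercised as in its own docstring; a short sketch with two of the processor names registered above (passing cfg=None falls back to each processor's defaults):

from MovieChat.processors import load_processor

image_proc = load_processor("blip2_image_eval", cfg=None)  # image_size defaults to 224
text_proc = load_processor("blip_caption", cfg=None)       # empty prompt, max_words=50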
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from omegaconf import OmegaConf class BaseProcessor: def __init__(self): self.transform = lambda x: x return def __call__(self, item): return self.transform(item) @classmethod def from_config(cls, cfg=None): return cls() def build(self, **kwargs): cfg = OmegaConf.create(kwargs) return self.from_config(cfg)
MovieChat-main
MovieChat/processors/base_processor.py
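A quick sketch of the base class's default behaviour: with no configuration the transform is the identity, and build() simply wraps keyword arguments in an OmegaConf node before delegating to from_config:

from MovieChat.processors.base_processor import BaseProcessor

p = BaseProcessor()
assert p("unchanged") == "unchanged"   # default transform is the identity
p2 = p.build(image_size=224)           # kwargs -> OmegaConf -> from_config (ignored by the base class)
assert isinstance(p2, BaseProcessor)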
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import cv2 import numpy as np import torch ## aug functions def identity_func(img): return img def autocontrast_func(img, cutoff=0): """ same output as PIL.ImageOps.autocontrast """ n_bins = 256 def tune_channel(ch): n = ch.size cut = cutoff * n // 100 if cut == 0: high, low = ch.max(), ch.min() else: hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins]) low = np.argwhere(np.cumsum(hist) > cut) low = 0 if low.shape[0] == 0 else low[0] high = np.argwhere(np.cumsum(hist[::-1]) > cut) high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0] if high <= low: table = np.arange(n_bins) else: scale = (n_bins - 1) / (high - low) offset = -low * scale table = np.arange(n_bins) * scale + offset table[table < 0] = 0 table[table > n_bins - 1] = n_bins - 1 table = table.clip(0, 255).astype(np.uint8) return table[ch] channels = [tune_channel(ch) for ch in cv2.split(img)] out = cv2.merge(channels) return out def equalize_func(img): """ same output as PIL.ImageOps.equalize PIL's implementation is different from cv2.equalize """ n_bins = 256 def tune_channel(ch): hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins]) non_zero_hist = hist[hist != 0].reshape(-1) step = np.sum(non_zero_hist[:-1]) // (n_bins - 1) if step == 0: return ch n = np.empty_like(hist) n[0] = step // 2 n[1:] = hist[:-1] table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8) return table[ch] channels = [tune_channel(ch) for ch in cv2.split(img)] out = cv2.merge(channels) return out def rotate_func(img, degree, fill=(0, 0, 0)): """ like PIL, rotate by degree, not radians """ H, W = img.shape[0], img.shape[1] center = W / 2, H / 2 M = cv2.getRotationMatrix2D(center, degree, 1) out = cv2.warpAffine(img, M, (W, H), borderValue=fill) return out def solarize_func(img, thresh=128): """ same output as PIL.ImageOps.posterize """ table = np.array([el if el < thresh else 255 - el for el in range(256)]) table = table.clip(0, 255).astype(np.uint8) out = table[img] return out def color_func(img, factor): """ same output as PIL.ImageEnhance.Color """ M = np.float32( [[0.886, -0.114, -0.114], [-0.587, 0.413, -0.587], [-0.299, -0.299, 0.701]] ) * factor + np.float32([[0.114], [0.587], [0.299]]) out = np.matmul(img, M).clip(0, 255).astype(np.uint8) return out def contrast_func(img, factor): """ same output as PIL.ImageEnhance.Contrast """ mean = np.sum(np.mean(img, axis=(0, 1)) * np.array([0.114, 0.587, 0.299])) table = ( np.array([(el - mean) * factor + mean for el in range(256)]) .clip(0, 255) .astype(np.uint8) ) out = table[img] return out def brightness_func(img, factor): """ same output as PIL.ImageEnhance.Contrast """ table = (np.arange(256, dtype=np.float32) * factor).clip(0, 255).astype(np.uint8) out = table[img] return out def sharpness_func(img, factor): """ The differences the this result and PIL are all on the 4 boundaries, the center areas are same """ kernel = np.ones((3, 3), dtype=np.float32) kernel[1][1] = 5 kernel /= 13 degenerate = cv2.filter2D(img, -1, kernel) if factor == 0.0: out = degenerate elif factor == 1.0: out = img else: out = img.astype(np.float32) degenerate = degenerate.astype(np.float32)[1:-1, 1:-1, :] out[1:-1, 1:-1, :] = degenerate + factor * (out[1:-1, 1:-1, :] - degenerate) out = out.astype(np.uint8) return out def shear_x_func(img, factor, fill=(0, 0, 0)): H, W = 
img.shape[0], img.shape[1] M = np.float32([[1, factor, 0], [0, 1, 0]]) out = cv2.warpAffine( img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR ).astype(np.uint8) return out def translate_x_func(img, offset, fill=(0, 0, 0)): """ same output as PIL.Image.transform """ H, W = img.shape[0], img.shape[1] M = np.float32([[1, 0, -offset], [0, 1, 0]]) out = cv2.warpAffine( img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR ).astype(np.uint8) return out def translate_y_func(img, offset, fill=(0, 0, 0)): """ same output as PIL.Image.transform """ H, W = img.shape[0], img.shape[1] M = np.float32([[1, 0, 0], [0, 1, -offset]]) out = cv2.warpAffine( img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR ).astype(np.uint8) return out def posterize_func(img, bits): """ same output as PIL.ImageOps.posterize """ out = np.bitwise_and(img, np.uint8(255 << (8 - bits))) return out def shear_y_func(img, factor, fill=(0, 0, 0)): H, W = img.shape[0], img.shape[1] M = np.float32([[1, 0, 0], [factor, 1, 0]]) out = cv2.warpAffine( img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR ).astype(np.uint8) return out def cutout_func(img, pad_size, replace=(0, 0, 0)): replace = np.array(replace, dtype=np.uint8) H, W = img.shape[0], img.shape[1] rh, rw = np.random.random(2) pad_size = pad_size // 2 ch, cw = int(rh * H), int(rw * W) x1, x2 = max(ch - pad_size, 0), min(ch + pad_size, H) y1, y2 = max(cw - pad_size, 0), min(cw + pad_size, W) out = img.copy() out[x1:x2, y1:y2, :] = replace return out ### level to args def enhance_level_to_args(MAX_LEVEL): def level_to_args(level): return ((level / MAX_LEVEL) * 1.8 + 0.1,) return level_to_args def shear_level_to_args(MAX_LEVEL, replace_value): def level_to_args(level): level = (level / MAX_LEVEL) * 0.3 if np.random.random() > 0.5: level = -level return (level, replace_value) return level_to_args def translate_level_to_args(translate_const, MAX_LEVEL, replace_value): def level_to_args(level): level = (level / MAX_LEVEL) * float(translate_const) if np.random.random() > 0.5: level = -level return (level, replace_value) return level_to_args def cutout_level_to_args(cutout_const, MAX_LEVEL, replace_value): def level_to_args(level): level = int((level / MAX_LEVEL) * cutout_const) return (level, replace_value) return level_to_args def solarize_level_to_args(MAX_LEVEL): def level_to_args(level): level = int((level / MAX_LEVEL) * 256) return (level,) return level_to_args def none_level_to_args(level): return () def posterize_level_to_args(MAX_LEVEL): def level_to_args(level): level = int((level / MAX_LEVEL) * 4) return (level,) return level_to_args def rotate_level_to_args(MAX_LEVEL, replace_value): def level_to_args(level): level = (level / MAX_LEVEL) * 30 if np.random.random() < 0.5: level = -level return (level, replace_value) return level_to_args func_dict = { "Identity": identity_func, "AutoContrast": autocontrast_func, "Equalize": equalize_func, "Rotate": rotate_func, "Solarize": solarize_func, "Color": color_func, "Contrast": contrast_func, "Brightness": brightness_func, "Sharpness": sharpness_func, "ShearX": shear_x_func, "TranslateX": translate_x_func, "TranslateY": translate_y_func, "Posterize": posterize_func, "ShearY": shear_y_func, } translate_const = 10 MAX_LEVEL = 10 replace_value = (128, 128, 128) arg_dict = { "Identity": none_level_to_args, "AutoContrast": none_level_to_args, "Equalize": none_level_to_args, "Rotate": rotate_level_to_args(MAX_LEVEL, replace_value), "Solarize": solarize_level_to_args(MAX_LEVEL), "Color": enhance_level_to_args(MAX_LEVEL), 
"Contrast": enhance_level_to_args(MAX_LEVEL), "Brightness": enhance_level_to_args(MAX_LEVEL), "Sharpness": enhance_level_to_args(MAX_LEVEL), "ShearX": shear_level_to_args(MAX_LEVEL, replace_value), "TranslateX": translate_level_to_args(translate_const, MAX_LEVEL, replace_value), "TranslateY": translate_level_to_args(translate_const, MAX_LEVEL, replace_value), "Posterize": posterize_level_to_args(MAX_LEVEL), "ShearY": shear_level_to_args(MAX_LEVEL, replace_value), } class RandomAugment(object): def __init__(self, N=2, M=10, isPIL=False, augs=[]): self.N = N self.M = M self.isPIL = isPIL if augs: self.augs = augs else: self.augs = list(arg_dict.keys()) def get_random_ops(self): sampled_ops = np.random.choice(self.augs, self.N) return [(op, 0.5, self.M) for op in sampled_ops] def __call__(self, img): if self.isPIL: img = np.array(img) ops = self.get_random_ops() for name, prob, level in ops: if np.random.random() > prob: continue args = arg_dict[name](level) img = func_dict[name](img, *args) return img class VideoRandomAugment(object): def __init__(self, N=2, M=10, p=0.0, tensor_in_tensor_out=True, augs=[]): self.N = N self.M = M self.p = p self.tensor_in_tensor_out = tensor_in_tensor_out if augs: self.augs = augs else: self.augs = list(arg_dict.keys()) def get_random_ops(self): sampled_ops = np.random.choice(self.augs, self.N, replace=False) return [(op, self.M) for op in sampled_ops] def __call__(self, frames): assert ( frames.shape[-1] == 3 ), "Expecting last dimension for 3-channels RGB (b, h, w, c)." if self.tensor_in_tensor_out: frames = frames.numpy().astype(np.uint8) num_frames = frames.shape[0] ops = num_frames * [self.get_random_ops()] apply_or_not = num_frames * [np.random.random(size=self.N) > self.p] frames = torch.stack( list(map(self._aug, frames, ops, apply_or_not)), dim=0 ).float() return frames def _aug(self, img, ops, apply_or_not): for i, (name, level) in enumerate(ops): if not apply_or_not[i]: continue args = arg_dict[name](level) img = func_dict[name](img, *args) return torch.from_numpy(img) if __name__ == "__main__": a = RandomAugment() img = np.random.randn(32, 32, 3) a(img)
MovieChat-main
MovieChat/processors/randaugment.py
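A hedged sketch of applying the image-level RandAugment policy above to a uint8 HWC array (OpenCV is required; the op subset chosen here is just an example):

import numpy as np
from MovieChat.processors.randaugment import RandomAugment

aug = RandomAugment(N=2, M=7, isPIL=True, augs=["Identity", "Brightness", "ShearX"])
img = (np.random.rand(224, 224, 3) * 255).astype(np.uint8)  # fake HWC uint8 image
out = aug(img)  # two ops sampled, each applied with probability 0.5; output keeps the HWC uint8 layout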
#!/usr/bin/env python3 """ Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import numbers import random from torchvision.transforms import ( RandomCrop, RandomResizedCrop, ) import MovieChat.processors.functional_video as F __all__ = [ "RandomCropVideo", "RandomResizedCropVideo", "CenterCropVideo", "NormalizeVideo", "ToTensorVideo", "RandomHorizontalFlipVideo", ] class RandomCropVideo(RandomCrop): def __init__(self, size): if isinstance(size, numbers.Number): self.size = (int(size), int(size)) else: self.size = size def __call__(self, clip): """ Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) Returns: torch.tensor: randomly cropped/resized video clip. size is (C, T, OH, OW) """ i, j, h, w = self.get_params(clip, self.size) return F.crop(clip, i, j, h, w) def __repr__(self) -> str: return f"{self.__class__.__name__}(size={self.size})" class RandomResizedCropVideo(RandomResizedCrop): def __init__( self, size, scale=(0.08, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0), interpolation_mode="bilinear", ): if isinstance(size, tuple): if len(size) != 2: raise ValueError( f"size should be tuple (height, width), instead got {size}" ) self.size = size else: self.size = (size, size) self.interpolation_mode = interpolation_mode self.scale = scale self.ratio = ratio def __call__(self, clip): """ Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) Returns: torch.tensor: randomly cropped/resized video clip. size is (C, T, H, W) """ i, j, h, w = self.get_params(clip, self.scale, self.ratio) return F.resized_crop(clip, i, j, h, w, self.size, self.interpolation_mode) def __repr__(self) -> str: return f"{self.__class__.__name__}(size={self.size}, interpolation_mode={self.interpolation_mode}, scale={self.scale}, ratio={self.ratio})" class CenterCropVideo: def __init__(self, crop_size): if isinstance(crop_size, numbers.Number): self.crop_size = (int(crop_size), int(crop_size)) else: self.crop_size = crop_size def __call__(self, clip): """ Args: clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) Returns: torch.tensor: central cropping of video clip. Size is (C, T, crop_size, crop_size) """ return F.center_crop(clip, self.crop_size) def __repr__(self) -> str: return f"{self.__class__.__name__}(crop_size={self.crop_size})" class NormalizeVideo: """ Normalize the video clip by mean subtraction and division by standard deviation Args: mean (3-tuple): pixel RGB mean std (3-tuple): pixel RGB standard deviation inplace (boolean): whether do in-place normalization """ def __init__(self, mean, std, inplace=False): self.mean = mean self.std = std self.inplace = inplace def __call__(self, clip): """ Args: clip (torch.tensor): video clip to be normalized. 
Size is (C, T, H, W) """ return F.normalize(clip, self.mean, self.std, self.inplace) def __repr__(self) -> str: return f"{self.__class__.__name__}(mean={self.mean}, std={self.std}, inplace={self.inplace})" class ToTensorVideo: """ Convert tensor data type from uint8 to float, divide value by 255.0 and permute the dimensions of clip tensor """ def __init__(self): pass def __call__(self, clip): """ Args: clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C) Return: clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W) """ return F.to_tensor(clip) def __repr__(self) -> str: return self.__class__.__name__ class RandomHorizontalFlipVideo: """ Flip the video clip along the horizonal direction with a given probability Args: p (float): probability of the clip being flipped. Default value is 0.5 """ def __init__(self, p=0.5): self.p = p def __call__(self, clip): """ Args: clip (torch.tensor): Size is (C, T, H, W) Return: clip (torch.tensor): Size is (C, T, H, W) """ if random.random() < self.p: clip = F.hflip(clip) return clip def __repr__(self) -> str: return f"{self.__class__.__name__}(p={self.p})"
MovieChat-main
MovieChat/processors/transforms_video.py
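A rough sketch of the eval-style tensor pipeline built from these transforms, assuming functional_video mirrors torchvision's reference behaviour (uint8 THWC in, normalized float CTHW out):

import torch
from MovieChat.processors import transforms_video

clip_thwc = torch.randint(0, 256, (8, 224, 224, 3), dtype=torch.uint8)  # (T, H, W, C)
to_tensor = transforms_video.ToTensorVideo()  # uint8 (T, H, W, C) -> float (C, T, H, W) scaled to [0, 1]
normalize = transforms_video.NormalizeVideo(
    mean=(0.48145466, 0.4578275, 0.40821073),
    std=(0.26862954, 0.26130258, 0.27577711),
)
clip_cthw = normalize(to_tensor(clip_thwc))  # (3, 8, 224, 224)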
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import math from MovieChat.common.registry import registry @registry.register_lr_scheduler("linear_warmup_step_lr") class LinearWarmupStepLRScheduler: def __init__( self, optimizer, max_epoch, min_lr, init_lr, decay_rate=1, warmup_start_lr=-1, warmup_steps=0, **kwargs ): self.optimizer = optimizer self.max_epoch = max_epoch self.min_lr = min_lr self.decay_rate = decay_rate self.init_lr = init_lr self.warmup_steps = warmup_steps self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr def step(self, cur_epoch, cur_step): if cur_epoch == 0: warmup_lr_schedule( step=cur_step, optimizer=self.optimizer, max_step=self.warmup_steps, init_lr=self.warmup_start_lr, max_lr=self.init_lr, ) else: step_lr_schedule( epoch=cur_epoch, optimizer=self.optimizer, init_lr=self.init_lr, min_lr=self.min_lr, decay_rate=self.decay_rate, ) @registry.register_lr_scheduler("linear_warmup_cosine_lr") class LinearWarmupCosineLRScheduler: def __init__( self, optimizer, max_epoch, iters_per_epoch, min_lr, init_lr, warmup_steps=0, warmup_start_lr=-1, **kwargs ): self.optimizer = optimizer self.max_epoch = max_epoch self.iters_per_epoch = iters_per_epoch self.min_lr = min_lr self.init_lr = init_lr self.warmup_steps = warmup_steps self.warmup_start_lr = warmup_start_lr if warmup_start_lr >= 0 else init_lr def step(self, cur_epoch, cur_step): total_cur_step = cur_epoch * self.iters_per_epoch + cur_step if total_cur_step < self.warmup_steps: warmup_lr_schedule( step=cur_step, optimizer=self.optimizer, max_step=self.warmup_steps, init_lr=self.warmup_start_lr, max_lr=self.init_lr, ) else: cosine_lr_schedule( epoch=total_cur_step, optimizer=self.optimizer, max_epoch=self.max_epoch * self.iters_per_epoch, init_lr=self.init_lr, min_lr=self.min_lr, ) def cosine_lr_schedule(optimizer, epoch, max_epoch, init_lr, min_lr): """Decay the learning rate""" lr = (init_lr - min_lr) * 0.5 * ( 1.0 + math.cos(math.pi * epoch / max_epoch) ) + min_lr for param_group in optimizer.param_groups: param_group["lr"] = lr def warmup_lr_schedule(optimizer, step, max_step, init_lr, max_lr): """Warmup the learning rate""" lr = min(max_lr, init_lr + (max_lr - init_lr) * step / max(max_step, 1)) for param_group in optimizer.param_groups: param_group["lr"] = lr def step_lr_schedule(optimizer, epoch, init_lr, min_lr, decay_rate): """Decay the learning rate""" lr = max(min_lr, init_lr * (decay_rate**epoch)) for param_group in optimizer.param_groups: param_group["lr"] = lr
MovieChat-main
MovieChat/common/optims.py
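A small sketch of driving the cosine scheduler above by hand; the optimizer and loop sizes are arbitrary stand-ins:

import torch
from MovieChat.common.optims import LinearWarmupCosineLRScheduler

model = torch.nn.Linear(8, 8)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
scheduler = LinearWarmupCosineLRScheduler(
    optimizer,
    max_epoch=3,
    iters_per_epoch=100,
    min_lr=1e-5,
    init_lr=1e-4,
    warmup_steps=50,
    warmup_start_lr=1e-6,
)
for epoch in range(3):
    for step in range(100):
        scheduler.step(cur_epoch=epoch, cur_step=step)  # linear warmup for 50 iters, then cosine decay
        # ... forward / backward / optimizer.step() would go here ...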
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import logging import json from typing import Dict from omegaconf import OmegaConf from MovieChat.common.registry import registry class Config: def __init__(self, args): self.config = {} self.args = args # Register the config and configuration for setup registry.register("configuration", self) user_config = self._build_opt_list(self.args.options) config = OmegaConf.load(self.args.cfg_path) runner_config = self.build_runner_config(config) model_config = self.build_model_config(config, **user_config) dataset_config = self.build_dataset_config(config) # Validate the user-provided runner configuration # model and dataset configuration are supposed to be validated by the respective classes # [TODO] validate the model/dataset configuration # self._validate_runner_config(runner_config) # Override the default configuration with user options. self.config = OmegaConf.merge( runner_config, model_config, dataset_config, user_config ) def _validate_runner_config(self, runner_config): """ This method validates the configuration, such that 1) all the user specified options are valid; 2) no type mismatches between the user specified options and the config. """ runner_config_validator = create_runner_config_validator() runner_config_validator.validate(runner_config) def _build_opt_list(self, opts): opts_dot_list = self._convert_to_dot_list(opts) return OmegaConf.from_dotlist(opts_dot_list) @staticmethod def build_model_config(config, **kwargs): model = config.get("model", None) assert model is not None, "Missing model configuration file." model_cls = registry.get_model_class(model.arch) assert model_cls is not None, f"Model '{model.arch}' has not been registered." model_type = kwargs.get("model.model_type", None) if not model_type: model_type = model.get("model_type", None) # else use the model type selected by user. assert model_type is not None, "Missing model_type." model_config_path = model_cls.default_config_path(model_type=model_type) model_config = OmegaConf.create() # hierarchy override, customized config > default config model_config = OmegaConf.merge( model_config, OmegaConf.load(model_config_path), {"model": config["model"]}, ) return model_config @staticmethod def build_runner_config(config): return {"run": config.run} @staticmethod def build_dataset_config(config): datasets = config.get("datasets", None) if datasets is None: raise KeyError( "Expecting 'datasets' as the root key for dataset configuration." 
) dataset_config = OmegaConf.create() for dataset_name in datasets: builder_cls = registry.get_builder_class(dataset_name) dataset_config_type = datasets[dataset_name].get("type", "default") dataset_config_path = builder_cls.default_config_path( type=dataset_config_type ) # hierarchy override, customized config > default config dataset_config = OmegaConf.merge( dataset_config, OmegaConf.load(dataset_config_path), {"datasets": {dataset_name: config["datasets"][dataset_name]}}, ) return dataset_config def _convert_to_dot_list(self, opts): if opts is None: opts = [] if len(opts) == 0: return opts has_equal = opts[0].find("=") != -1 if has_equal: return opts return [(opt + "=" + value) for opt, value in zip(opts[0::2], opts[1::2])] def get_config(self): return self.config @property def run_cfg(self): return self.config.run @property def datasets_cfg(self): return self.config.datasets @property def model_cfg(self): return self.config.model def pretty_print(self): logging.info("\n===== Running Parameters =====") logging.info(self._convert_node_to_json(self.config.run)) logging.info("\n====== Dataset Attributes ======") datasets = self.config.datasets for dataset in datasets: if dataset in self.config.datasets: logging.info(f"\n======== {dataset} =======") dataset_config = self.config.datasets[dataset] logging.info(self._convert_node_to_json(dataset_config)) else: logging.warning(f"No dataset named '{dataset}' in config. Skipping") logging.info(f"\n====== Model Attributes ======") logging.info(self._convert_node_to_json(self.config.model)) def _convert_node_to_json(self, node): container = OmegaConf.to_container(node, resolve=True) return json.dumps(container, indent=4, sort_keys=True) def to_dict(self): return OmegaConf.to_container(self.config) def node_to_dict(node): return OmegaConf.to_container(node) class ConfigValidator: """ This is a preliminary implementation to centralize and validate the configuration. May be altered in the future. A helper class to validate configurations from yaml file. This serves the following purposes: 1. Ensure all the options in the yaml are defined, raise error if not. 2. when type mismatches are found, the validator will raise an error. 3. a central place to store and display helpful messages for supported configurations. """ class _Argument: def __init__(self, name, choices=None, type=None, help=None): self.name = name self.val = None self.choices = choices self.type = type self.help = help def __str__(self): s = f"{self.name}={self.val}" if self.type is not None: s += f", ({self.type})" if self.choices is not None: s += f", choices: {self.choices}" if self.help is not None: s += f", ({self.help})" return s def __init__(self, description): self.description = description self.arguments = dict() self.parsed_args = None def __getitem__(self, key): assert self.parsed_args is not None, "No arguments parsed yet." return self.parsed_args[key] def __str__(self) -> str: return self.format_help() def add_argument(self, *args, **kwargs): """ Assume the first argument is the name of the argument. """ self.arguments[args[0]] = self._Argument(*args, **kwargs) def validate(self, config=None): """ Convert yaml config (dict-like) to list, required by argparse. """ for k, v in config.items(): assert ( k in self.arguments ), f"""{k} is not a valid argument. 
Support arguments are {self.format_arguments()}.""" if self.arguments[k].type is not None: try: self.arguments[k].val = self.arguments[k].type(v) except ValueError: raise ValueError(f"{k} is not a valid {self.arguments[k].type}.") if self.arguments[k].choices is not None: assert ( v in self.arguments[k].choices ), f"""{k} must be one of {self.arguments[k].choices}.""" return config def format_arguments(self): return str([f"{k}" for k in sorted(self.arguments.keys())]) def format_help(self): # description + key-value pair string for each argument help_msg = str(self.description) return help_msg + ", available arguments: " + self.format_arguments() def print_help(self): # display help message print(self.format_help()) def create_runner_config_validator(): validator = ConfigValidator(description="Runner configurations") validator.add_argument( "runner", type=str, choices=["runner_base", "runner_iter"], help="""Runner to use. The "runner_base" uses epoch-based training while iter-based runner runs based on iters. Default: runner_base""", ) # add argumetns for training dataset ratios validator.add_argument( "train_dataset_ratios", type=Dict[str, float], help="""Ratios of training dataset. This is used in iteration-based runner. Do not support for epoch-based runner because how to define an epoch becomes tricky. Default: None""", ) validator.add_argument( "max_iters", type=float, help="Maximum number of iterations to run.", ) validator.add_argument( "max_epoch", type=int, help="Maximum number of epochs to run.", ) # add arguments for iters_per_inner_epoch validator.add_argument( "iters_per_inner_epoch", type=float, help="Number of iterations per inner epoch. This is required when runner is runner_iter.", ) lr_scheds_choices = registry.list_lr_schedulers() validator.add_argument( "lr_sched", type=str, choices=lr_scheds_choices, help="Learning rate scheduler to use, from {}".format(lr_scheds_choices), ) task_choices = registry.list_tasks() validator.add_argument( "task", type=str, choices=task_choices, help="Task to use, from {}".format(task_choices), ) # add arguments for init_lr validator.add_argument( "init_lr", type=float, help="Initial learning rate. This will be the learning rate after warmup and before decay.", ) # add arguments for min_lr validator.add_argument( "min_lr", type=float, help="Minimum learning rate (after decay).", ) # add arguments for warmup_lr validator.add_argument( "warmup_lr", type=float, help="Starting learning rate for warmup.", ) # add arguments for learning rate decay rate validator.add_argument( "lr_decay_rate", type=float, help="Learning rate decay rate. Required if using a decaying learning rate scheduler.", ) # add arguments for weight decay validator.add_argument( "weight_decay", type=float, help="Weight decay rate.", ) # add arguments for training batch size validator.add_argument( "batch_size_train", type=int, help="Training batch size.", ) # add arguments for evaluation batch size validator.add_argument( "batch_size_eval", type=int, help="Evaluation batch size, including validation and testing.", ) # add arguments for number of workers for data loading validator.add_argument( "num_workers", help="Number of workers for data loading.", ) # add arguments for warm up steps validator.add_argument( "warmup_steps", type=int, help="Number of warmup steps. 
Required if a warmup schedule is used.", ) # add arguments for random seed validator.add_argument( "seed", type=int, help="Random seed.", ) # add arguments for output directory validator.add_argument( "output_dir", type=str, help="Output directory to save checkpoints and logs.", ) # add arguments for whether only use evaluation validator.add_argument( "evaluate", help="Whether to only evaluate the model. If true, training will not be performed.", ) # add arguments for splits used for training, e.g. ["train", "val"] validator.add_argument( "train_splits", type=list, help="Splits to use for training.", ) # add arguments for splits used for validation, e.g. ["val"] validator.add_argument( "valid_splits", type=list, help="Splits to use for validation. If not provided, will skip the validation.", ) # add arguments for splits used for testing, e.g. ["test"] validator.add_argument( "test_splits", type=list, help="Splits to use for testing. If not provided, will skip the testing.", ) # add arguments for accumulating gradient for iterations validator.add_argument( "accum_grad_iters", type=int, help="Number of iterations to accumulate gradient for.", ) # ====== distributed training ====== validator.add_argument( "device", type=str, choices=["cpu", "cuda"], help="Device to use. Support 'cuda' or 'cpu' as for now.", ) validator.add_argument( "world_size", type=int, help="Number of processes participating in the job.", ) validator.add_argument("dist_url", type=str) validator.add_argument("distributed", type=bool) # add arguments to opt using distributed sampler during evaluation or not validator.add_argument( "use_dist_eval_sampler", type=bool, help="Whether to use distributed sampler during evaluation or not.", ) # ====== task specific ====== # generation task specific arguments # add arguments for maximal length of text output validator.add_argument( "max_len", type=int, help="Maximal length of text output.", ) # add arguments for minimal length of text output validator.add_argument( "min_len", type=int, help="Minimal length of text output.", ) # add arguments number of beams validator.add_argument( "num_beams", type=int, help="Number of beams used for beam search.", ) # vqa task specific arguments # add arguments for number of answer candidates validator.add_argument( "num_ans_candidates", type=int, help="""For ALBEF and BLIP, these models first rank answers according to likelihood to select answer candidates.""", ) # add arguments for inference method validator.add_argument( "inference_method", type=str, choices=["genearte", "rank"], help="""Inference method to use for question answering. If rank, requires a answer list.""", ) # ====== model specific ====== validator.add_argument( "k_test", type=int, help="Number of top k most similar samples from ITC/VTC selection to be tested.", ) return validator
MovieChat-main
MovieChat/common/config.py
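One detail of the Config class above worth illustrating: the options list may be given either as "key=value" strings or as alternating key/value tokens, and _convert_to_dot_list normalizes the latter before handing them to OmegaConf.from_dotlist. A sketch with illustrative option names:

from omegaconf import OmegaConf

opts = ["run.batch_size_train", "8", "run.init_lr", "1e-4"]      # alternating key/value form
dot_list = [f"{k}={v}" for k, v in zip(opts[0::2], opts[1::2])]  # -> ["run.batch_size_train=8", ...]
user_cfg = OmegaConf.from_dotlist(dot_list)
print(user_cfg.run.batch_size_train)  # 8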
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ class Registry: mapping = { "builder_name_mapping": {}, "task_name_mapping": {}, "processor_name_mapping": {}, "model_name_mapping": {}, "lr_scheduler_name_mapping": {}, "runner_name_mapping": {}, "state": {}, "paths": {}, } @classmethod def register_builder(cls, name): r"""Register a dataset builder to registry with key 'name' Args: name: Key with which the builder will be registered. Usage: from MovieChat.common.registry import registry from MovieChat.datasets.base_dataset_builder import BaseDatasetBuilder """ def wrap(builder_cls): from MovieChat.datasets.builders.base_dataset_builder import BaseDatasetBuilder assert issubclass( builder_cls, BaseDatasetBuilder ), "All builders must inherit BaseDatasetBuilder class, found {}".format( builder_cls ) if name in cls.mapping["builder_name_mapping"]: raise KeyError( "Name '{}' already registered for {}.".format( name, cls.mapping["builder_name_mapping"][name] ) ) cls.mapping["builder_name_mapping"][name] = builder_cls return builder_cls return wrap @classmethod def register_task(cls, name): r"""Register a task to registry with key 'name' Args: name: Key with which the task will be registered. Usage: from MovieChat.common.registry import registry """ def wrap(task_cls): from MovieChat.tasks.base_task import BaseTask assert issubclass( task_cls, BaseTask ), "All tasks must inherit BaseTask class" if name in cls.mapping["task_name_mapping"]: raise KeyError( "Name '{}' already registered for {}.".format( name, cls.mapping["task_name_mapping"][name] ) ) cls.mapping["task_name_mapping"][name] = task_cls return task_cls return wrap @classmethod def register_model(cls, name): r"""Register a task to registry with key 'name' Args: name: Key with which the task will be registered. Usage: from MovieChat.common.registry import registry """ def wrap(model_cls): from MovieChat.models import BaseModel assert issubclass( model_cls, BaseModel ), "All models must inherit BaseModel class" if name in cls.mapping["model_name_mapping"]: raise KeyError( "Name '{}' already registered for {}.".format( name, cls.mapping["model_name_mapping"][name] ) ) cls.mapping["model_name_mapping"][name] = model_cls return model_cls return wrap @classmethod def register_processor(cls, name): r"""Register a processor to registry with key 'name' Args: name: Key with which the task will be registered. Usage: from MovieChat.common.registry import registry """ def wrap(processor_cls): from MovieChat.processors import BaseProcessor assert issubclass( processor_cls, BaseProcessor ), "All processors must inherit BaseProcessor class" if name in cls.mapping["processor_name_mapping"]: raise KeyError( "Name '{}' already registered for {}.".format( name, cls.mapping["processor_name_mapping"][name] ) ) cls.mapping["processor_name_mapping"][name] = processor_cls return processor_cls return wrap @classmethod def register_lr_scheduler(cls, name): r"""Register a model to registry with key 'name' Args: name: Key with which the task will be registered. 
Usage: from MovieChat.common.registry import registry """ def wrap(lr_sched_cls): if name in cls.mapping["lr_scheduler_name_mapping"]: raise KeyError( "Name '{}' already registered for {}.".format( name, cls.mapping["lr_scheduler_name_mapping"][name] ) ) cls.mapping["lr_scheduler_name_mapping"][name] = lr_sched_cls return lr_sched_cls return wrap @classmethod def register_runner(cls, name): r"""Register a model to registry with key 'name' Args: name: Key with which the task will be registered. Usage: from MovieChat.common.registry import registry """ def wrap(runner_cls): if name in cls.mapping["runner_name_mapping"]: raise KeyError( "Name '{}' already registered for {}.".format( name, cls.mapping["runner_name_mapping"][name] ) ) cls.mapping["runner_name_mapping"][name] = runner_cls return runner_cls return wrap @classmethod def register_path(cls, name, path): r"""Register a path to registry with key 'name' Args: name: Key with which the path will be registered. Usage: from MovieChat.common.registry import registry """ assert isinstance(path, str), "All path must be str." if name in cls.mapping["paths"]: raise KeyError("Name '{}' already registered.".format(name)) cls.mapping["paths"][name] = path @classmethod def register(cls, name, obj): r"""Register an item to registry with key 'name' Args: name: Key with which the item will be registered. Usage:: from MovieChat.common.registry import registry registry.register("config", {}) """ path = name.split(".") current = cls.mapping["state"] for part in path[:-1]: if part not in current: current[part] = {} current = current[part] current[path[-1]] = obj # @classmethod # def get_trainer_class(cls, name): # return cls.mapping["trainer_name_mapping"].get(name, None) @classmethod def get_builder_class(cls, name): return cls.mapping["builder_name_mapping"].get(name, None) @classmethod def get_model_class(cls, name): return cls.mapping["model_name_mapping"].get(name, None) @classmethod def get_task_class(cls, name): return cls.mapping["task_name_mapping"].get(name, None) @classmethod def get_processor_class(cls, name): return cls.mapping["processor_name_mapping"].get(name, None) @classmethod def get_lr_scheduler_class(cls, name): return cls.mapping["lr_scheduler_name_mapping"].get(name, None) @classmethod def get_runner_class(cls, name): return cls.mapping["runner_name_mapping"].get(name, None) @classmethod def list_runners(cls): return sorted(cls.mapping["runner_name_mapping"].keys()) @classmethod def list_models(cls): return sorted(cls.mapping["model_name_mapping"].keys()) @classmethod def list_tasks(cls): return sorted(cls.mapping["task_name_mapping"].keys()) @classmethod def list_processors(cls): return sorted(cls.mapping["processor_name_mapping"].keys()) @classmethod def list_lr_schedulers(cls): return sorted(cls.mapping["lr_scheduler_name_mapping"].keys()) @classmethod def list_datasets(cls): return sorted(cls.mapping["builder_name_mapping"].keys()) @classmethod def get_path(cls, name): return cls.mapping["paths"].get(name, None) @classmethod def get(cls, name, default=None, no_warning=False): r"""Get an item from registry with key 'name' Args: name (string): Key whose value needs to be retrieved. default: If passed and key is not in registry, default value will be returned with a warning. Default: None no_warning (bool): If passed as True, warning when key doesn't exist will not be generated. Useful for MMF's internal operations. 
Default: False """ original_name = name name = name.split(".") value = cls.mapping["state"] for subname in name: value = value.get(subname, default) if value is default: break if ( "writer" in cls.mapping["state"] and value == default and no_warning is False ): cls.mapping["state"]["writer"].warning( "Key {} is not present in registry, returning default value " "of {}".format(original_name, default) ) return value @classmethod def unregister(cls, name): r"""Remove an item from registry with key 'name' Args: name: Key which needs to be removed. Usage:: from mmf.common.registry import registry config = registry.unregister("config") """ return cls.mapping["state"].pop(name, None) registry = Registry()
MovieChat-main
MovieChat/common/registry.py
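A hedged sketch of the generic registration helpers above; the key names and path are hypothetical, and re-registering an existing key raises KeyError:

from MovieChat.common.registry import registry

registry.register_path("my_cache_root", "/tmp/moviechat_cache")  # hypothetical key and path
registry.register("experiment.debug", True)                      # stored as nested state

print(registry.get_path("my_cache_root"))  # /tmp/moviechat_cache
print(registry.get("experiment.debug"))    # True
print(registry.list_processors())          # populated once the processor modules have been imported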
MovieChat-main
MovieChat/common/__init__.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import datetime import logging import time from collections import defaultdict, deque import torch import torch.distributed as dist from MovieChat.common import dist_utils class SmoothedValue(object): """Track a series of values and provide access to smoothed values over a window or the global series average. """ def __init__(self, window_size=20, fmt=None): if fmt is None: fmt = "{median:.4f} ({global_avg:.4f})" self.deque = deque(maxlen=window_size) self.total = 0.0 self.count = 0 self.fmt = fmt def update(self, value, n=1): self.deque.append(value) self.count += n self.total += value * n def synchronize_between_processes(self): """ Warning: does not synchronize the deque! """ if not dist_utils.is_dist_avail_and_initialized(): return t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda") dist.barrier() dist.all_reduce(t) t = t.tolist() self.count = int(t[0]) self.total = t[1] @property def median(self): d = torch.tensor(list(self.deque)) return d.median().item() @property def avg(self): d = torch.tensor(list(self.deque), dtype=torch.float32) return d.mean().item() @property def global_avg(self): return self.total / self.count @property def max(self): return max(self.deque) @property def value(self): return self.deque[-1] def __str__(self): return self.fmt.format( median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value, ) class MetricLogger(object): def __init__(self, delimiter="\t"): self.meters = defaultdict(SmoothedValue) self.delimiter = delimiter def update(self, **kwargs): for k, v in kwargs.items(): if isinstance(v, torch.Tensor): v = v.item() assert isinstance(v, (float, int)) self.meters[k].update(v) def __getattr__(self, attr): if attr in self.meters: return self.meters[attr] if attr in self.__dict__: return self.__dict__[attr] raise AttributeError( "'{}' object has no attribute '{}'".format(type(self).__name__, attr) ) def __str__(self): loss_str = [] for name, meter in self.meters.items(): loss_str.append("{}: {}".format(name, str(meter))) return self.delimiter.join(loss_str) def global_avg(self): loss_str = [] for name, meter in self.meters.items(): loss_str.append("{}: {:.4f}".format(name, meter.global_avg)) return self.delimiter.join(loss_str) def synchronize_between_processes(self): for meter in self.meters.values(): meter.synchronize_between_processes() def add_meter(self, name, meter): self.meters[name] = meter def log_every(self, iterable, print_freq, header=None): i = 0 if not header: header = "" start_time = time.time() end = time.time() iter_time = SmoothedValue(fmt="{avg:.4f}") data_time = SmoothedValue(fmt="{avg:.4f}") space_fmt = ":" + str(len(str(len(iterable)))) + "d" log_msg = [ header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}", ] if torch.cuda.is_available(): log_msg.append("max mem: {memory:.0f}") log_msg = self.delimiter.join(log_msg) MB = 1024.0 * 1024.0 for obj in iterable: data_time.update(time.time() - end) yield obj iter_time.update(time.time() - end) if i % print_freq == 0 or i == len(iterable) - 1: eta_seconds = iter_time.global_avg * (len(iterable) - i) eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) if torch.cuda.is_available(): print( log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), 
time=str(iter_time), data=str(data_time), memory=torch.cuda.max_memory_allocated() / MB, ) ) else: print( log_msg.format( i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time), ) ) i += 1 end = time.time() total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) print( "{} Total time: {} ({:.4f} s / it)".format( header, total_time_str, total_time / len(iterable) ) ) class AttrDict(dict): def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) self.__dict__ = self def setup_logger(): logging.basicConfig( level=logging.INFO if dist_utils.is_main_process() else logging.WARN, format="%(asctime)s [%(levelname)s] %(message)s", handlers=[logging.StreamHandler()], )
MovieChat-main
MovieChat/common/logger.py
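A short sketch of the metric utilities above in a single-process setting; the values are arbitrary:

from MovieChat.common.logger import MetricLogger, SmoothedValue

metric_logger = MetricLogger(delimiter="  ")
metric_logger.add_meter("lr", SmoothedValue(window_size=1, fmt="{value:.6f}"))

for step in range(3):
    metric_logger.update(loss=1.0 / (step + 1), lr=1e-4)

print(str(metric_logger))          # per-meter smoothed summary, e.g. "loss: 0.5000 (0.6111)  lr: 0.000100"
print(metric_logger.global_avg())  # "loss: 0.6111  lr: 0.0001"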
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import io import json import logging import os import pickle import re import shutil import urllib import urllib.error import urllib.request from typing import Optional from urllib.parse import urlparse import numpy as np import pandas as pd import yaml from iopath.common.download import download from iopath.common.file_io import file_lock, g_pathmgr from MovieChat.common.registry import registry from torch.utils.model_zoo import tqdm from torchvision.datasets.utils import ( check_integrity, download_file_from_google_drive, extract_archive, ) def now(): from datetime import datetime return datetime.now().strftime("%Y%m%d%H%M")[:-1] def is_url(url_or_filename): parsed = urlparse(url_or_filename) return parsed.scheme in ("http", "https") def get_cache_path(rel_path): return os.path.expanduser(os.path.join(registry.get_path("cache_root"), rel_path)) def get_abs_path(rel_path): return os.path.join(registry.get_path("library_root"), rel_path) def load_json(filename): with open(filename, "r") as f: return json.load(f) # The following are adapted from torchvision and vissl # torchvision: https://github.com/pytorch/vision # vissl: https://github.com/facebookresearch/vissl/blob/main/vissl/utils/download.py def makedir(dir_path): """ Create the directory if it does not exist. """ is_success = False try: if not g_pathmgr.exists(dir_path): g_pathmgr.mkdirs(dir_path) is_success = True except BaseException: print(f"Error creating directory: {dir_path}") return is_success def get_redirected_url(url: str): """ Given a URL, returns the URL it redirects to or the original URL in case of no indirection """ import requests with requests.Session() as session: with session.get(url, stream=True, allow_redirects=True) as response: if response.history: return response.url else: return url def to_google_drive_download_url(view_url: str) -> str: """ Utility function to transform a view URL of google drive to a download URL for google drive Example input: https://drive.google.com/file/d/137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp/view Example output: https://drive.google.com/uc?export=download&id=137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp """ splits = view_url.split("/") assert splits[-1] == "view" file_id = splits[-2] return f"https://drive.google.com/uc?export=download&id={file_id}" def download_google_drive_url(url: str, output_path: str, output_file_name: str): """ Download a file from google drive Downloading an URL from google drive requires confirmation when the file of the size is too big (google drive notifies that anti-viral checks cannot be performed on such files) """ import requests with requests.Session() as session: # First get the confirmation token and append it to the URL with session.get(url, stream=True, allow_redirects=True) as response: for k, v in response.cookies.items(): if k.startswith("download_warning"): url = url + "&confirm=" + v # Then download the content of the file with session.get(url, stream=True, verify=True) as response: makedir(output_path) path = os.path.join(output_path, output_file_name) total_size = int(response.headers.get("Content-length", 0)) with open(path, "wb") as file: from tqdm import tqdm with tqdm(total=total_size) as progress_bar: for block in response.iter_content( chunk_size=io.DEFAULT_BUFFER_SIZE ): file.write(block) progress_bar.update(len(block)) def 
_get_google_drive_file_id(url: str) -> Optional[str]: parts = urlparse(url) if re.match(r"(drive|docs)[.]google[.]com", parts.netloc) is None: return None match = re.match(r"/file/d/(?P<id>[^/]*)", parts.path) if match is None: return None return match.group("id") def _urlretrieve(url: str, filename: str, chunk_size: int = 1024) -> None: with open(filename, "wb") as fh: with urllib.request.urlopen( urllib.request.Request(url, headers={"User-Agent": "vissl"}) ) as response: with tqdm(total=response.length) as pbar: for chunk in iter(lambda: response.read(chunk_size), ""): if not chunk: break pbar.update(chunk_size) fh.write(chunk) def download_url( url: str, root: str, filename: Optional[str] = None, md5: Optional[str] = None, ) -> None: """Download a file from a url and place it in root. Args: url (str): URL to download file from root (str): Directory to place downloaded file in filename (str, optional): Name to save the file under. If None, use the basename of the URL. md5 (str, optional): MD5 checksum of the download. If None, do not check """ root = os.path.expanduser(root) if not filename: filename = os.path.basename(url) fpath = os.path.join(root, filename) makedir(root) # check if file is already present locally if check_integrity(fpath, md5): print("Using downloaded and verified file: " + fpath) return # expand redirect chain if needed url = get_redirected_url(url) # check if file is located on Google Drive file_id = _get_google_drive_file_id(url) if file_id is not None: return download_file_from_google_drive(file_id, root, filename, md5) # download the file try: print("Downloading " + url + " to " + fpath) _urlretrieve(url, fpath) except (urllib.error.URLError, IOError) as e: # type: ignore[attr-defined] if url[:5] == "https": url = url.replace("https:", "http:") print( "Failed download. Trying https -> http instead." " Downloading " + url + " to " + fpath ) _urlretrieve(url, fpath) else: raise e # check integrity of downloaded file if not check_integrity(fpath, md5): raise RuntimeError("File not found or corrupted.") def download_and_extract_archive( url: str, download_root: str, extract_root: Optional[str] = None, filename: Optional[str] = None, md5: Optional[str] = None, remove_finished: bool = False, ) -> None: download_root = os.path.expanduser(download_root) if extract_root is None: extract_root = download_root if not filename: filename = os.path.basename(url) download_url(url, download_root, filename, md5) archive = os.path.join(download_root, filename) print("Extracting {} to {}".format(archive, extract_root)) extract_archive(archive, extract_root, remove_finished) def cache_url(url: str, cache_dir: str) -> str: """ This implementation downloads the remote resource and caches it locally. The resource will only be downloaded if not previously requested. """ parsed_url = urlparse(url) dirname = os.path.join(cache_dir, os.path.dirname(parsed_url.path.lstrip("/"))) makedir(dirname) filename = url.split("/")[-1] cached = os.path.join(dirname, filename) with file_lock(cached): if not os.path.isfile(cached): logging.info(f"Downloading {url} to {cached} ...") cached = download(url, dirname, filename=filename) logging.info(f"URL {url} cached in {cached}") return cached # TODO (prigoyal): convert this into RAII-style API def create_file_symlink(file1, file2): """ Simply create the symlinks for a given file1 to file2. Useful during model checkpointing to symlinks to the latest successful checkpoint. 
""" try: if g_pathmgr.exists(file2): g_pathmgr.rm(file2) g_pathmgr.symlink(file1, file2) except Exception as e: logging.info(f"Could NOT create symlink. Error: {e}") def save_file(data, filename, append_to_json=True, verbose=True): """ Common i/o utility to handle saving data to various file formats. Supported: .pkl, .pickle, .npy, .json Specifically for .json, users have the option to either append (default) or rewrite by passing in Boolean value to append_to_json. """ if verbose: logging.info(f"Saving data to file: {filename}") file_ext = os.path.splitext(filename)[1] if file_ext in [".pkl", ".pickle"]: with g_pathmgr.open(filename, "wb") as fopen: pickle.dump(data, fopen, pickle.HIGHEST_PROTOCOL) elif file_ext == ".npy": with g_pathmgr.open(filename, "wb") as fopen: np.save(fopen, data) elif file_ext == ".json": if append_to_json: with g_pathmgr.open(filename, "a") as fopen: fopen.write(json.dumps(data, sort_keys=True) + "\n") fopen.flush() else: with g_pathmgr.open(filename, "w") as fopen: fopen.write(json.dumps(data, sort_keys=True) + "\n") fopen.flush() elif file_ext == ".yaml": with g_pathmgr.open(filename, "w") as fopen: dump = yaml.dump(data) fopen.write(dump) fopen.flush() else: raise Exception(f"Saving {file_ext} is not supported yet") if verbose: logging.info(f"Saved data to file: {filename}") def load_file(filename, mmap_mode=None, verbose=True, allow_pickle=False): """ Common i/o utility to handle loading data from various file formats. Supported: .pkl, .pickle, .npy, .json For the npy files, we support reading the files in mmap_mode. If the mmap_mode of reading is not successful, we load data without the mmap_mode. """ if verbose: logging.info(f"Loading data from file: {filename}") file_ext = os.path.splitext(filename)[1] if file_ext == ".txt": with g_pathmgr.open(filename, "r") as fopen: data = fopen.readlines() elif file_ext in [".pkl", ".pickle"]: with g_pathmgr.open(filename, "rb") as fopen: data = pickle.load(fopen, encoding="latin1") elif file_ext == ".npy": if mmap_mode: try: with g_pathmgr.open(filename, "rb") as fopen: data = np.load( fopen, allow_pickle=allow_pickle, encoding="latin1", mmap_mode=mmap_mode, ) except ValueError as e: logging.info( f"Could not mmap {filename}: {e}. Trying without g_pathmgr" ) data = np.load( filename, allow_pickle=allow_pickle, encoding="latin1", mmap_mode=mmap_mode, ) logging.info("Successfully loaded without g_pathmgr") except Exception: logging.info("Could not mmap without g_pathmgr. Trying without mmap") with g_pathmgr.open(filename, "rb") as fopen: data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1") else: with g_pathmgr.open(filename, "rb") as fopen: data = np.load(fopen, allow_pickle=allow_pickle, encoding="latin1") elif file_ext == ".json": with g_pathmgr.open(filename, "r") as fopen: data = json.load(fopen) elif file_ext == ".yaml": with g_pathmgr.open(filename, "r") as fopen: data = yaml.load(fopen, Loader=yaml.FullLoader) elif file_ext == ".csv": with g_pathmgr.open(filename, "r") as fopen: data = pd.read_csv(fopen) else: raise Exception(f"Reading from {file_ext} is not supported yet") return data def abspath(resource_path: str): """ Make a path absolute, but take into account prefixes like "http://" or "manifold://" """ regex = re.compile(r"^\w+://") if regex.match(resource_path) is None: return os.path.abspath(resource_path) else: return resource_path def makedir(dir_path): """ Create the directory if it does not exist. 
""" is_success = False try: if not g_pathmgr.exists(dir_path): g_pathmgr.mkdirs(dir_path) is_success = True except BaseException: logging.info(f"Error creating directory: {dir_path}") return is_success def is_url(input_url): """ Check if an input string is a url. look for http(s):// and ignoring the case """ is_url = re.match(r"^(?:http)s?://", input_url, re.IGNORECASE) is not None return is_url def cleanup_dir(dir): """ Utility for deleting a directory. Useful for cleaning the storage space that contains various training artifacts like checkpoints, data etc. """ if os.path.exists(dir): logging.info(f"Deleting directory: {dir}") shutil.rmtree(dir) logging.info(f"Deleted contents of directory: {dir}") def get_file_size(filename): """ Given a file, get the size of file in MB """ size_in_mb = os.path.getsize(filename) / float(1024**2) return size_in_mb
MovieChat-main
MovieChat/common/utils.py
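A hedged sketch of the generic save/load helpers above; the file path is illustrative and iopath's g_pathmgr does the actual reads and writes:

from MovieChat.common.utils import save_file, load_file, get_file_size

save_file({"epoch": 1, "val_acc": 0.5}, "/tmp/metrics.json", append_to_json=False)
print(load_file("/tmp/metrics.json"))      # {'epoch': 1, 'val_acc': 0.5}
print(get_file_size("/tmp/metrics.json"))  # file size in MB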
import numpy as np
from matplotlib import pyplot as plt
from scipy.ndimage import filters
from skimage import transform as skimage_transform


def getAttMap(img, attMap, blur=True, overlap=True):
    attMap -= attMap.min()
    if attMap.max() > 0:
        attMap /= attMap.max()
    attMap = skimage_transform.resize(attMap, (img.shape[:2]), order=3, mode="constant")
    if blur:
        attMap = filters.gaussian_filter(attMap, 0.02 * max(img.shape[:2]))
        attMap -= attMap.min()
        attMap /= attMap.max()
    cmap = plt.get_cmap("jet")
    attMapV = cmap(attMap)
    attMapV = np.delete(attMapV, 3, 2)
    if overlap:
        attMap = (
            1 * (1 - attMap**0.7).reshape(attMap.shape + (1,)) * img
            + (attMap**0.7).reshape(attMap.shape + (1,)) * attMapV
        )
    return attMap
MovieChat-main
MovieChat/common/gradcam.py
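A minimal sketch of overlaying an attention map on an image with the helper above; both inputs are random stand-ins, and img is expected as a float HWC array in [0, 1]:

import numpy as np
from MovieChat.common.gradcam import getAttMap

img = np.random.rand(224, 224, 3)  # fake float image, HWC in [0, 1]
att = np.random.rand(14, 14)       # fake low-resolution attention / Grad-CAM map
overlay = getAttMap(img, att, blur=True, overlap=True)
print(overlay.shape)               # (224, 224, 3)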
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import datetime import functools import os import torch import torch.distributed as dist import timm.models.hub as timm_hub def setup_for_distributed(is_master): """ This function disables printing when not in master process """ import builtins as __builtin__ builtin_print = __builtin__.print def print(*args, **kwargs): force = kwargs.pop("force", False) if is_master or force: builtin_print(*args, **kwargs) __builtin__.print = print def is_dist_avail_and_initialized(): if not dist.is_available(): return False if not dist.is_initialized(): return False return True def get_world_size(): if not is_dist_avail_and_initialized(): return 1 return dist.get_world_size() def get_rank(): if not is_dist_avail_and_initialized(): return 0 return dist.get_rank() def is_main_process(): return get_rank() == 0 def init_distributed_mode(args): if "RANK" in os.environ and "WORLD_SIZE" in os.environ: args.rank = int(os.environ["RANK"]) args.world_size = int(os.environ["WORLD_SIZE"]) args.gpu = int(os.environ["LOCAL_RANK"]) elif "SLURM_PROCID" in os.environ: args.rank = int(os.environ["SLURM_PROCID"]) args.gpu = args.rank % torch.cuda.device_count() else: print("Not using distributed mode") args.distributed = False return args.distributed = True torch.cuda.set_device(args.gpu) args.dist_backend = "nccl" print( "| distributed init (rank {}, world {}): {}".format( args.rank, args.world_size, args.dist_url ), flush=True, ) torch.distributed.init_process_group( backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank, timeout=datetime.timedelta( days=365 ), # allow auto-downloading and de-compressing ) torch.distributed.barrier() setup_for_distributed(args.rank == 0) def get_dist_info(): if torch.__version__ < "1.0": initialized = dist._initialized else: initialized = dist.is_initialized() if initialized: rank = dist.get_rank() world_size = dist.get_world_size() else: # non-distributed training rank = 0 world_size = 1 return rank, world_size def main_process(func): @functools.wraps(func) def wrapper(*args, **kwargs): rank, _ = get_dist_info() if rank == 0: return func(*args, **kwargs) return wrapper def download_cached_file(url, check_hash=True, progress=False): """ Download a file from a URL and cache it locally. If the file already exists, it is not downloaded again. If distributed, only the main process downloads the file, and the other processes wait for the file to be downloaded. """ def get_cached_file_path(): # a hack to sync the file path across processes parts = torch.hub.urlparse(url) filename = os.path.basename(parts.path) cached_file = os.path.join(timm_hub.get_cache_dir(), filename) return cached_file if is_main_process(): timm_hub.download_cached_file(url, check_hash, progress) if is_dist_avail_and_initialized(): dist.barrier() return get_cached_file_path()
MovieChat-main
MovieChat/common/dist_utils.py
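A single-process sketch of the dist_utils helpers above. Without RANK/WORLD_SIZE/SLURM_PROCID in the environment, init_distributed_mode falls back to non-distributed mode, so the rank helpers return their defaults; the argparse.Namespace stands in for the parsed CLI args.

import argparse

from MovieChat.common.dist_utils import (
    get_rank,
    get_world_size,
    init_distributed_mode,
    is_main_process,
)

args = argparse.Namespace(dist_url="env://", distributed=False)
init_distributed_mode(args)   # prints "Not using distributed mode" and sets args.distributed = False
print(get_rank(), get_world_size(), is_main_process())   # 0 1 True

Under torchrun, RANK/WORLD_SIZE/LOCAL_RANK are set, so the same call would instead take the env-var branch, initialize the NCCL process group, and silence printing on non-master ranks.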
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ from MovieChat.runners.runner_base import RunnerBase __all__ = ["RunnerBase"]
MovieChat-main
MovieChat/runners/__init__.py
""" Copyright (c) 2022, salesforce.com, inc. All rights reserved. SPDX-License-Identifier: BSD-3-Clause For full license text, see the LICENSE_Lavis file in the repo root or https://opensource.org/licenses/BSD-3-Clause """ import datetime import json import logging import os import time from pathlib import Path import torch import torch.distributed as dist import webdataset as wds from MovieChat.common.dist_utils import ( download_cached_file, get_rank, get_world_size, is_main_process, main_process, ) from MovieChat.common.registry import registry from MovieChat.common.utils import is_url from MovieChat.datasets.data_utils import concat_datasets, reorg_datasets_by_split, ChainDataset from MovieChat.datasets.datasets.dataloader_utils import ( IterLoader, MultiIterLoader, PrefetchLoader, ) from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.data import DataLoader, DistributedSampler @registry.register_runner("runner_base") class RunnerBase: """ A runner class to train and evaluate a model given a task and datasets. The runner uses pytorch distributed data parallel by default. Future release will support other distributed frameworks. """ def __init__(self, cfg, task, model, datasets, job_id): self.config = cfg self.job_id = job_id self.task = task self.datasets = datasets self._model = model self._wrapped_model = None self._device = None self._optimizer = None self._scaler = None self._dataloaders = None self._lr_sched = None self.start_epoch = 0 # self.setup_seeds() self.setup_output_dir() @property def device(self): if self._device is None: self._device = torch.device(self.config.run_cfg.device) return self._device @property def use_distributed(self): return self.config.run_cfg.distributed @property def model(self): """ A property to get the DDP-wrapped model on the device. """ # move model to device if self._model.device != self.device: self._model = self._model.to(self.device) # distributed training wrapper if self.use_distributed: if self._wrapped_model is None: self._wrapped_model = DDP( self._model, device_ids=[self.config.run_cfg.gpu] ) else: self._wrapped_model = self._model return self._wrapped_model @property def optimizer(self): # TODO make optimizer class and configurations if self._optimizer is None: num_parameters = 0 p_wd, p_non_wd = [], [] for n, p in self.model.named_parameters(): if not p.requires_grad: continue # frozen weights print(n) if p.ndim < 2 or "bias" in n or "ln" in n or "bn" in n: p_non_wd.append(p) else: p_wd.append(p) num_parameters += p.data.nelement() logging.info("number of trainable parameters: %d" % num_parameters) optim_params = [ { "params": p_wd, "weight_decay": float(self.config.run_cfg.weight_decay), }, {"params": p_non_wd, "weight_decay": 0}, ] beta2 = self.config.run_cfg.get("beta2", 0.999) self._optimizer = torch.optim.AdamW( optim_params, lr=float(self.config.run_cfg.init_lr), weight_decay=float(self.config.run_cfg.weight_decay), betas=(0.9, beta2), ) return self._optimizer @property def scaler(self): amp = self.config.run_cfg.get("amp", False) if amp: if self._scaler is None: self._scaler = torch.cuda.amp.GradScaler() return self._scaler @property def lr_scheduler(self): """ A property to get and create learning rate scheduler by split just in need. 
""" if self._lr_sched is None: lr_sched_cls = registry.get_lr_scheduler_class(self.config.run_cfg.lr_sched) max_epoch = self.max_epoch min_lr = self.min_lr init_lr = self.init_lr # optional parameters decay_rate = self.config.run_cfg.get("lr_decay_rate", None) warmup_start_lr = self.config.run_cfg.get("warmup_lr", -1) warmup_steps = self.config.run_cfg.get("warmup_steps", 0) iters_per_epoch = self.config.run_cfg.get("iters_per_epoch", None) if iters_per_epoch is None: try: iters_per_epoch = len(self.dataloaders['train']) except (AttributeError, TypeError): iters_per_epoch = 10000 self._lr_sched = lr_sched_cls( optimizer=self.optimizer, max_epoch=max_epoch, iters_per_epoch=iters_per_epoch, min_lr=min_lr, init_lr=init_lr, decay_rate=decay_rate, warmup_start_lr=warmup_start_lr, warmup_steps=warmup_steps, ) return self._lr_sched @property def dataloaders(self) -> dict: """ A property to get and create dataloaders by split just in need. If no train_dataset_ratio is provided, concatenate map-style datasets and chain wds.DataPipe datasets separately. Training set becomes a tuple (ConcatDataset, ChainDataset), both are optional but at least one of them is required. The resultant ConcatDataset and ChainDataset will be sampled evenly. If train_dataset_ratio is provided, create a MultiIterLoader to sample each dataset by ratios during training. Currently do not support multiple datasets for validation and test. Returns: dict: {split_name: (tuples of) dataloader} """ if self._dataloaders is None: # concatenate map-style datasets and chain wds.DataPipe datasets separately # training set becomes a tuple (ConcatDataset, ChainDataset), both are # optional but at least one of them is required. The resultant ConcatDataset # and ChainDataset will be sampled evenly. logging.info( "dataset_ratios not specified, datasets will be concatenated (map-style datasets) or chained (webdataset.DataPipeline)." ) datasets = reorg_datasets_by_split(self.datasets) self.datasets = datasets # print dataset statistics after concatenation/chaining for split_name in self.datasets: if isinstance(self.datasets[split_name], tuple) or isinstance( self.datasets[split_name], list ): # mixed wds.DataPipeline and torch.utils.data.Dataset num_records = sum( [ len(d) if not type(d) in [wds.DataPipeline, ChainDataset] else 0 for d in self.datasets[split_name] ] ) else: if hasattr(self.datasets[split_name], "__len__"): # a single map-style dataset num_records = len(self.datasets[split_name]) else: # a single wds.DataPipeline num_records = -1 logging.info( "Only a single wds.DataPipeline dataset, no __len__ attribute." 
) if num_records >= 0: logging.info( "Loaded {} records for {} split from the dataset.".format( num_records, split_name ) ) # create dataloaders split_names = sorted(self.datasets.keys()) datasets = [self.datasets[split] for split in split_names] is_trains = [split in self.train_splits for split in split_names] batch_sizes = [ self.config.run_cfg.batch_size_train if split == "train" else self.config.run_cfg.batch_size_eval for split in split_names ] collate_fns = [] for dataset in datasets: if isinstance(dataset, tuple) or isinstance(dataset, list): collate_fns.append([getattr(d, "collater", None) for d in dataset]) else: collate_fns.append(getattr(dataset, "collater", None)) dataloaders = self.create_loaders( datasets=datasets, num_workers=self.config.run_cfg.num_workers, batch_sizes=batch_sizes, is_trains=is_trains, collate_fns=collate_fns, ) self._dataloaders = {k: v for k, v in zip(split_names, dataloaders)} return self._dataloaders @property def cuda_enabled(self): return self.device.type == "cuda" @property def max_epoch(self): return int(self.config.run_cfg.max_epoch) @property def log_freq(self): log_freq = self.config.run_cfg.get("log_freq", 50) return int(log_freq) @property def init_lr(self): return float(self.config.run_cfg.init_lr) @property def min_lr(self): return float(self.config.run_cfg.min_lr) @property def accum_grad_iters(self): return int(self.config.run_cfg.get("accum_grad_iters", 1)) @property def valid_splits(self): valid_splits = self.config.run_cfg.get("valid_splits", []) if len(valid_splits) == 0: logging.info("No validation splits found.") return valid_splits @property def test_splits(self): test_splits = self.config.run_cfg.get("test_splits", []) return test_splits @property def train_splits(self): train_splits = self.config.run_cfg.get("train_splits", []) if len(train_splits) == 0: logging.info("Empty train splits.") return train_splits @property def evaluate_only(self): """ Set to True to skip training. 
""" return self.config.run_cfg.evaluate @property def use_dist_eval_sampler(self): return self.config.run_cfg.get("use_dist_eval_sampler", True) @property def resume_ckpt_path(self): return self.config.run_cfg.get("resume_ckpt_path", None) @property def train_loader(self): train_dataloader = self.dataloaders["train"] return train_dataloader def setup_output_dir(self): lib_root = Path(registry.get_path("library_root")) output_dir = lib_root / self.config.run_cfg.output_dir / self.job_id result_dir = output_dir / "result" output_dir.mkdir(parents=True, exist_ok=True) result_dir.mkdir(parents=True, exist_ok=True) registry.register_path("result_dir", str(result_dir)) registry.register_path("output_dir", str(output_dir)) self.result_dir = result_dir self.output_dir = output_dir def train(self): start_time = time.time() best_agg_metric = 0 best_epoch = 0 self.log_config() # resume from checkpoint if specified if not self.evaluate_only and self.resume_ckpt_path is not None: self._load_checkpoint(self.resume_ckpt_path) for cur_epoch in range(self.start_epoch, self.max_epoch): # training phase if not self.evaluate_only: logging.info("Start training") train_stats = self.train_epoch(cur_epoch) self.log_stats(split_name="train", stats=train_stats) # evaluation phase if len(self.valid_splits) > 0: for split_name in self.valid_splits: logging.info("Evaluating on {}.".format(split_name)) val_log = self.eval_epoch( split_name=split_name, cur_epoch=cur_epoch ) if val_log is not None: if is_main_process(): assert ( "agg_metrics" in val_log ), "No agg_metrics found in validation log." agg_metrics = val_log["agg_metrics"] if agg_metrics > best_agg_metric and split_name == "val": best_epoch, best_agg_metric = cur_epoch, agg_metrics self._save_checkpoint(cur_epoch, is_best=True) val_log.update({"best_epoch": best_epoch}) self.log_stats(val_log, split_name) else: # if no validation split is provided, we just save the checkpoint at the end of each epoch. if not self.evaluate_only: self._save_checkpoint(cur_epoch, is_best=False) if self.evaluate_only: break if self.config.run_cfg.distributed: dist.barrier() # testing phase test_epoch = "best" if len(self.valid_splits) > 0 else cur_epoch self.evaluate(cur_epoch=test_epoch, skip_reload=self.evaluate_only) total_time = time.time() - start_time total_time_str = str(datetime.timedelta(seconds=int(total_time))) logging.info("Training time {}".format(total_time_str)) def evaluate(self, cur_epoch="best", skip_reload=False): test_logs = dict() if len(self.test_splits) > 0: for split_name in self.test_splits: test_logs[split_name] = self.eval_epoch( split_name=split_name, cur_epoch=cur_epoch, skip_reload=skip_reload ) return test_logs def train_epoch(self, epoch): # train self.model.train() return self.task.train_epoch( epoch=epoch, model=self.model, data_loader=self.train_loader, optimizer=self.optimizer, scaler=self.scaler, lr_scheduler=self.lr_scheduler, cuda_enabled=self.cuda_enabled, log_freq=self.log_freq, accum_grad_iters=self.accum_grad_iters, ) @torch.no_grad() def eval_epoch(self, split_name, cur_epoch, skip_reload=False): """ Evaluate the model on a given split. Args: split_name (str): name of the split to evaluate on. cur_epoch (int): current epoch. skip_reload_best (bool): whether to skip reloading the best checkpoint. During training, we will reload the best checkpoint for validation. During testing, we will use provided weights and skip reloading the best checkpoint . 
""" data_loader = self.dataloaders.get(split_name, None) assert data_loader, "data_loader for split {} is None.".format(split_name) # TODO In validation, you need to compute loss as well as metrics # TODO consider moving to model.before_evaluation() model = self.unwrap_dist_model(self.model) if not skip_reload and cur_epoch == "best": model = self._reload_best_model(model) model.eval() self.task.before_evaluation( model=model, dataset=self.datasets[split_name], ) results = self.task.evaluation(model, data_loader) if results is not None: return self.task.after_evaluation( val_result=results, split_name=split_name, epoch=cur_epoch, ) def unwrap_dist_model(self, model): if self.use_distributed: return model.module else: return model def create_loaders( self, datasets, num_workers, batch_sizes, is_trains, collate_fns, dataset_ratios=None, ): """ Create dataloaders for training and validation. """ def _create_loader(dataset, num_workers, bsz, is_train, collate_fn): # create a single dataloader for each split if isinstance(dataset, ChainDataset) or isinstance( dataset, wds.DataPipeline ): # wds.WebdDataset instance are chained together # webdataset.DataPipeline has its own sampler and collate_fn loader = iter( DataLoader( dataset, batch_size=bsz, num_workers=num_workers, pin_memory=True, ) ) else: # map-style dataset are concatenated together # setup distributed sampler if self.use_distributed: sampler = DistributedSampler( dataset, shuffle=is_train, num_replicas=get_world_size(), rank=get_rank(), ) if not self.use_dist_eval_sampler: # e.g. retrieval evaluation sampler = sampler if is_train else None else: sampler = None loader = DataLoader( dataset, batch_size=bsz, num_workers=num_workers, pin_memory=True, sampler=sampler, shuffle=sampler is None and is_train, collate_fn=collate_fn, drop_last=True if is_train else False, ) loader = PrefetchLoader(loader) if is_train: loader = IterLoader(loader, use_distributed=self.use_distributed) return loader loaders = [] for dataset, bsz, is_train, collate_fn in zip( datasets, batch_sizes, is_trains, collate_fns ): if isinstance(dataset, list) or isinstance(dataset, tuple): if hasattr(dataset[0], 'sample_ratio') and dataset_ratios is None: dataset_ratios = [d.sample_ratio for d in dataset] loader = MultiIterLoader( loaders=[ _create_loader(d, num_workers, bsz, is_train, collate_fn[i]) for i, d in enumerate(dataset) ], ratios=dataset_ratios, ) else: loader = _create_loader(dataset, num_workers, bsz, is_train, collate_fn) loaders.append(loader) return loaders @main_process def _save_checkpoint(self, cur_epoch, is_best=False): """ Save the checkpoint at the current epoch. """ model_no_ddp = self.unwrap_dist_model(self.model) param_grad_dic = { k: v.requires_grad for (k, v) in model_no_ddp.named_parameters() } state_dict = model_no_ddp.state_dict() for k in list(state_dict.keys()): if k in param_grad_dic.keys() and not param_grad_dic[k]: # delete parameters that do not require gradient del state_dict[k] save_obj = { "model": state_dict, "optimizer": self.optimizer.state_dict(), "config": self.config.to_dict(), "scaler": self.scaler.state_dict() if self.scaler else None, "epoch": cur_epoch, } save_to = os.path.join( self.output_dir, "checkpoint_{}.pth".format("best" if is_best else cur_epoch), ) logging.info("Saving checkpoint at epoch {} to {}.".format(cur_epoch, save_to)) torch.save(save_obj, save_to) def _reload_best_model(self, model): """ Load the best checkpoint for evaluation. 
""" checkpoint_path = os.path.join(self.output_dir, "checkpoint_best.pth") logging.info("Loading checkpoint from {}.".format(checkpoint_path)) checkpoint = torch.load(checkpoint_path, map_location="cpu") try: model.load_state_dict(checkpoint["model"]) except RuntimeError as e: logging.warning( """ Key mismatch when loading checkpoint. This is expected if only part of the model is saved. Trying to load the model with strict=False. """ ) model.load_state_dict(checkpoint["model"], strict=False) return model def _load_checkpoint(self, url_or_filename): """ Resume from a checkpoint. """ if is_url(url_or_filename): cached_file = download_cached_file( url_or_filename, check_hash=False, progress=True ) checkpoint = torch.load(cached_file, map_location=self.device, strict=False) elif os.path.isfile(url_or_filename): checkpoint = torch.load(url_or_filename, map_location=self.device, strict=False) else: raise RuntimeError("checkpoint url or path is invalid") state_dict = checkpoint["model"] self.unwrap_dist_model(self.model).load_state_dict(state_dict) self.optimizer.load_state_dict(checkpoint["optimizer"]) if self.scaler and "scaler" in checkpoint: self.scaler.load_state_dict(checkpoint["scaler"]) self.start_epoch = checkpoint["epoch"] + 1 logging.info("Resume checkpoint from {}".format(url_or_filename)) @main_process def log_stats(self, stats, split_name): if isinstance(stats, dict): log_stats = {**{f"{split_name}_{k}": v for k, v in stats.items()}} with open(os.path.join(self.output_dir, "log.txt"), "a") as f: f.write(json.dumps(log_stats) + "\n") elif isinstance(stats, list): pass @main_process def log_config(self): with open(os.path.join(self.output_dir, "log.txt"), "a") as f: f.write(json.dumps(self.config.to_dict(), indent=4) + "\n")
MovieChat-main
MovieChat/runners/runner_base.py
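To see the parameter-grouping rule from RunnerBase.optimizer in isolation, here is a standalone sketch with a toy model; the learning rate and weight-decay values are illustrative, not MovieChat defaults. Biases, 1-D parameters, and parameters whose name contains "ln" or "bn" go into the no-weight-decay group.

import torch
from torch import nn

model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))

p_wd, p_non_wd = [], []
for n, p in model.named_parameters():
    if not p.requires_grad:
        continue  # frozen weights are skipped entirely
    if p.ndim < 2 or "bias" in n or "ln" in n or "bn" in n:
        p_non_wd.append(p)   # bias / norm / 1-D params: no weight decay
    else:
        p_wd.append(p)       # everything else gets weight decay

optimizer = torch.optim.AdamW(
    [
        {"params": p_wd, "weight_decay": 0.05},
        {"params": p_non_wd, "weight_decay": 0.0},
    ],
    lr=1e-4,
    betas=(0.9, 0.999),
)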
MovieChat-main
MovieChat/runners/test.py
MovieChat-main
MovieChat/conversation/__init__.py
""" Conversation prompt template of Video-LLaMA. Adapted from: https://github.com/Vision-CAIR/MiniGPT-4/blob/main/minigpt4/conversation/conversation.py """ import argparse import time from PIL import Image import sys sys.path.append('/mnt/workspace/videoGPT/Video-llama/') import os import torch from transformers import AutoTokenizer, AutoModelForCausalLM, LlamaTokenizer from transformers import StoppingCriteria, StoppingCriteriaList import dataclasses from enum import auto, Enum from typing import List, Tuple, Any import os from MovieChat.common.registry import registry from MovieChat.processors.video_processor import ToTHWC,ToUint8,load_video from MovieChat.processors import Blip2ImageEvalProcessor from MovieChat.models.process_video_data import load_and_transform_video_data class SeparatorStyle(Enum): """Different separator style.""" SINGLE = auto() TWO = auto() @dataclasses.dataclass class Conversation: """A class that keeps all conversation history.""" system: str roles: List[str] messages: List[List[str]] offset: int # system_img: List[Image.Image] = [] sep_style: SeparatorStyle = SeparatorStyle.SINGLE sep: str = "###" sep2: str = None skip_next: bool = False conv_id: Any = None def get_prompt(self): if self.sep_style == SeparatorStyle.SINGLE: ret = self.system + self.sep for role, message in self.messages: if message: ret += role + ": " + message + self.sep else: ret += role + ":" return ret elif self.sep_style == SeparatorStyle.TWO: seps = [self.sep, self.sep2] ret = self.system + seps[0] for i, (role, message) in enumerate(self.messages): if message: ret += role + ": " + message + seps[i % 2] else: ret += role + ":" return ret else: raise ValueError(f"Invalid style: {self.sep_style}") def append_message(self, role, message): self.messages.append([role, message]) def to_gradio_chatbot(self): ret = [] for i, (role, msg) in enumerate(self.messages[self.offset:]): if i % 2 == 0: ret.append([msg, None]) else: ret[-1][-1] = msg return ret def copy(self): return Conversation( system=self.system, # system_img=self.system_img, roles=self.roles, messages=[[x, y] for x, y in self.messages], offset=self.offset, sep_style=self.sep_style, sep=self.sep, sep2=self.sep2, conv_id=self.conv_id) def dict(self): return { "system": self.system, # "system_img": self.system_img, "roles": self.roles, "messages": self.messages, "offset": self.offset, "sep": self.sep, "sep2": self.sep2, "conv_id": self.conv_id, } class StoppingCriteriaSub(StoppingCriteria): def __init__(self, stops=[], encounters=1): super().__init__() self.stops = stops def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor): for stop in self.stops: if torch.all((stop == input_ids[0][-len(stop):])).item(): return True return False CONV_VISION = Conversation( system="Give the following image: <Img>ImageContent</Img>. " "You will be able to see the image once I provide it to you. Please answer my questions.", roles=("Human", "Assistant"), messages=[], offset=0, sep_style=SeparatorStyle.SINGLE, sep="###", ) default_conversation = Conversation( system="", roles=("Human", "Assistant"), messages=[], offset=0, sep_style=SeparatorStyle.SINGLE, sep="###", ) class Chat: def __init__(self, model, vis_processor, device='cuda:0'): self.device = device self.model = model self.vis_processor = vis_processor self.image_vis_processor = Blip2ImageEvalProcessor() stop_words_ids = [torch.tensor([835]).to(self.device), torch.tensor([2277, 29937]).to(self.device)] # '###' can be encoded in two different ways. 
self.stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)]) def ask(self, text, conv): if len(conv.messages) > 0 and conv.messages[-1][0] == conv.roles[0] \ and ('</Video>' in conv.messages[-1][1] or '</Image>' in conv.messages[-1][1]): # last message is image. conv.messages[-1][1] = ' '.join([conv.messages[-1][1], text]) else: conv.append_message(conv.roles[0], text) def answer(self, conv, img_list, max_new_tokens=300, num_beams=1, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1, temperature=1.0, max_length=2000): # import pdb;pdb.set_trace() conv.append_message(conv.roles[1], None) embs = self.get_context_emb(conv, img_list) # embs = [1,142,4096], img.shape = [1,32,4096] current_max_len = embs.shape[1] + max_new_tokens if current_max_len - max_length > 0: print('Warning: The number of tokens in current conversation exceeds the max length. ' 'The model will not see the contexts outside the range.') begin_idx = max(0, current_max_len - max_length) embs = embs[:, begin_idx:] outputs = self.model.llama_model.generate( inputs_embeds=embs, max_new_tokens=max_new_tokens, stopping_criteria=self.stopping_criteria, num_beams=num_beams, do_sample=True, min_length=min_length,# 1 top_p=top_p, # 0.9 repetition_penalty=repetition_penalty, # 1.0 length_penalty=length_penalty, # 1 temperature=temperature, # 1 ) output_token = outputs[0] if output_token[0] == 0: # the model might output a unknow token <unk> at the beginning. remove it output_token = output_token[1:] if output_token[0] == 1: # some users find that there is a start token <s> at the beginning. remove it output_token = output_token[1:] output_text = self.model.llama_tokenizer.decode(output_token, add_special_tokens=False) output_text = output_text.split('###')[0] # remove the stop sign '###' output_text = output_text.split('Assistant:')[-1].strip() conv.messages[-1][1] = output_text return output_text, output_token.cpu().numpy() def upload_video(self, video_path, conv, img_list): import pdb;pdb.set_trace() msg = "" if isinstance(video_path, str): # is a video path ext = os.path.splitext(video_path)[-1].lower() print(video_path) # image = self.vis_processor(image).unsqueeze(0).to(self.device) video, msg = load_video( video_path=video_path, n_frms=8, height=224, width=224, sampling ="uniform", return_msg = True ) video = self.vis_processor.transform(video) video = video.unsqueeze(0).to(self.device) # print(image) else: raise NotImplementedError image_emb, _ = self.model.encode_videoQformer_visual(video) img_list.append(image_emb) conv.append_message(conv.roles[0], "<Video><ImageHere></Video> "+ msg) return "Received." def upload_video_without_audio(self, video_path, conv, img_list): # import pdb;pdb.set_trace() msg = "" if isinstance(video_path, str): # is a video path ext = os.path.splitext(video_path)[-1].lower() print(video_path) # image = self.vis_processor(image).unsqueeze(0).to(self.device) video, msg = load_video( video_path=video_path, n_frms=16, # here!!!!!,change the time_length, origin:8 height=224, width=224, sampling ="uniform", return_msg = True ) # video.shape [3,8,224,224] video = self.vis_processor.transform(video) # [3,8,224,224] video = video.unsqueeze(0).to(self.device) # print(image) else: raise NotImplementedError # conv.system = "You can understand the video that the user provides. Follow the instructions carefully and explain your answers in detail." 
image_emb, _ = self.model.encode_videoQformer_visual(video) # [1,32,4096] img_list.append(image_emb) # 1 conv.append_message(conv.roles[0], "<Video><ImageHere></Video> "+ msg) return "Received." def upload_img(self, image, conv, img_list): import pdb;pdb.set_trace() msg = "" if isinstance(image, str): # is a image path raw_image = Image.open(image).convert('RGB') # 增加一个时间维度 image = self.image_vis_processor(raw_image).unsqueeze(0).unsqueeze(2).to(self.device) elif isinstance(image, Image.Image): raw_image = image image = self.image_vis_processor(raw_image).unsqueeze(0).unsqueeze(2).to(self.device) elif isinstance(image, torch.Tensor): if len(image.shape) == 3: image = image.unsqueeze(0) image = image.to(self.device) else: raise NotImplementedError image_emb, _ = self.model.encode_videoQformer_visual(image) img_list.append(image_emb) # Todo msg="" conv.append_message(conv.roles[0], "<Image><ImageHere></Image> "+ msg) return "Received." def get_context_emb(self, conv, img_list): # import pdb;pdb.set_trace() prompt = conv.get_prompt() prompt_segs = prompt.split('<ImageHere>') assert len(prompt_segs) == len(img_list) + 1, "Unmatched numbers of image placeholders and images." seg_tokens = [ self.model.llama_tokenizer( seg, return_tensors="pt", add_special_tokens=i == 0).to(self.device).input_ids # only add bos to the first seg for i, seg in enumerate(prompt_segs) ] seg_embs = [self.model.llama_model.model.embed_tokens(seg_t) for seg_t in seg_tokens] mixed_embs = [emb for pair in zip(seg_embs[:-1], img_list) for emb in pair] + [seg_embs[-1]] mixed_embs = torch.cat(mixed_embs, dim=1) return mixed_embs if __name__ =='__main__': video_path = '/mnt/workspace/videoGPT/Video-LLaMA/examples/applausing.mp4' load_and_transform_video_data([video_path],"cpu", clips_per_video=8)
MovieChat-main
MovieChat/conversation/conversation_video.py
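A minimal sketch of how the Conversation class above assembles a prompt in the single-separator style; the system string and question are illustrative placeholders.

from MovieChat.conversation.conversation_video import default_conversation

conv = default_conversation.copy()
conv.system = "You can understand the video that the user provides."
conv.append_message(conv.roles[0], "<Video><ImageHere></Video> What happens in this clip?")
conv.append_message(conv.roles[1], None)   # leave the Assistant slot open for generation

# "...###Human: <Video><ImageHere></Video> What happens in this clip?###Assistant:"
print(conv.get_prompt())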
""" Adapted from: https://github.com/Vision-CAIR/MiniGPT-4/blob/main/demo.py """ import argparse import os import random import numpy as np import torch import json import torch.backends.cudnn as cudnn from MovieChat.common.config import Config from MovieChat.common.dist_utils import get_rank from MovieChat.common.registry import registry from MovieChat.conversation.conversation_video import Chat, Conversation, default_conversation,SeparatorStyle import decord import cv2 import time from tqdm import tqdm import subprocess from moviepy.editor import VideoFileClip from decord import VideoReader decord.bridge.set_bridge('torch') # imports modules for registration from MovieChat.datasets.builders import * from MovieChat.models import * from MovieChat.processors import * from MovieChat.runners import * from MovieChat.tasks import * from moviepy.editor import* import random as rnd from transformers import StoppingCriteria, StoppingCriteriaList from PIL import Image import GPUtil MAX_INT = 8 N_SAMPLES = 32 SHORT_MEMORY_Length = 10 def parse_args(): parser = argparse.ArgumentParser(description="Demo") parser.add_argument("--cfg-path", required=True, help="path to configuration file.") parser.add_argument("--gpu-id", type=int, default=0, help="specify the gpu to load the model.") parser.add_argument("--num-beams", type=int, default=1) parser.add_argument("--temperature", type=float, default=1.0) parser.add_argument("--video-path", required=True, help="path to video file.") parser.add_argument("--gt_file", required=True, help="path to gt file.") parser.add_argument('--output_dir', help='Directory to save the model results JSON.', required=True) parser.add_argument('--output_name', help='Name of the file for storing results JSON.', required=True) parser.add_argument("--fragment-video-path", required=True, help="path to video fragment file.") parser.add_argument( "--options", nargs="+", help="override some settings in the used config, the key-value pair " "in xxx=yyy format will be merged into config file (deprecate), " "change to --cfg-options instead.", ) args = parser.parse_args() return args def setup_seeds(config_seed): seed = config_seed + get_rank() random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) cudnn.benchmark = False cudnn.deterministic = True class StoppingCriteriaSub(StoppingCriteria): def __init__(self, stops=[], encounters=1): super().__init__() self.stops = stops def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor): for stop in self.stops: if torch.all((stop == input_ids[0][-len(stop):])).item(): return True return False def video_duration(filename): result = subprocess.run(["ffprobe", "-v", "error", "-show_entries", "format=duration", "-of", "default=noprint_wrappers=1:nokey=1", filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) return float(result.stdout) def capture_video(video_path, fragment_video_path, per_video_length, n_stage): start_time = n_stage * per_video_length end_time = (n_stage+1) * per_video_length video =CompositeVideoClip([VideoFileClip(video_path).subclip(start_time,end_time)]) video.write_videofile(fragment_video_path) def load_video(video_path, n_frms=MAX_INT, height=-1, width=-1, sampling="uniform", return_msg = False): decord.bridge.set_bridge("torch") vr = VideoReader(uri=video_path, height=height, width=width) vlen = len(vr) start, end = 0, vlen n_frms = min(n_frms, vlen) if sampling == "uniform": indices = np.arange(start, end, vlen / n_frms).astype(int).tolist() elif sampling == "headtail": indices_h = 
sorted(rnd.sample(range(vlen // 2), n_frms // 2)) indices_t = sorted(rnd.sample(range(vlen // 2, vlen), n_frms // 2)) indices = indices_h + indices_t else: raise NotImplementedError # get_batch -> T, H, W, C temp_frms = vr.get_batch(indices) tensor_frms = torch.from_numpy(temp_frms) if type(temp_frms) is not torch.Tensor else temp_frms frms = tensor_frms.permute(3, 0, 1, 2).float() # (C, T, H, W) if not return_msg: return frms fps = float(vr.get_avg_fps()) sec = ", ".join([str(round(f / fps, 1)) for f in indices]) # " " should be added in the start and end msg = f"The video contains {len(indices)} frames sampled at {sec} seconds. " return frms, msg def parse_video_fragment(video_path, video_length, n_stage = 0, n_samples = N_SAMPLES): decord.bridge.set_bridge("torch") per_video_length = video_length / n_samples # cut video from per_video_length(n_stage-1, n_stage) capture_video(video_path, fragment_video_path, per_video_length, n_stage) return fragment_video_path class Chat: def __init__(self, model, vis_processor, device='cuda:0'): self.device = device self.model = model self.vis_processor = vis_processor self.image_vis_processor = Blip2ImageEvalProcessor() stop_words_ids = [torch.tensor([835]).to(self.device), torch.tensor([2277, 29937]).to(self.device)] # '###' can be encoded in two different ways. self.stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)]) def get_context_emb(self, input_text, msg, img_list): prompt_1 = "You are able to understand the visual content that the user provides.Follow the instructions carefully and explain your answers in detail.###Human: <Video><ImageHere></Video>" prompt_2 = input_text prompt_3 = "###Assistant:" prompt = prompt_1 + " " + prompt_2 + prompt_3 prompt_segs = prompt.split('<ImageHere>') assert len(prompt_segs) == len(img_list) + 1, "Unmatched numbers of image placeholders and images." seg_tokens = [ self.model.llama_tokenizer( seg, return_tensors="pt", add_special_tokens=i == 0).to(self.device).input_ids # only add bos to the first seg for i, seg in enumerate(prompt_segs) ] seg_embs = [self.model.llama_model.model.embed_tokens(seg_t) for seg_t in seg_tokens] mixed_embs = [emb for pair in zip(seg_embs[:-1], img_list) for emb in pair] + [seg_embs[-1]] mixed_embs = torch.cat(mixed_embs, dim=1) return mixed_embs def answer(self, img_list, input_text, msg, max_new_tokens=300, num_beams=1, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1, temperature=1.0, max_length=2000): embs = self.get_context_emb(input_text, msg, img_list) current_max_len = embs.shape[1] + max_new_tokens if current_max_len - max_length > 0: print('Warning: The number of tokens in current conversation exceeds the max length. ' 'The model will not see the contexts outside the range.') begin_idx = max(0, current_max_len - max_length) embs = embs[:, begin_idx:] outputs = self.model.llama_model.generate( inputs_embeds=embs, max_new_tokens=max_new_tokens, stopping_criteria=self.stopping_criteria, num_beams=num_beams, do_sample=True, min_length=min_length, top_p=top_p, repetition_penalty=repetition_penalty, length_penalty=length_penalty, temperature=temperature, ) output_token = outputs[0] if output_token[0] == 0: # the model might output a unknow token <unk> at the beginning. remove it output_token = output_token[1:] if output_token[0] == 1: # some users find that there is a start token <s> at the beginning. 
remove it output_token = output_token[1:] output_text = self.model.llama_tokenizer.decode(output_token, add_special_tokens=False) output_text = output_text.split('###')[0] # remove the stop sign '###' output_text = output_text.split('Assistant:')[-1].strip() return output_text, output_token.cpu().numpy() def cal_frame(self, video_length, cur_min, cur_sec, middle_video): per_frag_second = video_length / N_SAMPLES if middle_video: cur_seconds = cur_min * 60 + cur_sec num_frames = int(cur_seconds / per_frag_second) per_frame_second = per_frag_second/SHORT_MEMORY_Length cur_frame = int((cur_seconds-per_frag_second*num_frames)/per_frame_second) return num_frames, cur_frame else: cur_frame = 0 num_frames = int(video_length / per_frag_second) return num_frames, cur_frame def upload_video_without_audio(self, video_path, fragment_video_path, cur_min, cur_sec, cur_image, img_list, middle_video): msg = "" if isinstance(video_path, str): # is a video path ext = os.path.splitext(video_path)[-1].lower() print(video_path) video_length = video_duration(video_path) num_frames, cur_frame = self.cal_frame(video_length, cur_min, cur_sec, middle_video) if num_frames == 0: video_fragment = parse_video_fragment(video_path=video_path, video_length=video_length, n_stage=0, n_samples= N_SAMPLES) video_fragment, msg = load_video( video_path=fragment_video_path, n_frms=MAX_INT, height=224, width=224, sampling ="uniform", return_msg = True ) video_fragment = self.vis_processor.transform(video_fragment) video_fragment = video_fragment.unsqueeze(0).to(self.device) self.model.encode_short_memory_frame(video_fragment, cur_frame) else: for i in range(num_frames): print(i) video_fragment = parse_video_fragment(video_path=video_path, video_length=video_length, n_stage=i, n_samples= N_SAMPLES) video_fragment, msg = load_video( video_path=fragment_video_path, n_frms=MAX_INT, height=224, width=224, sampling ="uniform", return_msg = True ) video_fragment = self.vis_processor.transform(video_fragment) video_fragment = video_fragment.unsqueeze(0).to(self.device) if middle_video: self.model.encode_short_memory_frame(video_fragment, cur_frame) else: self.model.encode_short_memory_frame(video_fragment) else: raise NotImplementedError video_emb, _ = self.model.encode_long_video(cur_image, middle_video) img_list.append(video_emb) return msg if __name__ =='__main__': # 初始化模型 config_seed = 42 setup_seeds(config_seed) print('Initializing Chat') args = parse_args() cfg = Config(args) model_config = cfg.model_cfg model_config.device_8bit = args.gpu_id model_cls = registry.get_model_class(model_config.arch) model = model_cls.from_config(model_config).to('cuda:{}'.format(args.gpu_id)) vis_processor_cfg = cfg.datasets_cfg.webvid.vis_processor.train vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg) chat = Chat(model, vis_processor, device='cuda:{}'.format(args.gpu_id)) print('Initialization Finished') # 读取视频和GT # Load both ground truth file containing questions and answers with open(args.gt_file) as file: gt_qa = json.load(file) output_list = [] if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) prev_video_id = None cur_image = None img_list = None msg = None for sample in tqdm(gt_qa): video_id = sample['video_id'] question = sample['question'] answer = sample['answer'] id = sample['id'] sample_set = {'id': id, 'question': question, 'answer': answer} fragment_video_path = args.fragment_video_path cur_min = 0 cur_sec = 0 middle_video = False if prev_video_id != video_id: 
chat.model.long_memory_buffer = [] chat.model.short_memory_buffer = [] video_path = os.path.join(args.video_path, f"video{video_id}.mp4") # load the video cap = cv2.VideoCapture(video_path) fps_video = cap.get(cv2.CAP_PROP_FPS) cur_fps = fps_video * (60*cur_min + cur_sec) cap = cv2.VideoCapture(video_path) cap.set(cv2.CAP_PROP_POS_FRAMES, cur_fps) ret, frame = cap.read() temp_frame_path = 'src/output_frame2/snapshot.jpg' cv2.imwrite(temp_frame_path, frame) raw_image = Image.open(temp_frame_path).convert('RGB') image = chat.image_vis_processor(raw_image).unsqueeze(0).unsqueeze(2).to(chat.device) # [1,3,1,224,224] cur_image = chat.model.encode_image(image) img_list = [] msg = chat.upload_video_without_audio( video_path=video_path, fragment_video_path=fragment_video_path, cur_min=cur_min, cur_sec=cur_sec, cur_image = cur_image, img_list=img_list, middle_video = middle_video, ) prev_video_id = video_id text_input = question num_beams = args.num_beams temperature = args.temperature try: llm_message = chat.answer(img_list=img_list, input_text=text_input, msg = msg, num_beams=num_beams, temperature=temperature, max_new_tokens=300, max_length=2000)[0] sample_set['pred'] = llm_message print(llm_message) output_list.append(sample_set) except Exception as e: print(f"Error processing video file '{video_id}': {e}") # Save the output list to a JSON file with open(os.path.join(args.output_dir, f"{args.output_name}.json"), 'w') as file: json.dump(output_list, file)
MovieChat-main
eval_code/run_inference_qa_msrvtt.py
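A worked sketch of the cal_frame arithmetic used in the script above, inlined with made-up numbers for the middle_video=True branch (N_SAMPLES = 32 and SHORT_MEMORY_Length = 10 as defined at the top of the file).

# A 320-second video split into 32 fragments gives 10 s per fragment.
video_length, cur_min, cur_sec = 320.0, 1, 25        # question asked at 1:25
per_frag_second = video_length / 32                  # 10.0 s per fragment
cur_seconds = cur_min * 60 + cur_sec                 # 85 s
num_frames = int(cur_seconds / per_frag_second)      # 8 fully elapsed fragments
per_frame_second = per_frag_second / 10              # 1.0 s per short-memory frame
cur_frame = int((cur_seconds - per_frag_second * num_frames) / per_frame_second)
print(num_frames, cur_frame)                         # 8 5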
""" Adapted from: https://github.com/Vision-CAIR/MiniGPT-4/blob/main/demo.py """ import argparse import os import random import numpy as np import torch import json import torch.backends.cudnn as cudnn from MovieChat.common.config import Config from MovieChat.common.dist_utils import get_rank from MovieChat.common.registry import registry from MovieChat.conversation.conversation_video import Chat, Conversation, default_conversation,SeparatorStyle import decord import cv2 import time from tqdm import tqdm import subprocess from moviepy.editor import VideoFileClip from decord import VideoReader decord.bridge.set_bridge('torch') # imports modules for registration from MovieChat.datasets.builders import * from MovieChat.models import * from MovieChat.processors import * from MovieChat.runners import * from MovieChat.tasks import * from moviepy.editor import* import random as rnd from transformers import StoppingCriteria, StoppingCriteriaList from PIL import Image import GPUtil MAX_INT = 8 N_SAMPLES = 32 SHORT_MEMORY_Length = 10 def parse_args(): parser = argparse.ArgumentParser(description="Demo") parser.add_argument("--cfg-path", required=True, help="path to configuration file.") parser.add_argument("--gpu-id", type=int, default=0, help="specify the gpu to load the model.") parser.add_argument("--num-beams", type=int, default=1) parser.add_argument("--temperature", type=float, default=1.0) parser.add_argument("--video-path", required=True, help="path to video file.") parser.add_argument("--gt_file", required=True, help="path to gt file.") parser.add_argument('--output_dir', help='Directory to save the model results JSON.', required=True) parser.add_argument('--output_name', help='Name of the file for storing results JSON.', required=True) parser.add_argument("--fragment-video-path", required=True, help="path to video fragment file.") parser.add_argument( "--options", nargs="+", help="override some settings in the used config, the key-value pair " "in xxx=yyy format will be merged into config file (deprecate), " "change to --cfg-options instead.", ) args = parser.parse_args() return args def setup_seeds(config_seed): seed = config_seed + get_rank() random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) cudnn.benchmark = False cudnn.deterministic = True class StoppingCriteriaSub(StoppingCriteria): def __init__(self, stops=[], encounters=1): super().__init__() self.stops = stops def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor): for stop in self.stops: if torch.all((stop == input_ids[0][-len(stop):])).item(): return True return False def video_duration(filename): result = subprocess.run(["ffprobe", "-v", "error", "-show_entries", "format=duration", "-of", "default=noprint_wrappers=1:nokey=1", filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) return float(result.stdout) def capture_video(video_path, fragment_video_path, per_video_length, n_stage): start_time = n_stage * per_video_length end_time = (n_stage+1) * per_video_length video =CompositeVideoClip([VideoFileClip(video_path).subclip(start_time,end_time)]) video.write_videofile(fragment_video_path) def load_video(video_path, n_frms=MAX_INT, height=-1, width=-1, sampling="uniform", return_msg = False): decord.bridge.set_bridge("torch") vr = VideoReader(uri=video_path, height=height, width=width) vlen = len(vr) start, end = 0, vlen n_frms = min(n_frms, vlen) if sampling == "uniform": indices = np.arange(start, end, vlen / n_frms).astype(int).tolist() elif sampling == "headtail": indices_h = 
sorted(rnd.sample(range(vlen // 2), n_frms // 2)) indices_t = sorted(rnd.sample(range(vlen // 2, vlen), n_frms // 2)) indices = indices_h + indices_t else: raise NotImplementedError # get_batch -> T, H, W, C temp_frms = vr.get_batch(indices) tensor_frms = torch.from_numpy(temp_frms) if type(temp_frms) is not torch.Tensor else temp_frms frms = tensor_frms.permute(3, 0, 1, 2).float() # (C, T, H, W) if not return_msg: return frms fps = float(vr.get_avg_fps()) sec = ", ".join([str(round(f / fps, 1)) for f in indices]) # " " should be added in the start and end msg = f"The video contains {len(indices)} frames sampled at {sec} seconds. " return frms, msg def parse_video_fragment(video_path, video_length, n_stage = 0, n_samples = N_SAMPLES): decord.bridge.set_bridge("torch") per_video_length = video_length / n_samples # cut video from per_video_length(n_stage-1, n_stage) capture_video(video_path, fragment_video_path, per_video_length, n_stage) return fragment_video_path class Chat: def __init__(self, model, vis_processor, device='cuda:0'): self.device = device self.model = model self.vis_processor = vis_processor self.image_vis_processor = Blip2ImageEvalProcessor() stop_words_ids = [torch.tensor([835]).to(self.device), torch.tensor([2277, 29937]).to(self.device)] # '###' can be encoded in two different ways. self.stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)]) def get_context_emb(self, input_text, msg, img_list): prompt_1 = "You are able to understand the visual content that the user provides.Follow the instructions carefully and explain your answers in detail.###Human: <Video><ImageHere></Video>" prompt_2 = input_text prompt_3 = "###Assistant:" prompt = prompt_1 + " " + prompt_2 + prompt_3 prompt_segs = prompt.split('<ImageHere>') assert len(prompt_segs) == len(img_list) + 1, "Unmatched numbers of image placeholders and images." seg_tokens = [ self.model.llama_tokenizer( seg, return_tensors="pt", add_special_tokens=i == 0).to(self.device).input_ids # only add bos to the first seg for i, seg in enumerate(prompt_segs) ] seg_embs = [self.model.llama_model.model.embed_tokens(seg_t) for seg_t in seg_tokens] mixed_embs = [emb for pair in zip(seg_embs[:-1], img_list) for emb in pair] + [seg_embs[-1]] mixed_embs = torch.cat(mixed_embs, dim=1) return mixed_embs def answer(self, img_list, input_text, msg, max_new_tokens=300, num_beams=1, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1, temperature=1.0, max_length=2000): embs = self.get_context_emb(input_text, msg, img_list) current_max_len = embs.shape[1] + max_new_tokens if current_max_len - max_length > 0: print('Warning: The number of tokens in current conversation exceeds the max length. ' 'The model will not see the contexts outside the range.') begin_idx = max(0, current_max_len - max_length) embs = embs[:, begin_idx:] outputs = self.model.llama_model.generate( inputs_embeds=embs, max_new_tokens=max_new_tokens, stopping_criteria=self.stopping_criteria, num_beams=num_beams, do_sample=True, min_length=min_length, top_p=top_p, repetition_penalty=repetition_penalty, length_penalty=length_penalty, temperature=temperature, ) output_token = outputs[0] if output_token[0] == 0: # the model might output a unknow token <unk> at the beginning. remove it output_token = output_token[1:] if output_token[0] == 1: # some users find that there is a start token <s> at the beginning. 
remove it output_token = output_token[1:] output_text = self.model.llama_tokenizer.decode(output_token, add_special_tokens=False) output_text = output_text.split('###')[0] # remove the stop sign '###' output_text = output_text.split('Assistant:')[-1].strip() return output_text, output_token.cpu().numpy() def cal_frame(self, video_length, cur_min, cur_sec, middle_video): per_frag_second = video_length / N_SAMPLES if middle_video: cur_seconds = cur_min * 60 + cur_sec num_frames = int(cur_seconds / per_frag_second) per_frame_second = per_frag_second/SHORT_MEMORY_Length cur_frame = int((cur_seconds-per_frag_second*num_frames)/per_frame_second) return num_frames, cur_frame else: cur_frame = 0 num_frames = int(video_length / per_frag_second) return num_frames, cur_frame def upload_video_without_audio(self, video_path, fragment_video_path, cur_min, cur_sec, cur_image, img_list, middle_video): msg = "" if isinstance(video_path, str): # is a video path ext = os.path.splitext(video_path)[-1].lower() print(video_path) video_length = video_duration(video_path) num_frames, cur_frame = self.cal_frame(video_length, cur_min, cur_sec, middle_video) if num_frames == 0: video_fragment = parse_video_fragment(video_path=video_path, video_length=video_length, n_stage=0, n_samples= N_SAMPLES) video_fragment, msg = load_video( video_path=fragment_video_path, n_frms=MAX_INT, height=224, width=224, sampling ="uniform", return_msg = True ) video_fragment = self.vis_processor.transform(video_fragment) video_fragment = video_fragment.unsqueeze(0).to(self.device) self.model.encode_short_memory_frame(video_fragment, cur_frame) else: for i in range(num_frames): print(i) video_fragment = parse_video_fragment(video_path=video_path, video_length=video_length, n_stage=i, n_samples= N_SAMPLES) video_fragment, msg = load_video( video_path=fragment_video_path, n_frms=MAX_INT, height=224, width=224, sampling ="uniform", return_msg = True ) video_fragment = self.vis_processor.transform(video_fragment) video_fragment = video_fragment.unsqueeze(0).to(self.device) if middle_video: self.model.encode_short_memory_frame(video_fragment, cur_frame) else: self.model.encode_short_memory_frame(video_fragment) else: raise NotImplementedError video_emb, _ = self.model.encode_long_video(cur_image, middle_video) img_list.append(video_emb) return msg if __name__ =='__main__': # 初始化模型 config_seed = 42 setup_seeds(config_seed) print('Initializing Chat') args = parse_args() cfg = Config(args) model_config = cfg.model_cfg model_config.device_8bit = args.gpu_id model_cls = registry.get_model_class(model_config.arch) model = model_cls.from_config(model_config).to('cuda:{}'.format(args.gpu_id)) vis_processor_cfg = cfg.datasets_cfg.webvid.vis_processor.train vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg) chat = Chat(model, vis_processor, device='cuda:{}'.format(args.gpu_id)) print('Initialization Finished') # 读取视频和GT # Load both ground truth file containing questions and answers with open(args.gt_file) as file: gt_qa = json.load(file) output_list = [] if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) prev_video_id = None cur_image = None img_list = None msg = None for sample in tqdm(gt_qa): video_id = sample['video_id'] question = sample['question'] answer = sample['answer'] id = sample['id'] sample_set = {'id': id, 'question': question, 'answer': answer} fragment_video_path = args.fragment_video_path cur_min = 0 cur_sec = 0 middle_video = False if prev_video_id != video_id: 
chat.model.long_memory_buffer = [] chat.model.short_memory_buffer = [] video_path = os.path.join(args.video_path, f"{video_id}.avi") # load the video cap = cv2.VideoCapture(video_path) fps_video = cap.get(cv2.CAP_PROP_FPS) cur_fps = fps_video * (60*cur_min + cur_sec) cap = cv2.VideoCapture(video_path) cap.set(cv2.CAP_PROP_POS_FRAMES, cur_fps) ret, frame = cap.read() temp_frame_path = 'src/output_frame/snapshot.jpg' cv2.imwrite(temp_frame_path, frame) raw_image = Image.open(temp_frame_path).convert('RGB') image = chat.image_vis_processor(raw_image).unsqueeze(0).unsqueeze(2).to(chat.device) # [1,3,1,224,224] cur_image = chat.model.encode_image(image) img_list = [] msg = chat.upload_video_without_audio( video_path=video_path, fragment_video_path=fragment_video_path, cur_min=cur_min, cur_sec=cur_sec, cur_image = cur_image, img_list=img_list, middle_video = middle_video, ) prev_video_id = video_id text_input = question num_beams = args.num_beams temperature = args.temperature try: llm_message = chat.answer(img_list=img_list, input_text=text_input, msg = msg, num_beams=num_beams, temperature=temperature, max_new_tokens=300, max_length=2000)[0] sample_set['pred'] = llm_message print(llm_message) output_list.append(sample_set) except Exception as e: print(f"Error processing video file '{video_id}': {e}") # Save the output list to a JSON file with open(os.path.join(args.output_dir, f"{args.output_name}.json"), 'w') as file: json.dump(output_list, file)
MovieChat-main
eval_code/run_inference_qa_msvd.py
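The "uniform" branch of load_video above reduces to a single piece of index arithmetic; this standalone sketch shows the frame indices it would pick for a hypothetical 250-frame clip with n_frms = 8.

import numpy as np

vlen, n_frms = 250, 8
indices = np.arange(0, vlen, vlen / n_frms).astype(int).tolist()
print(indices)   # [0, 31, 62, 93, 125, 156, 187, 218]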
""" Adapted from: https://github.com/mbzuai-oryx/Video-ChatGPT/blob/main/quantitative_evaluation/evaluate_activitynet_qa.py """ import openai import os import argparse import json import ast from multiprocessing.pool import Pool def parse_args(): parser = argparse.ArgumentParser(description="question-answer-generation-using-gpt-3") parser.add_argument("--pred_path", required=True, help="The path to file containing prediction.") parser.add_argument("--output_dir", required=True, help="The path to save annotation json files.") parser.add_argument("--output_json", required=True, help="The path to save annotation final combined json file.") parser.add_argument("--api_key", required=True, help="OpenAI API key.") parser.add_argument("--num_tasks", required=True, type=int, help="Number of splits.") args = parser.parse_args() return args def annotate(prediction_set, caption_files, output_dir): """ Evaluates question and answer pairs using GPT-3 Returns a score for correctness. """ for file in caption_files: key = file[:-5] # Strip file extension qa_set = prediction_set[key] question = qa_set['q'] answer = qa_set['a'] pred = qa_set['pred'] try: # Compute the correctness score completion = openai.ChatCompletion.create( model="gpt-3.5-turbo", messages=[ { "role": "system", "content": "You are an intelligent chatbot designed for evaluating the correctness of generative outputs for question-answer pairs. " "Your task is to compare the predicted answer with the correct answer and determine if they match meaningfully. Here's how you can accomplish the task:" "------" "##INSTRUCTIONS: " "- Focus on the meaningful match between the predicted answer and the correct answer.\n" "- Consider synonyms or paraphrases as valid matches.\n" "- Evaluate the correctness of the prediction compared to the answer." }, { "role": "user", "content": "Please evaluate the following video-based question-answer pair:\n\n" f"Question: {question}\n" f"Correct Answer: {answer}\n" f"Predicted Answer: {pred}\n\n" "Provide your evaluation only as a yes/no and score where the score is an integer value between 0 and 5, with 5 indicating the highest meaningful match. " "Please generate the response in the form of a Python dictionary string with keys 'pred' and 'score', where value of 'pred' is a string of 'yes' or 'no' and value of 'score' is in INTEGER, not STRING." "DO NOT PROVIDE ANY OTHER OUTPUT TEXT OR EXPLANATION. Only provide the Python dictionary string. " "For example, your response should look like this: {'pred': 'yes', 'score': 4.8}." } ] ) # Convert response to a Python dictionary. response_message = completion["choices"][0]["message"]["content"] response_dict = ast.literal_eval(response_message) result_qa_pair = [response_dict, qa_set] # Save the question-answer pairs to a json file. with open(f"{output_dir}/{key}.json", "w") as f: json.dump(result_qa_pair, f) except Exception as e: print(f"Error processing file '{key}': {e}") def main(): """ Main function to control the flow of the program. """ # Parse arguments. 
args = parse_args() file = open(args.pred_path) pred_contents = json.load(file) # Dictionary to store the count of occurrences for each video_id video_id_counts = {} new_pred_contents = [] # Iterate through each sample in pred_contents for sample in pred_contents: video_id = sample['id'] if video_id in video_id_counts: video_id_counts[video_id] += 1 else: video_id_counts[video_id] = 0 # Create a new sample with the modified key new_sample = sample new_sample['id'] = f"{video_id}_{video_id_counts[video_id]}" new_pred_contents.append(new_sample) # Generating list of id's and corresponding files id_list = [x['id'] for x in new_pred_contents] caption_files = [f"{id}.json" for id in id_list] # import pdb; pdb.set_trace() output_dir = args.output_dir # Generate output directory if not exists. if not os.path.exists(output_dir): os.makedirs(output_dir) # Preparing dictionary of question-answer sets prediction_set = {} for sample in new_pred_contents: # import pdb; pdb.set_trace() id = sample['id'] question = sample['question'] answer = sample['answer'] pred = sample['pred'] qa_set = {"q": question, "a": answer, "pred": pred} prediction_set[id] = qa_set # Set the OpenAI API key. openai.api_key = args.api_key openai.api_base = "https://api.aiproxy.io/v1" num_tasks = args.num_tasks # import pdb; pdb.set_trace() # While loop to ensure that all captions are processed. while True: try: # Files that have not been processed yet. completed_files = os.listdir(output_dir) print(f"completed_files: {len(completed_files)}") # Files that have not been processed yet. incomplete_files = [f for f in caption_files if f not in completed_files] print(f"incomplete_files: {len(incomplete_files)}") # Break the loop when there are no incomplete files if len(incomplete_files) == 0: break if len(incomplete_files) <= num_tasks: num_tasks = 1 # Split tasks into parts. part_len = len(incomplete_files) // num_tasks all_parts = [incomplete_files[i:i + part_len] for i in range(0, len(incomplete_files), part_len)] task_args = [(prediction_set, part, args.output_dir) for part in all_parts] # Use a pool of workers to process the files in parallel. with Pool() as pool: pool.starmap(annotate, task_args) except Exception as e: print(f"Error: {e}") # Combine all the processed files into one combined_contents = {} json_path = args.output_json # Iterate through json files for file_name in os.listdir(output_dir): if file_name.endswith(".json"): file_path = os.path.join(output_dir, file_name) with open(file_path, "r") as json_file: content = json.load(json_file) combined_contents[file_name[:-5]] = content # Write combined content to a json file with open(json_path, "w") as json_file: json.dump(combined_contents, json_file) print("All evaluation completed!") # Calculate average score and accuracy score_sum = 0 count = 0 yes_count = 0 no_count = 0 for key, result in combined_contents.items(): # Computing score count += 1 score_match = result[0]['score'] score = int(score_match) score_sum += score # Computing accuracy pred = result[0]['pred'] if "yes" in pred.lower(): yes_count += 1 elif "no" in pred.lower(): no_count += 1 average_score = score_sum / count accuracy = yes_count / (yes_count + no_count) print("Yes count:", yes_count) print("No count:", no_count) print("Accuracy:", accuracy) print("Average score:", average_score) if __name__ == "__main__": main()
MovieChat-main
eval_code/run_eval_qa.py
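A tiny worked sketch of the aggregation at the end of run_eval_qa.py, using a hand-made stand-in for combined_contents instead of the GPT-evaluated per-question JSON files (each value mirrors the [score_dict, qa_set] pairs the script writes).

combined_contents = {
    "q1_0": [{"pred": "yes", "score": 5}, {"q": "...", "a": "...", "pred": "..."}],
    "q2_0": [{"pred": "no", "score": 1}, {"q": "...", "a": "...", "pred": "..."}],
}

score_sum = sum(int(v[0]["score"]) for v in combined_contents.values())
yes_count = sum("yes" in v[0]["pred"].lower() for v in combined_contents.values())
no_count = sum("no" in v[0]["pred"].lower() for v in combined_contents.values())

print("Accuracy:", yes_count / (yes_count + no_count))        # 0.5
print("Average score:", score_sum / len(combined_contents))   # 3.0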
""" Adapted from: https://github.com/Vision-CAIR/MiniGPT-4/blob/main/demo.py """ import argparse import os import random import numpy as np import torch import json import torch.backends.cudnn as cudnn from MovieChat.common.config import Config from MovieChat.common.dist_utils import get_rank from MovieChat.common.registry import registry from MovieChat.conversation.conversation_video import Chat, Conversation, default_conversation,SeparatorStyle import decord import cv2 import time from tqdm import tqdm import subprocess from moviepy.editor import VideoFileClip from decord import VideoReader decord.bridge.set_bridge('torch') # imports modules for registration from MovieChat.datasets.builders import * from MovieChat.models import * from MovieChat.processors import * from MovieChat.runners import * from MovieChat.tasks import * from moviepy.editor import* import random as rnd from transformers import StoppingCriteria, StoppingCriteriaList from PIL import Image import GPUtil MAX_INT = 8 N_SAMPLES = 32 SHORT_MEMORY_Length = 10 def parse_args(): parser = argparse.ArgumentParser(description="Demo") parser.add_argument("--cfg-path", required=True, help="path to configuration file.") parser.add_argument("--gpu-id", type=int, default=0, help="specify the gpu to load the model.") parser.add_argument("--num-beams", type=int, default=1) parser.add_argument("--temperature", type=float, default=1.0) parser.add_argument("--video-path", required=True, help="path to video file.") parser.add_argument("--gt_file", required=True, help="path to gt file.") parser.add_argument("--gt_file_answers", required=True, help="path to gt file.") parser.add_argument('--output_dir', help='Directory to save the model results JSON.', required=True) parser.add_argument('--output_name', help='Name of the file for storing results JSON.', required=True) parser.add_argument("--fragment-video-path", required=True, help="path to video fragment file.") parser.add_argument( "--options", nargs="+", help="override some settings in the used config, the key-value pair " "in xxx=yyy format will be merged into config file (deprecate), " "change to --cfg-options instead.", ) args = parser.parse_args() return args def setup_seeds(config_seed): seed = config_seed + get_rank() random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) cudnn.benchmark = False cudnn.deterministic = True class StoppingCriteriaSub(StoppingCriteria): def __init__(self, stops=[], encounters=1): super().__init__() self.stops = stops def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor): for stop in self.stops: if torch.all((stop == input_ids[0][-len(stop):])).item(): return True return False def video_duration(filename): result = subprocess.run(["ffprobe", "-v", "error", "-show_entries", "format=duration", "-of", "default=noprint_wrappers=1:nokey=1", filename], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) return float(result.stdout) def capture_video(video_path, fragment_video_path, per_video_length, n_stage): start_time = n_stage * per_video_length end_time = (n_stage+1) * per_video_length video =CompositeVideoClip([VideoFileClip(video_path).subclip(start_time,end_time)]) video.write_videofile(fragment_video_path) def load_video(video_path, n_frms=MAX_INT, height=-1, width=-1, sampling="uniform", return_msg = False): decord.bridge.set_bridge("torch") vr = VideoReader(uri=video_path, height=height, width=width) vlen = len(vr) start, end = 0, vlen n_frms = min(n_frms, vlen) if sampling == "uniform": indices = np.arange(start, end, vlen / 
n_frms).astype(int).tolist() elif sampling == "headtail": indices_h = sorted(rnd.sample(range(vlen // 2), n_frms // 2)) indices_t = sorted(rnd.sample(range(vlen // 2, vlen), n_frms // 2)) indices = indices_h + indices_t else: raise NotImplementedError # get_batch -> T, H, W, C temp_frms = vr.get_batch(indices) tensor_frms = torch.from_numpy(temp_frms) if type(temp_frms) is not torch.Tensor else temp_frms frms = tensor_frms.permute(3, 0, 1, 2).float() # (C, T, H, W) if not return_msg: return frms fps = float(vr.get_avg_fps()) sec = ", ".join([str(round(f / fps, 1)) for f in indices]) # " " should be added in the start and end msg = f"The video contains {len(indices)} frames sampled at {sec} seconds. " return frms, msg def parse_video_fragment(video_path, video_length, n_stage = 0, n_samples = N_SAMPLES): decord.bridge.set_bridge("torch") per_video_length = video_length / n_samples # cut video from per_video_length(n_stage-1, n_stage) capture_video(video_path, fragment_video_path, per_video_length, n_stage) return fragment_video_path class Chat: def __init__(self, model, vis_processor, device='cuda:0'): self.device = device self.model = model self.vis_processor = vis_processor self.image_vis_processor = Blip2ImageEvalProcessor() stop_words_ids = [torch.tensor([835]).to(self.device), torch.tensor([2277, 29937]).to(self.device)] # '###' can be encoded in two different ways. self.stopping_criteria = StoppingCriteriaList([StoppingCriteriaSub(stops=stop_words_ids)]) def get_context_emb(self, input_text, msg, img_list): prompt_1 = "You are able to understand the visual content that the user provides.Follow the instructions carefully and explain your answers in detail.###Human: <Video><ImageHere></Video>" prompt_2 = input_text prompt_3 = "###Assistant:" prompt = prompt_1 + " " + prompt_2 + prompt_3 prompt_segs = prompt.split('<ImageHere>') assert len(prompt_segs) == len(img_list) + 1, "Unmatched numbers of image placeholders and images." seg_tokens = [ self.model.llama_tokenizer( seg, return_tensors="pt", add_special_tokens=i == 0).to(self.device).input_ids # only add bos to the first seg for i, seg in enumerate(prompt_segs) ] seg_embs = [self.model.llama_model.model.embed_tokens(seg_t) for seg_t in seg_tokens] mixed_embs = [emb for pair in zip(seg_embs[:-1], img_list) for emb in pair] + [seg_embs[-1]] mixed_embs = torch.cat(mixed_embs, dim=1) return mixed_embs def answer(self, img_list, input_text, msg, max_new_tokens=300, num_beams=1, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1, temperature=1.0, max_length=2000): embs = self.get_context_emb(input_text, msg, img_list) current_max_len = embs.shape[1] + max_new_tokens if current_max_len - max_length > 0: print('Warning: The number of tokens in current conversation exceeds the max length. ' 'The model will not see the contexts outside the range.') begin_idx = max(0, current_max_len - max_length) embs = embs[:, begin_idx:] outputs = self.model.llama_model.generate( inputs_embeds=embs, max_new_tokens=max_new_tokens, stopping_criteria=self.stopping_criteria, num_beams=num_beams, do_sample=True, min_length=min_length, top_p=top_p, repetition_penalty=repetition_penalty, length_penalty=length_penalty, temperature=temperature, ) output_token = outputs[0] if output_token[0] == 0: # the model might output a unknow token <unk> at the beginning. remove it output_token = output_token[1:] if output_token[0] == 1: # some users find that there is a start token <s> at the beginning. 
remove it output_token = output_token[1:] output_text = self.model.llama_tokenizer.decode(output_token, add_special_tokens=False) output_text = output_text.split('###')[0] # remove the stop sign '###' output_text = output_text.split('Assistant:')[-1].strip() return output_text, output_token.cpu().numpy() def cal_frame(self, video_length, cur_min, cur_sec, middle_video): per_frag_second = video_length / N_SAMPLES if middle_video: cur_seconds = cur_min * 60 + cur_sec num_frames = int(cur_seconds / per_frag_second) per_frame_second = per_frag_second/SHORT_MEMORY_Length cur_frame = int((cur_seconds-per_frag_second*num_frames)/per_frame_second) return num_frames, cur_frame else: cur_frame = 0 num_frames = int(video_length / per_frag_second) return num_frames, cur_frame def upload_video_without_audio(self, video_path, fragment_video_path, cur_min, cur_sec, cur_image, img_list, middle_video): msg = "" if isinstance(video_path, str): # is a video path ext = os.path.splitext(video_path)[-1].lower() print(video_path) video_length = video_duration(video_path) num_frames, cur_frame = self.cal_frame(video_length, cur_min, cur_sec, middle_video) if num_frames == 0: video_fragment = parse_video_fragment(video_path=video_path, video_length=video_length, n_stage=0, n_samples= N_SAMPLES) video_fragment, msg = load_video( video_path=fragment_video_path, n_frms=MAX_INT, height=224, width=224, sampling ="uniform", return_msg = True ) video_fragment = self.vis_processor.transform(video_fragment) video_fragment = video_fragment.unsqueeze(0).to(self.device) self.model.encode_short_memory_frame(video_fragment, cur_frame) else: for i in range(num_frames): print(i) video_fragment = parse_video_fragment(video_path=video_path, video_length=video_length, n_stage=i, n_samples= N_SAMPLES) video_fragment, msg = load_video( video_path=fragment_video_path, n_frms=MAX_INT, height=224, width=224, sampling ="uniform", return_msg = True ) video_fragment = self.vis_processor.transform(video_fragment) video_fragment = video_fragment.unsqueeze(0).to(self.device) if middle_video: self.model.encode_short_memory_frame(video_fragment, cur_frame) else: self.model.encode_short_memory_frame(video_fragment) else: raise NotImplementedError video_emb, _ = self.model.encode_long_video(cur_image, middle_video) img_list.append(video_emb) return msg if __name__ =='__main__': # 初始化模型 config_seed = 42 setup_seeds(config_seed) print('Initializing Chat') args = parse_args() cfg = Config(args) model_config = cfg.model_cfg model_config.device_8bit = args.gpu_id model_cls = registry.get_model_class(model_config.arch) model = model_cls.from_config(model_config).to('cuda:{}'.format(args.gpu_id)) vis_processor_cfg = cfg.datasets_cfg.webvid.vis_processor.train vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg) chat = Chat(model, vis_processor, device='cuda:{}'.format(args.gpu_id)) print('Initialization Finished') # 读取视频和GT # Load both ground truth file containing questions and answers with open(args.gt_file) as file: gt_qa = json.load(file) with open(args.gt_file_answers) as file2: gt_answers = json.load(file2) output_list = [] if not os.path.exists(args.output_dir): os.makedirs(args.output_dir) index = 0 for sample in tqdm(gt_qa): chat.model.long_memory_buffer = [] chat.model.short_memory_buffer = [] video_id = sample['video_name'] question = sample['question'] answer = gt_answers[index]['answer'] index = index + 1 # answer = sample['answer'] id = sample['question_id'] sample_set = {'id': id, 'question': 
question, 'answer': answer} video_path = os.path.join(args.video_path, f"{video_id}.mp4") fragment_video_path = args.fragment_video_path cur_min = 0 cur_sec = 0 middle_video = False # Load the video cap = cv2.VideoCapture(video_path) fps_video = cap.get(cv2.CAP_PROP_FPS) cur_fps = fps_video * (60*cur_min + cur_sec) cap = cv2.VideoCapture(video_path) cap.set(cv2.CAP_PROP_POS_FRAMES, cur_fps) ret, frame = cap.read() temp_frame_path = 'src/output_frame/snapshot.jpg' cv2.imwrite(temp_frame_path, frame) raw_image = Image.open(temp_frame_path).convert('RGB') image = chat.image_vis_processor(raw_image).unsqueeze(0).unsqueeze(2).to(chat.device) # [1,3,1,224,224] cur_image = chat.model.encode_image(image) img_list = [] msg = chat.upload_video_without_audio( video_path=video_path, fragment_video_path=fragment_video_path, cur_min=cur_min, cur_sec=cur_sec, cur_image = cur_image, img_list=img_list, middle_video = middle_video, ) text_input = question num_beams = args.num_beams temperature = args.temperature try: llm_message = chat.answer(img_list=img_list, input_text=text_input, msg = msg, num_beams=num_beams, temperature=temperature, max_new_tokens=300, max_length=2000)[0] sample_set['pred'] = llm_message print(llm_message) output_list.append(sample_set) except Exception as e: print(f"Error processing video file '{video_id}': {e}") # Save the output list to a JSON file with open(os.path.join(args.output_dir, f"{args.output_name}.json"), 'w') as file: json.dump(output_list, file)
MovieChat-main
eval_code/run_inference_qa_activitynet.py
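The frame selection inside load_video above decides which frames of each clip fragment get decoded. Below is a minimal, self-contained sketch of the same two strategies ("uniform" and "headtail") using only NumPy and the standard library; sample_frame_indices is a hypothetical helper name, not part of MovieChat.

import random

import numpy as np


def sample_frame_indices(vlen, n_frms, sampling="uniform", seed=None):
    """Pick n_frms frame indices out of vlen frames, mirroring load_video above."""
    rng = random.Random(seed)
    n_frms = min(n_frms, vlen)
    if sampling == "uniform":
        # Evenly spaced indices over [0, vlen); the step may be fractional, so cast afterwards.
        indices = np.arange(0, vlen, vlen / n_frms).astype(int).tolist()
    elif sampling == "headtail":
        # Half of the frame budget from the first half of the video, half from the second half.
        head = sorted(rng.sample(range(vlen // 2), n_frms // 2))
        tail = sorted(rng.sample(range(vlen // 2, vlen), n_frms // 2))
        indices = head + tail
    else:
        raise NotImplementedError(sampling)
    return indices


if __name__ == "__main__":
    print(sample_frame_indices(300, 8, "uniform"))            # [0, 37, 75, 112, 150, 187, 225, 262]
    print(sample_frame_indices(300, 8, "headtail", seed=0))   # random but sorted head + tail indices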
import argparse import os import random import numpy as np import torch import torch.backends.cudnn as cudnn from MovieChat.common.config import Config from MovieChat.common.dist_utils import get_rank from MovieChat.common.registry import registry from MovieChat.conversation.conversation_video import Chat, Conversation, default_conversation,SeparatorStyle import decord import cv2 import time import subprocess from moviepy.editor import VideoFileClip from decord import VideoReader import gradio as gr import pandas as pd import plotly.express as px from helpers import * decord.bridge.set_bridge('torch') from MovieChat.datasets.builders import * from MovieChat.models import * from MovieChat.processors import * from MovieChat.runners import * from MovieChat.tasks import * from moviepy.editor import* from inference import * import random as rnd from transformers import StoppingCriteria, StoppingCriteriaList from PIL import Image import GPUtil MAX_INT = 8 N_SAMPLES = 32 SHORT_MEMORY_Length = 10 def parse_args(): parser = argparse.ArgumentParser(description="Demo") parser.add_argument("--cfg-path", required=True, help="path to configuration file.") parser.add_argument("--gpu-id", type=int, default=0, help="specify the gpu to load the model.") parser.add_argument("--num-beams", type=int, default=1) parser.add_argument("--temperature", type=float, default=1.0) parser.add_argument( "--options", nargs="+", help="override some settings in the used config, the key-value pair " "in xxx=yyy format will be merged into config file (deprecate), " "change to --cfg-options instead.", ) args = parser.parse_args() return args def setup_seeds(config_seed): seed = config_seed + get_rank() random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) cudnn.benchmark = False cudnn.deterministic = True class StoppingCriteriaSub(StoppingCriteria): def __init__(self, stops=[], encounters=1): super().__init__() self.stops = stops def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor): for stop in self.stops: if torch.all((stop == input_ids[0][-len(stop):])).item(): return True return False def show_video(video): print(video) # ======================================== # Gradio Setting # ======================================== LIBRARIES = ["Breakpoint mode", "Global mode"] title = """ <h1 align="center"><a href="https://rese1f.github.io/MovieChat"></a> </h1> <h1 align="center">MovieChat: From Dense Token to Sparse Memory in Long Video Understanding</h1> <h5 align="center"> Introduction:MovieChat, a novel framework that integrating vision models and LLMs, is the first to support long video understanding . </h5> Thank you for using the MovieChat Demo Page! If you have any questions or feedback, feel free to contact us. If you find MovieChat interesting, please give us a star on GitHub. Current online demo uses the 7B version of MovieChat due to resource limitations. Please note that after clicking the chat button, you will need to view the result in the terminal window. """ case_note_upload = (""" ### We provide some examples at the bottom of the page. Simply click on them to try them out directly. 
""") #TODO show examples below with gr.Blocks() as demo: gr.Markdown(title) with gr.Column(scale=0.5): video = gr.Video() gr.Markdown(case_note_upload) with gr.Column(): upload_button = gr.Button(value="Upload", interactive=True, variant="primary") chat_state = gr.State() img_list = gr.State() text_input = gr.Textbox(label='User', placeholder='Upload your image/video first, or directly click the examples at the bottom of the page.', interactive=True) gr.Markdown("## Select inference mode") libraries = gr.CheckboxGroup(choices=LIBRARIES, label="") with gr.Column(scale=0.5): with gr.Row(): minute = gr.Slider( minimum=0, maximum=20, value=1, step=1, interactive=True, label="minutes of breakpoint)", ) second = gr.Slider( minimum=0, maximum=60, value=1, step=1, interactive=True, label="seconds of breakpoint)", ) with gr.Row(): num_beams = gr.Slider( minimum=1, maximum=10, value=1, step=1, interactive=True, label="beam search numbers)", ) temperature = gr.Slider( minimum=0.1, maximum=2.0, value=1.0, step=0.1, interactive=True, label="Temperature", ) with gr.Column(): upload_text = gr.Button("Chat now") with gr.Column(): gr.Examples(examples=[ [f"src/examples/Cooking_cake.mp4", "What is going on in the kitchen? "], [f"src/examples/goblin.mp4", "Can you describe the movie?"], ], inputs=[video, text_input]) upload_button.click(show_video, [video]) config_seed = 42 setup_seeds(config_seed) print('Initializing Chat') args = parse_args() cfg = Config(args) model_config = cfg.model_cfg model_config.device_8bit = args.gpu_id model_cls = registry.get_model_class(model_config.arch) model = model_cls.from_config(model_config).to('cuda:{}'.format(args.gpu_id)) vis_processor_cfg = cfg.datasets_cfg.webvid.vis_processor.train vis_processor = registry.get_processor_class(vis_processor_cfg.name).from_config(vis_processor_cfg) chat = Chat(model, vis_processor, device='cuda:{}'.format(args.gpu_id)) print('Initialization Finished') upload_text.click(chat.gener_infer,[video, text_input, num_beams, temperature, libraries, minute, second]) demo.launch(share=False, enable_queue=True)
MovieChat-main
Gradio_demo/app_gradio.py
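The Gradio demo above follows the usual Blocks pattern: declare components, then bind a callback with .click(). The stripped-down sketch below shows that wiring on its own, independent of MovieChat; the describe function is a placeholder standing in for chat.gener_infer, and it returns its answer to a Textbox instead of printing to the terminal as the demo does.

import gradio as gr


def describe(video_path, question, num_beams, temperature):
    # Placeholder for chat.gener_infer(...): just echo the inputs back.
    return f"video={video_path}, q={question!r}, beams={num_beams}, temp={temperature}"


with gr.Blocks() as demo:
    video = gr.Video()
    question = gr.Textbox(label="User")
    num_beams = gr.Slider(1, 10, value=1, step=1, label="beam search numbers")
    temperature = gr.Slider(0.1, 2.0, value=1.0, step=0.1, label="Temperature")
    answer = gr.Textbox(label="Assistant")
    run = gr.Button("Chat now")
    # The callback's return value is routed to the output component.
    run.click(describe, inputs=[video, question, num_beams, temperature], outputs=[answer])

if __name__ == "__main__":
    demo.launch(share=False)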
MovieChat-main
src/examples/__init__.py
""" Author: Joon Sung Park ([email protected]) File: compress_sim_storage.py Description: Compresses a simulation for replay demos. """ import shutil import json from global_methods import * def compress(sim_code): sim_storage = f"../environment/frontend_server/storage/{sim_code}" compressed_storage = f"../environment/frontend_server/compressed_storage/{sim_code}" persona_folder = sim_storage + "/personas" move_folder = sim_storage + "/movement" meta_file = sim_storage + "/reverie/meta.json" persona_names = [] for i in find_filenames(persona_folder, ""): x = i.split("/")[-1].strip() if x[0] != ".": persona_names += [x] max_move_count = max([int(i.split("/")[-1].split(".")[0]) for i in find_filenames(move_folder, "json")]) persona_last_move = dict() master_move = dict() for i in range(max_move_count+1): master_move[i] = dict() with open(f"{move_folder}/{str(i)}.json") as json_file: i_move_dict = json.load(json_file)["persona"] for p in persona_names: move = False if i == 0: move = True elif (i_move_dict[p]["movement"] != persona_last_move[p]["movement"] or i_move_dict[p]["pronunciatio"] != persona_last_move[p]["pronunciatio"] or i_move_dict[p]["description"] != persona_last_move[p]["description"] or i_move_dict[p]["chat"] != persona_last_move[p]["chat"]): move = True if move: persona_last_move[p] = {"movement": i_move_dict[p]["movement"], "pronunciatio": i_move_dict[p]["pronunciatio"], "description": i_move_dict[p]["description"], "chat": i_move_dict[p]["chat"]} master_move[i][p] = {"movement": i_move_dict[p]["movement"], "pronunciatio": i_move_dict[p]["pronunciatio"], "description": i_move_dict[p]["description"], "chat": i_move_dict[p]["chat"]} create_folder_if_not_there(compressed_storage) with open(f"{compressed_storage}/master_movement.json", "w") as outfile: outfile.write(json.dumps(master_move, indent=2)) shutil.copyfile(meta_file, f"{compressed_storage}/meta.json") shutil.copytree(persona_folder, f"{compressed_storage}/personas/") if __name__ == '__main__': compress("July1_the_ville_isabella_maria_klaus-step-3-9")
generative_agents-main
reverie/compress_sim_storage.py
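compress in compress_sim_storage.py keeps a movement entry only at steps where a persona's recorded state differs from the last stored one, which is plain delta encoding over the per-step JSON files. Below is a self-contained sketch of that idea over in-memory dictionaries (no file I/O; compress_movement and the field names are illustrative, not part of the repo).

def compress_movement(steps):
    """steps: list of {persona_name: state_dict} for step 0..N. Returns {step: changed states}."""
    last_seen = {}
    compressed = {}
    for i, step_state in enumerate(steps):
        compressed[i] = {}
        for persona, state in step_state.items():
            # Step 0 is always stored; later steps only when something changed.
            if i == 0 or state != last_seen.get(persona):
                last_seen[persona] = state
                compressed[i][persona] = state
    return compressed


if __name__ == "__main__":
    steps = [
        {"Isabella": {"movement": [1, 1], "chat": None}},
        {"Isabella": {"movement": [1, 1], "chat": None}},   # unchanged -> dropped
        {"Isabella": {"movement": [2, 1], "chat": None}},   # moved -> kept
    ]
    print(compress_movement(steps))
    # {0: {'Isabella': {...}}, 1: {}, 2: {'Isabella': {...}}}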
""" Author: Joon Sung Park ([email protected]) File: global_methods.py Description: Contains functions used throughout my projects. """ import random import string import csv import time import datetime as dt import pathlib import os import sys import numpy import math import shutil, errno from os import listdir def create_folder_if_not_there(curr_path): """ Checks if a folder in the curr_path exists. If it does not exist, creates the folder. Note that if the curr_path designates a file location, it will operate on the folder that contains the file. But the function also works even if the path designates to just a folder. Args: curr_list: list to write. The list comes in the following form: [['key1', 'val1-1', 'val1-2'...], ['key2', 'val2-1', 'val2-2'...],] outfile: name of the csv file to write RETURNS: True: if a new folder is created False: if a new folder is not created """ outfolder_name = curr_path.split("/") if len(outfolder_name) != 1: # This checks if the curr path is a file or a folder. if "." in outfolder_name[-1]: outfolder_name = outfolder_name[:-1] outfolder_name = "/".join(outfolder_name) if not os.path.exists(outfolder_name): os.makedirs(outfolder_name) return True return False def write_list_of_list_to_csv(curr_list_of_list, outfile): """ Writes a list of list to csv. Unlike write_list_to_csv_line, it writes the entire csv in one shot. ARGS: curr_list_of_list: list to write. The list comes in the following form: [['key1', 'val1-1', 'val1-2'...], ['key2', 'val2-1', 'val2-2'...],] outfile: name of the csv file to write RETURNS: None """ create_folder_if_not_there(outfile) with open(outfile, "w") as f: writer = csv.writer(f) writer.writerows(curr_list_of_list) def write_list_to_csv_line(line_list, outfile): """ Writes one line to a csv file. Unlike write_list_of_list_to_csv, this opens an existing outfile and then appends a line to that file. This also works if the file does not exist already. ARGS: curr_list: list to write. The list comes in the following form: ['key1', 'val1-1', 'val1-2'...] Importantly, this is NOT a list of list. outfile: name of the csv file to write RETURNS: None """ create_folder_if_not_there(outfile) # Opening the file first so we can write incrementally as we progress curr_file = open(outfile, 'a',) csvfile_1 = csv.writer(curr_file) csvfile_1.writerow(line_list) curr_file.close() def read_file_to_list(curr_file, header=False, strip_trail=True): """ Reads in a csv file to a list of list. If header is True, it returns a tuple with (header row, all rows) ARGS: curr_file: path to the current csv file. RETURNS: List of list where the component lists are the rows of the file. """ if not header: analysis_list = [] with open(curr_file) as f_analysis_file: data_reader = csv.reader(f_analysis_file, delimiter=",") for count, row in enumerate(data_reader): if strip_trail: row = [i.strip() for i in row] analysis_list += [row] return analysis_list else: analysis_list = [] with open(curr_file) as f_analysis_file: data_reader = csv.reader(f_analysis_file, delimiter=",") for count, row in enumerate(data_reader): if strip_trail: row = [i.strip() for i in row] analysis_list += [row] return analysis_list[0], analysis_list[1:] def read_file_to_set(curr_file, col=0): """ Reads in a "single column" of a csv file to a set. ARGS: curr_file: path to the current csv file. RETURNS: Set with all items in a single column of a csv file. 
""" analysis_set = set() with open(curr_file) as f_analysis_file: data_reader = csv.reader(f_analysis_file, delimiter=",") for count, row in enumerate(data_reader): analysis_set.add(row[col]) return analysis_set def get_row_len(curr_file): """ Get the number of rows in a csv file ARGS: curr_file: path to the current csv file. RETURNS: The number of rows False if the file does not exist """ try: analysis_set = set() with open(curr_file) as f_analysis_file: data_reader = csv.reader(f_analysis_file, delimiter=",") for count, row in enumerate(data_reader): analysis_set.add(row[0]) return len(analysis_set) except: return False def check_if_file_exists(curr_file): """ Checks if a file exists ARGS: curr_file: path to the current csv file. RETURNS: True if the file exists False if the file does not exist """ try: with open(curr_file) as f_analysis_file: pass return True except: return False def find_filenames(path_to_dir, suffix=".csv"): """ Given a directory, find all files that ends with the provided suffix and returns their paths. ARGS: path_to_dir: Path to the current directory suffix: The target suffix. RETURNS: A list of paths to all files in the directory. """ filenames = listdir(path_to_dir) return [ path_to_dir+"/"+filename for filename in filenames if filename.endswith( suffix ) ] def average(list_of_val): """ Finds the average of the numbers in a list. ARGS: list_of_val: a list of numeric values RETURNS: The average of the values """ return sum(list_of_val)/float(len(list_of_val)) def std(list_of_val): """ Finds the std of the numbers in a list. ARGS: list_of_val: a list of numeric values RETURNS: The std of the values """ std = numpy.std(list_of_val) return std def copyanything(src, dst): """ Copy over everything in the src folder to dst folder. ARGS: src: address of the source folder dst: address of the destination folder RETURNS: None """ try: shutil.copytree(src, dst) except OSError as exc: # python >2.5 if exc.errno in (errno.ENOTDIR, errno.EINVAL): shutil.copy(src, dst) else: raise if __name__ == '__main__': pass
generative_agents-main
reverie/global_methods.py
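The helpers in global_methods.py are thin wrappers over csv and os. A typical round-trip looks like the sketch below; it assumes the snippet runs from the reverie/ directory so the module is importable, and it writes to a temporary path rather than the simulation storage.

import os
import tempfile

from global_methods import (check_if_file_exists, read_file_to_list,
                            write_list_of_list_to_csv, write_list_to_csv_line)

out = os.path.join(tempfile.mkdtemp(), "demo", "rows.csv")

# write_list_of_list_to_csv creates the missing "demo" folder before writing.
write_list_of_list_to_csv([["key1", "val1-1"], ["key2", "val2-1"]], out)
write_list_to_csv_line(["key3", "val3-1"], out)   # append one more row

assert check_if_file_exists(out)
header, rows = read_file_to_list(out, header=True)
print(header)   # ['key1', 'val1-1']
print(rows)     # [['key2', 'val2-1'], ['key3', 'val3-1']]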
""" Author: Joon Sung Park ([email protected]) File: path_finder.py Description: Implements various path finding functions for generative agents. Some of the functions are defunct. """ import numpy as np def print_maze(maze): for row in maze: for item in row: print(item, end='') print() def path_finder_v1(maze, start, end, collision_block_char, verbose=False): def prepare_maze(maze, start, end): maze[start[0]][start[1]] = "S" maze[end[0]][end[1]] = "E" return maze def find_start(maze): for row in range(len(maze)): for col in range(len(maze[0])): if maze[row][col] == 'S': return row, col def is_valid_position(maze, pos_r, pos_c): if pos_r < 0 or pos_c < 0: return False if pos_r >= len(maze) or pos_c >= len(maze[0]): return False if maze[pos_r][pos_c] in ' E': return True return False def solve_maze(maze, start, verbose=False): path = [] # We use a Python list as a stack - then we have push operations as # append, and pop as pop. stack = [] # Add the entry point (as a tuple) stack.append(start) # Go through the stack as long as there are elements while len(stack) > 0: pos_r, pos_c = stack.pop() if verbose: print("Current position", pos_r, pos_c) if maze[pos_r][pos_c] == 'E': path += [(pos_r, pos_c)] return path if maze[pos_r][pos_c] == 'X': # Already visited continue # Mark position as visited maze[pos_r][pos_c] = 'X' path += [(pos_r, pos_c)] # Check for all possible positions and add if possible if is_valid_position(maze, pos_r - 1, pos_c): stack.append((pos_r - 1, pos_c)) if is_valid_position(maze, pos_r + 1, pos_c): stack.append((pos_r + 1, pos_c)) if is_valid_position(maze, pos_r, pos_c - 1): stack.append((pos_r, pos_c - 1)) if is_valid_position(maze, pos_r, pos_c + 1): stack.append((pos_r, pos_c + 1)) # To follow the maze if verbose: print('Stack:' , stack) print_maze(maze) # We didn't find a path, hence we do not need to return the path return False # clean maze new_maze = [] for row in maze: new_row = [] for j in row: if j == collision_block_char: new_row += ["#"] else: new_row += [" "] new_maze += [new_row] maze = new_maze maze = prepare_maze(maze, start, end) start = find_start(maze) path = solve_maze(maze, start, verbose) return path def path_finder_v2(a, start, end, collision_block_char, verbose=False): def make_step(m, k): for i in range(len(m)): for j in range(len(m[i])): if m[i][j] == k: if i>0 and m[i-1][j] == 0 and a[i-1][j] == 0: m[i-1][j] = k + 1 if j>0 and m[i][j-1] == 0 and a[i][j-1] == 0: m[i][j-1] = k + 1 if i<len(m)-1 and m[i+1][j] == 0 and a[i+1][j] == 0: m[i+1][j] = k + 1 if j<len(m[i])-1 and m[i][j+1] == 0 and a[i][j+1] == 0: m[i][j+1] = k + 1 new_maze = [] for row in a: new_row = [] for j in row: if j == collision_block_char: new_row += [1] else: new_row += [0] new_maze += [new_row] a = new_maze m = [] for i in range(len(a)): m.append([]) for j in range(len(a[i])): m[-1].append(0) i,j = start m[i][j] = 1 k = 0 except_handle = 150 while m[end[0]][end[1]] == 0: k += 1 make_step(m, k) if except_handle == 0: break except_handle -= 1 i, j = end k = m[i][j] the_path = [(i,j)] while k > 1: if i > 0 and m[i - 1][j] == k-1: i, j = i-1, j the_path.append((i, j)) k-=1 elif j > 0 and m[i][j - 1] == k-1: i, j = i, j-1 the_path.append((i, j)) k-=1 elif i < len(m) - 1 and m[i + 1][j] == k-1: i, j = i+1, j the_path.append((i, j)) k-=1 elif j < len(m[i]) - 1 and m[i][j + 1] == k-1: i, j = i, j+1 the_path.append((i, j)) k -= 1 the_path.reverse() return the_path def path_finder(maze, start, end, collision_block_char, verbose=False): # EMERGENCY PATCH start = (start[1], start[0]) end 
= (end[1], end[0]) # END EMERGENCY PATCH path = path_finder_v2(maze, start, end, collision_block_char, verbose) new_path = [] for i in path: new_path += [(i[1], i[0])] path = new_path return path def closest_coordinate(curr_coordinate, target_coordinates): min_dist = None closest_coordinate = None for coordinate in target_coordinates: a = np.array(coordinate) b = np.array(curr_coordinate) dist = abs(np.linalg.norm(a-b)) if not closest_coordinate: min_dist = dist closest_coordinate = coordinate else: if min_dist > dist: min_dist = dist closest_coordinate = coordinate return closest_coordinate def path_finder_2(maze, start, end, collision_block_char, verbose=False): # start => persona_a # end => persona_b start = list(start) end = list(end) t_top = (end[0], end[1]+1) t_bottom = (end[0], end[1]-1) t_left = (end[0]-1, end[1]) t_right = (end[0]+1, end[1]) pot_target_coordinates = [t_top, t_bottom, t_left, t_right] maze_width = len(maze[0]) maze_height = len(maze) target_coordinates = [] for coordinate in pot_target_coordinates: if coordinate[0] >= 0 and coordinate[0] < maze_width and coordinate[1] >= 0 and coordinate[1] < maze_height: target_coordinates += [coordinate] target_coordinate = closest_coordinate(start, target_coordinates) path = path_finder(maze, start, target_coordinate, collision_block_char, verbose=False) return path def path_finder_3(maze, start, end, collision_block_char, verbose=False): # start => persona_a # end => persona_b curr_path = path_finder(maze, start, end, collision_block_char, verbose=False) if len(curr_path) <= 2: return [] else: a_path = curr_path[:int(len(curr_path)/2)] b_path = curr_path[int(len(curr_path)/2)-1:] b_path.reverse() print (a_path) print (b_path) return a_path, b_path if __name__ == '__main__': maze = [['#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#'], [' ', ' ', '#', ' ', ' ', ' ', ' ', ' ', '#', ' ', ' ', ' ', '#'], ['#', ' ', '#', ' ', ' ', '#', '#', ' ', ' ', ' ', '#', ' ', '#'], ['#', ' ', '#', ' ', ' ', '#', '#', ' ', '#', ' ', '#', ' ', '#'], ['#', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '#', ' ', ' ', ' ', '#'], ['#', '#', '#', ' ', '#', ' ', '#', '#', '#', ' ', '#', ' ', '#'], ['#', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', '#', ' ', ' '], ['#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#', '#']] start = (0, 1) end = (0, 1) print (path_finder(maze, start, end, "#")) print ("-===") start = (0, 1) end = (11, 4) print (path_finder_2(maze, start, end, "#")) print ("-===") start = (0, 1) end = (12, 6) print (path_finder_3(maze, start, end, "#")) print ("-===") path_finder_3(maze, start, end, "#")[0] path_finder_3(maze, start, end, "#")[1]
generative_agents-main
reverie/backend_server/path_finder.py
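path_finder_v2 above is a breadth-first flood fill: it numbers reachable tiles by distance from the start and then walks back from the goal along decreasing numbers. The sketch below computes the same kind of shortest 4-connected path with an explicit queue and predecessor map; it is self-contained, keeps the '#' collision convention, and indexes tiles as (row, col) rather than the swapped x/y handled inside path_finder.

from collections import deque


def bfs_path(maze, start, end, collision_block_char="#"):
    """Shortest 4-connected path from start to end, as a list of (row, col) tuples."""
    rows, cols = len(maze), len(maze[0])
    prev = {start: None}
    queue = deque([start])
    while queue:
        cur = queue.popleft()
        if cur == end:
            # Walk the predecessor chain back to the start, then reverse it.
            path = []
            while cur is not None:
                path.append(cur)
                cur = prev[cur]
            return path[::-1]
        r, c = cur
        for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
            if (0 <= nr < rows and 0 <= nc < cols
                    and maze[nr][nc] != collision_block_char
                    and (nr, nc) not in prev):
                prev[(nr, nc)] = cur
                queue.append((nr, nc))
    return []  # no route between start and end


if __name__ == "__main__":
    maze = [list("#####"),
            list("#   #"),
            list("# # #"),
            list("#   #"),
            list("#####")]
    print(bfs_path(maze, (1, 1), (3, 3)))
    # one shortest route: [(1, 1), (2, 1), (3, 1), (3, 2), (3, 3)]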
""" Author: Joon Sung Park ([email protected]) File: global_methods.py Description: Contains functions used throughout my projects. """ import random import string import csv import time import datetime as dt import pathlib import os import sys import numpy import math import shutil, errno from os import listdir def create_folder_if_not_there(curr_path): """ Checks if a folder in the curr_path exists. If it does not exist, creates the folder. Note that if the curr_path designates a file location, it will operate on the folder that contains the file. But the function also works even if the path designates to just a folder. Args: curr_list: list to write. The list comes in the following form: [['key1', 'val1-1', 'val1-2'...], ['key2', 'val2-1', 'val2-2'...],] outfile: name of the csv file to write RETURNS: True: if a new folder is created False: if a new folder is not created """ outfolder_name = curr_path.split("/") if len(outfolder_name) != 1: # This checks if the curr path is a file or a folder. if "." in outfolder_name[-1]: outfolder_name = outfolder_name[:-1] outfolder_name = "/".join(outfolder_name) if not os.path.exists(outfolder_name): os.makedirs(outfolder_name) return True return False def write_list_of_list_to_csv(curr_list_of_list, outfile): """ Writes a list of list to csv. Unlike write_list_to_csv_line, it writes the entire csv in one shot. ARGS: curr_list_of_list: list to write. The list comes in the following form: [['key1', 'val1-1', 'val1-2'...], ['key2', 'val2-1', 'val2-2'...],] outfile: name of the csv file to write RETURNS: None """ create_folder_if_not_there(outfile) with open(outfile, "w") as f: writer = csv.writer(f) writer.writerows(curr_list_of_list) def write_list_to_csv_line(line_list, outfile): """ Writes one line to a csv file. Unlike write_list_of_list_to_csv, this opens an existing outfile and then appends a line to that file. This also works if the file does not exist already. ARGS: curr_list: list to write. The list comes in the following form: ['key1', 'val1-1', 'val1-2'...] Importantly, this is NOT a list of list. outfile: name of the csv file to write RETURNS: None """ create_folder_if_not_there(outfile) # Opening the file first so we can write incrementally as we progress curr_file = open(outfile, 'a',) csvfile_1 = csv.writer(curr_file) csvfile_1.writerow(line_list) curr_file.close() def read_file_to_list(curr_file, header=False, strip_trail=True): """ Reads in a csv file to a list of list. If header is True, it returns a tuple with (header row, all rows) ARGS: curr_file: path to the current csv file. RETURNS: List of list where the component lists are the rows of the file. """ if not header: analysis_list = [] with open(curr_file) as f_analysis_file: data_reader = csv.reader(f_analysis_file, delimiter=",") for count, row in enumerate(data_reader): if strip_trail: row = [i.strip() for i in row] analysis_list += [row] return analysis_list else: analysis_list = [] with open(curr_file) as f_analysis_file: data_reader = csv.reader(f_analysis_file, delimiter=",") for count, row in enumerate(data_reader): if strip_trail: row = [i.strip() for i in row] analysis_list += [row] return analysis_list[0], analysis_list[1:] def read_file_to_set(curr_file, col=0): """ Reads in a "single column" of a csv file to a set. ARGS: curr_file: path to the current csv file. RETURNS: Set with all items in a single column of a csv file. 
""" analysis_set = set() with open(curr_file) as f_analysis_file: data_reader = csv.reader(f_analysis_file, delimiter=",") for count, row in enumerate(data_reader): analysis_set.add(row[col]) return analysis_set def get_row_len(curr_file): """ Get the number of rows in a csv file ARGS: curr_file: path to the current csv file. RETURNS: The number of rows False if the file does not exist """ try: analysis_set = set() with open(curr_file) as f_analysis_file: data_reader = csv.reader(f_analysis_file, delimiter=",") for count, row in enumerate(data_reader): analysis_set.add(row[0]) return len(analysis_set) except: return False def check_if_file_exists(curr_file): """ Checks if a file exists ARGS: curr_file: path to the current csv file. RETURNS: True if the file exists False if the file does not exist """ try: with open(curr_file) as f_analysis_file: pass return True except: return False def find_filenames(path_to_dir, suffix=".csv"): """ Given a directory, find all files that ends with the provided suffix and returns their paths. ARGS: path_to_dir: Path to the current directory suffix: The target suffix. RETURNS: A list of paths to all files in the directory. """ filenames = listdir(path_to_dir) return [ path_to_dir+"/"+filename for filename in filenames if filename.endswith( suffix ) ] def average(list_of_val): """ Finds the average of the numbers in a list. ARGS: list_of_val: a list of numeric values RETURNS: The average of the values """ return sum(list_of_val)/float(len(list_of_val)) def std(list_of_val): """ Finds the std of the numbers in a list. ARGS: list_of_val: a list of numeric values RETURNS: The std of the values """ std = numpy.std(list_of_val) return std def copyanything(src, dst): """ Copy over everything in the src folder to dst folder. ARGS: src: address of the source folder dst: address of the destination folder RETURNS: None """ try: shutil.copytree(src, dst) except OSError as exc: # python >2.5 if exc.errno in (errno.ENOTDIR, errno.EINVAL): shutil.copy(src, dst) else: raise if __name__ == '__main__': pass
generative_agents-main
reverie/backend_server/global_methods.py
""" Author: Joon Sung Park ([email protected]) File: gpt_structure.py Description: Wrapper functions for calling OpenAI APIs. """ import json import random import openai import time from utils import * openai.api_key = openai_api_key def ChatGPT_request(prompt): """ Given a prompt and a dictionary of GPT parameters, make a request to OpenAI server and returns the response. ARGS: prompt: a str prompt gpt_parameter: a python dictionary with the keys indicating the names of the parameter and the values indicating the parameter values. RETURNS: a str of GPT-3's response. """ # temp_sleep() try: completion = openai.ChatCompletion.create( model="gpt-3.5-turbo", messages=[{"role": "user", "content": prompt}] ) return completion["choices"][0]["message"]["content"] except: print ("ChatGPT ERROR") return "ChatGPT ERROR" prompt = """ --- Character 1: Maria Lopez is working on her physics degree and streaming games on Twitch to make some extra money. She visits Hobbs Cafe for studying and eating just about everyday. Character 2: Klaus Mueller is writing a research paper on the effects of gentrification in low-income communities. Past Context: 138 minutes ago, Maria Lopez and Klaus Mueller were already conversing about conversing about Maria's research paper mentioned by Klaus This context takes place after that conversation. Current Context: Maria Lopez was attending her Physics class (preparing for the next lecture) when Maria Lopez saw Klaus Mueller in the middle of working on his research paper at the library (writing the introduction). Maria Lopez is thinking of initating a conversation with Klaus Mueller. Current Location: library in Oak Hill College (This is what is in Maria Lopez's head: Maria Lopez should remember to follow up with Klaus Mueller about his thoughts on her research paper. Beyond this, Maria Lopez doesn't necessarily know anything more about Klaus Mueller) (This is what is in Klaus Mueller's head: Klaus Mueller should remember to ask Maria Lopez about her research paper, as she found it interesting that he mentioned it. Beyond this, Klaus Mueller doesn't necessarily know anything more about Maria Lopez) Here is their conversation. Maria Lopez: " --- Output the response to the prompt above in json. The output should be a list of list where the inner lists are in the form of ["<Name>", "<Utterance>"]. Output multiple utterances in ther conversation until the conversation comes to a natural conclusion. Example output json: {"output": "[["Jane Doe", "Hi!"], ["John Doe", "Hello there!"] ... ]"} """ print (ChatGPT_request(prompt))
generative_agents-main
reverie/backend_server/test.py
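ChatGPT_request above catches every exception and returns a sentinel string, so transient failures such as rate limits look the same as permanent ones. A common refinement is retrying with exponential backoff before giving up; the sketch below assumes the same pre-1.0 openai SDK interface that test.py targets, and chatgpt_request_with_retry is a hypothetical name rather than repo code.

import time

import openai


def chatgpt_request_with_retry(prompt, model="gpt-3.5-turbo",
                               max_retries=3, base_delay=2.0):
    """Call the legacy ChatCompletion endpoint, retrying failed attempts with backoff."""
    for attempt in range(max_retries):
        try:
            completion = openai.ChatCompletion.create(
                model=model,
                messages=[{"role": "user", "content": prompt}])
            return completion["choices"][0]["message"]["content"]
        except Exception:
            if attempt == max_retries - 1:
                raise
            # Wait 2s, 4s, 8s, ... before the next attempt.
            time.sleep(base_delay * (2 ** attempt))


# Example (requires openai.api_key to be set, as in test.py):
# answer = chatgpt_request_with_retry("Say hello in one word.")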
""" Author: Joon Sung Park ([email protected]) File: maze.py Description: Defines the Maze class, which represents the map of the simulated world in a 2-dimensional matrix. """ import json import numpy import datetime import pickle import time import math from global_methods import * from utils import * class Maze: def __init__(self, maze_name): # READING IN THE BASIC META INFORMATION ABOUT THE MAP self.maze_name = maze_name # Reading in the meta information about the world. If you want tp see the # example variables, check out the maze_meta_info.json file. meta_info = json.load(open(f"{env_matrix}/maze_meta_info.json")) # <maze_width> and <maze_height> denote the number of tiles make up the # height and width of the map. self.maze_width = int(meta_info["maze_width"]) self.maze_height = int(meta_info["maze_height"]) # <sq_tile_size> denotes the pixel height/width of a tile. self.sq_tile_size = int(meta_info["sq_tile_size"]) # <special_constraint> is a string description of any relevant special # constraints the world might have. # e.g., "planning to stay at home all day and never go out of her home" self.special_constraint = meta_info["special_constraint"] # READING IN SPECIAL BLOCKS # Special blocks are those that are colored in the Tiled map. # Here is an example row for the arena block file: # e.g., "25335, Double Studio, Studio, Common Room" # And here is another example row for the game object block file: # e.g, "25331, Double Studio, Studio, Bedroom 2, Painting" # Notice that the first element here is the color marker digit from the # Tiled export. Then we basically have the block path: # World, Sector, Arena, Game Object -- again, these paths need to be # unique within an instance of Reverie. blocks_folder = f"{env_matrix}/special_blocks" _wb = blocks_folder + "/world_blocks.csv" wb_rows = read_file_to_list(_wb, header=False) wb = wb_rows[0][-1] _sb = blocks_folder + "/sector_blocks.csv" sb_rows = read_file_to_list(_sb, header=False) sb_dict = dict() for i in sb_rows: sb_dict[i[0]] = i[-1] _ab = blocks_folder + "/arena_blocks.csv" ab_rows = read_file_to_list(_ab, header=False) ab_dict = dict() for i in ab_rows: ab_dict[i[0]] = i[-1] _gob = blocks_folder + "/game_object_blocks.csv" gob_rows = read_file_to_list(_gob, header=False) gob_dict = dict() for i in gob_rows: gob_dict[i[0]] = i[-1] _slb = blocks_folder + "/spawning_location_blocks.csv" slb_rows = read_file_to_list(_slb, header=False) slb_dict = dict() for i in slb_rows: slb_dict[i[0]] = i[-1] # [SECTION 3] Reading in the matrices # This is your typical two dimensional matrices. It's made up of 0s and # the number that represents the color block from the blocks folder. maze_folder = f"{env_matrix}/maze" _cm = maze_folder + "/collision_maze.csv" collision_maze_raw = read_file_to_list(_cm, header=False)[0] _sm = maze_folder + "/sector_maze.csv" sector_maze_raw = read_file_to_list(_sm, header=False)[0] _am = maze_folder + "/arena_maze.csv" arena_maze_raw = read_file_to_list(_am, header=False)[0] _gom = maze_folder + "/game_object_maze.csv" game_object_maze_raw = read_file_to_list(_gom, header=False)[0] _slm = maze_folder + "/spawning_location_maze.csv" spawning_location_maze_raw = read_file_to_list(_slm, header=False)[0] # Loading the maze. The mazes are taken directly from the json exports of # Tiled maps. They should be in csv format. # Importantly, they are "not" in a 2-d matrix format -- they are single # row matrices with the length of width x height of the maze. So we need # to convert here. 
# We can do this all at once since the dimension of all these matrices are # identical (e.g., 70 x 40). # example format: [['0', '0', ... '25309', '0',...], ['0',...]...] # 25309 is the collision bar number right now. self.collision_maze = [] sector_maze = [] arena_maze = [] game_object_maze = [] spawning_location_maze = [] for i in range(0, len(collision_maze_raw), meta_info["maze_width"]): tw = meta_info["maze_width"] self.collision_maze += [collision_maze_raw[i:i+tw]] sector_maze += [sector_maze_raw[i:i+tw]] arena_maze += [arena_maze_raw[i:i+tw]] game_object_maze += [game_object_maze_raw[i:i+tw]] spawning_location_maze += [spawning_location_maze_raw[i:i+tw]] # Once we are done loading in the maze, we now set up self.tiles. This is # a matrix accessed by row:col where each access point is a dictionary # that contains all the things that are taking place in that tile. # More specifically, it contains information about its "world," "sector," # "arena," "game_object," "spawning_location," as well as whether it is a # collision block, and a set of all events taking place in it. # e.g., self.tiles[32][59] = {'world': 'double studio', # 'sector': '', 'arena': '', 'game_object': '', # 'spawning_location': '', 'collision': False, 'events': set()} # e.g., self.tiles[9][58] = {'world': 'double studio', # 'sector': 'double studio', 'arena': 'bedroom 2', # 'game_object': 'bed', 'spawning_location': 'bedroom-2-a', # 'collision': False, # 'events': {('double studio:double studio:bedroom 2:bed', # None, None)}} self.tiles = [] for i in range(self.maze_height): row = [] for j in range(self.maze_width): tile_details = dict() tile_details["world"] = wb tile_details["sector"] = "" if sector_maze[i][j] in sb_dict: tile_details["sector"] = sb_dict[sector_maze[i][j]] tile_details["arena"] = "" if arena_maze[i][j] in ab_dict: tile_details["arena"] = ab_dict[arena_maze[i][j]] tile_details["game_object"] = "" if game_object_maze[i][j] in gob_dict: tile_details["game_object"] = gob_dict[game_object_maze[i][j]] tile_details["spawning_location"] = "" if spawning_location_maze[i][j] in slb_dict: tile_details["spawning_location"] = slb_dict[spawning_location_maze[i][j]] tile_details["collision"] = False if self.collision_maze[i][j] != "0": tile_details["collision"] = True tile_details["events"] = set() row += [tile_details] self.tiles += [row] # Each game object occupies an event in the tile. We are setting up the # default event value here. for i in range(self.maze_height): for j in range(self.maze_width): if self.tiles[i][j]["game_object"]: object_name = ":".join([self.tiles[i][j]["world"], self.tiles[i][j]["sector"], self.tiles[i][j]["arena"], self.tiles[i][j]["game_object"]]) go_event = (object_name, None, None, None) self.tiles[i][j]["events"].add(go_event) # Reverse tile access. # <self.address_tiles> -- given a string address, we return a set of all # tile coordinates belonging to that address (this is opposite of # self.tiles that give you the string address given a coordinate). This is # an optimization component for finding paths for the personas' movement. 
# self.address_tiles['<spawn_loc>bedroom-2-a'] == {(58, 9)} # self.address_tiles['double studio:recreation:pool table'] # == {(29, 14), (31, 11), (30, 14), (32, 11), ...}, self.address_tiles = dict() for i in range(self.maze_height): for j in range(self.maze_width): addresses = [] if self.tiles[i][j]["sector"]: add = f'{self.tiles[i][j]["world"]}:' add += f'{self.tiles[i][j]["sector"]}' addresses += [add] if self.tiles[i][j]["arena"]: add = f'{self.tiles[i][j]["world"]}:' add += f'{self.tiles[i][j]["sector"]}:' add += f'{self.tiles[i][j]["arena"]}' addresses += [add] if self.tiles[i][j]["game_object"]: add = f'{self.tiles[i][j]["world"]}:' add += f'{self.tiles[i][j]["sector"]}:' add += f'{self.tiles[i][j]["arena"]}:' add += f'{self.tiles[i][j]["game_object"]}' addresses += [add] if self.tiles[i][j]["spawning_location"]: add = f'<spawn_loc>{self.tiles[i][j]["spawning_location"]}' addresses += [add] for add in addresses: if add in self.address_tiles: self.address_tiles[add].add((j, i)) else: self.address_tiles[add] = set([(j, i)]) def turn_coordinate_to_tile(self, px_coordinate): """ Turns a pixel coordinate to a tile coordinate. INPUT px_coordinate: The pixel coordinate of our interest. Comes in the x, y format. OUTPUT tile coordinate (x, y): The tile coordinate that corresponds to the pixel coordinate. EXAMPLE OUTPUT Given (1600, 384), outputs (50, 12) """ x = math.ceil(px_coordinate[0]/self.sq_tile_size) y = math.ceil(px_coordinate[1]/self.sq_tile_size) return (x, y) def access_tile(self, tile): """ Returns the tiles details dictionary that is stored in self.tiles of the designated x, y location. INPUT tile: The tile coordinate of our interest in (x, y) form. OUTPUT The tile detail dictionary for the designated tile. EXAMPLE OUTPUT Given (58, 9), self.tiles[9][58] = {'world': 'double studio', 'sector': 'double studio', 'arena': 'bedroom 2', 'game_object': 'bed', 'spawning_location': 'bedroom-2-a', 'collision': False, 'events': {('double studio:double studio:bedroom 2:bed', None, None)}} """ x = tile[0] y = tile[1] return self.tiles[y][x] def get_tile_path(self, tile, level): """ Get the tile string address given its coordinate. You designate the level by giving it a string level description. INPUT: tile: The tile coordinate of our interest in (x, y) form. level: world, sector, arena, or game object OUTPUT The string address for the tile. EXAMPLE OUTPUT Given tile=(58, 9), and level=arena, "double studio:double studio:bedroom 2" """ x = tile[0] y = tile[1] tile = self.tiles[y][x] path = f"{tile['world']}" if level == "world": return path else: path += f":{tile['sector']}" if level == "sector": return path else: path += f":{tile['arena']}" if level == "arena": return path else: path += f":{tile['game_object']}" return path def get_nearby_tiles(self, tile, vision_r): """ Given the current tile and vision_r, return a list of tiles that are within the radius. Note that this implementation looks at a square boundary when determining what is within the radius. i.e., for vision_r, returns x's. x x x x x x x x x x x x P x x x x x x x x x x x x INPUT: tile: The tile coordinate of our interest in (x, y) form. vision_r: The radius of the persona's vision. OUTPUT: nearby_tiles: a list of tiles that are within the radius. 
""" left_end = 0 if tile[0] - vision_r > left_end: left_end = tile[0] - vision_r right_end = self.maze_width - 1 if tile[0] + vision_r + 1 < right_end: right_end = tile[0] + vision_r + 1 bottom_end = self.maze_height - 1 if tile[1] + vision_r + 1 < bottom_end: bottom_end = tile[1] + vision_r + 1 top_end = 0 if tile[1] - vision_r > top_end: top_end = tile[1] - vision_r nearby_tiles = [] for i in range(left_end, right_end): for j in range(top_end, bottom_end): nearby_tiles += [(i, j)] return nearby_tiles def add_event_from_tile(self, curr_event, tile): """ Add an event triple to a tile. INPUT: curr_event: Current event triple. e.g., ('double studio:double studio:bedroom 2:bed', None, None) tile: The tile coordinate of our interest in (x, y) form. OUPUT: None """ self.tiles[tile[1]][tile[0]]["events"].add(curr_event) def remove_event_from_tile(self, curr_event, tile): """ Remove an event triple from a tile. INPUT: curr_event: Current event triple. e.g., ('double studio:double studio:bedroom 2:bed', None, None) tile: The tile coordinate of our interest in (x, y) form. OUPUT: None """ curr_tile_ev_cp = self.tiles[tile[1]][tile[0]]["events"].copy() for event in curr_tile_ev_cp: if event == curr_event: self.tiles[tile[1]][tile[0]]["events"].remove(event) def turn_event_from_tile_idle(self, curr_event, tile): curr_tile_ev_cp = self.tiles[tile[1]][tile[0]]["events"].copy() for event in curr_tile_ev_cp: if event == curr_event: self.tiles[tile[1]][tile[0]]["events"].remove(event) new_event = (event[0], None, None, None) self.tiles[tile[1]][tile[0]]["events"].add(new_event) def remove_subject_events_from_tile(self, subject, tile): """ Remove an event triple that has the input subject from a tile. INPUT: subject: "Isabella Rodriguez" tile: The tile coordinate of our interest in (x, y) form. OUPUT: None """ curr_tile_ev_cp = self.tiles[tile[1]][tile[0]]["events"].copy() for event in curr_tile_ev_cp: if event[0] == subject: self.tiles[tile[1]][tile[0]]["events"].remove(event)
generative_agents-main
reverie/backend_server/maze.py
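Much of Maze.__init__ above is bookkeeping: turning Tiled's single-row CSV exports back into width-by-height matrices and building the reverse index from string addresses to tile coordinates. Both steps are sketched below in isolation with made-up labels and dimensions; unflatten and build_address_index are illustrative names, not methods of Maze.

def unflatten(flat, width):
    """Turn a single-row Tiled export into a list of rows, as Maze.__init__ does."""
    return [flat[i:i + width] for i in range(0, len(flat), width)]


def build_address_index(tiles):
    """Map each non-empty tile label to the set of (x, y) coordinates carrying it."""
    index = {}
    for y, row in enumerate(tiles):
        for x, label in enumerate(row):
            if label:
                index.setdefault(label, set()).add((x, y))
    return index


if __name__ == "__main__":
    flat = ["0", "0", "cafe", "cafe", "0", "library"]   # a 3 x 2 map exported as one row
    tiles = unflatten(flat, width=3)
    print(tiles)   # [['0', '0', 'cafe'], ['cafe', '0', 'library']]
    labels = [[c if c != "0" else "" for c in row] for row in tiles]
    print(build_address_index(labels))
    # {'cafe': {(2, 0), (0, 1)}, 'library': {(2, 1)}}  (set order may vary)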
""" Author: Joon Sung Park ([email protected]) File: reverie.py Description: This is the main program for running generative agent simulations that defines the ReverieServer class. This class maintains and records all states related to the simulation. The primary mode of interaction for those running the simulation should be through the open_server function, which enables the simulator to input command-line prompts for running and saving the simulation, among other tasks. Release note (June 14, 2023) -- Reverie implements the core simulation mechanism described in my paper entitled "Generative Agents: Interactive Simulacra of Human Behavior." If you are reading through these lines after having read the paper, you might notice that I use older terms to describe generative agents and their cognitive modules here. Most notably, I use the term "personas" to refer to generative agents, "associative memory" to refer to the memory stream, and "reverie" to refer to the overarching simulation framework. """ import json import numpy import datetime import pickle import time import math import os import shutil from selenium import webdriver from global_methods import * from utils import * from maze import * from persona.persona import * ############################################################################## # REVERIE # ############################################################################## class ReverieServer: def __init__(self, fork_sim_code, sim_code): # FORKING FROM A PRIOR SIMULATION: # <fork_sim_code> indicates the simulation we are forking from. # Interestingly, all simulations must be forked from some initial # simulation, where the first simulation is "hand-crafted". self.fork_sim_code = fork_sim_code fork_folder = f"{fs_storage}/{self.fork_sim_code}" # <sim_code> indicates our current simulation. The first step here is to # copy everything that's in <fork_sim_code>, but edit its # reverie/meta/json's fork variable. self.sim_code = sim_code sim_folder = f"{fs_storage}/{self.sim_code}" copyanything(fork_folder, sim_folder) with open(f"{sim_folder}/reverie/meta.json") as json_file: reverie_meta = json.load(json_file) with open(f"{sim_folder}/reverie/meta.json", "w") as outfile: reverie_meta["fork_sim_code"] = fork_sim_code outfile.write(json.dumps(reverie_meta, indent=2)) # LOADING REVERIE'S GLOBAL VARIABLES # The start datetime of the Reverie: # <start_datetime> is the datetime instance for the start datetime of # the Reverie instance. Once it is set, this is not really meant to # change. It takes a string date in the following example form: # "June 25, 2022" # e.g., ...strptime(June 25, 2022, "%B %d, %Y") self.start_time = datetime.datetime.strptime( f"{reverie_meta['start_date']}, 00:00:00", "%B %d, %Y, %H:%M:%S") # <curr_time> is the datetime instance that indicates the game's current # time. This gets incremented by <sec_per_step> amount everytime the world # progresses (that is, everytime curr_env_file is recieved). self.curr_time = datetime.datetime.strptime(reverie_meta['curr_time'], "%B %d, %Y, %H:%M:%S") # <sec_per_step> denotes the number of seconds in game time that each # step moves foward. self.sec_per_step = reverie_meta['sec_per_step'] # <maze> is the main Maze instance. Note that we pass in the maze_name # (e.g., "double_studio") to instantiate Maze. # e.g., Maze("double_studio") self.maze = Maze(reverie_meta['maze_name']) # <step> denotes the number of steps that our game has taken. 
A step here # literally translates to the number of moves our personas made in terms # of the number of tiles. self.step = reverie_meta['step'] # SETTING UP PERSONAS IN REVERIE # <personas> is a dictionary that takes the persona's full name as its # keys, and the actual persona instance as its values. # This dictionary is meant to keep track of all personas who are part of # the Reverie instance. # e.g., ["Isabella Rodriguez"] = Persona("Isabella Rodriguezs") self.personas = dict() # <personas_tile> is a dictionary that contains the tile location of # the personas (!-> NOT px tile, but the actual tile coordinate). # The tile take the form of a set, (row, col). # e.g., ["Isabella Rodriguez"] = (58, 39) self.personas_tile = dict() # # <persona_convo_match> is a dictionary that describes which of the two # # personas are talking to each other. It takes a key of a persona's full # # name, and value of another persona's full name who is talking to the # # original persona. # # e.g., dict["Isabella Rodriguez"] = ["Maria Lopez"] # self.persona_convo_match = dict() # # <persona_convo> contains the actual content of the conversations. It # # takes as keys, a pair of persona names, and val of a string convo. # # Note that the key pairs are *ordered alphabetically*. # # e.g., dict[("Adam Abraham", "Zane Xu")] = "Adam: baba \n Zane:..." # self.persona_convo = dict() # Loading in all personas. init_env_file = f"{sim_folder}/environment/{str(self.step)}.json" init_env = json.load(open(init_env_file)) for persona_name in reverie_meta['persona_names']: persona_folder = f"{sim_folder}/personas/{persona_name}" p_x = init_env[persona_name]["x"] p_y = init_env[persona_name]["y"] curr_persona = Persona(persona_name, persona_folder) self.personas[persona_name] = curr_persona self.personas_tile[persona_name] = (p_x, p_y) self.maze.tiles[p_y][p_x]["events"].add(curr_persona.scratch .get_curr_event_and_desc()) # REVERIE SETTINGS PARAMETERS: # <server_sleep> denotes the amount of time that our while loop rests each # cycle; this is to not kill our machine. self.server_sleep = 0.1 # SIGNALING THE FRONTEND SERVER: # curr_sim_code.json contains the current simulation code, and # curr_step.json contains the current step of the simulation. These are # used to communicate the code and step information to the frontend. # Note that step file is removed as soon as the frontend opens up the # simulation. curr_sim_code = dict() curr_sim_code["sim_code"] = self.sim_code with open(f"{fs_temp_storage}/curr_sim_code.json", "w") as outfile: outfile.write(json.dumps(curr_sim_code, indent=2)) curr_step = dict() curr_step["step"] = self.step with open(f"{fs_temp_storage}/curr_step.json", "w") as outfile: outfile.write(json.dumps(curr_step, indent=2)) def save(self): """ Save all Reverie progress -- this includes Reverie's global state as well as all the personas. INPUT None OUTPUT None * Saves all relevant data to the designated memory directory """ # <sim_folder> points to the current simulation folder. sim_folder = f"{fs_storage}/{self.sim_code}" # Save Reverie meta information. 
reverie_meta = dict() reverie_meta["fork_sim_code"] = self.fork_sim_code reverie_meta["start_date"] = self.start_time.strftime("%B %d, %Y") reverie_meta["curr_time"] = self.curr_time.strftime("%B %d, %Y, %H:%M:%S") reverie_meta["sec_per_step"] = self.sec_per_step reverie_meta["maze_name"] = self.maze.maze_name reverie_meta["persona_names"] = list(self.personas.keys()) reverie_meta["step"] = self.step reverie_meta_f = f"{sim_folder}/reverie/meta.json" with open(reverie_meta_f, "w") as outfile: outfile.write(json.dumps(reverie_meta, indent=2)) # Save the personas. for persona_name, persona in self.personas.items(): save_folder = f"{sim_folder}/personas/{persona_name}/bootstrap_memory" persona.save(save_folder) def start_path_tester_server(self): """ Starts the path tester server. This is for generating the spatial memory that we need for bootstrapping a persona's state. To use this, you need to open server and enter the path tester mode, and open the front-end side of the browser. INPUT None OUTPUT None * Saves the spatial memory of the test agent to the path_tester_env.json of the temp storage. """ def print_tree(tree): def _print_tree(tree, depth): dash = " >" * depth if type(tree) == type(list()): if tree: print (dash, tree) return for key, val in tree.items(): if key: print (dash, key) _print_tree(val, depth+1) _print_tree(tree, 0) # <curr_vision> is the vision radius of the test agent. Recommend 8 as # our default. curr_vision = 8 # <s_mem> is our test spatial memory. s_mem = dict() # The main while loop for the test agent. while (True): try: curr_dict = {} tester_file = fs_temp_storage + "/path_tester_env.json" if check_if_file_exists(tester_file): with open(tester_file) as json_file: curr_dict = json.load(json_file) os.remove(tester_file) # Current camera location curr_sts = self.maze.sq_tile_size curr_camera = (int(math.ceil(curr_dict["x"]/curr_sts)), int(math.ceil(curr_dict["y"]/curr_sts))+1) curr_tile_det = self.maze.access_tile(curr_camera) # Initiating the s_mem world = curr_tile_det["world"] if curr_tile_det["world"] not in s_mem: s_mem[world] = dict() # Iterating throughn the nearby tiles. nearby_tiles = self.maze.get_nearby_tiles(curr_camera, curr_vision) for i in nearby_tiles: i_det = self.maze.access_tile(i) if (curr_tile_det["sector"] == i_det["sector"] and curr_tile_det["arena"] == i_det["arena"]): if i_det["sector"] != "": if i_det["sector"] not in s_mem[world]: s_mem[world][i_det["sector"]] = dict() if i_det["arena"] != "": if i_det["arena"] not in s_mem[world][i_det["sector"]]: s_mem[world][i_det["sector"]][i_det["arena"]] = list() if i_det["game_object"] != "": if (i_det["game_object"] not in s_mem[world][i_det["sector"]][i_det["arena"]]): s_mem[world][i_det["sector"]][i_det["arena"]] += [ i_det["game_object"]] # Incrementally outputting the s_mem and saving the json file. print ("= " * 15) out_file = fs_temp_storage + "/path_tester_out.json" with open(out_file, "w") as outfile: outfile.write(json.dumps(s_mem, indent=2)) print_tree(s_mem) except: pass time.sleep(self.server_sleep * 10) def start_server(self, int_counter): """ The main backend server of Reverie. This function retrieves the environment file from the frontend to understand the state of the world, calls on each personas to make decisions based on the world state, and saves their moves at certain step intervals. INPUT int_counter: Integer value for the number of steps left for us to take in this iteration. OUTPUT None """ # <sim_folder> points to the current simulation folder. 
sim_folder = f"{fs_storage}/{self.sim_code}" # When a persona arrives at a game object, we give a unique event # to that object. # e.g., ('double studio[...]:bed', 'is', 'unmade', 'unmade') # Later on, before this cycle ends, we need to return that to its # initial state, like this: # e.g., ('double studio[...]:bed', None, None, None) # So we need to keep track of which event we added. # <game_obj_cleanup> is used for that. game_obj_cleanup = dict() # The main while loop of Reverie. while (True): # Done with this iteration if <int_counter> reaches 0. if int_counter == 0: break # <curr_env_file> file is the file that our frontend outputs. When the # frontend has done its job and moved the personas, then it will put a # new environment file that matches our step count. That's when we run # the content of this for loop. Otherwise, we just wait. curr_env_file = f"{sim_folder}/environment/{self.step}.json" if check_if_file_exists(curr_env_file): # If we have an environment file, it means we have a new perception # input to our personas. So we first retrieve it. try: # Try and save block for robustness of the while loop. with open(curr_env_file) as json_file: new_env = json.load(json_file) env_retrieved = True except: pass if env_retrieved: # This is where we go through <game_obj_cleanup> to clean up all # object actions that were used in this cylce. for key, val in game_obj_cleanup.items(): # We turn all object actions to their blank form (with None). self.maze.turn_event_from_tile_idle(key, val) # Then we initialize game_obj_cleanup for this cycle. game_obj_cleanup = dict() # We first move our personas in the backend environment to match # the frontend environment. for persona_name, persona in self.personas.items(): # <curr_tile> is the tile that the persona was at previously. curr_tile = self.personas_tile[persona_name] # <new_tile> is the tile that the persona will move to right now, # during this cycle. new_tile = (new_env[persona_name]["x"], new_env[persona_name]["y"]) # We actually move the persona on the backend tile map here. self.personas_tile[persona_name] = new_tile self.maze.remove_subject_events_from_tile(persona.name, curr_tile) self.maze.add_event_from_tile(persona.scratch .get_curr_event_and_desc(), new_tile) # Now, the persona will travel to get to their destination. *Once* # the persona gets there, we activate the object action. if not persona.scratch.planned_path: # We add that new object action event to the backend tile map. # At its creation, it is stored in the persona's backend. game_obj_cleanup[persona.scratch .get_curr_obj_event_and_desc()] = new_tile self.maze.add_event_from_tile(persona.scratch .get_curr_obj_event_and_desc(), new_tile) # We also need to remove the temporary blank action for the # object that is currently taking the action. blank = (persona.scratch.get_curr_obj_event_and_desc()[0], None, None, None) self.maze.remove_event_from_tile(blank, new_tile) # Then we need to actually have each of the personas perceive and # move. The movement for each of the personas comes in the form of # x y coordinates where the persona will move towards. e.g., (50, 34) # This is where the core brains of the personas are invoked. movements = {"persona": dict(), "meta": dict()} for persona_name, persona in self.personas.items(): # <next_tile> is a x,y coordinate. e.g., (58, 9) # <pronunciatio> is an emoji. e.g., "\ud83d\udca4" # <description> is a string description of the movement. 
e.g., # writing her next novel (editing her novel) # @ double studio:double studio:common room:sofa next_tile, pronunciatio, description = persona.move( self.maze, self.personas, self.personas_tile[persona_name], self.curr_time) movements["persona"][persona_name] = {} movements["persona"][persona_name]["movement"] = next_tile movements["persona"][persona_name]["pronunciatio"] = pronunciatio movements["persona"][persona_name]["description"] = description movements["persona"][persona_name]["chat"] = (persona .scratch.chat) # Include the meta information about the current stage in the # movements dictionary. movements["meta"]["curr_time"] = (self.curr_time .strftime("%B %d, %Y, %H:%M:%S")) # We then write the personas' movements to a file that will be sent # to the frontend server. # Example json output: # {"persona": {"Maria Lopez": {"movement": [58, 9]}}, # "persona": {"Klaus Mueller": {"movement": [38, 12]}}, # "meta": {curr_time: <datetime>}} curr_move_file = f"{sim_folder}/movement/{self.step}.json" with open(curr_move_file, "w") as outfile: outfile.write(json.dumps(movements, indent=2)) # After this cycle, the world takes one step forward, and the # current time moves by <sec_per_step> amount. self.step += 1 self.curr_time += datetime.timedelta(seconds=self.sec_per_step) int_counter -= 1 # Sleep so we don't burn our machines. time.sleep(self.server_sleep) def open_server(self): """ Open up an interactive terminal prompt that lets you run the simulation step by step and probe agent state. INPUT None OUTPUT None """ print ("Note: The agents in this simulation package are computational") print ("constructs powered by generative agents architecture and LLM. We") print ("clarify that these agents lack human-like agency, consciousness,") print ("and independent decision-making.\n---") # <sim_folder> points to the current simulation folder. sim_folder = f"{fs_storage}/{self.sim_code}" while True: sim_command = input("Enter option: ") sim_command = sim_command.strip() ret_str = "" try: if sim_command.lower() in ["f", "fin", "finish", "save and finish"]: # Finishes the simulation environment and saves the progress. # Example: fin self.save() break elif sim_command.lower() == "start path tester mode": # Starts the path tester and removes the currently forked sim files. # Note that once you start this mode, you need to exit out of the # session and restart in case you want to run something else. shutil.rmtree(sim_folder) self.start_path_tester_server() elif sim_command.lower() == "exit": # Finishes the simulation environment but does not save the progress # and erases all saved data from current simulation. # Example: exit shutil.rmtree(sim_folder) break elif sim_command.lower() == "save": # Saves the current simulation progress. # Example: save self.save() elif sim_command[:3].lower() == "run": # Runs the number of steps specified in the prompt. # Example: run 1000 int_count = int(sim_command.split()[-1]) rs.start_server(int_count) elif ("print persona schedule" in sim_command[:22].lower()): # Print the decomposed schedule of the persona specified in the # prompt. # Example: print persona schedule Isabella Rodriguez ret_str += (self.personas[" ".join(sim_command.split()[-2:])] .scratch.get_str_daily_schedule_summary()) elif ("print all persona schedule" in sim_command[:26].lower()): # Print the decomposed schedule of all personas in the world. 
# Example: print all persona schedule for persona_name, persona in self.personas.items(): ret_str += f"{persona_name}\n" ret_str += f"{persona.scratch.get_str_daily_schedule_summary()}\n" ret_str += f"---\n" elif ("print hourly org persona schedule" in sim_command.lower()): # Print the hourly schedule of the persona specified in the prompt. # This one shows the original, non-decomposed version of the # schedule. # Ex: print persona schedule Isabella Rodriguez ret_str += (self.personas[" ".join(sim_command.split()[-2:])] .scratch.get_str_daily_schedule_hourly_org_summary()) elif ("print persona current tile" in sim_command[:26].lower()): # Print the x y tile coordinate of the persona specified in the # prompt. # Ex: print persona current tile Isabella Rodriguez ret_str += str(self.personas[" ".join(sim_command.split()[-2:])] .scratch.curr_tile) elif ("print persona chatting with buffer" in sim_command.lower()): # Print the chatting with buffer of the persona specified in the # prompt. # Ex: print persona chatting with buffer Isabella Rodriguez curr_persona = self.personas[" ".join(sim_command.split()[-2:])] for p_n, count in curr_persona.scratch.chatting_with_buffer.items(): ret_str += f"{p_n}: {count}" elif ("print persona associative memory (event)" in sim_command.lower()): # Print the associative memory (event) of the persona specified in # the prompt # Ex: print persona associative memory (event) Isabella Rodriguez ret_str += f'{self.personas[" ".join(sim_command.split()[-2:])]}\n' ret_str += (self.personas[" ".join(sim_command.split()[-2:])] .a_mem.get_str_seq_events()) elif ("print persona associative memory (thought)" in sim_command.lower()): # Print the associative memory (thought) of the persona specified in # the prompt # Ex: print persona associative memory (thought) Isabella Rodriguez ret_str += f'{self.personas[" ".join(sim_command.split()[-2:])]}\n' ret_str += (self.personas[" ".join(sim_command.split()[-2:])] .a_mem.get_str_seq_thoughts()) elif ("print persona associative memory (chat)" in sim_command.lower()): # Print the associative memory (chat) of the persona specified in # the prompt # Ex: print persona associative memory (chat) Isabella Rodriguez ret_str += f'{self.personas[" ".join(sim_command.split()[-2:])]}\n' ret_str += (self.personas[" ".join(sim_command.split()[-2:])] .a_mem.get_str_seq_chats()) elif ("print persona spatial memory" in sim_command.lower()): # Print the spatial memory of the persona specified in the prompt # Ex: print persona spatial memory Isabella Rodriguez self.personas[" ".join(sim_command.split()[-2:])].s_mem.print_tree() elif ("print current time" in sim_command[:18].lower()): # Print the current time of the world. # Ex: print current time ret_str += f'{self.curr_time.strftime("%B %d, %Y, %H:%M:%S")}\n' ret_str += f'steps: {self.step}' elif ("print tile event" in sim_command[:16].lower()): # Print the tile events in the tile specified in the prompt # Ex: print tile event 50, 30 cooordinate = [int(i.strip()) for i in sim_command[16:].split(",")] for i in self.maze.access_tile(cooordinate)["events"]: ret_str += f"{i}\n" elif ("print tile details" in sim_command.lower()): # Print the tile details of the tile specified in the prompt # Ex: print tile event 50, 30 cooordinate = [int(i.strip()) for i in sim_command[18:].split(",")] for key, val in self.maze.access_tile(cooordinate).items(): ret_str += f"{key}: {val}\n" elif ("call -- analysis" in sim_command.lower()): # Starts a stateless chat session with the agent. 
It does not save # anything to the agent's memory. # Ex: call -- analysis Isabella Rodriguez persona_name = sim_command[len("call -- analysis"):].strip() self.personas[persona_name].open_convo_session("analysis") elif ("call -- load history" in sim_command.lower()): curr_file = maze_assets_loc + "/" + sim_command[len("call -- load history"):].strip() # call -- load history the_ville/agent_history_init_n3.csv rows = read_file_to_list(curr_file, header=True, strip_trail=True)[1] clean_whispers = [] for row in rows: agent_name = row[0].strip() whispers = row[1].split(";") whispers = [whisper.strip() for whisper in whispers] for whisper in whispers: clean_whispers += [[agent_name, whisper]] load_history_via_whisper(self.personas, clean_whispers) print (ret_str) except: print ("Error.") pass if __name__ == '__main__': # rs = ReverieServer("base_the_ville_isabella_maria_klaus", # "July1_the_ville_isabella_maria_klaus-step-3-1") # rs = ReverieServer("July1_the_ville_isabella_maria_klaus-step-3-20", # "July1_the_ville_isabella_maria_klaus-step-3-21") # rs.open_server() origin = input("Enter the name of the forked simulation: ").strip() target = input("Enter the name of the new simulation: ").strip() rs = ReverieServer(origin, target) rs.open_server()
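# ----------------------------------------------------------------------------
# Editor's sketch (not part of the original file): the comments in
# start_server above describe a simple file-based handshake with the frontend
# server -- the frontend writes environment/<step>.json once it has moved the
# personas, and the backend answers with movement/<step>.json. The helper
# below is a minimal, standalone restatement of that protocol. The name
# _handshake_once_sketch, the compute_moves callback, and the poll interval
# are hypothetical stand-ins rather than the project's API, and the function
# is never called anywhere in this file.
def _handshake_once_sketch(sim_folder, step, compute_moves, poll_sec=0.1):
  """Wait for environment/<step>.json, then answer with movement/<step>.json."""
  import json, os, time
  env_file = f"{sim_folder}/environment/{step}.json"
  while not os.path.exists(env_file):
    # The frontend has not finished this step yet, so we just wait.
    time.sleep(poll_sec)
  with open(env_file) as f:
    # e.g., {"Isabella Rodriguez": {"x": 58, "y": 39}, ...}
    new_env = json.load(f)
  # compute_moves stands in for the persona.move(...) loop in start_server and
  # should return the movements dictionary in the documented shape.
  movements = compute_moves(new_env)
  with open(f"{sim_folder}/movement/{step}.json", "w") as f:
    json.dump(movements, f, indent=2)
  return step + 1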
generative_agents-main
reverie/backend_server/reverie.py
""" Author: Joon Sung Park ([email protected]) File: persona.py Description: Defines the Persona class that powers the agents in Reverie. Note (May 1, 2023) -- this is effectively GenerativeAgent class. Persona was the term we used internally back in 2022, taking from our Social Simulacra paper. """ import math import sys import datetime import random sys.path.append('../') from global_methods import * from persona.memory_structures.spatial_memory import * from persona.memory_structures.associative_memory import * from persona.memory_structures.scratch import * from persona.cognitive_modules.perceive import * from persona.cognitive_modules.retrieve import * from persona.cognitive_modules.plan import * from persona.cognitive_modules.reflect import * from persona.cognitive_modules.execute import * from persona.cognitive_modules.converse import * class Persona: def __init__(self, name, folder_mem_saved=False): # PERSONA BASE STATE # <name> is the full name of the persona. This is a unique identifier for # the persona within Reverie. self.name = name # PERSONA MEMORY # If there is already memory in folder_mem_saved, we load that. Otherwise, # we create new memory instances. # <s_mem> is the persona's spatial memory. f_s_mem_saved = f"{folder_mem_saved}/bootstrap_memory/spatial_memory.json" self.s_mem = MemoryTree(f_s_mem_saved) # <s_mem> is the persona's associative memory. f_a_mem_saved = f"{folder_mem_saved}/bootstrap_memory/associative_memory" self.a_mem = AssociativeMemory(f_a_mem_saved) # <scratch> is the persona's scratch (short term memory) space. scratch_saved = f"{folder_mem_saved}/bootstrap_memory/scratch.json" self.scratch = Scratch(scratch_saved) def save(self, save_folder): """ Save persona's current state (i.e., memory). INPUT: save_folder: The folder where we wil be saving our persona's state. OUTPUT: None """ # Spatial memory contains a tree in a json format. # e.g., {"double studio": # {"double studio": # {"bedroom 2": # ["painting", "easel", "closet", "bed"]}}} f_s_mem = f"{save_folder}/spatial_memory.json" self.s_mem.save(f_s_mem) # Associative memory contains a csv with the following rows: # [event.type, event.created, event.expiration, s, p, o] # e.g., event,2022-10-23 00:00:00,,Isabella Rodriguez,is,idle f_a_mem = f"{save_folder}/associative_memory" self.a_mem.save(f_a_mem) # Scratch contains non-permanent data associated with the persona. When # it is saved, it takes a json form. When we load it, we move the values # to Python variables. f_scratch = f"{save_folder}/scratch.json" self.scratch.save(f_scratch) def perceive(self, maze): """ This function takes the current maze, and returns events that are happening around the persona. Importantly, perceive is guided by two key hyper-parameter for the persona: 1) att_bandwidth, and 2) retention. First, <att_bandwidth> determines the number of nearby events that the persona can perceive. Say there are 10 events that are within the vision radius for the persona -- perceiving all 10 might be too much. So, the persona perceives the closest att_bandwidth number of events in case there are too many events. Second, the persona does not want to perceive and think about the same event at each time step. That's where <retention> comes in -- there is temporal order to what the persona remembers. So if the persona's memory contains the current surrounding events that happened within the most recent retention, there is no need to perceive that again. xx INPUT: maze: Current <Maze> instance of the world. 
OUTPUT: a list of <ConceptNode> that are perceived and new. See associative_memory.py -- but to get you a sense of what it receives as its input: "s, p, o, desc, persona.scratch.curr_time" """ return perceive(self, maze) def retrieve(self, perceived): """ This function takes the events that are perceived by the persona as input and returns a set of related events and thoughts that the persona would need to consider as context when planning. INPUT: perceive: a list of <ConceptNode> that are perceived and new. OUTPUT: retrieved: dictionary of dictionary. The first layer specifies an event, while the latter layer specifies the "curr_event", "events", and "thoughts" that are relevant. """ return retrieve(self, perceived) def plan(self, maze, personas, new_day, retrieved): """ Main cognitive function of the chain. It takes the retrieved memory and perception, as well as the maze and the first day state to conduct both the long term and short term planning for the persona. INPUT: maze: Current <Maze> instance of the world. personas: A dictionary that contains all persona names as keys, and the Persona instance as values. new_day: This can take one of the three values. 1) <Boolean> False -- It is not a "new day" cycle (if it is, we would need to call the long term planning sequence for the persona). 2) <String> "First day" -- It is literally the start of a simulation, so not only is it a new day, but also it is the first day. 2) <String> "New day" -- It is a new day. retrieved: dictionary of dictionary. The first layer specifies an event, while the latter layer specifies the "curr_event", "events", and "thoughts" that are relevant. OUTPUT The target action address of the persona (persona.scratch.act_address). """ return plan(self, maze, personas, new_day, retrieved) def execute(self, maze, personas, plan): """ This function takes the agent's current plan and outputs a concrete execution (what object to use, and what tile to travel to). INPUT: maze: Current <Maze> instance of the world. personas: A dictionary that contains all persona names as keys, and the Persona instance as values. plan: The target action address of the persona (persona.scratch.act_address). OUTPUT: execution: A triple set that contains the following components: <next_tile> is a x,y coordinate. e.g., (58, 9) <pronunciatio> is an emoji. <description> is a string description of the movement. e.g., writing her next novel (editing her novel) @ double studio:double studio:common room:sofa """ return execute(self, maze, personas, plan) def reflect(self): """ Reviews the persona's memory and create new thoughts based on it. INPUT: None OUTPUT: None """ reflect(self) def move(self, maze, personas, curr_tile, curr_time): """ This is the main cognitive function where our main sequence is called. INPUT: maze: The Maze class of the current world. personas: A dictionary that contains all persona names as keys, and the Persona instance as values. curr_tile: A tuple that designates the persona's current tile location in (row, col) form. e.g., (58, 39) curr_time: datetime instance that indicates the game's current time. OUTPUT: execution: A triple set that contains the following components: <next_tile> is a x,y coordinate. e.g., (58, 9) <pronunciatio> is an emoji. <description> is a string description of the movement. e.g., writing her next novel (editing her novel) @ double studio:double studio:common room:sofa """ # Updating persona's scratch memory with <curr_tile>. 
self.scratch.curr_tile = curr_tile # We figure out whether the persona started a new day, and if it is a new # day, whether it is the very first day of the simulation. This is # important because we set up the persona's long term plan at the start of # a new day. new_day = False if not self.scratch.curr_time: new_day = "First day" elif (self.scratch.curr_time.strftime('%A %B %d') != curr_time.strftime('%A %B %d')): new_day = "New day" self.scratch.curr_time = curr_time # Main cognitive sequence begins here. perceived = self.perceive(maze) retrieved = self.retrieve(perceived) plan = self.plan(maze, personas, new_day, retrieved) self.reflect() # <execution> is a triple set that contains the following components: # <next_tile> is a x,y coordinate. e.g., (58, 9) # <pronunciatio> is an emoji. e.g., "\ud83d\udca4" # <description> is a string description of the movement. e.g., # writing her next novel (editing her novel) # @ double studio:double studio:common room:sofa return self.execute(maze, personas, plan) def open_convo_session(self, convo_mode): open_convo_session(self, convo_mode)
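# ----------------------------------------------------------------------------
# Editor's sketch (not part of the original file): Persona.move above decides
# whether a step starts a "new day" by comparing the '%A %B %d' renderings of
# the previous and the current timestamps. The timestamps below are made up
# purely to illustrate that comparison; this block only runs if the module is
# executed directly.
if __name__ == "__main__":
  import datetime as _dt
  prev = _dt.datetime(2023, 2, 13, 23, 50)
  same_day = _dt.datetime(2023, 2, 13, 23, 55)
  next_day = _dt.datetime(2023, 2, 14, 0, 5)
  # Same calendar day -> the strings match -> new_day stays False.
  print(prev.strftime('%A %B %d') == same_day.strftime('%A %B %d'))  # True
  # The day rolled over -> the strings differ -> move() sets "New day".
  print(prev.strftime('%A %B %d') == next_day.strftime('%A %B %d'))  # False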
generative_agents-main
reverie/backend_server/persona/persona.py
""" Author: Joon Sung Park ([email protected]) File: plan.py Description: This defines the "Plan" module for generative agents. """ import datetime import math import random import sys import time sys.path.append('../../') from global_methods import * from persona.prompt_template.run_gpt_prompt import * from persona.cognitive_modules.retrieve import * from persona.cognitive_modules.converse import * ############################################################################## # CHAPTER 2: Generate ############################################################################## def generate_wake_up_hour(persona): """ Generates the time when the persona wakes up. This becomes an integral part of our process for generating the persona's daily plan. Persona state: identity stable set, lifestyle, first_name INPUT: persona: The Persona class instance OUTPUT: an integer signifying the persona's wake up hour EXAMPLE OUTPUT: 8 """ if debug: print ("GNS FUNCTION: <generate_wake_up_hour>") return int(run_gpt_prompt_wake_up_hour(persona)[0]) def generate_first_daily_plan(persona, wake_up_hour): """ Generates the daily plan for the persona. Basically the long term planning that spans a day. Returns a list of actions that the persona will take today. Usually comes in the following form: 'wake up and complete the morning routine at 6:00 am', 'eat breakfast at 7:00 am',.. Note that the actions come without a period. Persona state: identity stable set, lifestyle, cur_data_str, first_name INPUT: persona: The Persona class instance wake_up_hour: an integer that indicates when the hour the persona wakes up (e.g., 8) OUTPUT: a list of daily actions in broad strokes. EXAMPLE OUTPUT: ['wake up and complete the morning routine at 6:00 am', 'have breakfast and brush teeth at 6:30 am', 'work on painting project from 8:00 am to 12:00 pm', 'have lunch at 12:00 pm', 'take a break and watch TV from 2:00 pm to 4:00 pm', 'work on painting project from 4:00 pm to 6:00 pm', 'have dinner at 6:00 pm', 'watch TV from 7:00 pm to 8:00 pm'] """ if debug: print ("GNS FUNCTION: <generate_first_daily_plan>") return run_gpt_prompt_daily_plan(persona, wake_up_hour)[0] def generate_hourly_schedule(persona, wake_up_hour): """ Based on the daily req, creates an hourly schedule -- one hour at a time. The form of the action for each of the hour is something like below: "sleeping in her bed" The output is basically meant to finish the phrase, "x is..." Persona state: identity stable set, daily_plan INPUT: persona: The Persona class instance persona: Integer form of the wake up hour for the persona. OUTPUT: a list of activities and their duration in minutes: EXAMPLE OUTPUT: [['sleeping', 360], ['waking up and starting her morning routine', 60], ['eating breakfast', 60],.. 
""" if debug: print ("GNS FUNCTION: <generate_hourly_schedule>") hour_str = ["00:00 AM", "01:00 AM", "02:00 AM", "03:00 AM", "04:00 AM", "05:00 AM", "06:00 AM", "07:00 AM", "08:00 AM", "09:00 AM", "10:00 AM", "11:00 AM", "12:00 PM", "01:00 PM", "02:00 PM", "03:00 PM", "04:00 PM", "05:00 PM", "06:00 PM", "07:00 PM", "08:00 PM", "09:00 PM", "10:00 PM", "11:00 PM"] n_m1_activity = [] diversity_repeat_count = 3 for i in range(diversity_repeat_count): n_m1_activity_set = set(n_m1_activity) if len(n_m1_activity_set) < 5: n_m1_activity = [] for count, curr_hour_str in enumerate(hour_str): if wake_up_hour > 0: n_m1_activity += ["sleeping"] wake_up_hour -= 1 else: n_m1_activity += [run_gpt_prompt_generate_hourly_schedule( persona, curr_hour_str, n_m1_activity, hour_str)[0]] # Step 1. Compressing the hourly schedule to the following format: # The integer indicates the number of hours. They should add up to 24. # [['sleeping', 6], ['waking up and starting her morning routine', 1], # ['eating breakfast', 1], ['getting ready for the day', 1], # ['working on her painting', 2], ['taking a break', 1], # ['having lunch', 1], ['working on her painting', 3], # ['taking a break', 2], ['working on her painting', 2], # ['relaxing and watching TV', 1], ['going to bed', 1], ['sleeping', 2]] _n_m1_hourly_compressed = [] prev = None prev_count = 0 for i in n_m1_activity: if i != prev: prev_count = 1 _n_m1_hourly_compressed += [[i, prev_count]] prev = i else: if _n_m1_hourly_compressed: _n_m1_hourly_compressed[-1][1] += 1 # Step 2. Expand to min scale (from hour scale) # [['sleeping', 360], ['waking up and starting her morning routine', 60], # ['eating breakfast', 60],.. n_m1_hourly_compressed = [] for task, duration in _n_m1_hourly_compressed: n_m1_hourly_compressed += [[task, duration*60]] return n_m1_hourly_compressed def generate_task_decomp(persona, task, duration): """ A few shot decomposition of a task given the task description Persona state: identity stable set, curr_date_str, first_name INPUT: persona: The Persona class instance task: the description of the task at hand in str form (e.g., "waking up and starting her morning routine") duration: an integer that indicates the number of minutes this task is meant to last (e.g., 60) OUTPUT: a list of list where the inner list contains the decomposed task description and the number of minutes the task is supposed to last. EXAMPLE OUTPUT: [['going to the bathroom', 5], ['getting dressed', 5], ['eating breakfast', 15], ['checking her email', 5], ['getting her supplies ready for the day', 15], ['starting to work on her painting', 15]] """ if debug: print ("GNS FUNCTION: <generate_task_decomp>") return run_gpt_prompt_task_decomp(persona, task, duration)[0] def generate_action_sector(act_desp, persona, maze): """TODO Given the persona and the task description, choose the action_sector. Persona state: identity stable set, n-1 day schedule, daily plan INPUT: act_desp: description of the new action (e.g., "sleeping") persona: The Persona class instance OUTPUT: action_arena (e.g., "bedroom 2") EXAMPLE OUTPUT: "bedroom 2" """ if debug: print ("GNS FUNCTION: <generate_action_sector>") return run_gpt_prompt_action_sector(act_desp, persona, maze)[0] def generate_action_arena(act_desp, persona, maze, act_world, act_sector): """TODO Given the persona and the task description, choose the action_arena. 
Persona state: identity stable set, n-1 day schedule, daily plan INPUT: act_desp: description of the new action (e.g., "sleeping") persona: The Persona class instance OUTPUT: action_arena (e.g., "bedroom 2") EXAMPLE OUTPUT: "bedroom 2" """ if debug: print ("GNS FUNCTION: <generate_action_arena>") return run_gpt_prompt_action_arena(act_desp, persona, maze, act_world, act_sector)[0] def generate_action_game_object(act_desp, act_address, persona, maze): """TODO Given the action description and the act address (the address where we expect the action to task place), choose one of the game objects. Persona state: identity stable set, n-1 day schedule, daily plan INPUT: act_desp: the description of the action (e.g., "sleeping") act_address: the arena where the action will take place: (e.g., "dolores double studio:double studio:bedroom 2") persona: The Persona class instance OUTPUT: act_game_object: EXAMPLE OUTPUT: "bed" """ if debug: print ("GNS FUNCTION: <generate_action_game_object>") if not persona.s_mem.get_str_accessible_arena_game_objects(act_address): return "<random>" return run_gpt_prompt_action_game_object(act_desp, persona, maze, act_address)[0] def generate_action_pronunciatio(act_desp, persona): """TODO Given an action description, creates an emoji string description via a few shot prompt. Does not really need any information from persona. INPUT: act_desp: the description of the action (e.g., "sleeping") persona: The Persona class instance OUTPUT: a string of emoji that translates action description. EXAMPLE OUTPUT: "🧈🍞" """ if debug: print ("GNS FUNCTION: <generate_action_pronunciatio>") try: x = run_gpt_prompt_pronunciatio(act_desp, persona)[0] except: x = "🙂" if not x: return "🙂" return x def generate_action_event_triple(act_desp, persona): """TODO INPUT: act_desp: the description of the action (e.g., "sleeping") persona: The Persona class instance OUTPUT: a string of emoji that translates action description. 
EXAMPLE OUTPUT: "🧈🍞" """ if debug: print ("GNS FUNCTION: <generate_action_event_triple>") return run_gpt_prompt_event_triple(act_desp, persona)[0] def generate_act_obj_desc(act_game_object, act_desp, persona): if debug: print ("GNS FUNCTION: <generate_act_obj_desc>") return run_gpt_prompt_act_obj_desc(act_game_object, act_desp, persona)[0] def generate_act_obj_event_triple(act_game_object, act_obj_desc, persona): if debug: print ("GNS FUNCTION: <generate_act_obj_event_triple>") return run_gpt_prompt_act_obj_event_triple(act_game_object, act_obj_desc, persona)[0] def generate_convo(maze, init_persona, target_persona): curr_loc = maze.access_tile(init_persona.scratch.curr_tile) # convo = run_gpt_prompt_create_conversation(init_persona, target_persona, curr_loc)[0] # convo = agent_chat_v1(maze, init_persona, target_persona) convo = agent_chat_v2(maze, init_persona, target_persona) all_utt = "" for row in convo: speaker = row[0] utt = row[1] all_utt += f"{speaker}: {utt}\n" convo_length = math.ceil(int(len(all_utt)/8) / 30) if debug: print ("GNS FUNCTION: <generate_convo>") return convo, convo_length def generate_convo_summary(persona, convo): convo_summary = run_gpt_prompt_summarize_conversation(persona, convo)[0] return convo_summary def generate_decide_to_talk(init_persona, target_persona, retrieved): x =run_gpt_prompt_decide_to_talk(init_persona, target_persona, retrieved)[0] if debug: print ("GNS FUNCTION: <generate_decide_to_talk>") if x == "yes": return True else: return False def generate_decide_to_react(init_persona, target_persona, retrieved): if debug: print ("GNS FUNCTION: <generate_decide_to_react>") return run_gpt_prompt_decide_to_react(init_persona, target_persona, retrieved)[0] def generate_new_decomp_schedule(persona, inserted_act, inserted_act_dur, start_hour, end_hour): # Step 1: Setting up the core variables for the function. # <p> is the persona whose schedule we are editing right now. p = persona # <today_min_pass> indicates the number of minutes that have passed today. today_min_pass = (int(p.scratch.curr_time.hour) * 60 + int(p.scratch.curr_time.minute) + 1) # Step 2: We need to create <main_act_dur> and <truncated_act_dur>. # These are basically a sub-component of <f_daily_schedule> of the persona, # but focusing on the current decomposition. # Here is an example for <main_act_dur>: # ['wakes up and completes her morning routine (wakes up at 6am)', 5] # ['wakes up and completes her morning routine (wakes up at 6am)', 5] # ['wakes up and completes her morning routine (uses the restroom)', 5] # ['wakes up and completes her morning routine (washes her ...)', 10] # ['wakes up and completes her morning routine (makes her bed)', 5] # ['wakes up and completes her morning routine (eats breakfast)', 15] # ['wakes up and completes her morning routine (gets dressed)', 10] # ['wakes up and completes her morning routine (leaves her ...)', 5] # ['wakes up and completes her morning routine (starts her ...)', 5] # ['preparing for her day (waking up at 6am)', 5] # ['preparing for her day (making her bed)', 5] # ['preparing for her day (taking a shower)', 15] # ['preparing for her day (getting dressed)', 5] # ['preparing for her day (eating breakfast)', 10] # ['preparing for her day (brushing her teeth)', 5] # ['preparing for her day (making coffee)', 5] # ['preparing for her day (checking her email)', 5] # ['preparing for her day (starting to work on her painting)', 5] # # And <truncated_act_dur> concerns only until where an event happens. 
# ['wakes up and completes her morning routine (wakes up at 6am)', 5] # ['wakes up and completes her morning routine (wakes up at 6am)', 2] main_act_dur = [] truncated_act_dur = [] dur_sum = 0 # duration sum count = 0 # enumerate count truncated_fin = False print ("DEBUG::: ", persona.scratch.name) for act, dur in p.scratch.f_daily_schedule: if (dur_sum >= start_hour * 60) and (dur_sum < end_hour * 60): main_act_dur += [[act, dur]] if dur_sum <= today_min_pass: truncated_act_dur += [[act, dur]] elif dur_sum > today_min_pass and not truncated_fin: # We need to insert that last act, duration list like this one: # e.g., ['wakes up and completes her morning routine (wakes up...)', 2] truncated_act_dur += [[p.scratch.f_daily_schedule[count][0], dur_sum - today_min_pass]] truncated_act_dur[-1][-1] -= (dur_sum - today_min_pass) ######## DEC 7 DEBUG;.. is the +1 the right thing to do??? # truncated_act_dur[-1][-1] -= (dur_sum - today_min_pass + 1) ######## DEC 7 DEBUG;.. is the +1 the right thing to do??? print ("DEBUG::: ", truncated_act_dur) # truncated_act_dur[-1][-1] -= (dur_sum - today_min_pass) ######## DEC 7 DEBUG;.. is the +1 the right thing to do??? truncated_fin = True dur_sum += dur count += 1 persona_name = persona.name main_act_dur = main_act_dur x = truncated_act_dur[-1][0].split("(")[0].strip() + " (on the way to " + truncated_act_dur[-1][0].split("(")[-1][:-1] + ")" truncated_act_dur[-1][0] = x if "(" in truncated_act_dur[-1][0]: inserted_act = truncated_act_dur[-1][0].split("(")[0].strip() + " (" + inserted_act + ")" # To do inserted_act_dur+1 below is an important decision but I'm not sure # if I understand the full extent of its implications. Might want to # revisit. truncated_act_dur += [[inserted_act, inserted_act_dur]] start_time_hour = (datetime.datetime(2022, 10, 31, 0, 0) + datetime.timedelta(hours=start_hour)) end_time_hour = (datetime.datetime(2022, 10, 31, 0, 0) + datetime.timedelta(hours=end_hour)) if debug: print ("GNS FUNCTION: <generate_new_decomp_schedule>") return run_gpt_prompt_new_decomp_schedule(persona, main_act_dur, truncated_act_dur, start_time_hour, end_time_hour, inserted_act, inserted_act_dur)[0] ############################################################################## # CHAPTER 3: Plan ############################################################################## def revise_identity(persona): p_name = persona.scratch.name focal_points = [f"{p_name}'s plan for {persona.scratch.get_str_curr_date_str()}.", f"Important recent events for {p_name}'s life."] retrieved = new_retrieve(persona, focal_points) statements = "[Statements]\n" for key, val in retrieved.items(): for i in val: statements += f"{i.created.strftime('%A %B %d -- %H:%M %p')}: {i.embedding_key}\n" # print (";adjhfno;asdjao;idfjo;af", p_name) plan_prompt = statements + "\n" plan_prompt += f"Given the statements above, is there anything that {p_name} should remember as she plans for" plan_prompt += f" *{persona.scratch.curr_time.strftime('%A %B %d')}*? " plan_prompt += f"If there is any scheduling information, be as specific as possible (include date, time, and location if stated in the statement)\n\n" plan_prompt += f"Write the response from {p_name}'s perspective." plan_note = ChatGPT_single_request(plan_prompt) # print (plan_note) thought_prompt = statements + "\n" thought_prompt += f"Given the statements above, how might we summarize {p_name}'s feelings about her days up to now?\n\n" thought_prompt += f"Write the response from {p_name}'s perspective." 
  thought_note = ChatGPT_single_request(thought_prompt)
  # print (thought_note)

  currently_prompt = f"{p_name}'s status from {(persona.scratch.curr_time - datetime.timedelta(days=1)).strftime('%A %B %d')}:\n"
  currently_prompt += f"{persona.scratch.currently}\n\n"
  currently_prompt += f"{p_name}'s thoughts at the end of {(persona.scratch.curr_time - datetime.timedelta(days=1)).strftime('%A %B %d')}:\n"
  currently_prompt += (plan_note + thought_note).replace('\n', '') + "\n\n"
  currently_prompt += f"It is now {persona.scratch.curr_time.strftime('%A %B %d')}. Given the above, write {p_name}'s status for {persona.scratch.curr_time.strftime('%A %B %d')} that reflects {p_name}'s thoughts at the end of {(persona.scratch.curr_time - datetime.timedelta(days=1)).strftime('%A %B %d')}. Write this in third-person talking about {p_name}."
  currently_prompt += f"If there is any scheduling information, be as specific as possible (include date, time, and location if stated in the statement).\n\n"
  currently_prompt += "Follow this format below:\nStatus: <new status>"
  # print ("DEBUG", p_name)
  # print (currently_prompt)
  new_currently = ChatGPT_single_request(currently_prompt)
  # print (new_currently)
  # print (new_currently[10:])

  persona.scratch.currently = new_currently

  daily_req_prompt = persona.scratch.get_str_iss() + "\n"
  daily_req_prompt += f"Today is {persona.scratch.curr_time.strftime('%A %B %d')}. Here is {persona.scratch.name}'s plan today in broad-strokes (with the time of the day. e.g., have a lunch at 12:00 pm, watch TV from 7 to 8 pm).\n\n"
  daily_req_prompt += f"Follow this format (the list should have 4~6 items but no more):\n"
  daily_req_prompt += f"1. wake up and complete the morning routine at <time>, 2. ..."

  new_daily_req = ChatGPT_single_request(daily_req_prompt)
  new_daily_req = new_daily_req.replace('\n', ' ')
  print ("WE ARE HERE!!!", new_daily_req)
  persona.scratch.daily_plan_req = new_daily_req


def _long_term_planning(persona, new_day):
  """
  Formulates the persona's daily long-term plan if it is the start of a new
  day. This basically has two components: first, we create the wake-up hour,
  and second, we create the hourly schedule based on it.
  INPUT
    new_day: Indicates whether the current time signals a "First day",
             "New day", or False (for neither). This is important because we
             create the personas' long term planning on the new day.
  """
  # We start by creating the wake up hour for the persona.
  wake_up_hour = generate_wake_up_hour(persona)

  # When it is a new day, we start by creating the daily_req of the persona.
  # Note that the daily_req is a list of strings that describe the persona's
  # day in broad strokes.
  if new_day == "First day":
    # Bootstrapping the daily plan for the start of the generation: if this
    # is the start of the generation (so there is no previous day's daily
    # requirement), or if we are on a new day, we want to create a new set of
    # daily requirements.
    persona.scratch.daily_req = generate_first_daily_plan(persona,
                                                          wake_up_hour)
  elif new_day == "New day":
    revise_identity(persona)

    # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - TODO
    # We need to create a new daily_req here...
    persona.scratch.daily_req = persona.scratch.daily_req

  # Based on the daily_req, we create an hourly schedule for the persona,
  # which is a list of todo items with a time duration (in minutes) that
  # add up to 24 hours.
persona.scratch.f_daily_schedule = generate_hourly_schedule(persona, wake_up_hour) persona.scratch.f_daily_schedule_hourly_org = (persona.scratch .f_daily_schedule[:]) # Added March 4 -- adding plan to the memory. thought = f"This is {persona.scratch.name}'s plan for {persona.scratch.curr_time.strftime('%A %B %d')}:" for i in persona.scratch.daily_req: thought += f" {i}," thought = thought[:-1] + "." created = persona.scratch.curr_time expiration = persona.scratch.curr_time + datetime.timedelta(days=30) s, p, o = (persona.scratch.name, "plan", persona.scratch.curr_time.strftime('%A %B %d')) keywords = set(["plan"]) thought_poignancy = 5 thought_embedding_pair = (thought, get_embedding(thought)) persona.a_mem.add_thought(created, expiration, s, p, o, thought, keywords, thought_poignancy, thought_embedding_pair, None) # print("Sleeping for 20 seconds...") # time.sleep(10) # print("Done sleeping!") def _determine_action(persona, maze): """ Creates the next action sequence for the persona. The main goal of this function is to run "add_new_action" on the persona's scratch space, which sets up all the action related variables for the next action. As a part of this, the persona may need to decompose its hourly schedule as needed. INPUT persona: Current <Persona> instance whose action we are determining. maze: Current <Maze> instance. """ def determine_decomp(act_desp, act_dura): """ Given an action description and its duration, we determine whether we need to decompose it. If the action is about the agent sleeping, we generally do not want to decompose it, so that's what we catch here. INPUT: act_desp: the description of the action (e.g., "sleeping") act_dura: the duration of the action in minutes. OUTPUT: a boolean. True if we need to decompose, False otherwise. """ if "sleep" not in act_desp and "bed" not in act_desp: return True elif "sleeping" in act_desp or "asleep" in act_desp or "in bed" in act_desp: return False elif "sleep" in act_desp or "bed" in act_desp: if act_dura > 60: return False return True # The goal of this function is to get us the action associated with # <curr_index>. As a part of this, we may need to decompose some large # chunk actions. # Importantly, we try to decompose at least two hours worth of schedule at # any given point. curr_index = persona.scratch.get_f_daily_schedule_index() curr_index_60 = persona.scratch.get_f_daily_schedule_index(advance=60) # * Decompose * # During the first hour of the day, we need to decompose two hours # sequence. We do that here. if curr_index == 0: # This portion is invoked if it is the first hour of the day. act_desp, act_dura = persona.scratch.f_daily_schedule[curr_index] if act_dura >= 60: # We decompose if the next action is longer than an hour, and fits the # criteria described in determine_decomp. if determine_decomp(act_desp, act_dura): persona.scratch.f_daily_schedule[curr_index:curr_index+1] = ( generate_task_decomp(persona, act_desp, act_dura)) if curr_index_60 + 1 < len(persona.scratch.f_daily_schedule): act_desp, act_dura = persona.scratch.f_daily_schedule[curr_index_60+1] if act_dura >= 60: if determine_decomp(act_desp, act_dura): persona.scratch.f_daily_schedule[curr_index_60+1:curr_index_60+2] = ( generate_task_decomp(persona, act_desp, act_dura)) if curr_index_60 < len(persona.scratch.f_daily_schedule): # If it is not the first hour of the day, this is always invoked (it is # also invoked during the first hour of the day -- to double up so we can # decompose two hours in one go). 
Of course, we need to have something to # decompose as well, so we check for that too. if persona.scratch.curr_time.hour < 23: # And we don't want to decompose after 11 pm. act_desp, act_dura = persona.scratch.f_daily_schedule[curr_index_60] if act_dura >= 60: if determine_decomp(act_desp, act_dura): persona.scratch.f_daily_schedule[curr_index_60:curr_index_60+1] = ( generate_task_decomp(persona, act_desp, act_dura)) # * End of Decompose * # Generate an <Action> instance from the action description and duration. By # this point, we assume that all the relevant actions are decomposed and # ready in f_daily_schedule. print ("DEBUG LJSDLFSKJF") for i in persona.scratch.f_daily_schedule: print (i) print (curr_index) print (len(persona.scratch.f_daily_schedule)) print (persona.scratch.name) print ("------") # 1440 x_emergency = 0 for i in persona.scratch.f_daily_schedule: x_emergency += i[1] # print ("x_emergency", x_emergency) if 1440 - x_emergency > 0: print ("x_emergency__AAA", x_emergency) persona.scratch.f_daily_schedule += [["sleeping", 1440 - x_emergency]] act_desp, act_dura = persona.scratch.f_daily_schedule[curr_index] # Finding the target location of the action and creating action-related # variables. act_world = maze.access_tile(persona.scratch.curr_tile)["world"] # act_sector = maze.access_tile(persona.scratch.curr_tile)["sector"] act_sector = generate_action_sector(act_desp, persona, maze) act_arena = generate_action_arena(act_desp, persona, maze, act_world, act_sector) act_address = f"{act_world}:{act_sector}:{act_arena}" act_game_object = generate_action_game_object(act_desp, act_address, persona, maze) new_address = f"{act_world}:{act_sector}:{act_arena}:{act_game_object}" act_pron = generate_action_pronunciatio(act_desp, persona) act_event = generate_action_event_triple(act_desp, persona) # Persona's actions also influence the object states. We set those up here. act_obj_desp = generate_act_obj_desc(act_game_object, act_desp, persona) act_obj_pron = generate_action_pronunciatio(act_obj_desp, persona) act_obj_event = generate_act_obj_event_triple(act_game_object, act_obj_desp, persona) # Adding the action to persona's queue. persona.scratch.add_new_action(new_address, int(act_dura), act_desp, act_pron, act_event, None, None, None, None, act_obj_desp, act_obj_pron, act_obj_event) def _choose_retrieved(persona, retrieved): """ Retrieved elements have multiple core "curr_events". We need to choose one event to which we are going to react to. We pick that event here. INPUT persona: Current <Persona> instance whose action we are determining. retrieved: A dictionary of <ConceptNode> that were retrieved from the the persona's associative memory. This dictionary takes the following form: dictionary[event.description] = {["curr_event"] = <ConceptNode>, ["events"] = [<ConceptNode>, ...], ["thoughts"] = [<ConceptNode>, ...] } """ # Once we are done with the reflection, we might want to build a more # complex structure here. # We do not want to take self events... for now copy_retrieved = retrieved.copy() for event_desc, rel_ctx in copy_retrieved.items(): curr_event = rel_ctx["curr_event"] if curr_event.subject == persona.name: del retrieved[event_desc] # Always choose persona first. priority = [] for event_desc, rel_ctx in retrieved.items(): curr_event = rel_ctx["curr_event"] if (":" not in curr_event.subject and curr_event.subject != persona.name): priority += [rel_ctx] if priority: return random.choice(priority) # Skip idle. 
for event_desc, rel_ctx in retrieved.items(): curr_event = rel_ctx["curr_event"] if "is idle" not in event_desc: priority += [rel_ctx] if priority: return random.choice(priority) return None def _should_react(persona, retrieved, personas): """ Determines what form of reaction the persona should exihibit given the retrieved values. INPUT persona: Current <Persona> instance whose action we are determining. retrieved: A dictionary of <ConceptNode> that were retrieved from the the persona's associative memory. This dictionary takes the following form: dictionary[event.description] = {["curr_event"] = <ConceptNode>, ["events"] = [<ConceptNode>, ...], ["thoughts"] = [<ConceptNode>, ...] } personas: A dictionary that contains all persona names as keys, and the <Persona> instance as values. """ def lets_talk(init_persona, target_persona, retrieved): if (not target_persona.scratch.act_address or not target_persona.scratch.act_description or not init_persona.scratch.act_address or not init_persona.scratch.act_description): return False if ("sleeping" in target_persona.scratch.act_description or "sleeping" in init_persona.scratch.act_description): return False if init_persona.scratch.curr_time.hour == 23: return False if "<waiting>" in target_persona.scratch.act_address: return False if (target_persona.scratch.chatting_with or init_persona.scratch.chatting_with): return False if (target_persona.name in init_persona.scratch.chatting_with_buffer): if init_persona.scratch.chatting_with_buffer[target_persona.name] > 0: return False if generate_decide_to_talk(init_persona, target_persona, retrieved): return True return False def lets_react(init_persona, target_persona, retrieved): if (not target_persona.scratch.act_address or not target_persona.scratch.act_description or not init_persona.scratch.act_address or not init_persona.scratch.act_description): return False if ("sleeping" in target_persona.scratch.act_description or "sleeping" in init_persona.scratch.act_description): return False # return False if init_persona.scratch.curr_time.hour == 23: return False if "waiting" in target_persona.scratch.act_description: return False if init_persona.scratch.planned_path == []: return False if (init_persona.scratch.act_address != target_persona.scratch.act_address): return False react_mode = generate_decide_to_react(init_persona, target_persona, retrieved) if react_mode == "1": wait_until = ((target_persona.scratch.act_start_time + datetime.timedelta(minutes=target_persona.scratch.act_duration - 1)) .strftime("%B %d, %Y, %H:%M:%S")) return f"wait: {wait_until}" elif react_mode == "2": return False return "do other things" else: return False #"keep" # If the persona is chatting right now, default to no reaction if persona.scratch.chatting_with: return False if "<waiting>" in persona.scratch.act_address: return False # Recall that retrieved takes the following form: # dictionary {["curr_event"] = <ConceptNode>, # ["events"] = [<ConceptNode>, ...], # ["thoughts"] = [<ConceptNode>, ...]} curr_event = retrieved["curr_event"] if ":" not in curr_event.subject: # this is a persona event. 
if lets_talk(persona, personas[curr_event.subject], retrieved): return f"chat with {curr_event.subject}" react_mode = lets_react(persona, personas[curr_event.subject], retrieved) return react_mode return False def _create_react(persona, inserted_act, inserted_act_dur, act_address, act_event, chatting_with, chat, chatting_with_buffer, chatting_end_time, act_pronunciatio, act_obj_description, act_obj_pronunciatio, act_obj_event, act_start_time=None): p = persona min_sum = 0 for i in range (p.scratch.get_f_daily_schedule_hourly_org_index()): min_sum += p.scratch.f_daily_schedule_hourly_org[i][1] start_hour = int (min_sum/60) if (p.scratch.f_daily_schedule_hourly_org[p.scratch.get_f_daily_schedule_hourly_org_index()][1] >= 120): end_hour = start_hour + p.scratch.f_daily_schedule_hourly_org[p.scratch.get_f_daily_schedule_hourly_org_index()][1]/60 elif (p.scratch.f_daily_schedule_hourly_org[p.scratch.get_f_daily_schedule_hourly_org_index()][1] + p.scratch.f_daily_schedule_hourly_org[p.scratch.get_f_daily_schedule_hourly_org_index()+1][1]): end_hour = start_hour + ((p.scratch.f_daily_schedule_hourly_org[p.scratch.get_f_daily_schedule_hourly_org_index()][1] + p.scratch.f_daily_schedule_hourly_org[p.scratch.get_f_daily_schedule_hourly_org_index()+1][1])/60) else: end_hour = start_hour + 2 end_hour = int(end_hour) dur_sum = 0 count = 0 start_index = None end_index = None for act, dur in p.scratch.f_daily_schedule: if dur_sum >= start_hour * 60 and start_index == None: start_index = count if dur_sum >= end_hour * 60 and end_index == None: end_index = count dur_sum += dur count += 1 ret = generate_new_decomp_schedule(p, inserted_act, inserted_act_dur, start_hour, end_hour) p.scratch.f_daily_schedule[start_index:end_index] = ret p.scratch.add_new_action(act_address, inserted_act_dur, inserted_act, act_pronunciatio, act_event, chatting_with, chat, chatting_with_buffer, chatting_end_time, act_obj_description, act_obj_pronunciatio, act_obj_event, act_start_time) def _chat_react(maze, persona, focused_event, reaction_mode, personas): # There are two personas -- the persona who is initiating the conversation # and the persona who is the target. We get the persona instances here. init_persona = persona target_persona = personas[reaction_mode[9:].strip()] curr_personas = [init_persona, target_persona] # Actually creating the conversation here. 
convo, duration_min = generate_convo(maze, init_persona, target_persona) convo_summary = generate_convo_summary(init_persona, convo) inserted_act = convo_summary inserted_act_dur = duration_min act_start_time = target_persona.scratch.act_start_time curr_time = target_persona.scratch.curr_time if curr_time.second != 0: temp_curr_time = curr_time + datetime.timedelta(seconds=60 - curr_time.second) chatting_end_time = temp_curr_time + datetime.timedelta(minutes=inserted_act_dur) else: chatting_end_time = curr_time + datetime.timedelta(minutes=inserted_act_dur) for role, p in [("init", init_persona), ("target", target_persona)]: if role == "init": act_address = f"<persona> {target_persona.name}" act_event = (p.name, "chat with", target_persona.name) chatting_with = target_persona.name chatting_with_buffer = {} chatting_with_buffer[target_persona.name] = 800 elif role == "target": act_address = f"<persona> {init_persona.name}" act_event = (p.name, "chat with", init_persona.name) chatting_with = init_persona.name chatting_with_buffer = {} chatting_with_buffer[init_persona.name] = 800 act_pronunciatio = "💬" act_obj_description = None act_obj_pronunciatio = None act_obj_event = (None, None, None) _create_react(p, inserted_act, inserted_act_dur, act_address, act_event, chatting_with, convo, chatting_with_buffer, chatting_end_time, act_pronunciatio, act_obj_description, act_obj_pronunciatio, act_obj_event, act_start_time) def _wait_react(persona, reaction_mode): p = persona inserted_act = f'waiting to start {p.scratch.act_description.split("(")[-1][:-1]}' end_time = datetime.datetime.strptime(reaction_mode[6:].strip(), "%B %d, %Y, %H:%M:%S") inserted_act_dur = (end_time.minute + end_time.hour * 60) - (p.scratch.curr_time.minute + p.scratch.curr_time.hour * 60) + 1 act_address = f"<waiting> {p.scratch.curr_tile[0]} {p.scratch.curr_tile[1]}" act_event = (p.name, "waiting to start", p.scratch.act_description.split("(")[-1][:-1]) chatting_with = None chat = None chatting_with_buffer = None chatting_end_time = None act_pronunciatio = "⌛" act_obj_description = None act_obj_pronunciatio = None act_obj_event = (None, None, None) _create_react(p, inserted_act, inserted_act_dur, act_address, act_event, chatting_with, chat, chatting_with_buffer, chatting_end_time, act_pronunciatio, act_obj_description, act_obj_pronunciatio, act_obj_event) def plan(persona, maze, personas, new_day, retrieved): """ Main cognitive function of the chain. It takes the retrieved memory and perception, as well as the maze and the first day state to conduct both the long term and short term planning for the persona. INPUT: maze: Current <Maze> instance of the world. personas: A dictionary that contains all persona names as keys, and the Persona instance as values. new_day: This can take one of the three values. 1) <Boolean> False -- It is not a "new day" cycle (if it is, we would need to call the long term planning sequence for the persona). 2) <String> "First day" -- It is literally the start of a simulation, so not only is it a new day, but also it is the first day. 2) <String> "New day" -- It is a new day. retrieved: dictionary of dictionary. The first layer specifies an event, while the latter layer specifies the "curr_event", "events", and "thoughts" that are relevant. OUTPUT The target action address of the persona (persona.scratch.act_address). """ # PART 1: Generate the hourly schedule. if new_day: _long_term_planning(persona, new_day) # PART 2: If the current action has expired, we want to create a new plan. 
if persona.scratch.act_check_finished(): _determine_action(persona, maze) # PART 3: If you perceived an event that needs to be responded to (saw # another persona), and retrieved relevant information. # Step 1: Retrieved may have multiple events represented in it. The first # job here is to determine which of the events we want to focus # on for the persona. # <focused_event> takes the form of a dictionary like this: # dictionary {["curr_event"] = <ConceptNode>, # ["events"] = [<ConceptNode>, ...], # ["thoughts"] = [<ConceptNode>, ...]} focused_event = False if retrieved.keys(): focused_event = _choose_retrieved(persona, retrieved) # Step 2: Once we choose an event, we need to determine whether the # persona will take any actions for the perceived event. There are # three possible modes of reaction returned by _should_react. # a) "chat with {target_persona.name}" # b) "react" # c) False if focused_event: reaction_mode = _should_react(persona, focused_event, personas) if reaction_mode: # If we do want to chat, then we generate conversation if reaction_mode[:9] == "chat with": _chat_react(maze, persona, focused_event, reaction_mode, personas) elif reaction_mode[:4] == "wait": _wait_react(persona, reaction_mode) # elif reaction_mode == "do other things": # _chat_react(persona, focused_event, reaction_mode, personas) # Step 3: Chat-related state clean up. # If the persona is not chatting with anyone, we clean up any of the # chat-related states here. if persona.scratch.act_event[1] != "chat with": persona.scratch.chatting_with = None persona.scratch.chat = None persona.scratch.chatting_end_time = None # We want to make sure that the persona does not keep conversing with each # other in an infinite loop. So, chatting_with_buffer maintains a form of # buffer that makes the persona wait from talking to the same target # immediately after chatting once. We keep track of the buffer value here. curr_persona_chat_buffer = persona.scratch.chatting_with_buffer for persona_name, buffer_count in curr_persona_chat_buffer.items(): if persona_name != persona.scratch.chatting_with: persona.scratch.chatting_with_buffer[persona_name] -= 1 return persona.scratch.act_address
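# ----------------------------------------------------------------------------
# Editor's sketch (not part of the original file): generate_hourly_schedule
# above collapses consecutive identical hourly activities into
# [activity, n_hours] pairs and then rescales them to minutes. The minimal
# standalone version below restates that compression step with a made-up
# activity list; it only runs if the module is executed directly.
if __name__ == "__main__":
  hourly = ["sleeping"] * 6 + ["eating breakfast"] + ["working on her painting"] * 2
  compressed = []
  for act in hourly:
    if compressed and compressed[-1][0] == act:
      compressed[-1][1] += 1          # same activity as the previous hour
    else:
      compressed.append([act, 1])     # a new run of activity starts here
  minutes = [[act, hours * 60] for act, hours in compressed]
  # [['sleeping', 360], ['eating breakfast', 60], ['working on her painting', 120]]
  print(minutes)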
generative_agents-main
reverie/backend_server/persona/cognitive_modules/plan.py
""" Author: Joon Sung Park ([email protected]) File: execute.py Description: This defines the "Act" module for generative agents. """ import sys import random sys.path.append('../../') from global_methods import * from path_finder import * from utils import * def execute(persona, maze, personas, plan): """ Given a plan (action's string address), we execute the plan (actually outputs the tile coordinate path and the next coordinate for the persona). INPUT: persona: Current <Persona> instance. maze: An instance of current <Maze>. personas: A dictionary of all personas in the world. plan: This is a string address of the action we need to execute. It comes in the form of "{world}:{sector}:{arena}:{game_objects}". It is important that you access this without doing negative indexing (e.g., [-1]) because the latter address elements may not be present in some cases. e.g., "dolores double studio:double studio:bedroom 1:bed" OUTPUT: execution """ if "<random>" in plan and persona.scratch.planned_path == []: persona.scratch.act_path_set = False # <act_path_set> is set to True if the path is set for the current action. # It is False otherwise, and means we need to construct a new path. if not persona.scratch.act_path_set: # <target_tiles> is a list of tile coordinates where the persona may go # to execute the current action. The goal is to pick one of them. target_tiles = None print ('aldhfoaf/????') print (plan) if "<persona>" in plan: # Executing persona-persona interaction. target_p_tile = (personas[plan.split("<persona>")[-1].strip()] .scratch.curr_tile) potential_path = path_finder(maze.collision_maze, persona.scratch.curr_tile, target_p_tile, collision_block_id) if len(potential_path) <= 2: target_tiles = [potential_path[0]] else: potential_1 = path_finder(maze.collision_maze, persona.scratch.curr_tile, potential_path[int(len(potential_path)/2)], collision_block_id) potential_2 = path_finder(maze.collision_maze, persona.scratch.curr_tile, potential_path[int(len(potential_path)/2)+1], collision_block_id) if len(potential_1) <= len(potential_2): target_tiles = [potential_path[int(len(potential_path)/2)]] else: target_tiles = [potential_path[int(len(potential_path)/2+1)]] elif "<waiting>" in plan: # Executing interaction where the persona has decided to wait before # executing their action. x = int(plan.split()[1]) y = int(plan.split()[2]) target_tiles = [[x, y]] elif "<random>" in plan: # Executing a random location action. plan = ":".join(plan.split(":")[:-1]) target_tiles = maze.address_tiles[plan] target_tiles = random.sample(list(target_tiles), 1) else: # This is our default execution. We simply take the persona to the # location where the current action is taking place. # Retrieve the target addresses. Again, plan is an action address in its # string form. <maze.address_tiles> takes this and returns candidate # coordinates. if plan not in maze.address_tiles: maze.address_tiles["Johnson Park:park:park garden"] #ERRORRRRRRR else: target_tiles = maze.address_tiles[plan] # There are sometimes more than one tile returned from this (e.g., a tabe # may stretch many coordinates). So, we sample a few here. And from that # random sample, we will take the closest ones. if len(target_tiles) < 4: target_tiles = random.sample(list(target_tiles), len(target_tiles)) else: target_tiles = random.sample(list(target_tiles), 4) # If possible, we want personas to occupy different tiles when they are # headed to the same location on the maze. 
It is ok if they end up on the # same time, but we try to lower that probability. # We take care of that overlap here. persona_name_set = set(personas.keys()) new_target_tiles = [] for i in target_tiles: curr_event_set = maze.access_tile(i)["events"] pass_curr_tile = False for j in curr_event_set: if j[0] in persona_name_set: pass_curr_tile = True if not pass_curr_tile: new_target_tiles += [i] if len(new_target_tiles) == 0: new_target_tiles = target_tiles target_tiles = new_target_tiles # Now that we've identified the target tile, we find the shortest path to # one of the target tiles. curr_tile = persona.scratch.curr_tile collision_maze = maze.collision_maze closest_target_tile = None path = None for i in target_tiles: # path_finder takes a collision_mze and the curr_tile coordinate as # an input, and returns a list of coordinate tuples that becomes the # path. # e.g., [(0, 1), (1, 1), (1, 2), (1, 3), (1, 4)...] curr_path = path_finder(maze.collision_maze, curr_tile, i, collision_block_id) if not closest_target_tile: closest_target_tile = i path = curr_path elif len(curr_path) < len(path): closest_target_tile = i path = curr_path # Actually setting the <planned_path> and <act_path_set>. We cut the # first element in the planned_path because it includes the curr_tile. persona.scratch.planned_path = path[1:] persona.scratch.act_path_set = True # Setting up the next immediate step. We stay at our curr_tile if there is # no <planned_path> left, but otherwise, we go to the next tile in the path. ret = persona.scratch.curr_tile if persona.scratch.planned_path: ret = persona.scratch.planned_path[0] persona.scratch.planned_path = persona.scratch.planned_path[1:] description = f"{persona.scratch.act_description}" description += f" @ {persona.scratch.act_address}" execution = ret, persona.scratch.act_pronunciatio, description return execution
generative_agents-main
reverie/backend_server/persona/cognitive_modules/execute.py
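A minimal, self-contained sketch of the tile-selection idea behind execute(): take a few candidate tiles, path-find to each, and keep the one with the shortest path. The grid, bfs_path, and choose_closest_tile names here are hypothetical stand-ins for maze.collision_maze, path_finder, and the selection loop above; this is an illustration, not the repo's API.

# Illustrative sketch only; names and the toy grid are invented for this example.
from collections import deque

def bfs_path(grid, start, goal):
  """Return a list of (row, col) tiles from start to goal, or [] if unreachable."""
  rows, cols = len(grid), len(grid[0])
  prev = {start: None}
  queue = deque([start])
  while queue:
    r, c = queue.popleft()
    if (r, c) == goal:
      path, node = [], goal
      while node is not None:
        path.append(node)
        node = prev[node]
      return path[::-1]
    for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
      nr, nc = r + dr, c + dc
      if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] == 0 \
         and (nr, nc) not in prev:
        prev[(nr, nc)] = (r, c)
        queue.append((nr, nc))
  return []

def choose_closest_tile(grid, curr_tile, candidate_tiles):
  """Mirror of the selection loop in execute(): keep the shortest-path candidate."""
  best_tile, best_path = None, None
  for tile in candidate_tiles:
    path = bfs_path(grid, curr_tile, tile)
    if path and (best_path is None or len(path) < len(best_path)):
      best_tile, best_path = tile, path
  # Drop the first element: it is the current tile, as execute() does.
  return best_tile, (best_path[1:] if best_path else [])

if __name__ == "__main__":
  grid = [[0, 0, 0, 0],
          [0, 1, 1, 0],
          [0, 0, 0, 0]]  # 1 = blocked tile
  tile, planned_path = choose_closest_tile(grid, (0, 0), [(2, 3), (0, 3)])
  print(tile, planned_path)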
""" Author: Joon Sung Park ([email protected]) File: perceive.py Description: This defines the "Perceive" module for generative agents. """ import sys sys.path.append('../../') from operator import itemgetter from global_methods import * from persona.prompt_template.gpt_structure import * from persona.prompt_template.run_gpt_prompt import * def generate_poig_score(persona, event_type, description): if "is idle" in description: return 1 if event_type == "event": return run_gpt_prompt_event_poignancy(persona, description)[0] elif event_type == "chat": return run_gpt_prompt_chat_poignancy(persona, persona.scratch.act_description)[0] def perceive(persona, maze): """ Perceives events around the persona and saves it to the memory, both events and spaces. We first perceive the events nearby the persona, as determined by its <vision_r>. If there are a lot of events happening within that radius, we take the <att_bandwidth> of the closest events. Finally, we check whether any of them are new, as determined by <retention>. If they are new, then we save those and return the <ConceptNode> instances for those events. INPUT: persona: An instance of <Persona> that represents the current persona. maze: An instance of <Maze> that represents the current maze in which the persona is acting in. OUTPUT: ret_events: a list of <ConceptNode> that are perceived and new. """ # PERCEIVE SPACE # We get the nearby tiles given our current tile and the persona's vision # radius. nearby_tiles = maze.get_nearby_tiles(persona.scratch.curr_tile, persona.scratch.vision_r) # We then store the perceived space. Note that the s_mem of the persona is # in the form of a tree constructed using dictionaries. for i in nearby_tiles: i = maze.access_tile(i) if i["world"]: if (i["world"] not in persona.s_mem.tree): persona.s_mem.tree[i["world"]] = {} if i["sector"]: if (i["sector"] not in persona.s_mem.tree[i["world"]]): persona.s_mem.tree[i["world"]][i["sector"]] = {} if i["arena"]: if (i["arena"] not in persona.s_mem.tree[i["world"]] [i["sector"]]): persona.s_mem.tree[i["world"]][i["sector"]][i["arena"]] = [] if i["game_object"]: if (i["game_object"] not in persona.s_mem.tree[i["world"]] [i["sector"]] [i["arena"]]): persona.s_mem.tree[i["world"]][i["sector"]][i["arena"]] += [ i["game_object"]] # PERCEIVE EVENTS. # We will perceive events that take place in the same arena as the # persona's current arena. curr_arena_path = maze.get_tile_path(persona.scratch.curr_tile, "arena") # We do not perceive the same event twice (this can happen if an object is # extended across multiple tiles). percept_events_set = set() # We will order our percept based on the distance, with the closest ones # getting priorities. percept_events_list = [] # First, we put all events that are occuring in the nearby tiles into the # percept_events_list for tile in nearby_tiles: tile_details = maze.access_tile(tile) if tile_details["events"]: if maze.get_tile_path(tile, "arena") == curr_arena_path: # This calculates the distance between the persona's current tile, # and the target tile. dist = math.dist([tile[0], tile[1]], [persona.scratch.curr_tile[0], persona.scratch.curr_tile[1]]) # Add any relevant events to our temp set/list with the distant info. for event in tile_details["events"]: if event not in percept_events_set: percept_events_list += [[dist, event]] percept_events_set.add(event) # We sort, and perceive only persona.scratch.att_bandwidth of the closest # events. 
If the bandwidth is larger, then it means the persona can perceive # more elements within a small area. percept_events_list = sorted(percept_events_list, key=itemgetter(0)) perceived_events = [] for dist, event in percept_events_list[:persona.scratch.att_bandwidth]: perceived_events += [event] # Storing events. # <ret_events> is a list of <ConceptNode> instances from the persona's # associative memory. ret_events = [] for p_event in perceived_events: s, p, o, desc = p_event if not p: # If the object is not present, then we default the event to "idle". p = "is" o = "idle" desc = "idle" desc = f"{s.split(':')[-1]} is {desc}" p_event = (s, p, o) # We retrieve the latest persona.scratch.retention events. If there is # something new that is happening (that is, p_event not in latest_events), # then we add that event to the a_mem and return it. latest_events = persona.a_mem.get_summarized_latest_events( persona.scratch.retention) if p_event not in latest_events: # We start by managing keywords. keywords = set() sub = p_event[0] obj = p_event[2] if ":" in p_event[0]: sub = p_event[0].split(":")[-1] if ":" in p_event[2]: obj = p_event[2].split(":")[-1] keywords.update([sub, obj]) # Get event embedding desc_embedding_in = desc if "(" in desc: desc_embedding_in = (desc_embedding_in.split("(")[1] .split(")")[0] .strip()) if desc_embedding_in in persona.a_mem.embeddings: event_embedding = persona.a_mem.embeddings[desc_embedding_in] else: event_embedding = get_embedding(desc_embedding_in) event_embedding_pair = (desc_embedding_in, event_embedding) # Get event poignancy. event_poignancy = generate_poig_score(persona, "event", desc_embedding_in) # If we observe the persona's self chat, we include that in the memory # of the persona here. chat_node_ids = [] if p_event[0] == f"{persona.name}" and p_event[1] == "chat with": curr_event = persona.scratch.act_event if persona.scratch.act_description in persona.a_mem.embeddings: chat_embedding = persona.a_mem.embeddings[ persona.scratch.act_description] else: chat_embedding = get_embedding(persona.scratch .act_description) chat_embedding_pair = (persona.scratch.act_description, chat_embedding) chat_poignancy = generate_poig_score(persona, "chat", persona.scratch.act_description) chat_node = persona.a_mem.add_chat(persona.scratch.curr_time, None, curr_event[0], curr_event[1], curr_event[2], persona.scratch.act_description, keywords, chat_poignancy, chat_embedding_pair, persona.scratch.chat) chat_node_ids = [chat_node.node_id] # Finally, we add the current event to the agent's memory. ret_events += [persona.a_mem.add_event(persona.scratch.curr_time, None, s, p, o, desc, keywords, event_poignancy, event_embedding_pair, chat_node_ids)] persona.scratch.importance_trigger_curr -= event_poignancy persona.scratch.importance_ele_n += 1 return ret_events
generative_agents-main
reverie/backend_server/persona/cognitive_modules/perceive.py
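A small, self-contained sketch of the perception filter in perceive(): deduplicate events, sort them by Euclidean distance from the persona's tile, and keep only the closest att_bandwidth of them. The tile coordinates, events, and helper name are invented for illustration and are not the repo's API.

# Illustrative sketch only; data and function name are made up.
import math
from operator import itemgetter

def perceive_events(curr_tile, tile_events, att_bandwidth):
  """tile_events: (tile, event) pairs observed within the vision radius.
  Returns up to att_bandwidth unique events, closest first."""
  seen = set()
  scored = []
  for tile, event in tile_events:
    if event in seen:
      continue  # do not perceive the same event twice
    seen.add(event)
    scored.append((math.dist(tile, curr_tile), event))
  scored.sort(key=itemgetter(0))
  return [event for _, event in scored[:att_bandwidth]]

if __name__ == "__main__":
  observed = [((3, 4), ("bed", "is", "idle")),
              ((1, 1), ("Maria", "is", "reading")),
              ((5, 5), ("stove", "is", "on")),
              ((1, 1), ("Maria", "is", "reading"))]  # duplicate, ignored
  print(perceive_events((0, 0), observed, att_bandwidth=2))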
""" Author: Joon Sung Park ([email protected]) File: retrieve.py Description: This defines the "Retrieve" module for generative agents. """ import sys sys.path.append('../../') from global_methods import * from persona.prompt_template.gpt_structure import * from numpy import dot from numpy.linalg import norm def retrieve(persona, perceived): """ This function takes the events that are perceived by the persona as input and returns a set of related events and thoughts that the persona would need to consider as context when planning. INPUT: perceived: a list of event <ConceptNode>s that represent any of the events ` that are happening around the persona. What is included in here are controlled by the att_bandwidth and retention hyper-parameters. OUTPUT: retrieved: a dictionary of dictionary. The first layer specifies an event, while the latter layer specifies the "curr_event", "events", and "thoughts" that are relevant. """ # We rerieve events and thoughts separately. retrieved = dict() for event in perceived: retrieved[event.description] = dict() retrieved[event.description]["curr_event"] = event relevant_events = persona.a_mem.retrieve_relevant_events( event.subject, event.predicate, event.object) retrieved[event.description]["events"] = list(relevant_events) relevant_thoughts = persona.a_mem.retrieve_relevant_thoughts( event.subject, event.predicate, event.object) retrieved[event.description]["thoughts"] = list(relevant_thoughts) return retrieved def cos_sim(a, b): """ This function calculates the cosine similarity between two input vectors 'a' and 'b'. Cosine similarity is a measure of similarity between two non-zero vectors of an inner product space that measures the cosine of the angle between them. INPUT: a: 1-D array object b: 1-D array object OUTPUT: A scalar value representing the cosine similarity between the input vectors 'a' and 'b'. Example input: a = [0.3, 0.2, 0.5] b = [0.2, 0.2, 0.5] """ return dot(a, b)/(norm(a)*norm(b)) def normalize_dict_floats(d, target_min, target_max): """ This function normalizes the float values of a given dictionary 'd' between a target minimum and maximum value. The normalization is done by scaling the values to the target range while maintaining the same relative proportions between the original values. INPUT: d: Dictionary. The input dictionary whose float values need to be normalized. target_min: Integer or float. The minimum value to which the original values should be scaled. target_max: Integer or float. The maximum value to which the original values should be scaled. OUTPUT: d: A new dictionary with the same keys as the input but with the float values normalized between the target_min and target_max. Example input: d = {'a':1.2,'b':3.4,'c':5.6,'d':7.8} target_min = -5 target_max = 5 """ min_val = min(val for val in d.values()) max_val = max(val for val in d.values()) range_val = max_val - min_val if range_val == 0: for key, val in d.items(): d[key] = (target_max - target_min)/2 else: for key, val in d.items(): d[key] = ((val - min_val) * (target_max - target_min) / range_val + target_min) return d def top_highest_x_values(d, x): """ This function takes a dictionary 'd' and an integer 'x' as input, and returns a new dictionary containing the top 'x' key-value pairs from the input dictionary 'd' with the highest values. INPUT: d: Dictionary. The input dictionary from which the top 'x' key-value pairs with the highest values are to be extracted. x: Integer. 
The number of top key-value pairs with the highest values to be extracted from the input dictionary. OUTPUT: A new dictionary containing the top 'x' key-value pairs from the input dictionary 'd' with the highest values. Example input: d = {'a':1.2,'b':3.4,'c':5.6,'d':7.8} x = 3 """ top_v = dict(sorted(d.items(), key=lambda item: item[1], reverse=True)[:x]) return top_v def extract_recency(persona, nodes): """ Gets the current Persona object and a list of nodes that are in a chronological order, and outputs a dictionary that has the recency score calculated. INPUT: persona: Current persona whose memory we are retrieving. nodes: A list of Node object in a chronological order. OUTPUT: recency_out: A dictionary whose keys are the node.node_id and whose values are the float that represents the recency score. """ recency_vals = [persona.scratch.recency_decay ** i for i in range(1, len(nodes) + 1)] recency_out = dict() for count, node in enumerate(nodes): recency_out[node.node_id] = recency_vals[count] return recency_out def extract_importance(persona, nodes): """ Gets the current Persona object and a list of nodes that are in a chronological order, and outputs a dictionary that has the importance score calculated. INPUT: persona: Current persona whose memory we are retrieving. nodes: A list of Node object in a chronological order. OUTPUT: importance_out: A dictionary whose keys are the node.node_id and whose values are the float that represents the importance score. """ importance_out = dict() for count, node in enumerate(nodes): importance_out[node.node_id] = node.poignancy return importance_out def extract_relevance(persona, nodes, focal_pt): """ Gets the current Persona object, a list of nodes that are in a chronological order, and the focal_pt string and outputs a dictionary that has the relevance score calculated. INPUT: persona: Current persona whose memory we are retrieving. nodes: A list of Node object in a chronological order. focal_pt: A string describing the current thought of revent of focus. OUTPUT: relevance_out: A dictionary whose keys are the node.node_id and whose values are the float that represents the relevance score. """ focal_embedding = get_embedding(focal_pt) relevance_out = dict() for count, node in enumerate(nodes): node_embedding = persona.a_mem.embeddings[node.embedding_key] relevance_out[node.node_id] = cos_sim(node_embedding, focal_embedding) return relevance_out def new_retrieve(persona, focal_points, n_count=30): """ Given the current persona and focal points (focal points are events or thoughts for which we are retrieving), we retrieve a set of nodes for each of the focal points and return a dictionary. INPUT: persona: The current persona object whose memory we are retrieving. focal_points: A list of focal points (string description of the events or thoughts that is the focus of current retrieval). OUTPUT: retrieved: A dictionary whose keys are a string focal point, and whose values are a list of Node object in the agent's associative memory. Example input: persona = <persona> object focal_points = ["How are you?", "Jane is swimming in the pond"] """ # <retrieved> is the main dictionary that we are returning retrieved = dict() for focal_pt in focal_points: # Getting all nodes from the agent's memory (both thoughts and events) and # sorting them by the datetime of creation. # You could also imagine getting the raw conversation, but for now. 
nodes = [[i.last_accessed, i] for i in persona.a_mem.seq_event + persona.a_mem.seq_thought if "idle" not in i.embedding_key] nodes = sorted(nodes, key=lambda x: x[0]) nodes = [i for created, i in nodes] # Calculating the component dictionaries and normalizing them. recency_out = extract_recency(persona, nodes) recency_out = normalize_dict_floats(recency_out, 0, 1) importance_out = extract_importance(persona, nodes) importance_out = normalize_dict_floats(importance_out, 0, 1) relevance_out = extract_relevance(persona, nodes, focal_pt) relevance_out = normalize_dict_floats(relevance_out, 0, 1) # Computing the final scores that combines the component values. # Note to self: test out different weights. [1, 1, 1] tends to work # decently, but in the future, these weights should likely be learned, # perhaps through an RL-like process. # gw = [1, 1, 1] # gw = [1, 2, 1] gw = [0.5, 3, 2] master_out = dict() for key in recency_out.keys(): master_out[key] = (persona.scratch.recency_w*recency_out[key]*gw[0] + persona.scratch.relevance_w*relevance_out[key]*gw[1] + persona.scratch.importance_w*importance_out[key]*gw[2]) master_out = top_highest_x_values(master_out, len(master_out.keys())) for key, val in master_out.items(): print (persona.a_mem.id_to_node[key].embedding_key, val) print (persona.scratch.recency_w*recency_out[key]*1, persona.scratch.relevance_w*relevance_out[key]*1, persona.scratch.importance_w*importance_out[key]*1) # Extracting the highest x values. # <master_out> has the key of node.id and value of float. Once we get the # highest x values, we want to translate the node.id into nodes and return # the list of nodes. master_out = top_highest_x_values(master_out, n_count) master_nodes = [persona.a_mem.id_to_node[key] for key in list(master_out.keys())] for n in master_nodes: n.last_accessed = persona.scratch.curr_time retrieved[focal_pt] = master_nodes return retrieved
generative_agents-main
reverie/backend_server/persona/cognitive_modules/retrieve.py
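A compact, self-contained sketch of the scoring scheme new_retrieve() implements: normalize per-node recency, importance, and relevance to [0, 1], combine them with weights, and keep the top-scoring memories. Plain dictionaries stand in for ConceptNode objects, and the memory records, embeddings, weights, and decay value are made up; only the combination logic mirrors the module above.

# Illustrative sketch only; data and parameter values are invented.
import math

def cos_sim(a, b):
  dot = sum(x * y for x, y in zip(a, b))
  return dot / (math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b)))

def normalize(d, lo=0.0, hi=1.0):
  mn, mx = min(d.values()), max(d.values())
  if mx == mn:
    return {k: (hi + lo) / 2 for k in d}
  return {k: (v - mn) * (hi - lo) / (mx - mn) + lo for k, v in d.items()}

def score_memories(memories, focal_embedding, weights=(0.5, 3.0, 2.0),
                   recency_decay=0.995, top_n=2):
  """memories: chronologically ordered dicts with 'id', 'poignancy', 'embedding'."""
  # Newest memory gets the largest recency value; older ones decay geometrically.
  recency = {m["id"]: recency_decay ** i
             for i, m in enumerate(reversed(memories), start=1)}
  importance = {m["id"]: m["poignancy"] for m in memories}
  relevance = {m["id"]: cos_sim(m["embedding"], focal_embedding)
               for m in memories}
  recency, importance, relevance = map(normalize, (recency, importance, relevance))
  gw_r, gw_v, gw_i = weights
  combined = {k: gw_r * recency[k] + gw_v * relevance[k] + gw_i * importance[k]
              for k in recency}
  return sorted(combined, key=combined.get, reverse=True)[:top_n]

if __name__ == "__main__":
  memories = [
    {"id": "n1", "poignancy": 2, "embedding": [0.9, 0.1, 0.0]},
    {"id": "n2", "poignancy": 8, "embedding": [0.1, 0.9, 0.1]},
    {"id": "n3", "poignancy": 4, "embedding": [0.2, 0.8, 0.2]},
  ]
  print(score_memories(memories, focal_embedding=[0.0, 1.0, 0.0]))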
""" Author: Joon Sung Park ([email protected]) File: converse.py Description: An extra cognitive module for generating conversations. """ import math import sys import datetime import random sys.path.append('../') from global_methods import * from persona.memory_structures.spatial_memory import * from persona.memory_structures.associative_memory import * from persona.memory_structures.scratch import * from persona.cognitive_modules.retrieve import * from persona.prompt_template.run_gpt_prompt import * def generate_agent_chat_summarize_ideas(init_persona, target_persona, retrieved, curr_context): all_embedding_keys = list() for key, val in retrieved.items(): for i in val: all_embedding_keys += [i.embedding_key] all_embedding_key_str ="" for i in all_embedding_keys: all_embedding_key_str += f"{i}\n" try: summarized_idea = run_gpt_prompt_agent_chat_summarize_ideas(init_persona, target_persona, all_embedding_key_str, curr_context)[0] except: summarized_idea = "" return summarized_idea def generate_summarize_agent_relationship(init_persona, target_persona, retrieved): all_embedding_keys = list() for key, val in retrieved.items(): for i in val: all_embedding_keys += [i.embedding_key] all_embedding_key_str ="" for i in all_embedding_keys: all_embedding_key_str += f"{i}\n" summarized_relationship = run_gpt_prompt_agent_chat_summarize_relationship( init_persona, target_persona, all_embedding_key_str)[0] return summarized_relationship def generate_agent_chat(maze, init_persona, target_persona, curr_context, init_summ_idea, target_summ_idea): summarized_idea = run_gpt_prompt_agent_chat(maze, init_persona, target_persona, curr_context, init_summ_idea, target_summ_idea)[0] for i in summarized_idea: print (i) return summarized_idea def agent_chat_v1(maze, init_persona, target_persona): # Chat version optimized for speed via batch generation curr_context = (f"{init_persona.scratch.name} " + f"was {init_persona.scratch.act_description} " + f"when {init_persona.scratch.name} " + f"saw {target_persona.scratch.name} " + f"in the middle of {target_persona.scratch.act_description}.\n") curr_context += (f"{init_persona.scratch.name} " + f"is thinking of initating a conversation with " + f"{target_persona.scratch.name}.") summarized_ideas = [] part_pairs = [(init_persona, target_persona), (target_persona, init_persona)] for p_1, p_2 in part_pairs: focal_points = [f"{p_2.scratch.name}"] retrieved = new_retrieve(p_1, focal_points, 50) relationship = generate_summarize_agent_relationship(p_1, p_2, retrieved) focal_points = [f"{relationship}", f"{p_2.scratch.name} is {p_2.scratch.act_description}"] retrieved = new_retrieve(p_1, focal_points, 25) summarized_idea = generate_agent_chat_summarize_ideas(p_1, p_2, retrieved, curr_context) summarized_ideas += [summarized_idea] return generate_agent_chat(maze, init_persona, target_persona, curr_context, summarized_ideas[0], summarized_ideas[1]) def generate_one_utterance(maze, init_persona, target_persona, retrieved, curr_chat): # Chat version optimized for speed via batch generation curr_context = (f"{init_persona.scratch.name} " + f"was {init_persona.scratch.act_description} " + f"when {init_persona.scratch.name} " + f"saw {target_persona.scratch.name} " + f"in the middle of {target_persona.scratch.act_description}.\n") curr_context += (f"{init_persona.scratch.name} " + f"is initiating a conversation with " + f"{target_persona.scratch.name}.") print ("July 23 5") x = run_gpt_generate_iterative_chat_utt(maze, init_persona, target_persona, retrieved, curr_context, 
curr_chat)[0] print ("July 23 6") print ("adshfoa;khdf;fajslkfjald;sdfa HERE", x) return x["utterance"], x["end"] def agent_chat_v2(maze, init_persona, target_persona): curr_chat = [] print ("July 23") for i in range(8): focal_points = [f"{target_persona.scratch.name}"] retrieved = new_retrieve(init_persona, focal_points, 50) relationship = generate_summarize_agent_relationship(init_persona, target_persona, retrieved) print ("-------- relationshopadsjfhkalsdjf", relationship) last_chat = "" for i in curr_chat[-4:]: last_chat += ": ".join(i) + "\n" if last_chat: focal_points = [f"{relationship}", f"{target_persona.scratch.name} is {target_persona.scratch.act_description}", last_chat] else: focal_points = [f"{relationship}", f"{target_persona.scratch.name} is {target_persona.scratch.act_description}"] retrieved = new_retrieve(init_persona, focal_points, 15) utt, end = generate_one_utterance(maze, init_persona, target_persona, retrieved, curr_chat) curr_chat += [[init_persona.scratch.name, utt]] if end: break focal_points = [f"{init_persona.scratch.name}"] retrieved = new_retrieve(target_persona, focal_points, 50) relationship = generate_summarize_agent_relationship(target_persona, init_persona, retrieved) print ("-------- relationshopadsjfhkalsdjf", relationship) last_chat = "" for i in curr_chat[-4:]: last_chat += ": ".join(i) + "\n" if last_chat: focal_points = [f"{relationship}", f"{init_persona.scratch.name} is {init_persona.scratch.act_description}", last_chat] else: focal_points = [f"{relationship}", f"{init_persona.scratch.name} is {init_persona.scratch.act_description}"] retrieved = new_retrieve(target_persona, focal_points, 15) utt, end = generate_one_utterance(maze, target_persona, init_persona, retrieved, curr_chat) curr_chat += [[target_persona.scratch.name, utt]] if end: break print ("July 23 PU") for row in curr_chat: print (row) print ("July 23 FIN") return curr_chat def generate_summarize_ideas(persona, nodes, question): statements = "" for n in nodes: statements += f"{n.embedding_key}\n" summarized_idea = run_gpt_prompt_summarize_ideas(persona, statements, question)[0] return summarized_idea def generate_next_line(persona, interlocutor_desc, curr_convo, summarized_idea): # Original chat -- line by line generation prev_convo = "" for row in curr_convo: prev_convo += f'{row[0]}: {row[1]}\n' next_line = run_gpt_prompt_generate_next_convo_line(persona, interlocutor_desc, prev_convo, summarized_idea)[0] return next_line def generate_inner_thought(persona, whisper): inner_thought = run_gpt_prompt_generate_whisper_inner_thought(persona, whisper)[0] return inner_thought def generate_action_event_triple(act_desp, persona): """TODO INPUT: act_desp: the description of the action (e.g., "sleeping") persona: The Persona class instance OUTPUT: a string of emoji that translates action description. 
EXAMPLE OUTPUT: "🧈🍞" """ if debug: print ("GNS FUNCTION: <generate_action_event_triple>") return run_gpt_prompt_event_triple(act_desp, persona)[0] def generate_poig_score(persona, event_type, description): if debug: print ("GNS FUNCTION: <generate_poig_score>") if "is idle" in description: return 1 if event_type == "event" or event_type == "thought": return run_gpt_prompt_event_poignancy(persona, description)[0] elif event_type == "chat": return run_gpt_prompt_chat_poignancy(persona, persona.scratch.act_description)[0] def load_history_via_whisper(personas, whispers): for count, row in enumerate(whispers): persona = personas[row[0]] whisper = row[1] thought = generate_inner_thought(persona, whisper) created = persona.scratch.curr_time expiration = persona.scratch.curr_time + datetime.timedelta(days=30) s, p, o = generate_action_event_triple(thought, persona) keywords = set([s, p, o]) thought_poignancy = generate_poig_score(persona, "event", whisper) thought_embedding_pair = (thought, get_embedding(thought)) persona.a_mem.add_thought(created, expiration, s, p, o, thought, keywords, thought_poignancy, thought_embedding_pair, None) def open_convo_session(persona, convo_mode): if convo_mode == "analysis": curr_convo = [] interlocutor_desc = "Interviewer" while True: line = input("Enter Input: ") if line == "end_convo": break if int(run_gpt_generate_safety_score(persona, line)[0]) >= 8: print (f"{persona.scratch.name} is a computational agent, and as such, it may be inappropriate to attribute human agency to the agent in your communication.") else: retrieved = new_retrieve(persona, [line], 50)[line] summarized_idea = generate_summarize_ideas(persona, retrieved, line) curr_convo += [[interlocutor_desc, line]] next_line = generate_next_line(persona, interlocutor_desc, curr_convo, summarized_idea) curr_convo += [[persona.scratch.name, next_line]] elif convo_mode == "whisper": whisper = input("Enter Input: ") thought = generate_inner_thought(persona, whisper) created = persona.scratch.curr_time expiration = persona.scratch.curr_time + datetime.timedelta(days=30) s, p, o = generate_action_event_triple(thought, persona) keywords = set([s, p, o]) thought_poignancy = generate_poig_score(persona, "event", whisper) thought_embedding_pair = (thought, get_embedding(thought)) persona.a_mem.add_thought(created, expiration, s, p, o, thought, keywords, thought_poignancy, thought_embedding_pair, None)
generative_agents-main
reverie/backend_server/persona/cognitive_modules/converse.py
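A self-contained sketch of the alternating turn-taking loop that agent_chat_v2() drives, with a scripted stub in place of the GPT-backed utterance generator. The function names and canned lines are hypothetical; the real module retrieves memories and a relationship summary before every utterance.

# Illustrative sketch only; the utterance generator is a scripted stand-in.
def generate_utterance(speaker, listener, curr_chat):
  """Stub: return (utterance, end_flag). A real implementation would prompt an
  LLM with retrieved memories, the relationship summary, and recent chat.
  The listener argument is unused by this stub."""
  scripted = [
    ("Hi, how has your day been?", False),
    ("Pretty good, I was just heading to the cafe.", False),
    ("Nice, see you around!", True),
  ]
  utt, end = scripted[min(len(curr_chat), len(scripted) - 1)]
  return utt, end

def chat_session(init_name, target_name, max_rounds=8):
  """Alternate speakers until one utterance sets the end flag or we hit the cap."""
  curr_chat = []
  for _ in range(max_rounds):
    for speaker, listener in ((init_name, target_name), (target_name, init_name)):
      utt, end = generate_utterance(speaker, listener, curr_chat)
      curr_chat.append([speaker, utt])
      if end:
        return curr_chat
  return curr_chat

if __name__ == "__main__":
  for row in chat_session("Isabella", "Klaus"):
    print(row)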
""" Author: Joon Sung Park ([email protected]) File: reflect.py Description: This defines the "Reflect" module for generative agents. """ import sys sys.path.append('../../') import datetime import random from numpy import dot from numpy.linalg import norm from global_methods import * from persona.prompt_template.run_gpt_prompt import * from persona.prompt_template.gpt_structure import * from persona.cognitive_modules.retrieve import * def generate_focal_points(persona, n=3): if debug: print ("GNS FUNCTION: <generate_focal_points>") nodes = [[i.last_accessed, i] for i in persona.a_mem.seq_event + persona.a_mem.seq_thought if "idle" not in i.embedding_key] nodes = sorted(nodes, key=lambda x: x[0]) nodes = [i for created, i in nodes] statements = "" for node in nodes[-1*persona.scratch.importance_ele_n:]: statements += node.embedding_key + "\n" return run_gpt_prompt_focal_pt(persona, statements, n)[0] def generate_insights_and_evidence(persona, nodes, n=5): if debug: print ("GNS FUNCTION: <generate_insights_and_evidence>") statements = "" for count, node in enumerate(nodes): statements += f'{str(count)}. {node.embedding_key}\n' ret = run_gpt_prompt_insight_and_guidance(persona, statements, n)[0] print (ret) try: for thought, evi_raw in ret.items(): evidence_node_id = [nodes[i].node_id for i in evi_raw] ret[thought] = evidence_node_id return ret except: return {"this is blank": "node_1"} def generate_action_event_triple(act_desp, persona): """TODO INPUT: act_desp: the description of the action (e.g., "sleeping") persona: The Persona class instance OUTPUT: a string of emoji that translates action description. EXAMPLE OUTPUT: "🧈🍞" """ if debug: print ("GNS FUNCTION: <generate_action_event_triple>") return run_gpt_prompt_event_triple(act_desp, persona)[0] def generate_poig_score(persona, event_type, description): if debug: print ("GNS FUNCTION: <generate_poig_score>") if "is idle" in description: return 1 if event_type == "event" or event_type == "thought": return run_gpt_prompt_event_poignancy(persona, description)[0] elif event_type == "chat": return run_gpt_prompt_chat_poignancy(persona, persona.scratch.act_description)[0] def generate_planning_thought_on_convo(persona, all_utt): if debug: print ("GNS FUNCTION: <generate_planning_thought_on_convo>") return run_gpt_prompt_planning_thought_on_convo(persona, all_utt)[0] def generate_memo_on_convo(persona, all_utt): if debug: print ("GNS FUNCTION: <generate_memo_on_convo>") return run_gpt_prompt_memo_on_convo(persona, all_utt)[0] def run_reflect(persona): """ Run the actual reflection. We generate the focal points, retrieve any relevant nodes, and generate thoughts and insights. INPUT: persona: Current Persona object Output: None """ # Reflection requires certain focal points. Generate that first. focal_points = generate_focal_points(persona, 3) # Retrieve the relevant Nodes object for each of the focal points. # <retrieved> has keys of focal points, and values of the associated Nodes. retrieved = new_retrieve(persona, focal_points) # For each of the focal points, generate thoughts and save it in the # agent's memory. 
for focal_pt, nodes in retrieved.items(): xx = [i.embedding_key for i in nodes] for xxx in xx: print (xxx) thoughts = generate_insights_and_evidence(persona, nodes, 5) for thought, evidence in thoughts.items(): created = persona.scratch.curr_time expiration = persona.scratch.curr_time + datetime.timedelta(days=30) s, p, o = generate_action_event_triple(thought, persona) keywords = set([s, p, o]) thought_poignancy = generate_poig_score(persona, "thought", thought) thought_embedding_pair = (thought, get_embedding(thought)) persona.a_mem.add_thought(created, expiration, s, p, o, thought, keywords, thought_poignancy, thought_embedding_pair, evidence) def reflection_trigger(persona): """ Given the current persona, determine whether the persona should run a reflection. Our current implementation checks for whether the sum of the new importance measure has reached the set (hyper-parameter) threshold. INPUT: persona: Current Persona object Output: True if we are running a new reflection. False otherwise. """ print (persona.scratch.name, "persona.scratch.importance_trigger_curr::", persona.scratch.importance_trigger_curr) print (persona.scratch.importance_trigger_max) if (persona.scratch.importance_trigger_curr <= 0 and [] != persona.a_mem.seq_event + persona.a_mem.seq_thought): return True return False def reset_reflection_counter(persona): """ We reset the counters used for the reflection trigger. INPUT: persona: Current Persona object Output: None """ persona_imt_max = persona.scratch.importance_trigger_max persona.scratch.importance_trigger_curr = persona_imt_max persona.scratch.importance_ele_n = 0 def reflect(persona): """ The main reflection module for the persona. We first check if the trigger conditions are met, and if so, run the reflection and reset any of the relevant counters. 
INPUT: persona: Current Persona object Output: None """ if reflection_trigger(persona): run_reflect(persona) reset_reflection_counter(persona) # print (persona.scratch.name, "al;sdhfjlsad", persona.scratch.chatting_end_time) if persona.scratch.chatting_end_time: # print("DEBUG", persona.scratch.curr_time + datetime.timedelta(0,10)) if persona.scratch.curr_time + datetime.timedelta(0,10) == persona.scratch.chatting_end_time: # print ("KABOOOOOMMMMMMM") all_utt = "" if persona.scratch.chat: for row in persona.scratch.chat: all_utt += f"{row[0]}: {row[1]}\n" # planning_thought = generate_planning_thought_on_convo(persona, all_utt) # print ("init planning: aosdhfpaoisdh90m ::", f"For {persona.scratch.name}'s planning: {planning_thought}") # planning_thought = generate_planning_thought_on_convo(target_persona, all_utt) # print ("target planning: aosdhfpaodish90m ::", f"For {target_persona.scratch.name}'s planning: {planning_thought}") # memo_thought = generate_memo_on_convo(persona, all_utt) # print ("init memo: aosdhfpaoisdh90m ::", f"For {persona.scratch.name} {memo_thought}") # memo_thought = generate_memo_on_convo(target_persona, all_utt) # print ("target memo: aosdhfpsaoish90m ::", f"For {target_persona.scratch.name} {memo_thought}") # make sure you set the fillings as well # print (persona.a_mem.get_last_chat(persona.scratch.chatting_with).node_id) evidence = [persona.a_mem.get_last_chat(persona.scratch.chatting_with).node_id] planning_thought = generate_planning_thought_on_convo(persona, all_utt) planning_thought = f"For {persona.scratch.name}'s planning: {planning_thought}" created = persona.scratch.curr_time expiration = persona.scratch.curr_time + datetime.timedelta(days=30) s, p, o = generate_action_event_triple(planning_thought, persona) keywords = set([s, p, o]) thought_poignancy = generate_poig_score(persona, "thought", planning_thought) thought_embedding_pair = (planning_thought, get_embedding(planning_thought)) persona.a_mem.add_thought(created, expiration, s, p, o, planning_thought, keywords, thought_poignancy, thought_embedding_pair, evidence) memo_thought = generate_memo_on_convo(persona, all_utt) memo_thought = f"{persona.scratch.name} {memo_thought}" created = persona.scratch.curr_time expiration = persona.scratch.curr_time + datetime.timedelta(days=30) s, p, o = generate_action_event_triple(memo_thought, persona) keywords = set([s, p, o]) thought_poignancy = generate_poig_score(persona, "thought", memo_thought) thought_embedding_pair = (memo_thought, get_embedding(memo_thought)) persona.a_mem.add_thought(created, expiration, s, p, o, memo_thought, keywords, thought_poignancy, thought_embedding_pair, evidence)
generative_agents-main
reverie/backend_server/persona/cognitive_modules/reflect.py
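A self-contained sketch of the reflection trigger used by reflect(): every perceived event drains an importance counter by its poignancy, and once the counter reaches zero a reflection runs and the counter resets. The ReflectionCounter class and the numbers are invented stand-ins for the persona's scratch fields (importance_trigger_curr, importance_trigger_max, importance_ele_n).

# Illustrative sketch only; class name, threshold, and scores are made up.
class ReflectionCounter:
  def __init__(self, trigger_max=150):
    self.trigger_max = trigger_max
    self.trigger_curr = trigger_max   # counts down as events are perceived
    self.ele_n = 0                    # events perceived since last reflection

  def record_event(self, poignancy):
    """Mirror of perceive(): every new memory drains the trigger counter."""
    self.trigger_curr -= poignancy
    self.ele_n += 1

  def should_reflect(self):
    return self.trigger_curr <= 0 and self.ele_n > 0

  def reset(self):
    self.trigger_curr = self.trigger_max
    self.ele_n = 0

if __name__ == "__main__":
  counter = ReflectionCounter(trigger_max=20)
  for poignancy in [3, 8, 5, 6]:
    counter.record_event(poignancy)
    if counter.should_reflect():
      print(f"reflect after {counter.ele_n} events")  # would call run_reflect()
      counter.reset()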