python_code | repo_name | file_path |
---|---|---|
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/__init__.py |
import torch
import torch.nn as nn
from functools import partial
from ldm.modules.x_transformer import Encoder, TransformerWrapper  # TODO: can we directly rely on lucidrains code and simply add this as a requirement? --> test
from torch.utils.checkpoint import checkpoint
from transformers import T5Tokenizer, T5EncoderModel, CLIPTokenizer, CLIPTextModel, AutoTokenizer
from importlib_resources import files
from ldm.modules.encoders.CLAP.utils import read_config_as_args
from ldm.modules.encoders.CLAP.clap import TextEncoder
from ldm.util import default, count_params
class AbstractEncoder(nn.Module):
def __init__(self):
super().__init__()
def encode(self, *args, **kwargs):
raise NotImplementedError
class ClassEmbedder(nn.Module):
def __init__(self, embed_dim, n_classes=1000, key='class'):
super().__init__()
self.key = key
self.embedding = nn.Embedding(n_classes, embed_dim)
def forward(self, batch, key=None):
if key is None:
key = self.key
# this is for use in crossattn
c = batch[key][:, None]  # (bsz, 1)
c = self.embedding(c)
return c
class TransformerEmbedder(AbstractEncoder):
"""Some transformer encoder layers"""
def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"):
super().__init__()
self.device = device
self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
attn_layers=Encoder(dim=n_embed, depth=n_layer))
def forward(self, tokens):
tokens = tokens.to(self.device) # meh
z = self.transformer(tokens, return_embeddings=True)
return z
def encode(self, x):
return self(x)
class BERTTokenizer(AbstractEncoder):
""" Uses a pretrained BERT tokenizer by huggingface. Vocab size: 30522 (?)"""
def __init__(self, device="cuda", vq_interface=True, max_length=77):
super().__init__()
from transformers import BertTokenizerFast  # TODO: add to requirements
self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
self.device = device
self.vq_interface = vq_interface
self.max_length = max_length
def forward(self, text):
batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
tokens = batch_encoding["input_ids"].to(self.device)
return tokens
@torch.no_grad()
def encode(self, text):
tokens = self(text)
if not self.vq_interface:
return tokens
return None, None, [None, None, tokens]
def decode(self, text):
return text
class BERTEmbedder(AbstractEncoder):  # note: this does not use a pretrained BERT model; it pairs the transformers BertTokenizer with a custom TransformerWrapper
"""Uses the BERT tokenizer and adds some transformer encoder layers"""
def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77,
device="cuda",use_tokenizer=True, embedding_dropout=0.0):
super().__init__()
self.use_tknz_fn = use_tokenizer
if self.use_tknz_fn:
self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len)
self.device = device
self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len,
attn_layers=Encoder(dim=n_embed, depth=n_layer),
emb_dropout=embedding_dropout)
def forward(self, text):
if self.use_tknz_fn:
tokens = self.tknz_fn(text)#.to(self.device)
else:
tokens = text
z = self.transformer(tokens, return_embeddings=True)
return z
def encode(self, text):
# output of length 77
return self(text)
class SpatialRescaler(nn.Module):
def __init__(self,
n_stages=1,
method='bilinear',
multiplier=0.5,
in_channels=3,
out_channels=None,
bias=False):
super().__init__()
self.n_stages = n_stages
assert self.n_stages >= 0
assert method in ['nearest','linear','bilinear','trilinear','bicubic','area']
self.multiplier = multiplier
self.interpolator = partial(torch.nn.functional.interpolate, mode=method)
self.remap_output = out_channels is not None
if self.remap_output:
print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.')
self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias)
def forward(self,x):
for stage in range(self.n_stages):
x = self.interpolator(x, scale_factor=self.multiplier)
if self.remap_output:
x = self.channel_mapper(x)
return x
def encode(self, x):
return self(x)
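# Minimal usage sketch for SpatialRescaler: each stage rescales by `multiplier`,
# then an optional 1x1 conv remaps channels. The input size is an illustrative
# assumption.
def _demo_spatial_rescaler():
    rescaler = SpatialRescaler(n_stages=2, method='bilinear', multiplier=0.5,
                               in_channels=3, out_channels=4)
    x = torch.randn(1, 3, 64, 64)
    print(rescaler(x).shape)  # expected: torch.Size([1, 4, 16, 16])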
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
class FrozenT5Embedder(AbstractEncoder):
"""Uses the T5 transformer encoder for text"""
def __init__(self, version="google/t5-v1_1-large", device="cuda", max_length=77, freeze=True): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl
super().__init__()
self.tokenizer = T5Tokenizer.from_pretrained(version)
self.transformer = T5EncoderModel.from_pretrained(version)
self.device = device
self.max_length = max_length # TODO: typical value?
if freeze:
self.freeze()
def freeze(self):
self.transformer = self.transformer.eval()
#self.train = disabled_train
for param in self.parameters():
param.requires_grad = False
def forward(self, text):
batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
tokens = batch_encoding["input_ids"].to(self.device)
outputs = self.transformer(input_ids=tokens)
z = outputs.last_hidden_state
return z
def encode(self, text):
return self(text)
class FrozenCLAPEmbedder(AbstractEncoder):
"""Uses the CLAP transformer encoder for text (from huggingface)"""
def __init__(self, weights_path, freeze=True, device="cuda", max_length=77): # clip-vit-base-patch32
super().__init__()
model_state_dict = torch.load(weights_path, map_location=torch.device('cpu'))['model']
match_params = dict()
for key in list(model_state_dict.keys()):
if 'caption_encoder' in key:
match_params[key.replace('caption_encoder.', '')] = model_state_dict[key]
config_as_str = files('ldm').joinpath('modules/encoders/CLAP/config.yml').read_text()
args = read_config_as_args(config_as_str, is_config_str=True)
# To device
self.tokenizer = AutoTokenizer.from_pretrained(args.text_model) # args.text_model
self.caption_encoder = TextEncoder(
args.d_proj, args.text_model, args.transformer_embed_dim
)
self.max_length = max_length
self.device = device
if freeze: self.freeze()
print(f"{self.caption_encoder.__class__.__name__} comes with {count_params(self.caption_encoder) * 1.e-6:.2f} M params.")
def freeze(self):
self.caption_encoder.base = self.caption_encoder.base.eval()
for param in self.caption_encoder.base.parameters():
param.requires_grad = False
def encode(self, text):
batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
tokens = batch_encoding["input_ids"].to(self.device)
outputs = self.caption_encoder.base(input_ids=tokens)
z = self.caption_encoder.projection(outputs.last_hidden_state)
return z
class FrozenCLAPEmbedderNoLoad(AbstractEncoder):
def __init__(self, config, freeze=True, device="cpu", max_length=77):
super().__init__()
args = config
# To device
self.tokenizer = AutoTokenizer.from_pretrained(args.text_model) # args.text_model
self.caption_encoder = TextEncoder(
args.d_proj, args.text_model, args.transformer_embed_dim
)
self.max_length = max_length
self.device = device
if freeze: self.freeze()
print(f"{self.caption_encoder.__class__.__name__} comes with {count_params(self.caption_encoder) * 1.e-6:.2f} M params.")
def freeze(self):
self.caption_encoder.base = self.caption_encoder.base.eval()
for param in self.caption_encoder.base.parameters():
param.requires_grad = False
def encode(self, text):
batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
tokens = batch_encoding["input_ids"].to(self.device)
outputs = self.caption_encoder.base(input_ids=tokens)
z = self.caption_encoder.projection(outputs.last_hidden_state)
return z
class NewFrozenCLAPEmbedder(AbstractEncoder):
"""Uses the CLAP transformer encoder for text (from huggingface)"""
def __init__(self, weights_path, freeze=True, device="cuda", max_length=77): # clip-vit-base-patch32
super().__init__()
# To device
from transformers import RobertaTokenizer
from ldm.modules.encoders.open_clap import create_model
model, model_cfg = create_model(
'HTSAT-tiny',
'roberta',
weights_path,
enable_fusion=True,
fusion_type='aff_2d'
)
del model.audio_branch, model.audio_transform, model.audio_projection
self.tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
self.model = model
self.max_length = max_length
self.device = device
if freeze: self.freeze()
param_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'{self.model.__class__.__name__} comes with: {param_num / 1e+6:.3f} M params.')
def freeze(self):
self.model = self.model.eval()
for param in self.model.parameters():
param.requires_grad = False
def encode(self, text):
batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
outputs = self.model.text_branch(input_ids=batch_encoding["input_ids"].to(self.device), attention_mask=batch_encoding["attention_mask"].to(self.device))
z = self.model.text_projection(outputs.last_hidden_state)
return z
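# Minimal usage sketch for NewFrozenCLAPEmbedder: the RoBERTa text branch of a
# fusion CLAP checkpoint produces per-token projected features. The weights path
# is a placeholder assumption; the projected width is typically 512 for released
# CLAP checkpoints.
def _demo_new_frozen_clap():
    clap = NewFrozenCLAPEmbedder(weights_path="/path/to/CLAP_weights.pt",  # placeholder
                                 device="cpu")
    z = clap.encode(["footsteps on gravel"])
    print(z.shape)  # e.g. torch.Size([1, 77, 512])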
class FrozenFLANEmbedder(AbstractEncoder):
"""Uses the T5 transformer encoder for text"""
def __init__(self, version="google/flan-t5-large", device="cuda", max_length=77, freeze=True): # others are google/t5-v1_1-xl and google/t5-v1_1-xxl
super().__init__()
self.tokenizer = T5Tokenizer.from_pretrained(version)
self.transformer = T5EncoderModel.from_pretrained(version)
self.device = device
self.max_length = max_length # TODO: typical value?
if freeze:
self.freeze()
def freeze(self):
self.transformer = self.transformer.eval()
#self.train = disabled_train
for param in self.parameters():
param.requires_grad = False
def forward(self, text):
batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True,
return_overflowing_tokens=False, padding="max_length", return_tensors="pt")
tokens = batch_encoding["input_ids"].to(self.device)
outputs = self.transformer(input_ids=tokens)
z = outputs.last_hidden_state
return z
def encode(self, text):
return self(text) | EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/modules.py |
# PANNs: Large-Scale Pretrained Audio Neural Networks for Audio Pattern Recognition
# Reference from https://github.com/qiuqiangkong/audioset_tagging_cnn
# Some layers are re-designed for CLAP
import os
os.environ['NUMBA_CACHE_DIR'] = '/tmp/'
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
from .utils import do_mixup, interpolate, pad_framewise_output
from .feature_fusion import iAFF, AFF, DAF
def init_layer(layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
"""Initialize a Batchnorm layer. """
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.)
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.conv2 = nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.init_weight()
def init_weight(self):
init_layer(self.conv1)
init_layer(self.conv2)
init_bn(self.bn1)
init_bn(self.bn2)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise ValueError(f'Incorrect pool_type argument: {pool_type}')
return x
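# Shape sketch for ConvBlock: two 3x3 conv+BN+ReLU stages followed by pooling, so
# a (2, 2) average pool halves both the time and mel axes. The input size is an
# illustrative assumption.
def _demo_conv_block():
    block = ConvBlock(in_channels=1, out_channels=64)
    x = torch.randn(2, 1, 100, 64)  # (batch, channels, time, mel)
    print(block(x, pool_size=(2, 2), pool_type='avg').shape)  # torch.Size([2, 64, 50, 32])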
class ConvBlock5x5(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock5x5, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(5, 5), stride=(1, 1),
padding=(2, 2), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.init_weight()
def init_weight(self):
init_layer(self.conv1)
init_bn(self.bn1)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise ValueError(f'Incorrect pool_type argument: {pool_type}')
return x
class AttBlock(nn.Module):
def __init__(self, n_in, n_out, activation='linear', temperature=1.):
super(AttBlock, self).__init__()
self.activation = activation
self.temperature = temperature
self.att = nn.Conv1d(in_channels=n_in, out_channels=n_out, kernel_size=1, stride=1, padding=0, bias=True)
self.cla = nn.Conv1d(in_channels=n_in, out_channels=n_out, kernel_size=1, stride=1, padding=0, bias=True)
self.bn_att = nn.BatchNorm1d(n_out)
self.init_weights()
def init_weights(self):
init_layer(self.att)
init_layer(self.cla)
init_bn(self.bn_att)
def forward(self, x):
# x: (n_samples, n_in, n_time)
norm_att = torch.softmax(torch.clamp(self.att(x), -10, 10), dim=-1)
cla = self.nonlinear_transform(self.cla(x))
x = torch.sum(norm_att * cla, dim=2)
return x, norm_att, cla
def nonlinear_transform(self, x):
if self.activation == 'linear':
return x
elif self.activation == 'sigmoid':
return torch.sigmoid(x)
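# Shape sketch for AttBlock: per-class attention weights (softmax over time) pool
# per-class predictions into clip-level outputs. The sizes match Cnn14's 2048-d
# features and 527 AudioSet classes, but are assumptions of this sketch.
def _demo_att_block():
    att = AttBlock(n_in=2048, n_out=527, activation='sigmoid')
    x = torch.randn(4, 2048, 32)  # (batch, features, time)
    clipwise, norm_att, cla = att(x)
    print(clipwise.shape, norm_att.shape, cla.shape)  # (4, 527) (4, 527, 32) (4, 527, 32)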
class Cnn14(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num, enable_fusion=False, fusion_type='None'):
super(Cnn14, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
self.enable_fusion = enable_fusion
self.fusion_type = fusion_type
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
if (self.enable_fusion) and (self.fusion_type == 'channel_map'):
self.conv_block1 = ConvBlock(in_channels=4, out_channels=64)
else:
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
self.fc1 = nn.Linear(2048, 2048, bias=True)
self.fc_audioset = nn.Linear(2048, classes_num, bias=True)
if (self.enable_fusion) and (self.fusion_type in ['daf_1d','aff_1d','iaff_1d']):
self.mel_conv1d = nn.Sequential(
nn.Conv1d(64, 64, kernel_size=5, stride=3, padding=2),
nn.BatchNorm1d(64) # No Relu
)
if self.fusion_type == 'daf_1d':
self.fusion_model = DAF()
elif self.fusion_type == 'aff_1d':
self.fusion_model = AFF(channels=64, type='1D')
elif self.fusion_type == 'iaff_1d':
self.fusion_model = iAFF(channels=64, type='1D')
if (self.enable_fusion) and (self.fusion_type in ['daf_2d','aff_2d','iaff_2d']):
self.mel_conv2d = nn.Sequential(
nn.Conv2d(1, 64, kernel_size=(5,5), stride=(6, 2), padding=(2,2)),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True)
)
if self.fusion_type == 'daf_2d':
self.fusion_model = DAF()
elif self.fusion_type == 'aff_2d':
self.fusion_model = AFF(channels=64, type='2D')
elif self.fusion_type == 'iaff_2d':
self.fusion_model = iAFF(channels=64, type='2D')
self.init_weight()
def init_weight(self):
init_bn(self.bn0)
init_layer(self.fc1)
init_layer(self.fc_audioset)
def forward(self, input, mixup_lambda=None, device=None):
"""
Input: (batch_size, data_length)"""
if self.enable_fusion and input["longer"].sum() == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
input["longer"][torch.randint(0, input["longer"].shape[0], (1,))] = True
if not self.enable_fusion:
x = self.spectrogram_extractor(input['waveform'].to(device=device, non_blocking=True)) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
else:
longer_list = input["longer"].to(device=device, non_blocking=True)
x = input["mel_fusion"].to(device=device, non_blocking=True)
longer_list_idx = torch.where(longer_list)[0]
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.fusion_type in ['daf_1d','aff_1d','iaff_1d']:
new_x = x[:,0:1,:,:].clone().contiguous()
# local processing
if len(longer_list_idx) > 0:
fusion_x_local = x[longer_list_idx,1:,:,:].clone().contiguous()
FB,FC,FT,FF = fusion_x_local.size()
fusion_x_local = fusion_x_local.view(FB * FC, FT, FF)
fusion_x_local = torch.permute(fusion_x_local, (0,2,1)).contiguous()
fusion_x_local = self.mel_conv1d(fusion_x_local)
fusion_x_local = fusion_x_local.view(FB,FC,FF,fusion_x_local.size(-1))
fusion_x_local = torch.permute(fusion_x_local, (0,2,1,3)).contiguous().flatten(2)
if fusion_x_local.size(-1) < FT:
fusion_x_local = torch.cat([fusion_x_local, torch.zeros((FB,FF,FT- fusion_x_local.size(-1)), device=device)], dim=-1)
else:
fusion_x_local = fusion_x_local[:,:,:FT]
# 1D fusion
new_x = new_x.squeeze(1).permute((0,2,1)).contiguous()
new_x[longer_list_idx] = self.fusion_model(new_x[longer_list_idx], fusion_x_local)
x = new_x.permute((0,2,1)).contiguous()[:,None,:,:]
else:
x = new_x
elif self.fusion_type in ['daf_2d','aff_2d','iaff_2d','channel_map']:
x = x # no change
if self.training:
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
if (self.enable_fusion) and (self.fusion_type in ['daf_2d','aff_2d','iaff_2d']):
global_x = x[:,0:1,:,:]
# global processing
B, C, H, W = global_x.shape
global_x = self.conv_block1(global_x, pool_size=(2, 2), pool_type='avg')
if len(longer_list_idx) > 0:
local_x = x[longer_list_idx,1:,:,:].contiguous()
TH = global_x.size(-2)
# local processing
B, C, H, W = local_x.shape
local_x = local_x.view(B*C,1,H,W)
local_x = self.mel_conv2d(local_x)
local_x = local_x.view(B,C,local_x.size(1),local_x.size(2),local_x.size(3))
local_x = local_x.permute((0,2,1,3,4)).contiguous().flatten(2,3)
TB,TC,_,TW = local_x.size()
if local_x.size(-2) < TH:
local_x = torch.cat([local_x, torch.zeros((TB,TC,TH-local_x.size(-2),TW), device=global_x.device)], dim=-2)
else:
local_x = local_x[:,:,:TH,:]
global_x[longer_list_idx] = self.fusion_model(global_x[longer_list_idx],local_x)
x = global_x
else:
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block6(x, pool_size=(1, 1), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = torch.mean(x, dim=3)
latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
latent_x = latent_x1 + latent_x2
latent_x = latent_x.transpose(1, 2)
latent_x = F.relu_(self.fc1(latent_x))
latent_output = interpolate(latent_x, 32)
(x1, _) = torch.max(x, dim=2)
x2 = torch.mean(x, dim=2)
x = x1 + x2
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu_(self.fc1(x))
embedding = F.dropout(x, p=0.5, training=self.training)
clipwise_output = torch.sigmoid(self.fc_audioset(x))
output_dict = {'clipwise_output': clipwise_output, 'embedding': embedding, 'fine_grained_embedding': latent_output}
return output_dict
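# Minimal usage sketch for Cnn14 without feature fusion: the model takes a dict
# with a 'waveform' tensor and returns clip-level probabilities, a pooled 2048-d
# embedding and a time-upsampled fine-grained embedding. The hyperparameters below
# are common AudioSet settings, assumed here rather than read from a CLAP config.
def _demo_cnn14():
    model = Cnn14(sample_rate=32000, window_size=1024, hop_size=320, mel_bins=64,
                  fmin=50, fmax=14000, classes_num=527, enable_fusion=False)
    model.eval()
    batch = {'waveform': torch.randn(2, 32000 * 10)}  # two 10-second clips
    with torch.no_grad():
        out = model(batch, device='cpu')
    print(out['clipwise_output'].shape)  # torch.Size([2, 527])
    print(out['embedding'].shape)        # torch.Size([2, 2048])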
class Cnn6(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num, enable_fusion=False, fusion_type='None'):
super(Cnn6, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
self.enable_fusion = enable_fusion
self.fusion_type = fusion_type
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.conv_block1 = ConvBlock5x5(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock5x5(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock5x5(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock5x5(in_channels=256, out_channels=512)
self.fc1 = nn.Linear(512, 512, bias=True)
self.fc_audioset = nn.Linear(512, classes_num, bias=True)
self.init_weight()
def init_weight(self):
init_bn(self.bn0)
init_layer(self.fc1)
init_layer(self.fc_audioset)
def forward(self, input, mixup_lambda=None, device=None):
"""
Input: (batch_size, data_length)"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = torch.mean(x, dim=3)
latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
latent_x = latent_x1 + latent_x2
latent_x = latent_x.transpose(1, 2)
latent_x = F.relu_(self.fc1(latent_x))
latent_output = interpolate(latent_x, 16)
(x1, _) = torch.max(x, dim=2)
x2 = torch.mean(x, dim=2)
x = x1 + x2
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu_(self.fc1(x))
embedding = F.dropout(x, p=0.5, training=self.training)
clipwise_output = torch.sigmoid(self.fc_audioset(x))
output_dict = {'clipwise_output': clipwise_output, 'embedding': embedding, 'fine_grained_embedding': latent_output}
return output_dict
class Cnn10(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num, enable_fusion=False, fusion_type='None'):
super(Cnn10, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
self.enable_fusion = enable_fusion
self.fusion_type = fusion_type
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
self.fc1 = nn.Linear(1024, 1024, bias=True)
self.fc_audioset = nn.Linear(1024, classes_num, bias=True)
self.init_weight()
def init_weight(self):
init_bn(self.bn0)
init_layer(self.fc1)
init_layer(self.fc_audioset)
def forward(self, input, mixup_lambda=None, device=None):
"""
Input: (batch_size, data_length)"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = torch.mean(x, dim=3)
latent_x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
latent_x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
latent_x = latent_x1 + latent_x2
latent_x = latent_x.transpose(1, 2)
latent_x = F.relu_(self.fc1(latent_x))
latent_output = interpolate(latent_x, 32)
(x1, _) = torch.max(x, dim=2)
x2 = torch.mean(x, dim=2)
x = x1 + x2
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu_(self.fc1(x))
embedding = F.dropout(x, p=0.5, training=self.training)
clipwise_output = torch.sigmoid(self.fc_audioset(x))
output_dict = {'clipwise_output': clipwise_output, 'embedding': embedding, 'fine_grained_embedding': latent_output}
return output_dict
def create_pann_model(audio_cfg, enable_fusion=False, fusion_type='None'):
try:
ModelProto = eval(audio_cfg.model_name)
model = ModelProto(
sample_rate = audio_cfg.sample_rate,
window_size = audio_cfg.window_size,
hop_size =audio_cfg.hop_size,
mel_bins = audio_cfg.mel_bins,
fmin = audio_cfg.fmin,
fmax = audio_cfg.fmax,
classes_num = audio_cfg.class_num,
enable_fusion = enable_fusion,
fusion_type = fusion_type
)
return model
except Exception as e:
raise RuntimeError(f'Model {audio_cfg.model_name} not found, or the audio cfg parameters are incomplete.') from e
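# Minimal usage sketch for create_pann_model: any object exposing the attributes
# read above will do, so a SimpleNamespace stands in for the real CLAP audio
# config; the values are illustrative assumptions.
def _demo_create_pann_model():
    from types import SimpleNamespace
    audio_cfg = SimpleNamespace(model_name='Cnn10', sample_rate=32000,
                                window_size=1024, hop_size=320, mel_bins=64,
                                fmin=50, fmax=14000, class_num=527)
    model = create_pann_model(audio_cfg)
    print(f"{sum(p.numel() for p in model.parameters()) / 1e6:.1f} M params")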
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/pann_model.py |
'''
Feature fusion for variable-length data processing.
AFF/iAFF are adapted from https://github.com/YimianDai/open-aff/blob/master/aff_pytorch/aff_net/fusion.py
Reference: Yimian Dai et al., "Attentional Feature Fusion", IEEE Winter Conference on Applications of Computer Vision (WACV), 2021.
'''
import torch
import torch.nn as nn
class DAF(nn.Module):
'''
DAF: direct addition fusion (DirectAddFuse)
'''
def __init__(self):
super(DAF, self).__init__()
def forward(self, x, residual):
return x + residual
class iAFF(nn.Module):
'''
iAFF: iterative attentional feature fusion (multi-feature fusion)
'''
def __init__(self, channels=64, r=4, type='2D'):
super(iAFF, self).__init__()
inter_channels = int(channels // r)
if type == '1D':
# local attention
self.local_att = nn.Sequential(
nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm1d(inter_channels),
nn.ReLU(inplace=True),
nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm1d(channels),
)
# global attention
self.global_att = nn.Sequential(
nn.AdaptiveAvgPool1d(1),
nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm1d(inter_channels),
nn.ReLU(inplace=True),
nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm1d(channels),
)
# second-pass local attention
self.local_att2 = nn.Sequential(
nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm1d(inter_channels),
nn.ReLU(inplace=True),
nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm1d(channels),
)
# second-pass global attention
self.global_att2 = nn.Sequential(
nn.AdaptiveAvgPool1d(1),
nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm1d(inter_channels),
nn.ReLU(inplace=True),
nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm1d(channels),
)
elif type == '2D':
# local attention
self.local_att = nn.Sequential(
nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(inter_channels),
nn.ReLU(inplace=True),
nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(channels),
)
# global attention
self.global_att = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(inter_channels),
nn.ReLU(inplace=True),
nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(channels),
)
# second-pass local attention
self.local_att2 = nn.Sequential(
nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(inter_channels),
nn.ReLU(inplace=True),
nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(channels),
)
# second-pass global attention
self.global_att2 = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(inter_channels),
nn.ReLU(inplace=True),
nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(channels),
)
else:
raise ValueError(f"the fusion type '{type}' is not supported")
self.sigmoid = nn.Sigmoid()
def forward(self, x, residual):
flag = False
xa = x + residual
if xa.size(0) == 1:
xa = torch.cat([xa,xa],dim=0)
flag = True
xl = self.local_att(xa)
xg = self.global_att(xa)
xlg = xl + xg
wei = self.sigmoid(xlg)
xi = x * wei + residual * (1 - wei)
xl2 = self.local_att2(xi)
xg2 = self.global_att(xi)
xlg2 = xl2 + xg2
wei2 = self.sigmoid(xlg2)
xo = x * wei2 + residual * (1 - wei2)
if flag:
xo = xo[0].unsqueeze(0)
return xo
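# Shape sketch for iAFF: both inputs must share one shape and the fused output
# keeps it; local and global channel attention produce the sigmoid gate. The
# duplication in forward() works around BatchNorm needing more than one sample in
# training mode. Sizes below are illustrative assumptions.
def _demo_iaff():
    fuse = iAFF(channels=64, r=4, type='1D')
    x = torch.randn(2, 64, 100)        # (batch, channels, time)
    residual = torch.randn(2, 64, 100)
    print(fuse(x, residual).shape)     # torch.Size([2, 64, 100])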
class AFF(nn.Module):
'''
AFF: attentional feature fusion (multi-feature fusion)
'''
def __init__(self, channels=64, r=4, type='2D'):
super(AFF, self).__init__()
inter_channels = int(channels // r)
if type == '1D':
self.local_att = nn.Sequential(
nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm1d(inter_channels),
nn.ReLU(inplace=True),
nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm1d(channels),
)
self.global_att = nn.Sequential(
nn.AdaptiveAvgPool1d(1),
nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm1d(inter_channels),
nn.ReLU(inplace=True),
nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm1d(channels),
)
elif type == '2D':
self.local_att = nn.Sequential(
nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(inter_channels),
nn.ReLU(inplace=True),
nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(channels),
)
self.global_att = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(inter_channels),
nn.ReLU(inplace=True),
nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),
nn.BatchNorm2d(channels),
)
else:
raise ValueError(f"the fusion type '{type}' is not supported.")
self.sigmoid = nn.Sigmoid()
def forward(self, x, residual):
flag = False
xa = x + residual
if xa.size(0) == 1:
xa = torch.cat([xa,xa],dim=0)
flag = True
xl = self.local_att(xa)
xg = self.global_att(xa)
xlg = xl + xg
wei = self.sigmoid(xlg)
xo = 2 * x * wei + 2 * residual * (1 - wei)
if flag:
xo = xo[0].unsqueeze(0)
return xo
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/feature_fusion.py |
import hashlib
import os
import urllib
import warnings
from tqdm import tqdm
_RN50 = dict(
openai="https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt",
cc12m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt"
)
_RN50_quickgelu = dict(
openai="https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt",
yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt",
cc12m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt"
)
_RN101 = dict(
openai="https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt"
)
_RN101_quickgelu = dict(
openai="https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt",
yfcc15m="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt"
)
_RN50x4 = dict(
openai="https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt",
)
_RN50x16 = dict(
openai="https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt",
)
_RN50x64 = dict(
openai="https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt",
)
_VITB32 = dict(
openai="https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt",
laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt",
laion400m_avg="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_avg-8a00ab3c.pt",
)
_VITB32_quickgelu = dict(
openai="https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt",
laion400m_e31="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt",
laion400m_e32="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt",
laion400m_avg="https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_avg-8a00ab3c.pt",
)
_VITB16 = dict(
openai="https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt",
)
_VITL14 = dict(
openai="https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt",
)
_PRETRAINED = {
"RN50": _RN50,
"RN50-quickgelu": _RN50_quickgelu,
"RN101": _RN101,
"RN101-quickgelu": _RN101_quickgelu,
"RN50x4": _RN50x4,
"RN50x16": _RN50x16,
"ViT-B-32": _VITB32,
"ViT-B-32-quickgelu": _VITB32_quickgelu,
"ViT-B-16": _VITB16,
"ViT-L-14": _VITL14,
}
def list_pretrained(as_str: bool = False):
""" returns list of pretrained models
Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True
"""
return [':'.join([k, t]) if as_str else (k, t) for k in _PRETRAINED.keys() for t in _PRETRAINED[k].keys()]
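# Small usage sketch: list_pretrained() enumerates (architecture, tag) pairs from
# the tables above, or 'arch:tag' strings with as_str=True.
def _demo_list_pretrained():
    print(list_pretrained()[:2])             # [('RN50', 'openai'), ('RN50', 'yfcc15m')]
    print(list_pretrained(as_str=True)[:1])  # ['RN50:openai']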
def list_pretrained_tag_models(tag: str):
""" return all models having the specified pretrain tag """
models = []
for k in _PRETRAINED.keys():
if tag in _PRETRAINED[k]:
models.append(k)
return models
def list_pretrained_model_tags(model: str):
""" return all pretrain tags for the specified model architecture """
tags = []
if model in _PRETRAINED:
tags.extend(_PRETRAINED[model].keys())
return tags
def get_pretrained_url(model: str, tag: str):
if model not in _PRETRAINED:
return ''
model_pretrained = _PRETRAINED[model]
if tag not in model_pretrained:
return ''
return model_pretrained[tag]
def download_pretrained(url: str, root: str = os.path.expanduser("~/.cache/clip")):
os.makedirs(root, exist_ok=True)
filename = os.path.basename(url)
if 'openaipublic' in url:
expected_sha256 = url.split("/")[-2]
else:
expected_sha256 = ''
download_target = os.path.join(root, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if expected_sha256:
if hashlib.sha256(open(download_target, "rb").read()).hexdigest() == expected_sha256:
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
else:
return download_target
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.info().get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if expected_sha256 and hashlib.sha256(open(download_target, "rb").read()).hexdigest() != expected_sha256:
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
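# Minimal usage sketch combining the helpers above: resolve a pretrain tag to its
# URL and download it into the default cache (network access assumed); the SHA256
# check only applies to OpenAI-hosted files.
def _demo_download_pretrained():
    url = get_pretrained_url('ViT-B-32', 'openai')
    if url:
        path = download_pretrained(url)
        print('cached at', path)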
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/pretrained.py |
__version__ = '0.2.1'
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/version.py |
import numpy as np
import torch.nn.functional as F
from torch import nn
from .model import MLPLayers
class LinearProbe(nn.Module):
def __init__(self, model, mlp, freeze, in_ch, out_ch, act=None):
"""
Args:
model: nn.Module
mlp: bool, if True, then use the MLP layer as the linear probe module
freeze: bool, if True, then freeze all the CLAP model's layers when training the linear probe
in_ch: int, the output channel from CLAP model
out_ch: int, the output channel from linear probe (class_num)
act: torch.nn.functional, the activation function before the loss function
"""
super().__init__()
in_ch = 512
self.clap_model = model
self.clap_model.text_branch = None # to save memory
self.freeze = freeze
if mlp:
self.lp_layer = MLPLayers(units=[in_ch, in_ch * 2, out_ch])
else:
self.lp_layer = nn.Linear(in_ch, out_ch)
if self.freeze:
for param in self.clap_model.parameters():
param.requires_grad = False
if act == 'None':
self.act = None
elif act == 'relu':
self.act = nn.ReLU()
elif act == 'elu':
self.act = nn.ELU()
elif act == 'prelu':
self.act = nn.PReLU(num_parameters=in_ch)
elif act == 'softmax':
self.act = nn.Softmax(dim=-1)
elif act == 'sigmoid':
self.act = nn.Sigmoid()
def forward(self, x, mix_lambda=None, device=None):
"""
Args:
x: waveform, torch.tensor [batch, t_samples] / batch of mel_spec and longer list
mix_lambda: torch.tensor [batch], the mixup lambda
Returns:
class_prob: torch.tensor [batch, class_num]
"""
# keep the frozen CLAP model in eval mode so BatchNorm statistics are not updated
if self.freeze:
self.clap_model.eval()
x = self.clap_model.audio_projection(
self.clap_model.audio_branch(x, mixup_lambda=mix_lambda, device=device)["embedding"])
out = self.lp_layer(x)
if self.act is not None:
out = self.act(out)
return out
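# Minimal wiring sketch for LinearProbe: `clap_model` is assumed to be an already
# constructed CLAP model (e.g. from open_clap.create_model) and `waveforms` a
# batch in the format expected by its audio branch; both come from the surrounding
# training code and are not defined here.
def _demo_linear_probe(clap_model, waveforms, num_classes=50):
    probe = LinearProbe(clap_model, mlp=True, freeze=True,
                        in_ch=512, out_ch=num_classes, act='sigmoid')
    return probe(waveforms, device=None)  # -> (batch, num_classes)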
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/linear_probe.py |
from .factory import list_models, create_model, create_model_and_transforms, add_model_config
from .loss import ClipLoss, gather_features, LPLoss, lp_gather_features, LPMetrics
from .model import CLAP, CLAPTextCfg, CLAPVisionCfg, CLAPAudioCfp, convert_weights_to_fp16, trace_model
from .openai import load_openai_model, list_openai_models
from .pretrained import list_pretrained, list_pretrained_tag_models, list_pretrained_model_tags,\
get_pretrained_url, download_pretrained
from .tokenizer import SimpleTokenizer, tokenize
from .transform import image_transform
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/__init__.py |
import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
import torch
from .model import CLAP, convert_weights_to_fp16
from .openai import load_openai_model
from .pretrained import get_pretrained_url, download_pretrained
from .transform import image_transform
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {} # directory (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r"(\d+)", string_.lower())]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = (".json",)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f"*{ext}"))
for cf in config_files:
with open(cf, "r") as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ("embed_dim", "audio_cfg", "text_cfg")):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {
k: v
for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))
}
_rescan_model_configs() # initial populate of model config registry
def load_state_dict(checkpoint_path: str, map_location="cpu", skip_params=True):
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and "state_dict" in checkpoint:
state_dict = checkpoint["state_dict"]
else:
state_dict = checkpoint
if skip_params:
if next(iter(state_dict.items()))[0].startswith("module"):
state_dict = {k[7:]: v for k, v in state_dict.items()}
# for k in state_dict:
# if k.startswith('transformer'):
# v = state_dict.pop(k)
# state_dict['text_branch.' + k[12:]] = v
return state_dict
def create_model(
amodel_name: str,
tmodel_name: str,
pretrained: str = "",
precision: str = "fp32",
device: torch.device = torch.device("cpu"),
jit: bool = False,
force_quick_gelu: bool = False,
openai_model_cache_dir: str = os.path.expanduser("~/.cache/clip"),
skip_params=True,
pretrained_audio: str = "",
pretrained_text: str = "",
enable_fusion: bool = False,
fusion_type: str = 'None'
# pretrained_image: bool = False,
):
amodel_name = amodel_name.replace(
"/", "-"
) # for callers using old naming with / in ViT names
pretrained_orig = pretrained
pretrained = pretrained.lower()
if pretrained == "openai":
if amodel_name in _MODEL_CONFIGS:
logging.info(f"Loading {amodel_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
else:
logging.error(
f"Model config for {amodel_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {amodel_name} not found.")
logging.info(f"Loading pretrained ViT-B-16 text encoder from OpenAI.")
# Hard Code in model name
model_cfg["text_cfg"]["model_type"] = tmodel_name
model = load_openai_model(
"ViT-B-16",
model_cfg,
device=device,
jit=jit,
cache_dir=openai_model_cache_dir,
enable_fusion=enable_fusion,
fusion_type=fusion_type
)
# See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
if precision == "amp" or precision == "fp32":
model = model.float()
else:
if amodel_name in _MODEL_CONFIGS:
logging.info(f"Loading {amodel_name} model config.")
model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
else:
logging.error(
f"Model config for {amodel_name} not found; available models {list_models()}."
)
raise RuntimeError(f"Model config for {amodel_name} not found.")
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
# if pretrained_image:
# if 'timm_amodel_name' in model_cfg.get('vision_cfg', {}):
# # pretrained weight loading for timm models set via vision_cfg
# model_cfg['vision_cfg']['timm_model_pretrained'] = True
# else:
# assert False, 'pretrained image towers currently only supported for timm models'
model_cfg["text_cfg"]["model_type"] = tmodel_name
model_cfg["enable_fusion"] = enable_fusion
model_cfg["fusion_type"] = fusion_type
model = CLAP(**model_cfg)
if pretrained:
checkpoint_path = ""
url = get_pretrained_url(amodel_name, pretrained)
if url:
checkpoint_path = download_pretrained(url, root=openai_model_cache_dir)
elif os.path.exists(pretrained_orig):
checkpoint_path = pretrained_orig
if checkpoint_path:
logging.info(f"Loading pretrained {amodel_name}-{tmodel_name} weights ({pretrained}).")
ckpt = load_state_dict(checkpoint_path, skip_params=True)
model.load_state_dict(ckpt)
param_names = [n for n, p in model.named_parameters()]
for n in param_names:
print(n, "\t", "Loaded" if n in ckpt else "Unloaded")
else:
logging.warning(
f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
)
raise RuntimeError(
f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
)
if pretrained_audio:
if amodel_name.startswith('PANN'):
if 'Cnn14_mAP' in pretrained_audio: # official checkpoint
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['model']
keys = list(audio_ckpt.keys())
for key in keys:
if 'spectrogram_extractor' not in key and 'logmel_extractor' not in key:
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key] = v
elif os.path.basename(pretrained_audio).startswith('PANN'): # checkpoint trained via HTSAT codebase
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model'):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
elif os.path.basename(pretrained_audio).startswith('finetuned'): # checkpoint trained via linear probe codebase
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
else:
raise ValueError('Unknown audio checkpoint')
elif amodel_name.startswith('HTSAT'):
if 'HTSAT_AudioSet_Saved' in pretrained_audio: # official checkpoint
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model') and ('spectrogram_extractor' not in key
and 'logmel_extractor' not in key):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
elif os.path.basename(pretrained_audio).startswith('HTSAT'): # checkpoint trained via HTSAT codebase
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
audio_ckpt = audio_ckpt['state_dict']
keys = list(audio_ckpt.keys())
for key in keys:
if key.startswith('sed_model'):
v = audio_ckpt.pop(key)
audio_ckpt['audio_branch.' + key[10:]] = v
elif os.path.basename(pretrained_audio).startswith('finetuned'): # checkpoint trained via linear probe codebase
audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
else:
raise ValueError('Unknown audio checkpoint')
else:
raise ValueError('this audio encoder pretrained checkpoint is not supported')
model.load_state_dict(audio_ckpt, strict=False)
logging.info(f"Loading pretrained {amodel_name} weights ({pretrained_audio}).")
param_names = [n for n, p in model.named_parameters()]
for n in param_names:
print(n, "\t", "Loaded" if n in audio_ckpt else "Unloaded")
model.to(device=device)
if precision == "fp16":
assert device.type != "cpu"
convert_weights_to_fp16(model)
if jit:
model = torch.jit.script(model)
return model, model_cfg
def create_model_and_transforms(
model_name: str,
pretrained: str = "",
precision: str = "fp32",
device: torch.device = torch.device("cpu"),
jit: bool = False,
force_quick_gelu: bool = False,
# pretrained_image: bool = False,
):
model = create_model(
model_name,
pretrained,
precision,
device,
jit,
force_quick_gelu=force_quick_gelu,
# pretrained_image=pretrained_image
)
preprocess_train = image_transform(model.visual.image_size, is_train=True)
preprocess_val = image_transform(model.visual.image_size, is_train=False)
return model, preprocess_train, preprocess_val
def list_models():
"""enumerate available model architectures based on config files"""
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
"""add model config path or file and update registry"""
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/factory.py |
""" CLAP Model
Adapted from CLIP: https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
Adapted to the Audio Task.
"""
from collections import OrderedDict
from dataclasses import dataclass
from email.mime import audio
from typing import Tuple, Union, Callable, Optional
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from .timm_model import TimmModel
import logging
from .utils import freeze_batch_norm_2d
from .pann_model import create_pann_model
from .htsat import create_htsat_model
from transformers import BertModel, RobertaModel, BartModel
from transformers.tokenization_utils_base import BatchEncoding
class MLPLayers(nn.Module):
def __init__(self, units=[512, 512, 512], nonlin=nn.ReLU(), dropout=0.1):
super(MLPLayers, self).__init__()
self.nonlin = nonlin
self.dropout = dropout
sequence = []
for u0, u1 in zip(units[:-1], units[1:]):
sequence.append(nn.Linear(u0, u1))
sequence.append(self.nonlin)
sequence.append(nn.Dropout(self.dropout))
sequence = sequence[:-2]
self.sequential = nn.Sequential(*sequence)
def forward(self, X):
X = self.sequential(X)
return X
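# Shape sketch for MLPLayers: Linear/ReLU/Dropout repeated over consecutive unit
# pairs, with the trailing ReLU+Dropout stripped by `sequence[:-2]`. The unit
# sizes below are illustrative assumptions.
def _demo_mlp_layers():
    mlp = MLPLayers(units=[512, 1024, 512])
    x = torch.randn(8, 512)
    print(mlp(x).shape)  # torch.Size([8, 512])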
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(
OrderedDict(
[
("-1", nn.AvgPool2d(stride)),
(
"0",
nn.Conv2d(
inplanes,
planes * self.expansion,
1,
stride=1,
bias=False,
),
),
("1", nn.BatchNorm2d(planes * self.expansion)),
]
)
)
def forward(self, x: torch.Tensor):
identity = x
out = self.relu(self.bn1(self.conv1(x)))
out = self.relu(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class AttentionPool2d(nn.Module):
def __init__(
self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None
):
super().__init__()
self.positional_embedding = nn.Parameter(
torch.randn(spacial_dim**2 + 1, embed_dim) / embed_dim**0.5
)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(
2, 0, 1
) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x,
key=x,
value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat(
[self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]
),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False,
)
return x[0]
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, image_size=224, width=64):
super().__init__()
self.output_dim = output_dim
self.image_size = image_size
# the 3-layer stem
self.conv1 = nn.Conv2d(
3, width // 2, kernel_size=3, stride=2, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(width // 2)
self.conv2 = nn.Conv2d(
width // 2, width // 2, kernel_size=3, padding=1, bias=False
)
self.bn2 = nn.BatchNorm2d(width // 2)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.avgpool = nn.AvgPool2d(2)
self.relu = nn.ReLU(inplace=True)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)
self.init_parameters()
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def init_parameters(self):
if self.attnpool is not None:
std = self.attnpool.c_proj.in_features**-0.5
nn.init.normal_(self.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.attnpool.c_proj.weight, std=std)
for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
assert (
unlocked_groups == 0
), "partial locking not currently supported for this model"
for param in self.parameters():
param.requires_grad = False
if freeze_bn_stats:
freeze_batch_norm_2d(self)
def stem(self, x):
for conv, bn in [
(self.conv1, self.bn1),
(self.conv2, self.bn2),
(self.conv3, self.bn3),
]:
x = self.relu(bn(conv(x)))
x = self.avgpool(x)
return x
def forward(self, x):
x = self.stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
return x.to(orig_type)
class QuickGELU(nn.Module):
# NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, act_layer: Callable = nn.GELU):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(
OrderedDict(
[
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", act_layer()),
("c_proj", nn.Linear(d_model * 4, d_model)),
]
)
)
self.ln_2 = LayerNorm(d_model)
def attention(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
return self.attn(x, x, x, need_weights=False, attn_mask=attn_mask)[0]
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
x = x + self.attention(self.ln_1(x), attn_mask=attn_mask)
x = x + self.mlp(self.ln_2(x))
return x
class Transformer(nn.Module):
def __init__(
self, width: int, layers: int, heads: int, act_layer: Callable = nn.GELU
):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.ModuleList(
[
ResidualAttentionBlock(width, heads, act_layer=act_layer)
for _ in range(layers)
]
)
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
for r in self.resblocks:
x = r(x, attn_mask=attn_mask)
return x
class VisualTransformer(nn.Module):
def __init__(
self,
image_size: int,
patch_size: int,
width: int,
layers: int,
heads: int,
output_dim: int,
act_layer: Callable = nn.GELU,
):
super().__init__()
self.image_size = image_size
self.output_dim = output_dim
self.conv1 = nn.Conv2d(
in_channels=3,
out_channels=width,
kernel_size=patch_size,
stride=patch_size,
bias=False,
)
scale = width**-0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(
scale * torch.randn((image_size // patch_size) ** 2 + 1, width)
)
self.ln_pre = LayerNorm(width)
self.text_branch = Transformer(width, layers, heads, act_layer=act_layer)
self.ln_post = LayerNorm(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
assert (
unlocked_groups == 0
), "partial locking not currently supported for this model"
for param in self.parameters():
param.requires_grad = False
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat(
[
self.class_embedding.to(x.dtype)
+ torch.zeros(
x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device
),
x,
],
dim=1,
) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.text_branch(x)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_post(x[:, 0, :])
if self.proj is not None:
x = x @ self.proj
return x
@dataclass
class CLAPVisionCfg:
layers: Union[Tuple[int, int, int, int], int] = 12
width: int = 768
patch_size: int = 16
image_size: Union[Tuple[int, int], int] = 224
timm_model_name: str = (
None # a valid model name overrides layers, width, patch_size
)
timm_model_pretrained: bool = (
False # use (imagenet) pretrained weights for named model
)
timm_pool: str = (
"avg" # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
)
timm_proj: str = (
"linear" # linear projection for timm model output ('linear', 'mlp', '')
)
# Audio Config Class
@dataclass
class CLAPAudioCfp:
model_type: str = "PANN"
model_name: str = "Cnn14"
sample_rate: int = 48000
# Param
audio_length: int = 1024
window_size: int = 1024
hop_size: int = 1024
fmin: int = 50
fmax: int = 14000
class_num: int = 527
mel_bins: int = 64
clip_samples: int = 480000
@dataclass
class CLAPTextCfg:
context_length: int
vocab_size: int
width: int
heads: int
layers: int
model_type: str
class CLAP(nn.Module):
def __init__(
self,
embed_dim: int,
audio_cfg: CLAPAudioCfp,
text_cfg: CLAPTextCfg,
quick_gelu: bool = False,
enable_fusion: bool = False,
fusion_type: str = 'None',
joint_embed_shape: int = 512,
mlp_act: str = 'relu',
):
super().__init__()
if isinstance(audio_cfg, dict):
audio_cfg = CLAPAudioCfp(**audio_cfg)
if isinstance(text_cfg, dict):
text_cfg = CLAPTextCfg(**text_cfg)
self.audio_cfg = audio_cfg
self.text_cfg = text_cfg
self.enable_fusion = enable_fusion
self.fusion_type = fusion_type
self.joint_embed_shape = joint_embed_shape
self.mlp_act = mlp_act
self.context_length = text_cfg.context_length
# OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more
# memory efficient in recent PyTorch releases (>= 1.10).
# NOTE: timm models always use native GELU regardless of quick_gelu flag.
act_layer = QuickGELU if quick_gelu else nn.GELU
if mlp_act == 'relu':
mlp_act_layer = nn.ReLU()
elif mlp_act == 'gelu':
mlp_act_layer = nn.GELU()
else:
raise NotImplementedError
# audio branch
# audio branch parameters
if audio_cfg.model_type == "PANN":
self.audio_branch = create_pann_model(audio_cfg, enable_fusion, fusion_type)
elif audio_cfg.model_type == "HTSAT":
self.audio_branch = create_htsat_model(audio_cfg, enable_fusion, fusion_type)
else:
logging.error(f"Model config for {audio_cfg.model_type} not found")
raise RuntimeError(f"Model config for {audio_cfg.model_type} not found.")
# text branch
# text branch parameters
if text_cfg.model_type == "transformer":
self.text_branch = Transformer(
width=text_cfg.width,
layers=text_cfg.layers,
heads=text_cfg.heads,
act_layer=act_layer,
)
self.vocab_size = text_cfg.vocab_size
self.token_embedding = nn.Embedding(text_cfg.vocab_size, text_cfg.width)
self.positional_embedding = nn.Parameter(
torch.empty(self.context_length, text_cfg.width)
)
self.ln_final = LayerNorm(text_cfg.width)
self.text_transform = MLPLayers(units=[self.joint_embed_shape,
self.joint_embed_shape,
self.joint_embed_shape], dropout=0.1)
self.text_projection = nn.Sequential(
nn.Linear(text_cfg.width, self.joint_embed_shape),
mlp_act_layer,
nn.Linear(self.joint_embed_shape, self.joint_embed_shape)
)
elif text_cfg.model_type == "bert":
self.text_branch = BertModel.from_pretrained("bert-base-uncased")
self.text_transform = MLPLayers(units=[self.joint_embed_shape,
self.joint_embed_shape,
self.joint_embed_shape], dropout=0.1)
self.text_projection = nn.Sequential(
nn.Linear(768, self.joint_embed_shape),
mlp_act_layer,
nn.Linear(self.joint_embed_shape, self.joint_embed_shape)
)
elif text_cfg.model_type == "roberta":
self.text_branch = RobertaModel.from_pretrained('roberta-base')
self.text_transform = MLPLayers(units=[self.joint_embed_shape,
self.joint_embed_shape,
self.joint_embed_shape], dropout=0.1)
self.text_projection = nn.Sequential(
nn.Linear(768, self.joint_embed_shape),
mlp_act_layer,
nn.Linear(self.joint_embed_shape, self.joint_embed_shape)
)
elif text_cfg.model_type == "bart":
self.text_branch = BartModel.from_pretrained('facebook/bart-base')
self.text_transform = MLPLayers(units=[self.joint_embed_shape,
self.joint_embed_shape,
self.joint_embed_shape], dropout=0.1)
self.text_projection = nn.Sequential(
nn.Linear(768, self.joint_embed_shape),
mlp_act_layer,
nn.Linear(self.joint_embed_shape, self.joint_embed_shape)
)
else:
logging.error(f"Model config for {text_cfg.model_type} not found")
raise RuntimeError(f"Model config for {text_cfg.model_type} not found.")
self.text_branch_type = text_cfg.model_type
# text branch parameters
# audio branch parameters
self.audio_transform = MLPLayers(units=[self.joint_embed_shape,
self.joint_embed_shape,
self.joint_embed_shape], dropout=0.1)
        # below here: projection of the audio embedding into the joint embedding space, plus the logit scales
# ============================================================================================================
self.audio_projection = nn.Sequential(
nn.Linear(embed_dim, self.joint_embed_shape),
mlp_act_layer,
nn.Linear(self.joint_embed_shape, self.joint_embed_shape)
)
self.logit_scale_a = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.logit_scale_t = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.register_buffer("attn_mask", self.build_attention_mask(), persistent=False)
self.init_text_branch_parameters()
def init_text_branch_parameters(self):
if self.text_branch_type == "transformer":
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
proj_std = (self.text_branch.width**-0.5) * (
(2 * self.text_branch.layers) ** -0.5
)
attn_std = self.text_branch.width**-0.5
fc_std = (2 * self.text_branch.width) ** -0.5
for block in self.text_branch.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_branch_type == "bert" or self.text_branch_type == "roberta":
width = self.text_branch.embeddings.word_embeddings.weight.shape[-1]
elif self.text_branch_type == "bart":
width = self.text_branch.shared.weight.shape[-1]
else:
width = self.text_branch.width
nn.init.constant_(self.logit_scale_a, np.log(1 / 0.07))
nn.init.constant_(self.logit_scale_t, np.log(1 / 0.07))
# deprecated
# if hasattr(self.visual, 'init_parameters'):
# self.visual.init_parameters()
# if self.text_projection is not None:
# nn.init.normal_(self.text_projection, std=width**-0.5)
def build_attention_mask(self):
        # lazily create the causal attention mask for the text transformer
        # pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
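    # Illustrative sketch (not part of the original file): for context_length == 3 the buffer built
    # above is the additive causal mask
    #   [[0., -inf, -inf],
    #    [0.,   0., -inf],
    #    [0.,   0.,   0.]]
    # so each text token can only attend to itself and to earlier positions.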
def encode_audio(self, audio, device):
        return self.audio_branch(audio, mixup_lambda=None, device=device)  # mixup_lambda support still needs to be added
# def list_of_dict_of_tensor2dict_of_tensor(self, x, device):
# tmp = {}
# for k in x[0].keys():
# tmp[k] = []
# for i in range(len(x)):
# tmp[k].append(x[i][k][:77])
# for k in x[0].keys():
# tmp[k] = torch.tensor(tmp[k]).to(device=device, non_blocking=True)
# return tmp
def encode_text(self, text, device):
if self.text_branch_type == "transformer":
text = text.to(device=device, non_blocking=True)
x = self.token_embedding(text) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding
x = x.permute(1, 0, 2) # NLD -> LND
x = self.text_branch(x, attn_mask=self.attn_mask)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = self.text_projection(x[torch.arange(x.shape[0]), text.argmax(dim=-1)])
elif self.text_branch_type == "bert":
# text = self.list_of_dict_of_tensor2dict_of_tensor(text, device)
# text = BatchEncoding(text)
x = self.text_branch(
input_ids=text["input_ids"].to(device=device, non_blocking=True),
attention_mask=text["attention_mask"].to(
device=device, non_blocking=True
),
token_type_ids=text["token_type_ids"].to(
device=device, non_blocking=True
),
)["pooler_output"]
x = self.text_projection(x)
elif self.text_branch_type == "roberta":
x = self.text_branch(
input_ids=text["input_ids"].to(device=device, non_blocking=True),
attention_mask=text["attention_mask"].to(
device=device, non_blocking=True
),
)["pooler_output"]
x = self.text_projection(x)
elif self.text_branch_type == "bart":
x = torch.mean(self.text_branch(
input_ids=text["input_ids"].to(device=device, non_blocking=True),
attention_mask=text["attention_mask"].to(
device=device, non_blocking=True
),
)["encoder_last_hidden_state"],axis=1)
x = self.text_projection(x)
else:
logging.error(f"Model type {self.text_branch_type} not found")
raise RuntimeError(f"Model type {self.text_branch_type} not found.")
return x
def forward(self, audio, text, device=None):
"""Forward audio and text into the CLAP
Parameters
----------
        audio: torch.Tensor (batch_size, audio_length)
            the time-domain audio input, or (when fusion is enabled) a dict holding the
            batched mel spectrograms and "longer" flags
        text: torch.Tensor or dict
            the text token input; a token tensor for the "transformer" branch, or a
            tokenizer output dict for the bert / roberta / bart branches
"""
if device is None:
if audio is not None:
device = audio.device
elif text is not None:
device = text.device
if audio is None and text is None:
# a hack to get the logit scale
return self.logit_scale_a.exp(), self.logit_scale_t.exp()
elif audio is None:
return self.encode_text(text, device=device)
elif text is None:
return self.audio_projection(self.encode_audio(audio, device=device)["embedding"])
audio_features = self.audio_projection(self.encode_audio(audio, device=device)["embedding"])
audio_features = F.normalize(audio_features, dim=-1)
text_features = self.encode_text(
text, device=device
)
# print("text_features", text_features)
# print("text_features.shape", text_features.shape)
# print("text_features.type", type(text_features))
text_features = F.normalize(text_features, dim=-1)
audio_features_mlp = self.audio_transform(audio_features)
text_features_mlp = self.text_transform(text_features)
# Four outputs: audio features (basic & MLP), text features (basic & MLP)
return (
audio_features,
text_features,
audio_features_mlp,
text_features_mlp,
self.logit_scale_a.exp(),
self.logit_scale_t.exp(),
)
def get_logit_scale(self):
return self.logit_scale_a.exp(), self.logit_scale_t.exp()
def get_textual_embedding(self, data):
device = next(self.parameters()).device
for k in data:
data[k] = data[k].to(device)
# if self.text_branch_type == "roberta":
text_embeds = self.text_branch(
input_ids=data["input_ids"].to(device=device, non_blocking=True),
attention_mask=data["attention_mask"].to(device=device, non_blocking=True),
)["last_hidden_state"]
text_embeds = self.text_projection(text_embeds)
text_embeds = F.normalize(text_embeds, dim=-1)
return text_embeds
def get_text_embedding(self, data):
"""Get the text embedding from the model
Parameters
----------
data: torch.Tensor
a tensor of text embedding
Returns
----------
text_embed: torch.Tensor
a tensor of text_embeds (N, D)
"""
device = next(self.parameters()).device
for k in data:
data[k] = data[k].to(device)
text_embeds = self.encode_text(data, device=device)
text_embeds = F.normalize(text_embeds, dim=-1)
return text_embeds
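    # Illustrative usage sketch (not part of the original file), assuming the "roberta" text branch:
    # the expected `data` is a standard HuggingFace tokenizer output with input_ids / attention_mask.
    #   tok = RobertaTokenizer.from_pretrained("roberta-base")
    #   data = tok(["a dog barking"], padding=True, return_tensors="pt")
    #   text_embeds = model.get_text_embedding(data)   # (1, joint_embed_shape), L2-normalized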
def get_audio_embedding(self, data):
"""Get the audio embedding from the model
Parameters
----------
data: a list of dict
the audio input dict list from 'get_audio_feature' method
Returns
----------
audio_embed: torch.Tensor
a tensor of audio_embeds (N, D)
"""
device = next(self.parameters()).device
input_dict = {}
keys = data[0].keys()
for k in keys:
input_dict[k] = torch.cat([d[k].unsqueeze(0) for d in data], dim=0).to(device)
audio_embeds = self.audio_projection(self.encode_audio(input_dict, device=device)["embedding"])
audio_embeds = F.normalize(audio_embeds, dim=-1)
return audio_embeds
def audio_infer(self, audio, hopsize=None, device=None):
"""Forward one audio and produce the audio embedding
Parameters
----------
audio: (audio_length)
the time-domain audio input, notice that it must be only one input
hopsize: int
the overlap hopsize as the sliding window
Returns
----------
output_dict: {
key: [n, (embedding_shape)] if "HTS-AT"
or
key: [(embedding_shape)] if "PANN"
}
the list of key values of the audio branch
"""
        assert not self.training, "the inference mode must be run at eval stage"
        output_dict = {}
        # PANN
        if self.audio_cfg.model_type == "PANN":
            audio_input = audio.unsqueeze(dim=0)
            # keep every key of the audio-branch output (the original code referenced an
            # undefined `key` variable here)
            for key, value in self.encode_audio(audio_input, device=device).items():
                output_dict[key] = value.squeeze(dim=0)
        elif self.audio_cfg.model_type == "HTSAT":
            # repeat short clips until they reach clip_samples
            audio_len = len(audio)
            k = self.audio_cfg.clip_samples // audio_len
            if k > 1:
                audio = audio.repeat(k)
                audio_len = len(audio)
            if hopsize is None:
                # assumption: default the sliding-window hop to one clip length
                # (the original `min(hopsize, audio_len)` raises a TypeError on None)
                hopsize = self.audio_cfg.clip_samples
            hopsize = min(hopsize, audio_len)
            if audio_len > self.audio_cfg.clip_samples:
                audio_input = [
                    audio[pos : pos + self.audio_cfg.clip_samples].clone()
                    for pos in range(
                        0, audio_len - self.audio_cfg.clip_samples, hopsize
                    )
                ]
                audio_input.append(audio[-self.audio_cfg.clip_samples :].clone())
                audio_input = torch.stack(audio_input)
                for key, value in self.encode_audio(audio_input, device=device).items():
                    output_dict[key] = value
            else:
                audio_input = audio.unsqueeze(dim=0)
                for key, value in self.encode_audio(audio_input, device=device).items():
                    output_dict[key] = value.squeeze(dim=0)
        return output_dict
def convert_weights_to_fp16(model: nn.Module):
"""Convert applicable model parameters to fp16"""
def _convert_weights_to_fp16(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.half()
if l.bias is not None:
l.bias.data = l.bias.data.half()
if isinstance(l, nn.MultiheadAttention):
for attr in [
*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]],
"in_proj_bias",
"bias_k",
"bias_v",
]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.half()
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.half()
model.apply(_convert_weights_to_fp16)
# Ignore the state dict of the vision part
def build_model_from_openai_state_dict(state_dict: dict, model_cfg, enable_fusion: bool = False, fusion_type: str = 'None'):
embed_dim = model_cfg["embed_dim"]
audio_cfg = model_cfg["audio_cfg"]
text_cfg = model_cfg["text_cfg"]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(
set(
k.split(".")[2]
for k in state_dict
            if k.startswith("transformer.resblocks")
)
)
audio_cfg = CLAPAudioCfp(**audio_cfg)
text_cfg = CLAPTextCfg(**text_cfg)
model = CLAP(
embed_dim,
audio_cfg=audio_cfg,
text_cfg=text_cfg,
quick_gelu=True, # OpenAI models were trained with QuickGELU
enable_fusion=enable_fusion,
fusion_type=fusion_type
)
state_dict["logit_scale_a"] = state_dict["logit_scale"]
state_dict["logit_scale_t"] = state_dict["logit_scale"]
pop_keys = list(state_dict.keys())[::]
# pop the visual branch saved weights
for key in pop_keys:
if key.startswith("visual."):
state_dict.pop(key, None)
for key in ["logit_scale", "input_resolution", "context_length", "vocab_size"]:
state_dict.pop(key, None)
    # fp16 conversion is intentionally not applied here
# convert_weights_to_fp16(model)
model.load_state_dict(state_dict, strict=False)
return model.eval()
def trace_model(model, batch_size=256, device=torch.device("cpu")):
model.eval()
audio_length = model.audio_cfg.audio_length
example_audio = torch.ones((batch_size, audio_length), device=device)
example_text = torch.zeros(
(batch_size, model.context_length), dtype=torch.int, device=device
)
model = torch.jit.trace_module(
model,
inputs=dict(
forward=(example_audio, example_text),
encode_text=(example_text,),
encode_image=(example_audio,),
),
)
model.audio_cfg.audio_length = audio_length # Question: what does this do?
return model
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/model.py |
# Ke Chen
# [email protected]
# HTS-AT: A HIERARCHICAL TOKEN-SEMANTIC AUDIO TRANSFORMER FOR SOUND CLASSIFICATION AND DETECTION
# Some layers used by the model
# The code below is based on and adapted from https://github.com/microsoft/Swin-Transformer
# Swin Transformer for Computer Vision: https://arxiv.org/pdf/2103.14030.pdf
import torch
import torch.nn as nn
import torch.nn.functional as F
from itertools import repeat
import collections.abc
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
import torch.utils.checkpoint as checkpoint
import random
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
from .utils import do_mixup, interpolate
from .feature_fusion import iAFF, AFF, DAF
# from PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
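# For example, to_2tuple(4) == (4, 4), while to_2tuple((4, 8)) is returned unchanged; this is how the
# patch-embedding code below accepts either a scalar or a per-axis size.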
def drop_path(x, drop_prob: float = 0., training: bool = False):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
random_tensor.floor_() # binarize
output = x.div(keep_prob) * random_tensor
return output
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
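# Illustrative sketch (not part of the original file): with drop_prob=0.2 during training, each
# sample's residual branch is zeroed with probability 0.2 and scaled by 1/0.8 otherwise, so the
# expectation is unchanged; at eval time the module is an identity.
#   x = torch.ones(4, 16)
#   drop_path(x, drop_prob=0.2, training=True)    # rows are either all 0.0 or all 1.25
#   drop_path(x, drop_prob=0.2, training=False)   # returns x unchanged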
class PatchEmbed(nn.Module):
""" 2D Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True, patch_stride = 16,
enable_fusion=False, fusion_type='None'):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
patch_stride = to_2tuple(patch_stride)
self.img_size = img_size
self.patch_size = patch_size
self.patch_stride = patch_stride
self.grid_size = (img_size[0] // patch_stride[0], img_size[1] // patch_stride[1])
self.num_patches = self.grid_size[0] * self.grid_size[1]
self.flatten = flatten
self.in_chans = in_chans
self.embed_dim = embed_dim
self.enable_fusion = enable_fusion
self.fusion_type = fusion_type
padding = ((patch_size[0] - patch_stride[0]) // 2, (patch_size[1] - patch_stride[1]) // 2)
if (self.enable_fusion) and (self.fusion_type == 'channel_map'):
self.proj = nn.Conv2d(in_chans*4, embed_dim, kernel_size=patch_size, stride=patch_stride, padding=padding)
else:
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_stride, padding=padding)
self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
if (self.enable_fusion) and (self.fusion_type in ['daf_2d','aff_2d','iaff_2d']):
self.mel_conv2d = nn.Conv2d(in_chans, embed_dim, kernel_size=(patch_size[0], patch_size[1]*3), stride=(patch_stride[0], patch_stride[1] * 3), padding=padding)
if self.fusion_type == 'daf_2d':
self.fusion_model = DAF()
elif self.fusion_type == 'aff_2d':
self.fusion_model = AFF(channels=embed_dim, type='2D')
elif self.fusion_type == 'iaff_2d':
self.fusion_model = iAFF(channels=embed_dim, type='2D')
def forward(self, x, longer_idx = None):
if (self.enable_fusion) and (self.fusion_type in ['daf_2d','aff_2d','iaff_2d']):
global_x = x[:,0:1,:,:]
# global processing
B, C, H, W = global_x.shape
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
global_x = self.proj(global_x)
TW = global_x.size(-1)
if len(longer_idx) > 0:
# local processing
local_x = x[longer_idx,1:,:,:].contiguous()
B, C, H, W = local_x.shape
local_x = local_x.view(B*C,1,H,W)
local_x = self.mel_conv2d(local_x)
local_x = local_x.view(B,C,local_x.size(1),local_x.size(2),local_x.size(3))
local_x = local_x.permute((0,2,3,1,4)).contiguous().flatten(3)
TB,TC,TH,_ = local_x.size()
if local_x.size(-1) < TW:
local_x = torch.cat([local_x, torch.zeros((TB,TC,TH,TW-local_x.size(-1)), device=global_x.device)], dim=-1)
else:
local_x = local_x[:,:,:,:TW]
global_x[longer_idx] = self.fusion_model(global_x[longer_idx],local_x)
x = global_x
else:
B, C, H, W = x.shape
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x)
if self.flatten:
x = x.flatten(2).transpose(1, 2) # BCHW -> BNC
x = self.norm(x)
return x
class Mlp(nn.Module):
""" MLP as used in Vision Transformer, MLP-Mixer and related networks
"""
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
# type: (Tensor, float, float, float, float) -> Tensor
r"""Fills the input Tensor with values drawn from a truncated
normal distribution. The values are effectively drawn from the
normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
with values outside :math:`[a, b]` redrawn until they are within
the bounds. The method used for generating the random values works
best when :math:`a \leq \text{mean} \leq b`.
Args:
tensor: an n-dimensional `torch.Tensor`
mean: the mean of the normal distribution
std: the standard deviation of the normal distribution
a: the minimum cutoff value
b: the maximum cutoff value
Examples:
>>> w = torch.empty(3, 5)
>>> nn.init.trunc_normal_(w)
"""
return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
if mode == 'fan_in':
denom = fan_in
elif mode == 'fan_out':
denom = fan_out
elif mode == 'fan_avg':
denom = (fan_in + fan_out) / 2
variance = scale / denom
if distribution == "truncated_normal":
# constant is stddev of standard normal truncated to (-2, 2)
trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
elif distribution == "normal":
tensor.normal_(std=math.sqrt(variance))
elif distribution == "uniform":
bound = math.sqrt(3 * variance)
tensor.uniform_(-bound, bound)
else:
raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
def window_partition(x, window_size):
"""
Args:
x: (B, H, W, C)
window_size (int): window size
Returns:
windows: (num_windows*B, window_size, window_size, C)
"""
B, H, W, C = x.shape
x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
return windows
def window_reverse(windows, window_size, H, W):
"""
Args:
windows: (num_windows*B, window_size, window_size, C)
window_size (int): Window size
H (int): Height of image
W (int): Width of image
Returns:
x: (B, H, W, C)
"""
B = int(windows.shape[0] / (H * W / window_size / window_size))
x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
return x
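# Illustrative sketch (not part of the original file): window_partition and window_reverse are exact
# inverses when window_size divides H and W.
#   x = torch.randn(2, 8, 8, 96)                  # (B, H, W, C)
#   w = window_partition(x, 4)                    # (2 * (8 // 4) * (8 // 4), 4, 4, 96) == (8, 4, 4, 96)
#   assert torch.equal(window_reverse(w, 4, 8, 8), x)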
class WindowAttention(nn.Module):
r""" Window based multi-head self attention (W-MSA) module with relative position bias.
It supports both of shifted and non-shifted window.
Args:
dim (int): Number of input channels.
window_size (tuple[int]): The height and width of the window.
num_heads (int): Number of attention heads.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
proj_drop (float, optional): Dropout ratio of output. Default: 0.0
"""
def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):
super().__init__()
self.dim = dim
self.window_size = window_size # Wh, Ww
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
# define a parameter table of relative position bias
self.relative_position_bias_table = nn.Parameter(
torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(self.window_size[0])
coords_w = torch.arange(self.window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += self.window_size[1] - 1
relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
self.register_buffer("relative_position_index", relative_position_index)
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
trunc_normal_(self.relative_position_bias_table, std=.02)
self.softmax = nn.Softmax(dim=-1)
def forward(self, x, mask=None):
"""
Args:
x: input features with shape of (num_windows*B, N, C)
mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
"""
B_, N, C = x.shape
qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
nW = mask.shape[0]
attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
attn = attn.view(-1, self.num_heads, N, N)
attn = self.softmax(attn)
else:
attn = self.softmax(attn)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x, attn
def extra_repr(self):
return f'dim={self.dim}, window_size={self.window_size}, num_heads={self.num_heads}'
# The blocks follow the Swin Transformer block design, so pretrained Swin Transformer weights can be reused
class SwinTransformerBlock(nn.Module):
r""" Swin Transformer Block.
Args:
dim (int): Number of input channels.
        input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float, optional): Stochastic depth rate. Default: 0.0
act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, dim, input_resolution, num_heads, window_size=7, shift_size=0,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,
act_layer=nn.GELU, norm_layer=nn.LayerNorm, norm_before_mlp='ln'):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.num_heads = num_heads
self.window_size = window_size
self.shift_size = shift_size
self.mlp_ratio = mlp_ratio
self.norm_before_mlp = norm_before_mlp
if min(self.input_resolution) <= self.window_size:
# if window size is larger than input resolution, we don't partition windows
self.shift_size = 0
self.window_size = min(self.input_resolution)
        assert 0 <= self.shift_size < self.window_size, "shift_size must be in [0, window_size)"
self.norm1 = norm_layer(dim)
self.attn = WindowAttention(
dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,
qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
if self.norm_before_mlp == 'ln':
self.norm2 = nn.LayerNorm(dim)
elif self.norm_before_mlp == 'bn':
self.norm2 = lambda x: nn.BatchNorm1d(dim)(x.transpose(1, 2)).transpose(1, 2)
else:
raise NotImplementedError
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if self.shift_size > 0:
# calculate attention mask for SW-MSA
H, W = self.input_resolution
img_mask = torch.zeros((1, H, W, 1)) # 1 H W 1
h_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
w_slices = (slice(0, -self.window_size),
slice(-self.window_size, -self.shift_size),
slice(-self.shift_size, None))
cnt = 0
for h in h_slices:
for w in w_slices:
img_mask[:, h, w, :] = cnt
cnt += 1
mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1
mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))
else:
attn_mask = None
self.register_buffer("attn_mask", attn_mask)
def forward(self, x):
# pdb.set_trace()
H, W = self.input_resolution
# print("H: ", H)
# print("W: ", W)
# pdb.set_trace()
B, L, C = x.shape
# assert L == H * W, "input feature has wrong size"
shortcut = x
x = self.norm1(x)
x = x.view(B, H, W, C)
# cyclic shift
if self.shift_size > 0:
shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
else:
shifted_x = x
# partition windows
x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C
x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C
# W-MSA/SW-MSA
attn_windows, attn = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C
# merge windows
attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C
# reverse cyclic shift
if self.shift_size > 0:
x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
else:
x = shifted_x
x = x.view(B, H * W, C)
# FFN
x = shortcut + self.drop_path(x)
x = x + self.drop_path(self.mlp(self.norm2(x)))
return x, attn
def extra_repr(self):
return f"dim={self.dim}, input_resolution={self.input_resolution}, num_heads={self.num_heads}, " \
f"window_size={self.window_size}, shift_size={self.shift_size}, mlp_ratio={self.mlp_ratio}"
class PatchMerging(nn.Module):
r""" Patch Merging Layer.
Args:
input_resolution (tuple[int]): Resolution of input feature.
dim (int): Number of input channels.
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
"""
def __init__(self, input_resolution, dim, norm_layer=nn.LayerNorm):
super().__init__()
self.input_resolution = input_resolution
self.dim = dim
self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
self.norm = norm_layer(4 * dim)
def forward(self, x):
"""
x: B, H*W, C
"""
H, W = self.input_resolution
B, L, C = x.shape
assert L == H * W, "input feature has wrong size"
        assert H % 2 == 0 and W % 2 == 0, f"x size ({H}*{W}) is not even."
x = x.view(B, H, W, C)
x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C
x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C
x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C
x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C
x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C
x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C
x = self.norm(x)
x = self.reduction(x)
return x
def extra_repr(self):
return f"input_resolution={self.input_resolution}, dim={self.dim}"
class BasicLayer(nn.Module):
""" A basic Swin Transformer layer for one stage.
Args:
dim (int): Number of input channels.
input_resolution (tuple[int]): Input resolution.
depth (int): Number of blocks.
num_heads (int): Number of attention heads.
window_size (int): Local window size.
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
drop (float, optional): Dropout rate. Default: 0.0
attn_drop (float, optional): Attention dropout rate. Default: 0.0
drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
"""
def __init__(self, dim, input_resolution, depth, num_heads, window_size,
mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
norm_before_mlp='ln'):
super().__init__()
self.dim = dim
self.input_resolution = input_resolution
self.depth = depth
self.use_checkpoint = use_checkpoint
# build blocks
self.blocks = nn.ModuleList([
SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
num_heads=num_heads, window_size=window_size,
shift_size=0 if (i % 2 == 0) else window_size // 2,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop, attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
norm_layer=norm_layer, norm_before_mlp=norm_before_mlp)
for i in range(depth)])
# patch merging layer
if downsample is not None:
self.downsample = downsample(input_resolution, dim=dim, norm_layer=norm_layer)
else:
self.downsample = None
def forward(self, x):
attns = []
for blk in self.blocks:
if self.use_checkpoint:
x = checkpoint.checkpoint(blk, x)
else:
x, attn = blk(x)
if not self.training:
attns.append(attn.unsqueeze(0))
if self.downsample is not None:
x = self.downsample(x)
if not self.training:
attn = torch.cat(attns, dim = 0)
attn = torch.mean(attn, dim = 0)
return x, attn
def extra_repr(self):
return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
# The Core of HTSAT
class HTSAT_Swin_Transformer(nn.Module):
r"""HTSAT based on the Swin Transformer
Args:
spec_size (int | tuple(int)): Input Spectrogram size. Default 256
patch_size (int | tuple(int)): Patch size. Default: 4
        patch_stride (int | tuple(int)): Patch Stride for Frequency and Time Axis. Default: 4
in_chans (int): Number of input image channels. Default: 1 (mono)
num_classes (int): Number of classes for classification head. Default: 527
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each HTSAT-Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 8
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
config (module): The configuration Module from config.py
"""
def __init__(self, spec_size=256, patch_size=4, patch_stride=(4,4),
in_chans=1, num_classes=527,
embed_dim=96, depths=[2, 2, 6, 2], num_heads=[4, 8, 16, 32],
window_size=8, mlp_ratio=4., qkv_bias=True, qk_scale=None,
drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
norm_layer=nn.LayerNorm,
ape=False, patch_norm=True,
use_checkpoint=False, norm_before_mlp='ln', config = None,
enable_fusion = False, fusion_type = 'None', **kwargs):
super(HTSAT_Swin_Transformer, self).__init__()
self.config = config
self.spec_size = spec_size
self.patch_stride = patch_stride
self.patch_size = patch_size
self.window_size = window_size
self.embed_dim = embed_dim
self.depths = depths
self.ape = ape
self.in_chans = in_chans
self.num_classes = num_classes
self.num_heads = num_heads
self.num_layers = len(self.depths)
self.num_features = int(self.embed_dim * 2 ** (self.num_layers - 1))
self.drop_rate = drop_rate
self.attn_drop_rate = attn_drop_rate
self.drop_path_rate = drop_path_rate
self.qkv_bias = qkv_bias
self.qk_scale = None
self.patch_norm = patch_norm
self.norm_layer = norm_layer if self.patch_norm else None
self.norm_before_mlp = norm_before_mlp
self.mlp_ratio = mlp_ratio
self.use_checkpoint = use_checkpoint
self.enable_fusion = enable_fusion
self.fusion_type = fusion_type
# process mel-spec ; used only once
self.freq_ratio = self.spec_size // self.config.mel_bins
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
self.interpolate_ratio = 32 # Downsampled ratio
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=config.window_size, hop_length=config.hop_size,
win_length=config.window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=config.sample_rate, n_fft=config.window_size,
n_mels=config.mel_bins, fmin=config.fmin, fmax=config.fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2) # 2 2
self.bn0 = nn.BatchNorm2d(self.config.mel_bins)
        # split spectrogram into non-overlapping patches
self.patch_embed = PatchEmbed(
img_size=self.spec_size, patch_size=self.patch_size, in_chans=self.in_chans,
embed_dim=self.embed_dim, norm_layer=self.norm_layer, patch_stride = patch_stride,
enable_fusion=self.enable_fusion, fusion_type=self.fusion_type
)
num_patches = self.patch_embed.num_patches
patches_resolution = self.patch_embed.grid_size
self.patches_resolution = patches_resolution
# absolute position embedding
if self.ape:
self.absolute_pos_embed = nn.Parameter(torch.zeros(1, num_patches, self.embed_dim))
trunc_normal_(self.absolute_pos_embed, std=.02)
self.pos_drop = nn.Dropout(p=self.drop_rate)
# stochastic depth
dpr = [x.item() for x in torch.linspace(0, self.drop_path_rate, sum(self.depths))] # stochastic depth decay rule
# build layers
self.layers = nn.ModuleList()
for i_layer in range(self.num_layers):
layer = BasicLayer(dim=int(self.embed_dim * 2 ** i_layer),
input_resolution=(patches_resolution[0] // (2 ** i_layer),
patches_resolution[1] // (2 ** i_layer)),
depth=self.depths[i_layer],
num_heads=self.num_heads[i_layer],
window_size=self.window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=self.qkv_bias, qk_scale=self.qk_scale,
drop=self.drop_rate, attn_drop=self.attn_drop_rate,
drop_path=dpr[sum(self.depths[:i_layer]):sum(self.depths[:i_layer + 1])],
norm_layer=self.norm_layer,
downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
use_checkpoint=use_checkpoint,
norm_before_mlp=self.norm_before_mlp)
self.layers.append(layer)
self.norm = self.norm_layer(self.num_features)
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.maxpool = nn.AdaptiveMaxPool1d(1)
SF = self.spec_size // (2 ** (len(self.depths) - 1)) // self.patch_stride[0] // self.freq_ratio
self.tscam_conv = nn.Conv2d(
in_channels = self.num_features,
out_channels = self.num_classes,
kernel_size = (SF,3),
padding = (0,1)
)
self.head = nn.Linear(num_classes, num_classes)
if (self.enable_fusion) and (self.fusion_type in ['daf_1d','aff_1d','iaff_1d']):
self.mel_conv1d = nn.Sequential(
nn.Conv1d(64, 64, kernel_size=5, stride=3, padding=2),
nn.BatchNorm1d(64)
)
if self.fusion_type == 'daf_1d':
self.fusion_model = DAF()
elif self.fusion_type == 'aff_1d':
self.fusion_model = AFF(channels=64, type='1D')
elif self.fusion_type == 'iaff_1d':
self.fusion_model = iAFF(channels=64, type='1D')
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {'absolute_pos_embed'}
@torch.jit.ignore
def no_weight_decay_keywords(self):
return {'relative_position_bias_table'}
def forward_features(self, x, longer_idx = None):
# A deprecated optimization for using a hierarchical output from different blocks
frames_num = x.shape[2]
x = self.patch_embed(x, longer_idx = longer_idx)
if self.ape:
x = x + self.absolute_pos_embed
x = self.pos_drop(x)
for i, layer in enumerate(self.layers):
x, attn = layer(x)
# for x
x = self.norm(x)
B, N, C = x.shape
SF = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[0]
ST = frames_num // (2 ** (len(self.depths) - 1)) // self.patch_stride[1]
x = x.permute(0,2,1).contiguous().reshape(B, C, SF, ST)
B, C, F, T = x.shape
# group 2D CNN
c_freq_bin = F // self.freq_ratio
x = x.reshape(B, C, F // c_freq_bin, c_freq_bin, T)
x = x.permute(0,1,3,2,4).contiguous().reshape(B, C, c_freq_bin, -1)
# get latent_output
fine_grained_latent_output = torch.mean(x, dim = 2)
fine_grained_latent_output = interpolate(fine_grained_latent_output.permute(0,2,1).contiguous(), 8 * self.patch_stride[1])
latent_output = self.avgpool(torch.flatten(x,2))
latent_output = torch.flatten(latent_output, 1)
# display the attention map, if needed
x = self.tscam_conv(x)
x = torch.flatten(x, 2) # B, C, T
fpx = interpolate(torch.sigmoid(x).permute(0,2,1).contiguous(), 8 * self.patch_stride[1])
x = self.avgpool(x)
x = torch.flatten(x, 1)
output_dict = {
'framewise_output': fpx, # already sigmoided
'clipwise_output': torch.sigmoid(x),
'fine_grained_embedding': fine_grained_latent_output,
'embedding': latent_output
}
return output_dict
def crop_wav(self, x, crop_size, spe_pos = None):
time_steps = x.shape[2]
tx = torch.zeros(x.shape[0], x.shape[1], crop_size, x.shape[3]).to(x.device)
for i in range(len(x)):
if spe_pos is None:
crop_pos = random.randint(0, time_steps - crop_size - 1)
else:
crop_pos = spe_pos
tx[i][0] = x[i, 0, crop_pos:crop_pos + crop_size,:]
return tx
    # Reshape the waveform spectrogram to an image-like size, so the pretrained Swin Transformer weights can be used
def reshape_wav2img(self, x):
B, C, T, F = x.shape
target_T = int(self.spec_size * self.freq_ratio)
target_F = self.spec_size // self.freq_ratio
        assert T <= target_T and F <= target_F, "the wav size should be less than or equal to the swin input size"
# to avoid bicubic zero error
if T < target_T:
x = nn.functional.interpolate(x, (target_T, x.shape[3]), mode="bicubic", align_corners=True)
if F < target_F:
x = nn.functional.interpolate(x, (x.shape[2], target_F), mode="bicubic", align_corners=True)
x = x.permute(0,1,3,2).contiguous()
x = x.reshape(x.shape[0], x.shape[1], x.shape[2], self.freq_ratio, x.shape[3] // self.freq_ratio)
# print(x.shape)
x = x.permute(0,1,3,2,4).contiguous()
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3], x.shape[4])
return x
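    # Shape walk-through (illustrative, not part of the original file), for the default spec_size=256
    # and mel_bins=64 (freq_ratio=4): a log-mel input (B, 1, T, 64) with T <= 1024 is interpolated to
    # (B, 1, 1024, 64); the time axis is then split into freq_ratio chunks of length 256 that are
    # stacked along the frequency axis, yielding a square (B, 1, 256, 256) "image" for the Swin backbone.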
    # Repeat the waveform spectrogram to an image-like size, so the pretrained Swin Transformer weights can be used
def repeat_wat2img(self, x, cur_pos):
B, C, T, F = x.shape
target_T = int(self.spec_size * self.freq_ratio)
target_F = self.spec_size // self.freq_ratio
        assert T <= target_T and F <= target_F, "the wav size should be less than or equal to the swin input size"
# to avoid bicubic zero error
if T < target_T:
x = nn.functional.interpolate(x, (target_T, x.shape[3]), mode="bicubic", align_corners=True)
if F < target_F:
x = nn.functional.interpolate(x, (x.shape[2], target_F), mode="bicubic", align_corners=True)
x = x.permute(0,1,3,2).contiguous() # B C F T
x = x[:,:,:,cur_pos:cur_pos + self.spec_size]
x = x.repeat(repeats = (1,1,4,1))
return x
def forward(self, x: torch.Tensor, mixup_lambda = None, infer_mode = False, device=None):# out_feat_keys: List[str] = None):
if self.enable_fusion and x["longer"].sum() == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
x["longer"][torch.randint(0, x["longer"].shape[0], (1,))] = True
if not self.enable_fusion:
x = x["waveform"].to(device=device, non_blocking=True)
x = self.spectrogram_extractor(x) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
x = self.spec_augmenter(x)
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
x = self.reshape_wav2img(x)
output_dict = self.forward_features(x)
else:
longer_list = x["longer"].to(device=device, non_blocking=True)
x = x["mel_fusion"].to(device=device, non_blocking=True)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
longer_list_idx = torch.where(longer_list)[0]
if self.fusion_type in ['daf_1d','aff_1d','iaff_1d']:
new_x = x[:,0:1,:,:].clone().contiguous()
if len(longer_list_idx) > 0:
# local processing
fusion_x_local = x[longer_list_idx,1:,:,:].clone().contiguous()
FB,FC,FT,FF = fusion_x_local.size()
fusion_x_local = fusion_x_local.view(FB * FC, FT, FF)
fusion_x_local = torch.permute(fusion_x_local, (0,2,1)).contiguous()
fusion_x_local = self.mel_conv1d(fusion_x_local)
fusion_x_local = fusion_x_local.view(FB,FC,FF,fusion_x_local.size(-1))
fusion_x_local = torch.permute(fusion_x_local, (0,2,1,3)).contiguous().flatten(2)
if fusion_x_local.size(-1) < FT:
fusion_x_local = torch.cat([fusion_x_local, torch.zeros((FB,FF,FT- fusion_x_local.size(-1)), device=device)], dim=-1)
else:
fusion_x_local = fusion_x_local[:,:,:FT]
# 1D fusion
new_x = new_x.squeeze(1).permute((0,2,1)).contiguous()
new_x[longer_list_idx] = self.fusion_model(new_x[longer_list_idx], fusion_x_local)
x = new_x.permute((0,2,1)).contiguous()[:,None,:,:]
else:
x = new_x
elif self.fusion_type in ['daf_2d','aff_2d','iaff_2d','channel_map']:
x = x # no change
if self.training:
x = self.spec_augmenter(x)
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
x = self.reshape_wav2img(x)
output_dict = self.forward_features(x, longer_idx = longer_list_idx)
# if infer_mode:
# # in infer mode. we need to handle different length audio input
# frame_num = x.shape[2]
# target_T = int(self.spec_size * self.freq_ratio)
# repeat_ratio = math.floor(target_T / frame_num)
# x = x.repeat(repeats=(1,1,repeat_ratio,1))
# x = self.reshape_wav2img(x)
# output_dict = self.forward_features(x)
# else:
# if x.shape[2] > self.freq_ratio * self.spec_size:
# if self.training:
# x = self.crop_wav(x, crop_size=self.freq_ratio * self.spec_size)
# x = self.reshape_wav2img(x)
# output_dict = self.forward_features(x)
# else:
# # Change: Hard code here
# overlap_size = (x.shape[2] - 1) // 4
# output_dicts = []
# crop_size = (x.shape[2] - 1) // 2
# for cur_pos in range(0, x.shape[2] - crop_size - 1, overlap_size):
# tx = self.crop_wav(x, crop_size = crop_size, spe_pos = cur_pos)
# tx = self.reshape_wav2img(tx)
# output_dicts.append(self.forward_features(tx))
# clipwise_output = torch.zeros_like(output_dicts[0]["clipwise_output"]).float().to(x.device)
# framewise_output = torch.zeros_like(output_dicts[0]["framewise_output"]).float().to(x.device)
# for d in output_dicts:
# clipwise_output += d["clipwise_output"]
# framewise_output += d["framewise_output"]
# clipwise_output = clipwise_output / len(output_dicts)
# framewise_output = framewise_output / len(output_dicts)
# output_dict = {
# 'framewise_output': framewise_output,
# 'clipwise_output': clipwise_output
# }
# else: # this part is typically used, and most easy one
# x = self.reshape_wav2img(x)
# output_dict = self.forward_features(x)
# x = self.head(x)
        # The data is already processed in the dataloader, so here we only need to handle the case input_T < fixed_T
return output_dict
def create_htsat_model(audio_cfg, enable_fusion=False, fusion_type='None'):
try:
assert audio_cfg.model_name in ["tiny", "base", "large"], "model name for HTS-AT is wrong!"
if audio_cfg.model_name == "tiny":
model = HTSAT_Swin_Transformer(
spec_size=256,
patch_size=4,
patch_stride=(4,4),
num_classes=audio_cfg.class_num,
embed_dim=96,
depths=[2,2,6,2],
num_heads=[4,8,16,32],
window_size=8,
config = audio_cfg,
enable_fusion = enable_fusion,
fusion_type = fusion_type
)
elif audio_cfg.model_name == "base":
model = HTSAT_Swin_Transformer(
spec_size=256,
patch_size=4,
patch_stride=(4,4),
num_classes=audio_cfg.class_num,
embed_dim=128,
depths=[2,2,12,2],
num_heads=[4,8,16,32],
window_size=8,
config = audio_cfg,
enable_fusion = enable_fusion,
fusion_type = fusion_type
)
elif audio_cfg.model_name == "large":
model = HTSAT_Swin_Transformer(
spec_size=256,
patch_size=4,
patch_stride=(4,4),
num_classes=audio_cfg.class_num,
embed_dim=256,
depths=[2,2,12,2],
num_heads=[4,8,16,32],
window_size=8,
config = audio_cfg,
enable_fusion = enable_fusion,
fusion_type = fusion_type
)
return model
    except Exception as e:
        raise RuntimeError(f'Model for {audio_cfg.model_name} not found, or the audio cfg parameters are incomplete.') from e
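# Illustrative usage sketch (not part of the original file): any config object exposing the fields read
# above works, e.g. the CLAPAudioCfp dataclass defined in model.py of this package:
#   cfg = CLAPAudioCfp(model_type="HTSAT", model_name="tiny")    # keeps the other audio defaults
#   htsat = create_htsat_model(cfg, enable_fusion=False, fusion_type='None')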
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/htsat.py |
""" CLIP tokenizer
Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
import gzip
import html
import os
from functools import lru_cache
from typing import Union, List
import ftfy
import regex as re
import torch
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a signficant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
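# For example, bytes_to_unicode() has exactly 256 entries, printable ASCII maps to itself
# (bytes_to_unicode()[ord('a')] == 'a'), and bytes the BPE cannot handle directly are remapped,
# e.g. the space byte 0x20 becomes 'Ġ'.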
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
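# For example, get_pairs(('h', 'e', 'l', 'l', 'o</w>')) ==
# {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')}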
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe(), special_tokens=None):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
if not special_tokens:
special_tokens = ['<start_of_text>', '<end_of_text>']
else:
special_tokens = ['<start_of_text>', '<end_of_text>'] + special_tokens
vocab.extend(special_tokens)
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {t:t for t in special_tokens}
special = "|".join(special_tokens)
self.pat = re.compile(special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
self.vocab_size = len(self.encoder)
self.all_special_ids = [self.encoder[t] for t in special_tokens]
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
_tokenizer = SimpleTokenizer()
def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<start_of_text>"]
eot_token = _tokenizer.encoder["<end_of_text>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
tokens = tokens[:context_length] # Truncate
result[i, :len(tokens)] = torch.tensor(tokens)
return result
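if __name__ == "__main__":
    # Illustrative sketch, not part of the original file: tokenize a made-up
    # caption and decode it back. Assumes bpe_simple_vocab_16e6.txt.gz ships
    # next to this module, as default_bpe() expects.
    toks = tokenize(["a dog barks in the distance"])
    print(toks.shape)  # torch.Size([1, 77])
    nonzero = [t for t in toks[0].tolist() if t != 0]  # drop the zero padding
    print(_tokenizer.decode(nonzero))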
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/tokenizer.py |
from multiprocessing.sharedctypes import Value
import torch
import torch.distributed.nn
from torch import distributed as dist, nn as nn
from torch.nn import functional as F
import numpy as np
from sklearn.metrics import average_precision_score, roc_auc_score, accuracy_score
try:
import horovod.torch as hvd
except ImportError:
hvd = None
def gather_features(
audio_features,
text_features,
audio_features_mlp=None,
text_features_mlp=None,
local_loss=False,
gather_with_grad=False,
rank=0,
world_size=1,
use_horovod=False,
mlp_loss=False
):
if use_horovod:
assert hvd is not None, 'Please install horovod'
if gather_with_grad:
all_audio_features = hvd.allgather(audio_features)
all_text_features = hvd.allgather(text_features)
if mlp_loss:
all_audio_features_mlp = hvd.allgather(audio_features_mlp)
all_text_features_mlp = hvd.allgather(text_features_mlp)
else:
with torch.no_grad():
all_audio_features = hvd.allgather(audio_features)
all_text_features = hvd.allgather(text_features)
if mlp_loss:
all_audio_features_mlp = hvd.allgather(audio_features_mlp)
all_text_features_mlp = hvd.allgather(text_features_mlp)
if not local_loss:
# ensure grads for local rank when all_* features don't have a gradient
gathered_audio_features = list(all_audio_features.chunk(world_size, dim=0))
gathered_text_features = list(all_text_features.chunk(world_size, dim=0))
gathered_audio_features[rank] = audio_features
gathered_text_features[rank] = text_features
all_audio_features = torch.cat(gathered_audio_features, dim=0)
all_text_features = torch.cat(gathered_text_features, dim=0)
if mlp_loss:
gathered_audio_features_mlp = list(all_audio_features_mlp.chunk(world_size, dim=0))
gathered_text_features_mlp = list(all_text_features_mlp.chunk(world_size, dim=0))
gathered_audio_features_mlp[rank] = audio_features_mlp
gathered_text_features_mlp[rank] = text_features_mlp
all_audio_features_mlp = torch.cat(gathered_audio_features_mlp, dim=0)
all_text_features_mlp = torch.cat(gathered_text_features_mlp, dim=0)
else:
# We gather tensors from all gpus
if gather_with_grad:
all_audio_features = torch.cat(torch.distributed.nn.all_gather(audio_features), dim=0)
all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features), dim=0)
if mlp_loss:
all_audio_features_mlp = torch.cat(torch.distributed.nn.all_gather(audio_features_mlp), dim=0)
all_text_features_mlp = torch.cat(torch.distributed.nn.all_gather(text_features_mlp), dim=0)
else:
gathered_audio_features = [torch.zeros_like(audio_features) for _ in range(world_size)]
gathered_text_features = [torch.zeros_like(text_features) for _ in range(world_size)]
dist.all_gather(gathered_audio_features, audio_features)
dist.all_gather(gathered_text_features, text_features)
if mlp_loss:
gathered_audio_features_mlp = [torch.zeros_like(audio_features_mlp) for _ in range(world_size)]
gathered_text_features_mlp = [torch.zeros_like(text_features_mlp) for _ in range(world_size)]
dist.all_gather(gathered_audio_features_mlp, audio_features_mlp)
dist.all_gather(gathered_text_features_mlp, text_features_mlp)
if not local_loss:
# ensure grads for local rank when all_* features don't have a gradient
gathered_audio_features[rank] = audio_features
gathered_text_features[rank] = text_features
if mlp_loss:
gathered_audio_features_mlp[rank] = audio_features_mlp
gathered_text_features_mlp[rank] = text_features_mlp
all_audio_features = torch.cat(gathered_audio_features, dim=0)
all_text_features = torch.cat(gathered_text_features, dim=0)
if mlp_loss:
all_audio_features_mlp = torch.cat(gathered_audio_features_mlp, dim=0)
all_text_features_mlp = torch.cat(gathered_text_features_mlp, dim=0)
if mlp_loss:
return all_audio_features, all_text_features, all_audio_features_mlp, all_text_features_mlp
else:
return all_audio_features, all_text_features
class ClipLoss(nn.Module):
def __init__(
self,
local_loss=False,
gather_with_grad=False,
cache_labels=False,
rank=0,
world_size=1,
use_horovod=False,
mlp_loss=False,
weight_loss_kappa=0,
):
super().__init__()
self.local_loss = local_loss
self.gather_with_grad = gather_with_grad
self.cache_labels = cache_labels
self.rank = rank
self.world_size = world_size
self.use_horovod = use_horovod
self.mlp_loss = mlp_loss
self.weighted_loss = bool(weight_loss_kappa!=0)
self.weight_loss_kappa = weight_loss_kappa
# cache state
self.prev_num_logits = 0
self.labels = {}
def forward(self, audio_features, text_features, logit_scale_a, logit_scale_t=None, audio_features_mlp=None, text_features_mlp=None):
device = audio_features.device
if self.mlp_loss:
if self.world_size > 1:
all_audio_features, all_text_features, all_audio_features_mlp, all_text_features_mlp = gather_features(
audio_features=audio_features,text_features=text_features,
audio_features_mlp=audio_features_mlp,text_features_mlp=text_features_mlp,
local_loss=self.local_loss,gather_with_grad=self.gather_with_grad,
rank=self.rank,world_size=self.world_size,use_horovod=self.use_horovod,
mlp_loss=self.mlp_loss
)
if self.local_loss:
a_logits_per_audio = logit_scale_a * audio_features @ all_text_features_mlp.T
a_logits_per_text = logit_scale_a * text_features_mlp @ all_audio_features.T
t_logits_per_audio = logit_scale_t * audio_features_mlp @ all_text_features.T
t_logits_per_text = logit_scale_t * text_features @ all_audio_features_mlp.T
else:
a_logits_per_audio = logit_scale_a * all_audio_features @ all_text_features_mlp.T
a_logits_per_text = a_logits_per_audio.T
t_logits_per_audio = logit_scale_t * all_audio_features_mlp @ all_text_features.T
t_logits_per_text = t_logits_per_audio.T
else:
a_logits_per_audio = logit_scale_a * audio_features @ text_features_mlp.T
a_logits_per_text = logit_scale_a * text_features_mlp @ audio_features.T
t_logits_per_audio = logit_scale_t * audio_features_mlp @ text_features.T
t_logits_per_text = logit_scale_t * text_features @ audio_features_mlp.T
            # calculate the ground-truth labels and cache them if enabled
num_logits = a_logits_per_audio.shape[0]
if self.prev_num_logits != num_logits or device not in self.labels:
labels = torch.arange(num_logits, device=device, dtype=torch.long)
if self.world_size > 1 and self.local_loss:
labels = labels + num_logits * self.rank
if self.cache_labels:
self.labels[device] = labels
self.prev_num_logits = num_logits
else:
labels = self.labels[device]
if not self.weighted_loss:
total_loss = (
F.cross_entropy(a_logits_per_audio, labels) +
F.cross_entropy(a_logits_per_text, labels) +
F.cross_entropy(t_logits_per_audio, labels) +
F.cross_entropy(t_logits_per_text, labels)
) / 4
else:
audio_weight = (audio_features@audio_features.T).detach()
audio_weight = (torch.exp(torch.sum(audio_weight, axis=1)/(self.weight_loss_kappa*len(audio_weight)))).detach()
text_weight = (text_features@text_features.T).detach()
text_weight = (torch.exp(torch.sum(text_weight, axis=1)/(self.weight_loss_kappa*len(text_features)))).detach()
total_loss = (
F.cross_entropy(a_logits_per_audio, labels, weight=audio_weight) +
F.cross_entropy(a_logits_per_text, labels, weight=audio_weight) +
F.cross_entropy(t_logits_per_audio, labels, weight=text_weight) +
F.cross_entropy(t_logits_per_text, labels, weight=text_weight)
) / 4
else:
if self.world_size > 1:
all_audio_features, all_text_features = gather_features(
audio_features=audio_features,text_features=text_features,
local_loss=self.local_loss,gather_with_grad=self.gather_with_grad,
rank=self.rank,world_size=self.world_size,use_horovod=self.use_horovod,
mlp_loss=self.mlp_loss
)
if self.local_loss:
logits_per_audio = logit_scale_a * audio_features @ all_text_features.T
logits_per_text = logit_scale_a * text_features @ all_audio_features.T
else:
logits_per_audio = logit_scale_a * all_audio_features @ all_text_features.T
logits_per_text = logits_per_audio.T
else:
logits_per_audio = logit_scale_a * audio_features @ text_features.T
logits_per_text = logit_scale_a * text_features @ audio_features.T
            # calculate the ground-truth labels and cache them if enabled
num_logits = logits_per_audio.shape[0]
if self.prev_num_logits != num_logits or device not in self.labels:
labels = torch.arange(num_logits, device=device, dtype=torch.long)
if self.world_size > 1 and self.local_loss:
labels = labels + num_logits * self.rank
if self.cache_labels:
self.labels[device] = labels
self.prev_num_logits = num_logits
else:
labels = self.labels[device]
if not self.weighted_loss:
total_loss = (
F.cross_entropy(logits_per_audio, labels) +
F.cross_entropy(logits_per_text, labels)
) / 2
else:
audio_weight = (all_audio_features@all_audio_features.T).detach()
audio_weight = (torch.exp(torch.sum(audio_weight, axis=1)/(self.weight_loss_kappa*len(all_audio_features)))).detach()
text_weight = (all_text_features@all_text_features.T).detach()
text_weight = (torch.exp(torch.sum(text_weight, axis=1)/(self.weight_loss_kappa*len(all_text_features)))).detach()
total_loss = (
F.cross_entropy(logits_per_audio, labels, weight=text_weight) +
F.cross_entropy(logits_per_text, labels, weight=audio_weight)
) / 2
return total_loss
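# Illustrative smoke test, not from the original repo: run ClipLoss on random
# single-process features. The batch size, embedding width and logit scale are
# placeholders; during training they come from the CLAP model and its config.
def _clip_loss_demo(batch_size=4, dim=512):
    loss_fn = ClipLoss(local_loss=False, world_size=1)
    audio = F.normalize(torch.randn(batch_size, dim), dim=-1)
    text = F.normalize(torch.randn(batch_size, dim), dim=-1)
    logit_scale = torch.tensor(1 / 0.07)
    return loss_fn(audio, text, logit_scale)  # scalar contrastive loss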
def lp_gather_features(
pred,
target,
world_size=1,
use_horovod=False
):
if use_horovod:
assert hvd is not None, 'Please install horovod'
with torch.no_grad():
all_preds = hvd.allgather(pred)
            all_targets = hvd.allgather(target)
else:
gathered_preds = [torch.zeros_like(pred) for _ in range(world_size)]
gathered_targets = [torch.zeros_like(target) for _ in range(world_size)]
dist.all_gather(gathered_preds, pred)
dist.all_gather(gathered_targets, target)
all_preds = torch.cat(gathered_preds, dim=0)
all_targets = torch.cat(gathered_targets, dim=0)
return all_preds, all_targets
def get_map(pred, target):
pred = torch.sigmoid(pred).numpy()
target = target.numpy()
return np.mean(average_precision_score(target, pred, average=None))
def get_acc(pred, target):
pred = torch.argmax(pred,1).numpy()
target = torch.argmax(target,1).numpy()
return accuracy_score(target, pred)
def get_mauc(pred, target):
pred = torch.sigmoid(pred).numpy()
target = target.numpy()
return np.mean(roc_auc_score(target, pred, average=None))
class LPMetrics(object):
def __init__(self, metric_names = ['map','acc','mauc']):
self.metrics = []
for name in metric_names:
self.metrics.append(self.get_metric(name))
self.metric_names = metric_names
def get_metric(self,name):
if name == 'map':
return get_map
elif name == 'acc':
return get_acc
elif name == 'mauc':
return get_mauc
else:
            raise ValueError('the metric must be one of [map, acc, mauc]')
def evaluate_mertics(self, pred, target):
metric_dict = {}
for i in range(len(self.metric_names)):
metric_dict[self.metric_names[i]] = self.metrics[i](pred, target)
return metric_dict
def calc_celoss(pred, target):
target = torch.argmax(target, 1).long()
return nn.CrossEntropyLoss()(pred, target)
class LPLoss(nn.Module):
def __init__(self, loss_name):
super().__init__()
if loss_name == 'bce':
self.loss_func = nn.BCEWithLogitsLoss()
elif loss_name == 'ce':
self.loss_func = calc_celoss
elif loss_name == 'mse':
self.loss_func = nn.MSELoss()
else:
            raise ValueError('the loss function must be one of [bce, ce, mse]')
def forward(self, pred, target):
loss = self.loss_func(pred, target)
return loss
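if __name__ == "__main__":
    # Small sketch of the linear-probe helpers above on random single-label data.
    # The 10-class setup is a placeholder; real probes use the dataset's classes.
    logits = torch.randn(8, 10)
    onehot = F.one_hot(torch.randint(0, 10, (8,)), num_classes=10).float()
    print(LPLoss('ce')(logits, onehot))  # cross-entropy against one-hot targets
    # method name spelled as defined in LPMetrics above
    print(LPMetrics(['acc']).evaluate_mertics(logits, onehot))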
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/loss.py |
""" OpenAI pretrained model functions
Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
import os
import warnings
from typing import Union, List
import torch
from .model import build_model_from_openai_state_dict
from .pretrained import get_pretrained_url, list_pretrained_tag_models, download_pretrained
__all__ = ["list_openai_models", "load_openai_model"]
def list_openai_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list_pretrained_tag_models('openai')
def load_openai_model(
name: str,
model_cfg,
device: Union[str, torch.device] = "cuda" if torch.cuda.is_available() else "cpu",
jit=True,
cache_dir=os.path.expanduser("~/.cache/clip"),
enable_fusion: bool = False,
fusion_type: str = 'None'
):
"""Load a CLIP model, preserve its text pretrained part, and set in the CLAP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
    Returns
    -------
    model : torch.nn.Module
        The CLAP model, with its text branch initialized from the CLIP checkpoint
"""
if get_pretrained_url(name, 'openai'):
model_path = download_pretrained(get_pretrained_url(name, 'openai'), root=cache_dir)
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
try:
model = build_model_from_openai_state_dict(state_dict or model.state_dict(), model_cfg, enable_fusion, fusion_type).to(device)
except KeyError:
sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
model = build_model_from_openai_state_dict(sd, model_cfg, enable_fusion, fusion_type).to(device)
if str(device) == "cpu":
model.float()
return model
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_audio)
patch_device(model.encode_text)
# patch dtype to float32 on CPU
if str(device) == "cpu":
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_audio)
patch_float(model.encode_text)
model.float()
model.audio_branch.audio_length = model.audio_cfg.audio_length
return model
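if __name__ == "__main__":
    # Illustrative only: list the OpenAI CLIP tags this package knows about.
    # Actually loading one also needs a CLAP model_cfg dict (audio_cfg, text_cfg,
    # embed_dim), which normally comes from the bundled model config files.
    print(list_openai_models())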
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/openai.py |
import numpy as np
import torch
from torch import nn as nn
from torchvision.ops.misc import FrozenBatchNorm2d
import logging
import h5py
from tqdm import tqdm
import random
import json
import os
import pathlib
# TODO: (yusong) this is not a good place to store this information and it does not scale; fix later.
dataset_split = {
"audiocaps": ["train", "valid", "test"],
"audioset": ["balanced_train", "unbalanced_train", "eval"],
"BBCSoundEffects": ["train", "test"],
"Clotho": ["train", "test", "valid"],
"free_to_use_sounds": ["train", "test"],
"paramount_motion": ["train", "test"],
"sonniss_game_effects": ["train", "test"],
"wesoundeffects": ["train", "test"],
"MACS": ["train", "test"],
"freesound": ["train", "test"],
"FSD50K": ["train", "test", "valid"],
"fsd50k_class_label": ["train", "test", "valid"],
"esc50": ["train", "test"],
"audiostock": ["train", "test"],
"freesound_no_overlap_noesc50": ["train", "test"],
"epidemic_sound_effects": ["train", "test"],
"VGGSound": ["train", "test"],
"urbansound8k_class_label": ["train", "test"],
"audioset_t5": ["balanced_train", "unbalanced_train", "eval"],
"epidemic_sound_effects_t5": ["train", "test"],
"WavText5K": ["train", "test"],
"esc50_no_overlap": ["train", "test"],
"usd8k_no_overlap": ["train", "test"],
"fsd50k_200_class_label": ["train", "test", "valid"]
}
def freeze_batch_norm_2d(module, module_match={}, name=""):
"""
Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
returned. Otherwise, the module is walked recursively and submodules are converted in place.
Args:
module (torch.nn.Module): Any PyTorch module.
module_match (dict): Dictionary of full module names to freeze (all if empty)
name (str): Full module name (prefix)
Returns:
torch.nn.Module: Resulting module
Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
"""
res = module
is_match = True
if module_match:
is_match = name in module_match
if is_match and isinstance(
module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)
):
res = FrozenBatchNorm2d(module.num_features)
res.num_features = module.num_features
res.affine = module.affine
if module.affine:
res.weight.data = module.weight.data.clone().detach()
res.bias.data = module.bias.data.clone().detach()
res.running_mean.data = module.running_mean.data
res.running_var.data = module.running_var.data
res.eps = module.eps
else:
for child_name, child in module.named_children():
full_child_name = ".".join([name, child_name]) if name else child_name
new_child = freeze_batch_norm_2d(child, module_match, full_child_name)
if new_child is not child:
res.add_module(child_name, new_child)
return res
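# Illustrative sketch, not from the original file: freeze the BatchNorm layers
# of a toy two-layer network. The architecture is a placeholder.
def _freeze_bn_demo():
    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    frozen = freeze_batch_norm_2d(net)
    return type(frozen[1]).__name__  # 'FrozenBatchNorm2d'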
def exist(dataset_name, dataset_type):
"""
Check if dataset exists
"""
if dataset_type in dataset_split[dataset_name]:
return True
else:
return False
def get_tar_path_from_dataset_name(
dataset_names,
dataset_types,
islocal,
dataset_path,
proportion=1,
full_dataset=None
):
"""
Get tar path from dataset name and type
"""
output = []
for n in dataset_names:
if full_dataset is not None and n in full_dataset:
current_dataset_types = dataset_split[n]
else:
current_dataset_types = dataset_types
for s in current_dataset_types:
tmp = []
if islocal:
sizefilepath_ = f"{dataset_path}/{n}/{s}/sizes.json"
if not os.path.exists(sizefilepath_):
sizefilepath_ = f"./json_files/{n}/{s}/sizes.json"
else:
sizefilepath_ = f"./json_files/{n}/{s}/sizes.json"
if not os.path.exists(sizefilepath_):
continue
sizes = json.load(open(sizefilepath_, "r"))
for k in sizes.keys():
if islocal:
tmp.append(f"{dataset_path}/{n}/{s}/{k}")
else:
tmp.append(
f"pipe:aws s3 --cli-connect-timeout 0 cp s3://s-laion-audio/webdataset_tar/{n}/{s}/{k} -"
)
if proportion != 1:
tmp = random.sample(tmp, int(proportion * len(tmp)))
output.append(tmp)
return sum(output, [])
def get_tar_path_from_txts(txt_path, islocal, proportion=1):
"""
Get tar path from txt path
"""
if isinstance(txt_path, (list, tuple)):
return sum(
[
get_tar_path_from_txts(
txt_path[i], islocal=islocal, proportion=proportion
)
for i in range(len(txt_path))
],
[],
)
if isinstance(txt_path, str):
with open(txt_path) as f:
lines = f.readlines()
if islocal:
lines = [
lines[i]
.split("\n")[0]
.replace("pipe:aws s3 cp s3://s-laion-audio/", "/mnt/audio_clip/")
for i in range(len(lines))
]
else:
lines = [
lines[i].split("\n")[0].replace(".tar", ".tar -")
for i in range(len(lines))
]
if proportion != 1:
print("Sampling tars with proportion of {}".format(proportion))
lines = random.sample(lines, int(proportion * len(lines)))
return lines
def get_mix_lambda(mixup_alpha, batch_size):
mixup_lambdas = [
np.random.beta(mixup_alpha, mixup_alpha, 1)[0] for _ in range(batch_size)
]
return np.array(mixup_lambdas).astype(np.float32)
def do_mixup(x, mixup_lambda):
"""
Args:
x: (batch_size , ...)
mixup_lambda: (batch_size,)
Returns:
out: (batch_size, ...)
"""
out = (
x.transpose(0, -1) * mixup_lambda
+ torch.flip(x, dims=[0]).transpose(0, -1) * (1 - mixup_lambda)
).transpose(0, -1)
return out
def interpolate(x, ratio):
"""Interpolate data in time domain. This is used to compensate the
resolution reduction in downsampling of a CNN.
Args:
x: (batch_size, time_steps, classes_num)
ratio: int, ratio to interpolate
Returns:
upsampled: (batch_size, time_steps * ratio, classes_num)
"""
(batch_size, time_steps, classes_num) = x.shape
upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1)
upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num)
return upsampled
def pad_framewise_output(framewise_output, frames_num):
"""Pad framewise_output to the same length as input frames. The pad value
is the same as the value of the last frame.
Args:
framewise_output: (batch_size, frames_num, classes_num)
frames_num: int, number of frames to pad
Outputs:
output: (batch_size, frames_num, classes_num)
"""
pad = framewise_output[:, -1:, :].repeat(
1, frames_num - framewise_output.shape[1], 1
)
"""tensor for padding"""
    output = torch.cat((framewise_output, pad), dim=1)
    """(batch_size, frames_num, classes_num)"""
    return output
def process_ipc(index_path, classes_num, filename):
# load data
logging.info("Load Data...............")
ipc = [[] for _ in range(classes_num)]
with h5py.File(index_path, "r") as f:
for i in tqdm(range(len(f["target"]))):
t_class = np.where(f["target"][i])[0]
for t in t_class:
ipc[t].append(i)
print(ipc)
np.save(filename, ipc)
logging.info("Load Data Succeed...............")
def save_to_dict(s, o_=None):
    # avoid a shared mutable default dict across calls
    if o_ is None:
        o_ = {}
    sp = s.split(": ")
    o_.update({sp[0]: float(sp[1])})
    return o_
def get_data_from_log(txt_path):
"""
Output dictionary from out.txt log file
"""
with open(txt_path) as f:
lines = f.readlines()
val_data = {}
train_data = {}
train_losses = []
train_losses_epoch = []
for i in range(len(lines)):
if "| INFO |" in lines[i]:
if "Eval Epoch" in lines[i]:
if "val_loss" in lines[i]:
# float(regex.sub("", lines[310].split(" ")[-1]).replace(" ", ""))
line = lines[i].split("Eval Epoch: ")[-1]
num_epoch = int(line.split(" ")[0].split(" ")[0])
d = {
line.split(" ")[0]
.split(" ")[1]
.replace(":", ""): float(line.split(" ")[0].split(" ")[-1])
}
for i in range(1, len(line.split(" "))):
d = save_to_dict(line.split(" ")[i], d)
val_data[num_epoch] = d
elif "Train Epoch" in lines[i]:
num_epoch = int(lines[i].split("Train Epoch: ")[1][0])
loss = float(lines[i].split("Loss: ")[-1].split(" (")[0])
train_losses.append(loss)
train_losses_epoch.append(num_epoch)
for i in range(len(train_losses)):
train_data[i] = {
"num_epoch": train_losses_epoch[i],
"train_loss": train_losses[i],
}
return train_data, val_data
def save_p(obj, filename):
import pickle
try:
from deepdiff import DeepDiff
    except ImportError:
os.system("pip install deepdiff")
from deepdiff import DeepDiff
with open(filename, "wb") as file:
pickle.dump(obj, file, protocol=pickle.HIGHEST_PROTOCOL) # highest protocol
with open(filename, "rb") as file:
z = pickle.load(file)
assert (
DeepDiff(obj, z, ignore_string_case=True) == {}
), "there is something wrong with the saving process"
return
def load_p(filename):
import pickle
with open(filename, "rb") as file:
z = pickle.load(file)
return z
def save_json(data, name="data.json"):
import json
with open(name, 'w') as fp:
json.dump(data, fp)
return
def load_json(name):
import json
with open(name, 'r') as fp:
data = json.load(fp)
return data
from multiprocessing import Process, Manager, Value, Array
from ctypes import c_wchar
def load_class_label(path):
# https://stackoverflow.com/questions/48004243/how-to-share-large-read-only-dictionary-list-across-processes-in-multiprocessing
# https://stackoverflow.com/questions/45693949/storing-strings-in-a-multiprocessing-sharedctypes-array
out = None
if path is not None:
if pathlib.Path(path).suffix in [".pkl", ".pickle"]:
out = load_p(path)
elif pathlib.Path(path).suffix in [".json", ".txt"]:
out = load_json(path)
elif pathlib.Path(path).suffix in [".npy", ".npz"]:
out = np.load(path)
elif pathlib.Path(path).suffix in [".csv"]:
import pandas as pd
out = pd.read_csv(path)
return out
# if out is None:
# return None
# else:
# key = Array(c_wchar, '\n'.join(list(out.keys())), lock=False)
# val = Array('i', out.values(), lock=False)
# return (key, val)
from torch import optim
def get_optimizer(params, lr, betas, eps, momentum, optimizer_name):
if optimizer_name.lower() == "adamw":
optimizer = optim.AdamW(
params, lr=lr, betas=betas, eps=eps
)
elif optimizer_name.lower() == "sgd":
optimizer = optim.SGD(
params, lr=lr, momentum=momentum
)
elif optimizer_name.lower() == "adam":
optimizer = optim.Adam(
params, lr=lr, betas=betas, eps=eps
)
else:
raise ValueError("optimizer name is not correct")
return optimizer
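if __name__ == "__main__":
    # Minimal sketch of the mixup helpers above on a random batch of waveforms.
    # The batch size, waveform length and alpha value are placeholders.
    batch = torch.randn(4, 16000)
    lam = torch.from_numpy(get_mix_lambda(0.5, batch_size=4))
    print(do_mixup(batch, lam).shape)  # torch.Size([4, 16000])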
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/utils.py |
from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \
CenterCrop
def _convert_to_rgb(image):
return image.convert('RGB')
def image_transform(
image_size: int,
is_train: bool,
mean=(0.48145466, 0.4578275, 0.40821073),
std=(0.26862954, 0.26130258, 0.27577711)
):
normalize = Normalize(mean=mean, std=std)
if is_train:
return Compose([
RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC),
_convert_to_rgb,
ToTensor(),
normalize,
])
else:
return Compose([
Resize(image_size, interpolation=InterpolationMode.BICUBIC),
CenterCrop(image_size),
_convert_to_rgb,
ToTensor(),
normalize,
])
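if __name__ == "__main__":
    # Illustrative usage, not in the original file: build the eval-time pipeline
    # and run a blank PIL image through it. The 224 resolution is a placeholder.
    import numpy as np
    from PIL import Image
    preprocess = image_transform(224, is_train=False)
    dummy = Image.fromarray(np.zeros((256, 256, 3), dtype=np.uint8))
    print(preprocess(dummy).shape)  # torch.Size([3, 224, 224])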
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/transform.py |
""" timm model adapter
Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model.
"""
from collections import OrderedDict
import torch.nn as nn
try:
import timm
from timm.models.layers import Mlp, to_2tuple
from timm.models.layers.attention_pool2d import RotAttentionPool2d
from timm.models.layers.attention_pool2d import AttentionPool2d as AbsAttentionPool2d
except ImportError as e:
timm = None
from .utils import freeze_batch_norm_2d
class TimmModel(nn.Module):
""" timm model adapter
# FIXME this adapter is a work in progress, may change in ways that break weight compat
"""
def __init__(
self,
model_name,
embed_dim,
image_size=224,
pool='avg',
proj='linear',
drop=0.,
pretrained=False):
super().__init__()
if timm is None:
raise RuntimeError("Please `pip install timm` to use timm models.")
self.image_size = to_2tuple(image_size)
self.trunk = timm.create_model(model_name, pretrained=pretrained)
feat_size = self.trunk.default_cfg.get('pool_size', None)
feature_ndim = 1 if not feat_size else 2
if pool in ('abs_attn', 'rot_attn'):
assert feature_ndim == 2
# if attn pooling used, remove both classifier and default pool
self.trunk.reset_classifier(0, global_pool='')
else:
# reset global pool if pool config set, otherwise leave as network default
reset_kwargs = dict(global_pool=pool) if pool else {}
self.trunk.reset_classifier(0, **reset_kwargs)
prev_chs = self.trunk.num_features
head_layers = OrderedDict()
if pool == 'abs_attn':
head_layers['pool'] = AbsAttentionPool2d(prev_chs, feat_size=feat_size, out_features=embed_dim)
prev_chs = embed_dim
elif pool == 'rot_attn':
head_layers['pool'] = RotAttentionPool2d(prev_chs, out_features=embed_dim)
prev_chs = embed_dim
else:
assert proj, 'projection layer needed if non-attention pooling is used.'
# NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used
if proj == 'linear':
head_layers['drop'] = nn.Dropout(drop)
head_layers['proj'] = nn.Linear(prev_chs, embed_dim)
elif proj == 'mlp':
head_layers['mlp'] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=drop)
self.head = nn.Sequential(head_layers)
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
""" lock modules
Args:
unlocked_groups (int): leave last n layer groups unlocked (default: 0)
"""
if not unlocked_groups:
# lock full model
for param in self.trunk.parameters():
param.requires_grad = False
if freeze_bn_stats:
freeze_batch_norm_2d(self.trunk)
else:
# NOTE: partial freeze requires latest timm (master) branch and is subject to change
try:
# FIXME import here until API stable and in an official release
from timm.models.helpers import group_parameters, group_modules
except ImportError:
raise RuntimeError(
'Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`')
matcher = self.trunk.group_matcher()
gparams = group_parameters(self.trunk, matcher)
max_layer_id = max(gparams.keys())
max_layer_id = max_layer_id - unlocked_groups
for group_idx in range(max_layer_id + 1):
group = gparams[group_idx]
for param in group:
self.trunk.get_parameter(param).requires_grad = False
if freeze_bn_stats:
gmodules = group_modules(self.trunk, matcher, reverse=True)
gmodules = {k for k, v in gmodules.items() if v <= max_layer_id}
freeze_batch_norm_2d(self.trunk, gmodules)
def forward(self, x):
x = self.trunk(x)
x = self.head(x)
return x
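if __name__ == "__main__":
    # Sketch only: wrap a small timm backbone as a 512-d vision tower and push a
    # dummy batch through it. The 'resnet18' name and sizes are illustrative.
    import torch
    tower = TimmModel('resnet18', embed_dim=512, image_size=224, pretrained=False)
    print(tower(torch.randn(2, 3, 224, 224)).shape)  # torch.Size([2, 512])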
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/timm_model.py |
from transformers import BertTokenizer, BertModel
# Keep a separate tokenizer/model pair per checkpoint so the later assignments
# below do not overwrite the ones used by the earlier helper functions.
bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
bert_model = BertModel.from_pretrained("bert-base-uncased")
text = "Replace me by any text you'd like."
def bert_embeddings(text):
    encoded_input = bert_tokenizer(text, return_tensors='pt')
    output = bert_model(**encoded_input)
    return output
from transformers import RobertaTokenizer, RobertaModel
roberta_tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
roberta_model = RobertaModel.from_pretrained('roberta-base')
def Roberta_embeddings(text):
    encoded_input = roberta_tokenizer(text, return_tensors='pt')
    output = roberta_model(**encoded_input)
    return output
from transformers import BartTokenizer, BartModel
bart_tokenizer = BartTokenizer.from_pretrained('facebook/bart-base')
bart_model = BartModel.from_pretrained('facebook/bart-base')
def bart_embeddings(text):
    encoded_input = bart_tokenizer(text, return_tensors='pt')
    output = bart_model(**encoded_input)
return output | EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/open_clap/bert.py |
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from transformers import AutoModel
from .audio import get_audio_encoder
class Projection(nn.Module):
def __init__(self, d_in: int, d_out: int, p: float=0.5) -> None:
super().__init__()
self.linear1 = nn.Linear(d_in, d_out, bias=False)
self.linear2 = nn.Linear(d_out, d_out, bias=False)
self.layer_norm = nn.LayerNorm(d_out)
self.drop = nn.Dropout(p)
def forward(self, x: torch.Tensor) -> torch.Tensor:
embed1 = self.linear1(x)
embed2 = self.drop(self.linear2(F.gelu(embed1)))
embeds = self.layer_norm(embed1 + embed2)
return embeds
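# Illustrative sketch, not part of the original model code: the Projection head
# above is a residual two-layer MLP. The 768/1024 widths are placeholders.
def _projection_demo():
    proj = Projection(d_in=768, d_out=1024)
    return proj(torch.randn(2, 768)).shape  # torch.Size([2, 1024])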
class AudioEncoder(nn.Module):
def __init__(self, audioenc_name:str, d_in: int, d_out: int, sample_rate: int, window_size: int,
hop_size: int, mel_bins: int, fmin: int, fmax: int, classes_num: int) -> None:
super().__init__()
audio_encoder = get_audio_encoder(audioenc_name)
self.base = audio_encoder(
sample_rate, window_size,
hop_size, mel_bins, fmin, fmax,
classes_num, d_in)
self.projection = Projection(d_in, d_out)
def forward(self, x):
out_dict = self.base(x)
audio_features, audio_classification_output = out_dict['embedding'], out_dict['clipwise_output']
projected_vec = self.projection(audio_features)
return projected_vec, audio_classification_output
class TextEncoder(nn.Module):
def __init__(self, d_out: int, text_model: str, transformer_embed_dim: int) -> None:
super().__init__()
self.base = AutoModel.from_pretrained(text_model)
self.projection = Projection(transformer_embed_dim, d_out)
def forward(self, x):
out = self.base(**x)[0]
out = out[:, 0, :] # get CLS token output
projected_vec = self.projection(out)
return projected_vec
class CLAP(nn.Module):
def __init__(self,
# audio
audioenc_name: str,
sample_rate: int,
window_size: int,
hop_size: int,
mel_bins: int,
fmin: int,
fmax: int,
classes_num: int,
out_emb: int,
# text
text_model: str,
transformer_embed_dim: int,
# common
d_proj: int,
):
super().__init__()
self.audio_encoder = AudioEncoder(
audioenc_name, out_emb, d_proj,
sample_rate, window_size, hop_size, mel_bins, fmin, fmax, classes_num)
self.caption_encoder = TextEncoder(
d_proj, text_model, transformer_embed_dim
)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
def forward(self, audio, text):
audio_embed, _ = self.audio_encoder(audio)
caption_embed = self.caption_encoder(text)
return caption_embed, audio_embed, self.logit_scale.exp() | EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/CLAP/clap.py |
from . import clap
from . import audio
from . import utils | EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/CLAP/__init__.py |
import argparse
import yaml
import sys
def read_config_as_args(config_path,args=None,is_config_str=False):
return_dict = {}
if config_path is not None:
if is_config_str:
yml_config = yaml.load(config_path, Loader=yaml.FullLoader)
else:
with open(config_path, "r") as f:
yml_config = yaml.load(f, Loader=yaml.FullLoader)
        if args is not None:
for k, v in yml_config.items():
if k in args.__dict__:
args.__dict__[k] = v
else:
sys.stderr.write("Ignored unknown parameter {} in yaml.\n".format(k))
else:
for k, v in yml_config.items():
return_dict[k] = v
    # return the updated args (as a fresh Namespace) if they were given, otherwise the parsed yaml
    args = args.__dict__ if args is not None else return_dict
    return argparse.Namespace(**args)
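if __name__ == "__main__":
    # Illustrative only: parse an inline YAML string (keys made up) into a Namespace.
    ns = read_config_as_args("sampling_rate: 44100\nduration: 7\n", is_config_str=True)
    print(ns.sampling_rate, ns.duration)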
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/CLAP/utils.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
def get_audio_encoder(name: str):
if name == "Cnn14":
return Cnn14
else:
raise Exception('The audio encoder name {} is incorrect or not supported'.format(name))
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.conv2 = nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception('Incorrect argument!')
return x
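# Illustrative sketch, not part of the original file: a ConvBlock keeps the
# spatial size through its 3x3 convolutions and halves it with the default
# (2, 2) average pooling. Shapes below are placeholders.
def _conv_block_demo():
    block = ConvBlock(in_channels=1, out_channels=64)
    x = torch.randn(2, 1, 100, 64)  # (batch, channel, time_steps, mel_bins)
    return block(x, pool_size=(2, 2), pool_type='avg').shape  # torch.Size([2, 64, 50, 32])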
class ConvBlock5x5(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock5x5, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(5, 5), stride=(1, 1),
padding=(2, 2), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception('Incorrect argument!')
return x
class AttBlock(nn.Module):
def __init__(self, n_in, n_out, activation='linear', temperature=1.):
super(AttBlock, self).__init__()
self.activation = activation
self.temperature = temperature
self.att = nn.Conv1d(in_channels=n_in, out_channels=n_out, kernel_size=1, stride=1, padding=0, bias=True)
self.cla = nn.Conv1d(in_channels=n_in, out_channels=n_out, kernel_size=1, stride=1, padding=0, bias=True)
self.bn_att = nn.BatchNorm1d(n_out)
def forward(self, x):
# x: (n_samples, n_in, n_time)
norm_att = torch.softmax(torch.clamp(self.att(x), -10, 10), dim=-1)
cla = self.nonlinear_transform(self.cla(x))
x = torch.sum(norm_att * cla, dim=2)
return x, norm_att, cla
def nonlinear_transform(self, x):
if self.activation == 'linear':
return x
elif self.activation == 'sigmoid':
return torch.sigmoid(x)
class Cnn14(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num, out_emb):
super(Cnn14, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
self.bn0 = nn.BatchNorm2d(64)
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
# out_emb is 2048 for best Cnn14
self.fc1 = nn.Linear(2048, out_emb, bias=True)
self.fc_audioset = nn.Linear(out_emb, classes_num, bias=True)
def forward(self, input, mixup_lambda=None):
"""
Input: (batch_size, data_length)
"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block6(x, pool_size=(1, 1), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = torch.mean(x, dim=3)
(x1, _) = torch.max(x, dim=2)
x2 = torch.mean(x, dim=2)
x = x1 + x2
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu_(self.fc1(x))
embedding = F.dropout(x, p=0.5, training=self.training)
clipwise_output = torch.sigmoid(self.fc_audioset(x))
output_dict = {'clipwise_output': clipwise_output, 'embedding': embedding}
return output_dict | EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/CLAP/audio.py |
import random
import torchaudio
from torch._six import string_classes
import collections
import re
import torch.nn.functional as F
import numpy as np
from transformers import AutoTokenizer
from ldm.modules.encoders.CLAP.utils import read_config_as_args
from ldm.modules.encoders.CLAP.clap import CLAP
import math
import torchaudio.transforms as T
import os
import torch
from importlib_resources import files
class CLAPWrapper():
"""
    A class for interfacing with the CLAP model.
"""
def __init__(self, model_fp, device):
self.np_str_obj_array_pattern = re.compile(r'[SaUO]')
self.file_path = os.path.realpath(__file__)
self.default_collate_err_msg_format = (
"default_collate: batch must contain tensors, numpy arrays, numbers, "
"dicts or lists; found {}")
self.config_as_str = files('ldm').joinpath('modules/encoders/CLAP/config.yml').read_text()
self.model_fp = model_fp
self.device = device
self.clap, self.tokenizer, self.args = self.load_clap()
def load_clap(self):
r"""Load CLAP model with args from config file"""
args = read_config_as_args(self.config_as_str, is_config_str=True)
if 'bert' in args.text_model:
self.token_keys = ['input_ids', 'token_type_ids', 'attention_mask']
else:
self.token_keys = ['input_ids', 'attention_mask']
clap = CLAP(
audioenc_name=args.audioenc_name,
sample_rate=args.sampling_rate,
window_size=args.window_size,
hop_size=args.hop_size,
mel_bins=args.mel_bins,
fmin=args.fmin,
fmax=args.fmax,
classes_num=args.num_classes,
out_emb=args.out_emb,
text_model=args.text_model,
transformer_embed_dim=args.transformer_embed_dim,
d_proj=args.d_proj
)
# Load pretrained weights for model
model_state_dict = torch.load(self.model_fp, map_location=torch.device('cpu'))['model']
clap.load_state_dict(model_state_dict)
clap.eval() # set clap in eval mode
tokenizer = AutoTokenizer.from_pretrained(args.text_model)
clap = clap.to(self.device)
        # Hugging Face tokenizers are plain Python objects (not nn.Modules), so there is nothing to move to the device.
return clap, tokenizer, args
def default_collate(self, batch):
r"""Puts each data field into a tensor with outer dimension batch size"""
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, torch.Tensor):
out = None
if torch.utils.data.get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':
# array of string classes and object
if self.np_str_obj_array_pattern.search(elem.dtype.str) is not None:
raise TypeError(
self.default_collate_err_msg_format.format(elem.dtype))
return self.default_collate([torch.as_tensor(b) for b in batch])
elif elem.shape == (): # scalars
return torch.as_tensor(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(elem, int):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
return batch
elif isinstance(elem, collections.abc.Mapping):
return {key: self.default_collate([d[key] for d in batch]) for key in elem}
elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple
return elem_type(*(self.default_collate(samples) for samples in zip(*batch)))
elif isinstance(elem, collections.abc.Sequence):
# check to make sure that the elements in batch have consistent size
it = iter(batch)
elem_size = len(next(it))
if not all(len(elem) == elem_size for elem in it):
raise RuntimeError(
'each element in list of batch should be of equal size')
transposed = zip(*batch)
return [self.default_collate(samples) for samples in transposed]
raise TypeError(self.default_collate_err_msg_format.format(elem_type))
def load_audio_into_tensor(self, audio_path, audio_duration, resample=False):
r"""Loads audio file and returns raw audio."""
# Randomly sample a segment of audio_duration from the clip or pad to match duration
audio_time_series, sample_rate = torchaudio.load(audio_path)
resample_rate = self.args.sampling_rate
if resample:
resampler = T.Resample(sample_rate, resample_rate)
audio_time_series = resampler(audio_time_series)
audio_time_series = audio_time_series.reshape(-1)
# audio_time_series is shorter than predefined audio duration,
# so audio_time_series is extended
if audio_duration*sample_rate >= audio_time_series.shape[0]:
repeat_factor = int(np.ceil((audio_duration*sample_rate) /
audio_time_series.shape[0]))
# Repeat audio_time_series by repeat_factor to match audio_duration
audio_time_series = audio_time_series.repeat(repeat_factor)
# remove excess part of audio_time_series
audio_time_series = audio_time_series[0:audio_duration*sample_rate]
else:
# audio_time_series is longer than predefined audio duration,
# so audio_time_series is trimmed
start_index = random.randrange(
audio_time_series.shape[0] - audio_duration*sample_rate)
audio_time_series = audio_time_series[start_index:start_index +
audio_duration*sample_rate]
return torch.FloatTensor(audio_time_series)
def preprocess_audio(self, audio_files, resample):
r"""Load list of audio files and return raw audio"""
audio_tensors = []
for audio_file in audio_files:
audio_tensor = self.load_audio_into_tensor(
audio_file, self.args.duration, resample)
audio_tensor = audio_tensor.reshape(1, -1).to(self.device)
audio_tensors.append(audio_tensor)
return self.default_collate(audio_tensors)
def preprocess_text(self, text_queries, text_len=100):
r"""Load list of class labels and return tokenized text"""
device = next(self.clap.parameters()).device
tokenized_texts = []
for ttext in text_queries:
tok = self.tokenizer.encode_plus(
text=ttext, add_special_tokens=True, max_length=text_len, pad_to_max_length=True, return_tensors="pt")
for key in self.token_keys:
tok[key] = tok[key].reshape(-1).to(device)
tokenized_texts.append(tok)
return self.default_collate(tokenized_texts)
def get_text_embeddings(self, class_labels):
r"""Load list of class labels and return text embeddings"""
preprocessed_text = self.preprocess_text(class_labels)
text_embeddings = self._get_text_embeddings(preprocessed_text)
text_embeddings = text_embeddings/torch.norm(text_embeddings, dim=-1, keepdim=True)
return text_embeddings
def get_audio_embeddings(self, audio_files, resample):
r"""Load list of audio files and return a audio embeddings"""
preprocessed_audio = self.preprocess_audio(audio_files, resample)
audio_embeddings = self._get_audio_embeddings(preprocessed_audio)
audio_embeddings = audio_embeddings/torch.norm(audio_embeddings, dim=-1, keepdim=True)
return audio_embeddings
def _get_text_embeddings(self, preprocessed_text):
r"""Load preprocessed text and return text embeddings"""
with torch.no_grad():
text_embeddings = self.clap.caption_encoder(preprocessed_text)
text_embeddings = text_embeddings/torch.norm(text_embeddings, dim=-1, keepdim=True)
return text_embeddings
def _get_audio_embeddings(self, preprocessed_audio):
r"""Load preprocessed audio and return a audio embeddings"""
with torch.no_grad():
preprocessed_audio = preprocessed_audio.reshape(
preprocessed_audio.shape[0], preprocessed_audio.shape[2])
            # [0] is the audio embedding, [1] holds the output class probabilities
audio_embeddings = self.clap.audio_encoder(preprocessed_audio)[0]
audio_embeddings = audio_embeddings/torch.norm(audio_embeddings, dim=-1, keepdim=True)
return audio_embeddings
def compute_similarity(self, audio_embeddings, text_embeddings):
r"""Compute similarity between text and audio embeddings"""
logit_scale = self.clap.logit_scale.exp()
similarity = logit_scale*text_embeddings @ audio_embeddings.T
return similarity.T
def _generic_batch_inference(self, func, *args):
r"""Process audio and/or text per batch"""
input_tmp = args[0]
batch_size = args[-1]
# args[0] has audio_files, args[1] has class_labels
inputs = [args[0], args[1]] if len(args) == 3 else [args[0]]
args0_len = len(args[0])
# compute text_embeddings once for all the audio_files batches
if len(inputs) == 2:
text_embeddings = self.get_text_embeddings(args[1])
inputs = [args[0], args[1], text_embeddings]
dataset_idx = 0
for _ in range(math.ceil(args0_len/batch_size)):
next_batch_idx = dataset_idx + batch_size
# batch size is bigger than available audio/text items
if next_batch_idx >= args0_len:
inputs[0] = input_tmp[dataset_idx:]
return func(*tuple(inputs))
else:
inputs[0] = input_tmp[dataset_idx:next_batch_idx]
yield func(*tuple(inputs))
dataset_idx = next_batch_idx
def get_audio_embeddings_per_batch(self, audio_files, batch_size):
r"""Load preprocessed audio and return a audio embeddings per batch"""
return self._generic_batch_inference(self.get_audio_embeddings, audio_files, batch_size)
def get_text_embeddings_per_batch(self, class_labels, batch_size):
r"""Load preprocessed text and return text embeddings per batch"""
return self._generic_batch_inference(self.get_text_embeddings, class_labels, batch_size)
def classify_audio_files_per_batch(self, audio_files, class_labels, batch_size):
r"""Compute classification probabilities for each audio recording in a batch and each class label"""
return self._generic_batch_inference(self.classify_audio_files, audio_files, class_labels, batch_size)
if __name__ == '__main__':
# Load and initialize CLAP
weights_path = "/home1/huangrongjie/Project/Diffusion/LatentDiffusion/CLAP/CLAP_weights_2022.pth"
    clap_model = CLAPWrapper(weights_path, device='cpu')
y = ["A woman talks nearby as water pours", "Multiple clanging and clanking sounds"]
x = ['/home2/huangjiawei/data/audiocaps/train/Yr1nicOVtvkQ.wav', '/home2/huangjiawei/data/audiocaps/train/YUDGBjjwyaqE.wav']
# Computing text embeddings
text_embeddings = clap_model.get_text_embeddings(y)
# Computing audio embeddings
audio_embeddings = clap_model.get_audio_embeddings(x, resample=True)
similarity = clap_model.compute_similarity(audio_embeddings, text_embeddings)
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/encoders/CLAP/CLAPWrapper.py |
# -*- coding: utf-8 -*-
"""
# --------------------------------------------
# Super-Resolution
# --------------------------------------------
#
# Kai Zhang ([email protected])
# https://github.com/cszn
# From 2019/03--2021/08
# --------------------------------------------
"""
import numpy as np
import cv2
import torch
from functools import partial
import random
from scipy import ndimage
import scipy
import scipy.stats as ss
from scipy.interpolate import interp2d
from scipy.linalg import orth
import albumentations
import ldm.modules.image_degradation.utils_image as util
def modcrop_np(img, sf):
'''
Args:
img: numpy image, WxH or WxHxC
sf: scale factor
Return:
cropped image
'''
w, h = img.shape[:2]
im = np.copy(img)
return im[:w - w % sf, :h - h % sf, ...]
"""
# --------------------------------------------
# anisotropic Gaussian kernels
# --------------------------------------------
"""
def analytic_kernel(k):
"""Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
k_size = k.shape[0]
# Calculate the big kernels size
big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
# Loop over the small kernel to fill the big one
for r in range(k_size):
for c in range(k_size):
big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
# Crop the edges of the big kernel to ignore very small values and increase run time of SR
crop = k_size // 2
cropped_big_k = big_k[crop:-crop, crop:-crop]
# Normalize to 1
return cropped_big_k / cropped_big_k.sum()
def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
""" generate an anisotropic Gaussian kernel
Args:
ksize : e.g., 15, kernel size
theta : [0, pi], rotation angle range
l1 : [0.1,50], scaling of eigenvalues
l2 : [0.1,l1], scaling of eigenvalues
If l1 = l2, will get an isotropic Gaussian kernel.
Returns:
k : kernel
"""
v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
V = np.array([[v[0], v[1]], [v[1], -v[0]]])
D = np.array([[l1, 0], [0, l2]])
Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
return k
def gm_blur_kernel(mean, cov, size=15):
center = size / 2.0 + 0.5
k = np.zeros([size, size])
for y in range(size):
for x in range(size):
cy = y - center + 1
cx = x - center + 1
k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
k = k / np.sum(k)
return k
def shift_pixel(x, sf, upper_left=True):
"""shift pixel for super-resolution with different scale factors
Args:
x: WxHxC or WxH
sf: scale factor
upper_left: shift direction
"""
h, w = x.shape[:2]
shift = (sf - 1) * 0.5
xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
if upper_left:
x1 = xv + shift
y1 = yv + shift
else:
x1 = xv - shift
y1 = yv - shift
x1 = np.clip(x1, 0, w - 1)
y1 = np.clip(y1, 0, h - 1)
if x.ndim == 2:
x = interp2d(xv, yv, x)(x1, y1)
if x.ndim == 3:
for i in range(x.shape[-1]):
x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
return x
def blur(x, k):
'''
x: image, NxcxHxW
k: kernel, Nx1xhxw
'''
n, c = x.shape[:2]
p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
k = k.repeat(1, c, 1, 1)
k = k.view(-1, 1, k.shape[2], k.shape[3])
x = x.view(1, -1, x.shape[2], x.shape[3])
x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
x = x.view(n, c, x.shape[2], x.shape[3])
return x
def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
""""
# modified version of https://github.com/assafshocher/BlindSR_dataset_generator
# Kai Zhang
# min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var
# max_var = 2.5 * sf
"""
# Set random eigen-vals (lambdas) and angle (theta) for COV matrix
lambda_1 = min_var + np.random.rand() * (max_var - min_var)
lambda_2 = min_var + np.random.rand() * (max_var - min_var)
theta = np.random.rand() * np.pi # random theta
noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
# Set COV matrix using Lambdas and Theta
LAMBDA = np.diag([lambda_1, lambda_2])
Q = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
SIGMA = Q @ LAMBDA @ Q.T
INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
# Set expectation position (shifting kernel for aligned image)
MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2)
MU = MU[None, None, :, None]
# Create meshgrid for Gaussian
[X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
Z = np.stack([X, Y], 2)[:, :, :, None]
    # Calculate Gaussian for every pixel of the kernel
ZZ = Z - MU
ZZ_t = ZZ.transpose(0, 1, 3, 2)
raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
# shift the kernel so it will be centered
# raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
# Normalize the kernel and return
# kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
kernel = raw_kernel / np.sum(raw_kernel)
return kernel
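# Hypothetical usage sketch (not part of the original BSRGAN code): samples one random
# shifted Gaussian kernel for a x4 scale factor using the defaults of `gen_kernel`.
def _demo_gen_kernel():
    k = gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]))
    assert k.shape == (15, 15)
    assert abs(k.sum() - 1.0) < 1e-6  # gen_kernel normalizes the kernel before returning it
    return k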
def fspecial_gaussian(hsize, sigma):
hsize = [hsize, hsize]
siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
std = sigma
[x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
arg = -(x * x + y * y) / (2 * std * std)
h = np.exp(arg)
    h[h < np.finfo(float).eps * h.max()] = 0
sumh = h.sum()
if sumh != 0:
h = h / sumh
return h
def fspecial_laplacian(alpha):
alpha = max([0, min([alpha, 1])])
h1 = alpha / (alpha + 1)
h2 = (1 - alpha) / (alpha + 1)
h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
h = np.array(h)
return h
def fspecial(filter_type, *args, **kwargs):
'''
python code from:
https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
'''
if filter_type == 'gaussian':
return fspecial_gaussian(*args, **kwargs)
if filter_type == 'laplacian':
return fspecial_laplacian(*args, **kwargs)
"""
# --------------------------------------------
# degradation models
# --------------------------------------------
"""
def bicubic_degradation(x, sf=3):
'''
Args:
x: HxWxC image, [0, 1]
sf: down-scale factor
Return:
        bicubically downsampled LR image
'''
x = util.imresize_np(x, scale=1 / sf)
return x
def srmd_degradation(x, k, sf=3):
''' blur + bicubic downsampling
Args:
x: HxWxC image, [0, 1]
k: hxw, double
sf: down-scale factor
Return:
downsampled LR image
Reference:
@inproceedings{zhang2018learning,
title={Learning a single convolutional super-resolution network for multiple degradations},
author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
pages={3262--3271},
year={2018}
}
'''
x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror'
x = bicubic_degradation(x, sf=sf)
return x
def dpsr_degradation(x, k, sf=3):
''' bicubic downsampling + blur
Args:
x: HxWxC image, [0, 1]
k: hxw, double
sf: down-scale factor
Return:
downsampled LR image
Reference:
@inproceedings{zhang2019deep,
title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
pages={1671--1681},
year={2019}
}
'''
x = bicubic_degradation(x, sf=sf)
x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
return x
def classical_degradation(x, k, sf=3):
''' blur + downsampling
Args:
x: HxWxC image, [0, 1]/[0, 255]
k: hxw, double
sf: down-scale factor
Return:
downsampled LR image
'''
x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
# x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
st = 0
return x[st::sf, st::sf, ...]
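# Hypothetical usage sketch (not part of the original BSRGAN code): classical degradation
# is "blur with a Gaussian kernel, then keep every sf-th pixel". The image and kernel
# below are synthetic example values.
def _demo_classical_degradation():
    hq = np.random.rand(64, 64, 3).astype(np.float32)  # HxWxC in [0, 1]
    k = fspecial('gaussian', 7, 1.6)                    # 7x7 isotropic Gaussian kernel
    lq = classical_degradation(hq, k, sf=4)
    assert lq.shape == (16, 16, 3)                      # 64 / 4 along H and W
    return lq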
def add_sharpening(img, weight=0.5, radius=50, threshold=10):
"""USM sharpening. borrowed from real-ESRGAN
Input image: I; Blurry image: B.
1. K = I + weight * (I - B)
2. Mask = 1 if abs(I - B) > threshold, else: 0
3. Blur mask:
4. Out = Mask * K + (1 - Mask) * I
Args:
img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
weight (float): Sharp weight. Default: 1.
radius (float): Kernel size of Gaussian blur. Default: 50.
threshold (int):
"""
if radius % 2 == 0:
radius += 1
blur = cv2.GaussianBlur(img, (radius, radius), 0)
residual = img - blur
mask = np.abs(residual) * 255 > threshold
mask = mask.astype('float32')
soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
K = img + weight * residual
K = np.clip(K, 0, 1)
return soft_mask * K + (1 - soft_mask) * img
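# Hypothetical usage sketch (not part of the original BSRGAN code): unsharp masking as
# described in the docstring above, applied to a synthetic float32 image in [0, 1].
def _demo_add_sharpening():
    img = np.random.rand(64, 64, 3).astype(np.float32)
    out = add_sharpening(img, weight=0.5, radius=51, threshold=10)
    assert out.shape == img.shape
    return out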
def add_blur(img, sf=4):
wd2 = 4.0 + sf
wd = 2.0 + 0.2 * sf
if random.random() < 0.5:
l1 = wd2 * random.random()
l2 = wd2 * random.random()
k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
else:
k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random())
img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
return img
def add_resize(img, sf=4):
rnum = np.random.rand()
if rnum > 0.8: # up
sf1 = random.uniform(1, 2)
elif rnum < 0.7: # down
sf1 = random.uniform(0.5 / sf, 1)
else:
sf1 = 1.0
img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
img = np.clip(img, 0.0, 1.0)
return img
# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
# noise_level = random.randint(noise_level1, noise_level2)
# rnum = np.random.rand()
# if rnum > 0.6: # add color Gaussian noise
# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
# elif rnum < 0.4: # add grayscale Gaussian noise
# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
# else: # add noise
# L = noise_level2 / 255.
# D = np.diag(np.random.rand(3))
# U = orth(np.random.rand(3, 3))
# conv = np.dot(np.dot(np.transpose(U), D), U)
# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
# img = np.clip(img, 0.0, 1.0)
# return img
def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
noise_level = random.randint(noise_level1, noise_level2)
rnum = np.random.rand()
if rnum > 0.6: # add color Gaussian noise
img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
elif rnum < 0.4: # add grayscale Gaussian noise
img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
else: # add noise
L = noise_level2 / 255.
D = np.diag(np.random.rand(3))
U = orth(np.random.rand(3, 3))
conv = np.dot(np.dot(np.transpose(U), D), U)
img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
img = np.clip(img, 0.0, 1.0)
return img
def add_speckle_noise(img, noise_level1=2, noise_level2=25):
noise_level = random.randint(noise_level1, noise_level2)
img = np.clip(img, 0.0, 1.0)
rnum = random.random()
if rnum > 0.6:
img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
elif rnum < 0.4:
img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
else:
L = noise_level2 / 255.
D = np.diag(np.random.rand(3))
U = orth(np.random.rand(3, 3))
conv = np.dot(np.dot(np.transpose(U), D), U)
img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
img = np.clip(img, 0.0, 1.0)
return img
def add_Poisson_noise(img):
img = np.clip((img * 255.0).round(), 0, 255) / 255.
vals = 10 ** (2 * random.random() + 2.0) # [2, 4]
if random.random() < 0.5:
img = np.random.poisson(img * vals).astype(np.float32) / vals
else:
img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
img += noise_gray[:, :, np.newaxis]
img = np.clip(img, 0.0, 1.0)
return img
def add_JPEG_noise(img):
quality_factor = random.randint(30, 95)
img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
img = cv2.imdecode(encimg, 1)
img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
return img
def random_crop(lq, hq, sf=4, lq_patchsize=64):
h, w = lq.shape[:2]
rnd_h = random.randint(0, h - lq_patchsize)
rnd_w = random.randint(0, w - lq_patchsize)
lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
return lq, hq
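# Hypothetical usage sketch (not part of the original BSRGAN code): crops an aligned
# LQ/HQ pair; the LQ patch is lq_patchsize x lq_patchsize and the HQ patch is sf times larger.
def _demo_random_crop():
    sf, lq_patchsize = 4, 16
    lq = np.random.rand(32, 32, 3).astype(np.float32)
    hq = np.random.rand(32 * sf, 32 * sf, 3).astype(np.float32)
    lq_p, hq_p = random_crop(lq, hq, sf=sf, lq_patchsize=lq_patchsize)
    assert lq_p.shape == (16, 16, 3) and hq_p.shape == (64, 64, 3)
    return lq_p, hq_p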
def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
"""
This is the degradation model of BSRGAN from the paper
"Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
----------
    img: HxWxC, [0, 1], its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf)
sf: scale factor
isp_model: camera ISP model
Returns
-------
img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
"""
isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
sf_ori = sf
h1, w1 = img.shape[:2]
img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
h, w = img.shape[:2]
if h < lq_patchsize * sf or w < lq_patchsize * sf:
raise ValueError(f'img size ({h1}X{w1}) is too small!')
hq = img.copy()
if sf == 4 and random.random() < scale2_prob: # downsample1
if np.random.rand() < 0.5:
img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
interpolation=random.choice([1, 2, 3]))
else:
img = util.imresize_np(img, 1 / 2, True)
img = np.clip(img, 0.0, 1.0)
sf = 2
shuffle_order = random.sample(range(7), 7)
idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
if idx1 > idx2: # keep downsample3 last
shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
for i in shuffle_order:
if i == 0:
img = add_blur(img, sf=sf)
elif i == 1:
img = add_blur(img, sf=sf)
elif i == 2:
a, b = img.shape[1], img.shape[0]
# downsample2
if random.random() < 0.75:
sf1 = random.uniform(1, 2 * sf)
img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
interpolation=random.choice([1, 2, 3]))
else:
k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
k_shifted = shift_pixel(k, sf)
k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
img = img[0::sf, 0::sf, ...] # nearest downsampling
img = np.clip(img, 0.0, 1.0)
elif i == 3:
# downsample3
img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
img = np.clip(img, 0.0, 1.0)
elif i == 4:
# add Gaussian noise
img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
elif i == 5:
# add JPEG noise
if random.random() < jpeg_prob:
img = add_JPEG_noise(img)
elif i == 6:
# add processed camera sensor noise
if random.random() < isp_prob and isp_model is not None:
with torch.no_grad():
img, hq = isp_model.forward(img.copy(), hq)
# add final JPEG compression noise
img = add_JPEG_noise(img)
# random crop
img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
return img, hq
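# Hypothetical usage sketch (not part of the original BSRGAN code): runs the full BSRGAN
# degradation on a synthetic HQ image. The 320x320 size is an arbitrary example that
# satisfies the minimum-size check (H, W >= lq_patchsize * sf).
def _demo_degradation_bsrgan():
    hq = np.random.rand(320, 320, 3).astype(np.float32)   # HxWxC in [0, 1]
    lq, hq_patch = degradation_bsrgan(hq, sf=4, lq_patchsize=72)
    assert lq.shape == (72, 72, 3)
    assert hq_patch.shape == (72 * 4, 72 * 4, 3)
    return lq, hq_patch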
# todo no isp_model?
def degradation_bsrgan_variant(image, sf=4, isp_model=None):
"""
This is the degradation model of BSRGAN from the paper
"Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
----------
    image: HxWxC input image, uint8 [0, 255]
    sf: scale factor
    isp_model: camera ISP model
    Returns
    -------
    example: dict with key "image" holding the degraded low-quality image (uint8, HxWxC, roughly 1/sf of the input resolution)
"""
image = util.uint2single(image)
isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
sf_ori = sf
h1, w1 = image.shape[:2]
image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
h, w = image.shape[:2]
hq = image.copy()
if sf == 4 and random.random() < scale2_prob: # downsample1
if np.random.rand() < 0.5:
image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
interpolation=random.choice([1, 2, 3]))
else:
image = util.imresize_np(image, 1 / 2, True)
image = np.clip(image, 0.0, 1.0)
sf = 2
shuffle_order = random.sample(range(7), 7)
idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
if idx1 > idx2: # keep downsample3 last
shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
for i in shuffle_order:
if i == 0:
image = add_blur(image, sf=sf)
elif i == 1:
image = add_blur(image, sf=sf)
elif i == 2:
a, b = image.shape[1], image.shape[0]
# downsample2
if random.random() < 0.75:
sf1 = random.uniform(1, 2 * sf)
image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
interpolation=random.choice([1, 2, 3]))
else:
k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
k_shifted = shift_pixel(k, sf)
k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
image = image[0::sf, 0::sf, ...] # nearest downsampling
image = np.clip(image, 0.0, 1.0)
elif i == 3:
# downsample3
image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
image = np.clip(image, 0.0, 1.0)
elif i == 4:
# add Gaussian noise
image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25)
elif i == 5:
# add JPEG noise
if random.random() < jpeg_prob:
image = add_JPEG_noise(image)
# elif i == 6:
# # add processed camera sensor noise
# if random.random() < isp_prob and isp_model is not None:
# with torch.no_grad():
# img, hq = isp_model.forward(img.copy(), hq)
# add final JPEG compression noise
image = add_JPEG_noise(image)
image = util.single2uint(image)
example = {"image":image}
return example
# TODO: in case there is a pickle error, one needs to replace a += x with a = a + x in add_speckle_noise etc.
def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None):
"""
This is an extended degradation model by combining
the degradation models of BSRGAN and Real-ESRGAN
----------
    img: HxWxC, [0, 1], its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf)
    sf: scale factor
    shuffle_prob: probability of shuffling the degradation order
    use_sharp: whether to sharpen the HQ image before degradation
Returns
-------
img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
"""
h1, w1 = img.shape[:2]
img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
h, w = img.shape[:2]
if h < lq_patchsize * sf or w < lq_patchsize * sf:
raise ValueError(f'img size ({h1}X{w1}) is too small!')
if use_sharp:
img = add_sharpening(img)
hq = img.copy()
if random.random() < shuffle_prob:
shuffle_order = random.sample(range(13), 13)
else:
shuffle_order = list(range(13))
# local shuffle for noise, JPEG is always the last one
shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6)))
shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13)))
poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1
for i in shuffle_order:
if i == 0:
img = add_blur(img, sf=sf)
elif i == 1:
img = add_resize(img, sf=sf)
elif i == 2:
img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
elif i == 3:
if random.random() < poisson_prob:
img = add_Poisson_noise(img)
elif i == 4:
if random.random() < speckle_prob:
img = add_speckle_noise(img)
elif i == 5:
if random.random() < isp_prob and isp_model is not None:
with torch.no_grad():
img, hq = isp_model.forward(img.copy(), hq)
elif i == 6:
img = add_JPEG_noise(img)
elif i == 7:
img = add_blur(img, sf=sf)
elif i == 8:
img = add_resize(img, sf=sf)
elif i == 9:
img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25)
elif i == 10:
if random.random() < poisson_prob:
img = add_Poisson_noise(img)
elif i == 11:
if random.random() < speckle_prob:
img = add_speckle_noise(img)
elif i == 12:
if random.random() < isp_prob and isp_model is not None:
with torch.no_grad():
img, hq = isp_model.forward(img.copy(), hq)
else:
print('check the shuffle!')
# resize to desired size
img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])),
interpolation=random.choice([1, 2, 3]))
# add final JPEG compression noise
img = add_JPEG_noise(img)
# random crop
img, hq = random_crop(img, hq, sf, lq_patchsize)
return img, hq
if __name__ == '__main__':
    print("hey")
    img = util.imread_uint('utils/test.png', 3)  # uint8 HxWx3; the variant converts to [0, 1] internally
    print(img.shape)
    img = img[:448, :448]
    h = img.shape[0] // 4
    print("resizing to", h)
    sf = 4
    deg_fn = partial(degradation_bsrgan_variant, sf=sf)
    for i in range(20):
        print(i)
        img_hq = img
        img_lq = deg_fn(img)["image"]  # degradation_bsrgan_variant returns a dict with key "image"
        img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
        print(img_lq.shape)
        img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
        print("bicubic", img_lq_bicubic.shape)
        print(img_hq.shape)
        lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
                                interpolation=0)
        lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
                                        (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
                                        interpolation=0)
        img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
        util.imsave(img_concat, str(i) + '.png')
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/image_degradation/bsrgan.py |
from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr
from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/image_degradation/__init__.py |
import os
import math
import random
import numpy as np
import torch
import cv2
from torchvision.utils import make_grid
from datetime import datetime
# matplotlib.pyplot is imported lazily inside imshow() and surf() below  # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
'''
# --------------------------------------------
# Kai Zhang (github: https://github.com/cszn)
# 03/Mar/2019
# --------------------------------------------
# https://github.com/twhui/SRGAN-pyTorch
# https://github.com/xinntao/BasicSR
# --------------------------------------------
'''
IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif']
def is_image_file(filename):
return any(filename.endswith(extension) for extension in IMG_EXTENSIONS)
def get_timestamp():
return datetime.now().strftime('%y%m%d-%H%M%S')
def imshow(x, title=None, cbar=False, figsize=None):
    import matplotlib.pyplot as plt  # lazy import so the module can be used without matplotlib
    plt.figure(figsize=figsize)
plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray')
if title:
plt.title(title)
if cbar:
plt.colorbar()
plt.show()
def surf(Z, cmap='rainbow', figsize=None):
    import matplotlib.pyplot as plt  # lazy import so the module can be used without matplotlib
    plt.figure(figsize=figsize)
    ax3 = plt.axes(projection='3d')
w, h = Z.shape[:2]
xx = np.arange(0,w,1)
yy = np.arange(0,h,1)
X, Y = np.meshgrid(xx, yy)
ax3.plot_surface(X,Y,Z,cmap=cmap)
#ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap)
plt.show()
'''
# --------------------------------------------
# get image paths
# --------------------------------------------
'''
def get_image_paths(dataroot):
paths = None # return None if dataroot is None
if dataroot is not None:
paths = sorted(_get_paths_from_images(dataroot))
return paths
def _get_paths_from_images(path):
assert os.path.isdir(path), '{:s} is not a valid directory'.format(path)
images = []
for dirpath, _, fnames in sorted(os.walk(path)):
for fname in sorted(fnames):
if is_image_file(fname):
img_path = os.path.join(dirpath, fname)
images.append(img_path)
assert images, '{:s} has no valid image file'.format(path)
return images
'''
# --------------------------------------------
# split large images into small images
# --------------------------------------------
'''
def patches_from_image(img, p_size=512, p_overlap=64, p_max=800):
w, h = img.shape[:2]
patches = []
if w > p_max and h > p_max:
        w1 = list(np.arange(0, w-p_size, p_size-p_overlap, dtype=int))
        h1 = list(np.arange(0, h-p_size, p_size-p_overlap, dtype=int))
w1.append(w-p_size)
h1.append(h-p_size)
# print(w1)
# print(h1)
for i in w1:
for j in h1:
patches.append(img[i:i+p_size, j:j+p_size,:])
else:
patches.append(img)
return patches
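# Hypothetical usage sketch (not part of the original utilities): splits a large
# synthetic image into overlapping 512x512 patches with `patches_from_image`.
def _demo_patches_from_image():
    img = np.zeros((1024, 1024, 3), dtype=np.uint8)
    patches = patches_from_image(img, p_size=512, p_overlap=64, p_max=800)
    assert all(p.shape == (512, 512, 3) for p in patches)
    return patches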
def imssave(imgs, img_path):
"""
imgs: list, N images of size WxHxC
"""
img_name, ext = os.path.splitext(os.path.basename(img_path))
for i, img in enumerate(imgs):
if img.ndim == 3:
img = img[:, :, [2, 1, 0]]
new_path = os.path.join(os.path.dirname(img_path), img_name+str('_s{:04d}'.format(i))+'.png')
cv2.imwrite(new_path, img)
def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=800, p_overlap=96, p_max=1000):
"""
    split the large images from original_dataroot into small overlapped images of size (p_size)x(p_size),
    and save them into taget_dataroot; only images larger than (p_max)x(p_max)
    will be split.
    Args:
        original_dataroot: source directory of large images
        taget_dataroot: destination directory for the patches
        p_size: size of the small images
        p_overlap: overlap between adjacent patches (the training patch size is a good choice)
        p_max: images smaller than (p_max)x(p_max) are kept unchanged.
"""
paths = get_image_paths(original_dataroot)
for img_path in paths:
# img_name, ext = os.path.splitext(os.path.basename(img_path))
img = imread_uint(img_path, n_channels=n_channels)
patches = patches_from_image(img, p_size, p_overlap, p_max)
imssave(patches, os.path.join(taget_dataroot,os.path.basename(img_path)))
#if original_dataroot == taget_dataroot:
#del img_path
'''
# --------------------------------------------
# makedir
# --------------------------------------------
'''
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def mkdirs(paths):
if isinstance(paths, str):
mkdir(paths)
else:
for path in paths:
mkdir(path)
def mkdir_and_rename(path):
if os.path.exists(path):
new_name = path + '_archived_' + get_timestamp()
print('Path already exists. Rename it to [{:s}]'.format(new_name))
os.rename(path, new_name)
os.makedirs(path)
'''
# --------------------------------------------
# read image from path
# opencv is fast, but read BGR numpy image
# --------------------------------------------
'''
# --------------------------------------------
# get uint8 image of size HxWxn_channels (RGB)
# --------------------------------------------
def imread_uint(path, n_channels=3):
# input: path
# output: HxWx3(RGB or GGG), or HxWx1 (G)
if n_channels == 1:
img = cv2.imread(path, 0) # cv2.IMREAD_GRAYSCALE
img = np.expand_dims(img, axis=2) # HxWx1
elif n_channels == 3:
img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # BGR or G
if img.ndim == 2:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) # GGG
else:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # RGB
return img
# --------------------------------------------
# matlab's imwrite
# --------------------------------------------
def imsave(img, img_path):
img = np.squeeze(img)
if img.ndim == 3:
img = img[:, :, [2, 1, 0]]
cv2.imwrite(img_path, img)
def imwrite(img, img_path):
img = np.squeeze(img)
if img.ndim == 3:
img = img[:, :, [2, 1, 0]]
cv2.imwrite(img_path, img)
# --------------------------------------------
# get single image of size HxWxn_channels (BGR)
# --------------------------------------------
def read_img(path):
# read image by cv2
# return: Numpy float32, HWC, BGR, [0,1]
img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # cv2.IMREAD_GRAYSCALE
img = img.astype(np.float32) / 255.
if img.ndim == 2:
img = np.expand_dims(img, axis=2)
# some images have 4 channels
if img.shape[2] > 3:
img = img[:, :, :3]
return img
'''
# --------------------------------------------
# image format conversion
# --------------------------------------------
# numpy(single) <---> numpy(uint)
# numpy(single) <---> tensor
# numpy(uint) <---> tensor
# --------------------------------------------
'''
# --------------------------------------------
# numpy(single) [0, 1] <---> numpy(uint)
# --------------------------------------------
def uint2single(img):
return np.float32(img/255.)
def single2uint(img):
return np.uint8((img.clip(0, 1)*255.).round())
def uint162single(img):
return np.float32(img/65535.)
def single2uint16(img):
return np.uint16((img.clip(0, 1)*65535.).round())
# --------------------------------------------
# numpy(uint) (HxWxC or HxW) <---> tensor
# --------------------------------------------
# convert uint to 4-dimensional torch tensor
def uint2tensor4(img):
if img.ndim == 2:
img = np.expand_dims(img, axis=2)
return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.).unsqueeze(0)
# convert uint to 3-dimensional torch tensor
def uint2tensor3(img):
if img.ndim == 2:
img = np.expand_dims(img, axis=2)
return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.)
# convert 2/3/4-dimensional torch tensor to uint
def tensor2uint(img):
img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy()
if img.ndim == 3:
img = np.transpose(img, (1, 2, 0))
return np.uint8((img*255.0).round())
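# Hypothetical usage sketch (not part of the original utilities): round-trips a uint8
# HxWxC image through the uint <-> tensor converters above.
def _demo_uint_tensor_roundtrip():
    img = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
    t = uint2tensor4(img)                 # 1xCxHxW float tensor in [0, 1]
    assert t.shape == (1, 3, 32, 32)
    back = tensor2uint(t)                 # HxWxC uint8 again
    assert back.shape == (32, 32, 3)
    return back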
# --------------------------------------------
# numpy(single) (HxWxC) <---> tensor
# --------------------------------------------
# convert single (HxWxC) to 3-dimensional torch tensor
def single2tensor3(img):
return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float()
# convert single (HxWxC) to 4-dimensional torch tensor
def single2tensor4(img):
return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().unsqueeze(0)
# convert torch tensor to single
def tensor2single(img):
img = img.data.squeeze().float().cpu().numpy()
if img.ndim == 3:
img = np.transpose(img, (1, 2, 0))
return img
# convert torch tensor to single
def tensor2single3(img):
img = img.data.squeeze().float().cpu().numpy()
if img.ndim == 3:
img = np.transpose(img, (1, 2, 0))
elif img.ndim == 2:
img = np.expand_dims(img, axis=2)
return img
def single2tensor5(img):
return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float().unsqueeze(0)
def single32tensor5(img):
return torch.from_numpy(np.ascontiguousarray(img)).float().unsqueeze(0).unsqueeze(0)
def single42tensor4(img):
return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float()
# from skimage.io import imread, imsave
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
'''
Converts a torch Tensor into an image Numpy array of BGR channel order
Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
'''
tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp
tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1]
n_dim = tensor.dim()
if n_dim == 4:
n_img = len(tensor)
img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy()
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
elif n_dim == 3:
img_np = tensor.numpy()
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR
elif n_dim == 2:
img_np = tensor.numpy()
else:
raise TypeError(
'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim))
if out_type == np.uint8:
img_np = (img_np * 255.0).round()
        # Important. Unlike MATLAB, numpy.uint8() WILL NOT round by default.
return img_np.astype(out_type)
'''
# --------------------------------------------
# Augmentation, flip and/or rotate
# --------------------------------------------
# The following two are enough.
# (1) augment_img: numpy image of WxHxC or WxH
# (2) augment_img_tensor4: tensor image 1xCxWxH
# --------------------------------------------
'''
def augment_img(img, mode=0):
'''Kai Zhang (github: https://github.com/cszn)
'''
if mode == 0:
return img
elif mode == 1:
return np.flipud(np.rot90(img))
elif mode == 2:
return np.flipud(img)
elif mode == 3:
return np.rot90(img, k=3)
elif mode == 4:
return np.flipud(np.rot90(img, k=2))
elif mode == 5:
return np.rot90(img)
elif mode == 6:
return np.rot90(img, k=2)
elif mode == 7:
return np.flipud(np.rot90(img, k=3))
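# Hypothetical usage sketch (not part of the original utilities): the eight modes of
# `augment_img` cover the dihedral group of the square (identity, rotations, flips).
def _demo_augment_img():
    img = np.arange(12, dtype=np.float32).reshape(3, 4)
    outs = [augment_img(img, mode=m) for m in range(8)]
    # 90-degree rotations swap H and W, pure flips keep the original shape
    assert outs[0].shape == (3, 4) and outs[5].shape == (4, 3)
    return outs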
def augment_img_tensor4(img, mode=0):
'''Kai Zhang (github: https://github.com/cszn)
'''
if mode == 0:
return img
elif mode == 1:
return img.rot90(1, [2, 3]).flip([2])
elif mode == 2:
return img.flip([2])
elif mode == 3:
return img.rot90(3, [2, 3])
elif mode == 4:
return img.rot90(2, [2, 3]).flip([2])
elif mode == 5:
return img.rot90(1, [2, 3])
elif mode == 6:
return img.rot90(2, [2, 3])
elif mode == 7:
return img.rot90(3, [2, 3]).flip([2])
def augment_img_tensor(img, mode=0):
'''Kai Zhang (github: https://github.com/cszn)
'''
img_size = img.size()
img_np = img.data.cpu().numpy()
if len(img_size) == 3:
img_np = np.transpose(img_np, (1, 2, 0))
elif len(img_size) == 4:
img_np = np.transpose(img_np, (2, 3, 1, 0))
img_np = augment_img(img_np, mode=mode)
img_tensor = torch.from_numpy(np.ascontiguousarray(img_np))
if len(img_size) == 3:
img_tensor = img_tensor.permute(2, 0, 1)
elif len(img_size) == 4:
img_tensor = img_tensor.permute(3, 2, 0, 1)
return img_tensor.type_as(img)
def augment_img_np3(img, mode=0):
if mode == 0:
return img
elif mode == 1:
return img.transpose(1, 0, 2)
elif mode == 2:
return img[::-1, :, :]
elif mode == 3:
img = img[::-1, :, :]
img = img.transpose(1, 0, 2)
return img
elif mode == 4:
return img[:, ::-1, :]
elif mode == 5:
img = img[:, ::-1, :]
img = img.transpose(1, 0, 2)
return img
elif mode == 6:
img = img[:, ::-1, :]
img = img[::-1, :, :]
return img
elif mode == 7:
img = img[:, ::-1, :]
img = img[::-1, :, :]
img = img.transpose(1, 0, 2)
return img
def augment_imgs(img_list, hflip=True, rot=True):
# horizontal flip OR rotate
hflip = hflip and random.random() < 0.5
vflip = rot and random.random() < 0.5
rot90 = rot and random.random() < 0.5
def _augment(img):
if hflip:
img = img[:, ::-1, :]
if vflip:
img = img[::-1, :, :]
if rot90:
img = img.transpose(1, 0, 2)
return img
return [_augment(img) for img in img_list]
'''
# --------------------------------------------
# modcrop and shave
# --------------------------------------------
'''
def modcrop(img_in, scale):
# img_in: Numpy, HWC or HW
img = np.copy(img_in)
if img.ndim == 2:
H, W = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r]
elif img.ndim == 3:
H, W, C = img.shape
H_r, W_r = H % scale, W % scale
img = img[:H - H_r, :W - W_r, :]
else:
raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim))
return img
def shave(img_in, border=0):
# img_in: Numpy, HWC or HW
img = np.copy(img_in)
h, w = img.shape[:2]
img = img[border:h-border, border:w-border]
return img
'''
# --------------------------------------------
# image processing process on numpy image
# channel_convert(in_c, tar_type, img_list):
# rgb2ycbcr(img, only_y=True):
# bgr2ycbcr(img, only_y=True):
# ycbcr2rgb(img):
# --------------------------------------------
'''
def rgb2ycbcr(img, only_y=True):
'''same as matlab rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
    img = img.astype(np.float32)
if in_img_type != np.uint8:
img *= 255.
# convert
if only_y:
rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0
else:
rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786],
[24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def ycbcr2rgb(img):
'''same as matlab ycbcr2rgb
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
    img = img.astype(np.float32)
if in_img_type != np.uint8:
img *= 255.
# convert
rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071],
[0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def bgr2ycbcr(img, only_y=True):
'''bgr version of rgb2ycbcr
only_y: only return Y channel
Input:
uint8, [0, 255]
float, [0, 1]
'''
in_img_type = img.dtype
    img = img.astype(np.float32)
if in_img_type != np.uint8:
img *= 255.
# convert
if only_y:
rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0
else:
rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786],
[65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128]
if in_img_type == np.uint8:
rlt = rlt.round()
else:
rlt /= 255.
return rlt.astype(in_img_type)
def channel_convert(in_c, tar_type, img_list):
# conversion among BGR, gray and y
if in_c == 3 and tar_type == 'gray': # BGR to gray
gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list]
return [np.expand_dims(img, axis=2) for img in gray_list]
elif in_c == 3 and tar_type == 'y': # BGR to y
y_list = [bgr2ycbcr(img, only_y=True) for img in img_list]
return [np.expand_dims(img, axis=2) for img in y_list]
elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR
return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list]
else:
return img_list
'''
# --------------------------------------------
# metric, PSNR and SSIM
# --------------------------------------------
'''
# --------------------------------------------
# PSNR
# --------------------------------------------
def calculate_psnr(img1, img2, border=0):
# img1 and img2 have range [0, 255]
#img1 = img1.squeeze()
#img2 = img2.squeeze()
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
h, w = img1.shape[:2]
img1 = img1[border:h-border, border:w-border]
img2 = img2[border:h-border, border:w-border]
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
mse = np.mean((img1 - img2)**2)
if mse == 0:
return float('inf')
return 20 * math.log10(255.0 / math.sqrt(mse))
# --------------------------------------------
# SSIM
# --------------------------------------------
def calculate_ssim(img1, img2, border=0):
'''calculate SSIM
the same outputs as MATLAB's
img1, img2: [0, 255]
'''
#img1 = img1.squeeze()
#img2 = img2.squeeze()
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
h, w = img1.shape[:2]
img1 = img1[border:h-border, border:w-border]
img2 = img2[border:h-border, border:w-border]
if img1.ndim == 2:
return ssim(img1, img2)
elif img1.ndim == 3:
if img1.shape[2] == 3:
ssims = []
for i in range(3):
ssims.append(ssim(img1[:,:,i], img2[:,:,i]))
return np.array(ssims).mean()
elif img1.shape[2] == 1:
return ssim(np.squeeze(img1), np.squeeze(img2))
else:
raise ValueError('Wrong input image dimensions.')
def ssim(img1, img2):
C1 = (0.01 * 255)**2
C2 = (0.03 * 255)**2
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
kernel = cv2.getGaussianKernel(11, 1.5)
window = np.outer(kernel, kernel.transpose())
mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid
mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
mu1_sq = mu1**2
mu2_sq = mu2**2
mu1_mu2 = mu1 * mu2
sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
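# Hypothetical usage sketch (not part of the original utilities): PSNR/SSIM of an image
# against a noisy copy; both metrics above expect inputs in the [0, 255] range.
def _demo_psnr_ssim():
    img1 = np.random.randint(0, 256, size=(64, 64, 3)).astype(np.float64)
    img2 = np.clip(img1 + np.random.normal(0, 5, img1.shape), 0, 255)
    return calculate_psnr(img1, img2), calculate_ssim(img1, img2)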
'''
# --------------------------------------------
# matlab's bicubic imresize (numpy and torch) [0, 1]
# --------------------------------------------
'''
# matlab 'imresize' function, now only support 'bicubic'
def cubic(x):
absx = torch.abs(x)
absx2 = absx**2
absx3 = absx**3
return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \
(-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx))
def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, antialiasing):
if (scale < 1) and (antialiasing):
# Use a modified kernel to simultaneously interpolate and antialias- larger kernel width
kernel_width = kernel_width / scale
# Output-space coordinates
x = torch.linspace(1, out_length, out_length)
# Input-space coordinates. Calculate the inverse mapping such that 0.5
# in output space maps to 0.5 in input space, and 0.5+scale in output
# space maps to 1.5 in input space.
u = x / scale + 0.5 * (1 - 1 / scale)
# What is the left-most pixel that can be involved in the computation?
left = torch.floor(u - kernel_width / 2)
# What is the maximum number of pixels that can be involved in the
# computation? Note: it's OK to use an extra pixel here; if the
# corresponding weights are all zero, it will be eliminated at the end
# of this function.
P = math.ceil(kernel_width) + 2
# The indices of the input pixels involved in computing the k-th output
# pixel are in row k of the indices matrix.
indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view(
1, P).expand(out_length, P)
# The weights used to compute the k-th output pixel are in row k of the
# weights matrix.
distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices
# apply cubic kernel
if (scale < 1) and (antialiasing):
weights = scale * cubic(distance_to_center * scale)
else:
weights = cubic(distance_to_center)
# Normalize the weights matrix so that each row sums to 1.
weights_sum = torch.sum(weights, 1).view(out_length, 1)
weights = weights / weights_sum.expand(out_length, P)
# If a column in weights is all zero, get rid of it. only consider the first and last column.
weights_zero_tmp = torch.sum((weights == 0), 0)
if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6):
indices = indices.narrow(1, 1, P - 2)
weights = weights.narrow(1, 1, P - 2)
if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6):
indices = indices.narrow(1, 0, P - 2)
weights = weights.narrow(1, 0, P - 2)
weights = weights.contiguous()
indices = indices.contiguous()
sym_len_s = -indices.min() + 1
sym_len_e = indices.max() - in_length
indices = indices + sym_len_s - 1
return weights, indices, int(sym_len_s), int(sym_len_e)
# --------------------------------------------
# imresize for tensor image [0, 1]
# --------------------------------------------
def imresize(img, scale, antialiasing=True):
# Now the scale should be the same for H and W
# input: img: pytorch tensor, CHW or HW [0,1]
# output: CHW or HW [0,1] w/o round
need_squeeze = True if img.dim() == 2 else False
if need_squeeze:
img.unsqueeze_(0)
in_C, in_H, in_W = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
kernel_width = 4
kernel = 'cubic'
# Return the desired dimension order for performing the resize. The
# strategy is to perform the resize first along the dimension with the
# smallest scale factor.
# Now we do not support this.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W)
img_aug.narrow(1, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:, :sym_len_Hs, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[:, -sym_len_He:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(in_C, out_H, in_W)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
for j in range(out_C):
out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We)
out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :, :sym_len_Ws]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, :, -sym_len_We:]
inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(2, inv_idx)
out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(in_C, out_H, out_W)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
for j in range(out_C):
out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i])
if need_squeeze:
out_2.squeeze_()
return out_2
# --------------------------------------------
# imresize for numpy image [0, 1]
# --------------------------------------------
def imresize_np(img, scale, antialiasing=True):
# Now the scale should be the same for H and W
# input: img: Numpy, HWC or HW [0,1]
# output: HWC or HW [0,1] w/o round
img = torch.from_numpy(img)
need_squeeze = True if img.dim() == 2 else False
if need_squeeze:
img.unsqueeze_(2)
in_H, in_W, in_C = img.size()
out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale)
kernel_width = 4
kernel = 'cubic'
# Return the desired dimension order for performing the resize. The
# strategy is to perform the resize first along the dimension with the
# smallest scale factor.
# Now we do not support this.
# get weights and indices
weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices(
in_H, out_H, scale, kernel, kernel_width, antialiasing)
weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices(
in_W, out_W, scale, kernel, kernel_width, antialiasing)
# process H dimension
# symmetric copying
img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C)
img_aug.narrow(0, sym_len_Hs, in_H).copy_(img)
sym_patch = img[:sym_len_Hs, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv)
sym_patch = img[-sym_len_He:, :, :]
inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(0, inv_idx)
img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv)
out_1 = torch.FloatTensor(out_H, in_W, in_C)
kernel_width = weights_H.size(1)
for i in range(out_H):
idx = int(indices_H[i][0])
for j in range(out_C):
out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i])
# process W dimension
# symmetric copying
out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C)
out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1)
sym_patch = out_1[:, :sym_len_Ws, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv)
sym_patch = out_1[:, -sym_len_We:, :]
inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long()
sym_patch_inv = sym_patch.index_select(1, inv_idx)
out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv)
out_2 = torch.FloatTensor(out_H, out_W, in_C)
kernel_width = weights_W.size(1)
for i in range(out_W):
idx = int(indices_W[i][0])
for j in range(out_C):
out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i])
if need_squeeze:
out_2.squeeze_()
return out_2.numpy()
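# Hypothetical usage sketch (not part of the original utilities): MATLAB-style bicubic
# downscaling of a float image in [0, 1] with the numpy wrapper above.
def _demo_imresize_np():
    img = np.random.rand(64, 64, 3).astype(np.float32)
    small = imresize_np(img, 1 / 4)        # antialiasing=True by default
    assert small.shape == (16, 16, 3)
    return small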
if __name__ == '__main__':
print('---')
# img = imread_uint('test.bmp', 3)
# img = uint2single(img)
# img_bicubic = imresize_np(img, 1/4) | EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/image_degradation/utils_image.py |
# -*- coding: utf-8 -*-
import numpy as np
import cv2
import torch
from functools import partial
import random
from scipy import ndimage
import scipy
import scipy.stats as ss
from scipy.interpolate import interp2d
from scipy.linalg import orth
import albumentations
import ldm.modules.image_degradation.utils_image as util
"""
# --------------------------------------------
# Super-Resolution
# --------------------------------------------
#
# Kai Zhang ([email protected])
# https://github.com/cszn
# From 2019/03--2021/08
# --------------------------------------------
"""
def modcrop_np(img, sf):
'''
Args:
img: numpy image, WxH or WxHxC
sf: scale factor
Return:
cropped image
'''
w, h = img.shape[:2]
im = np.copy(img)
return im[:w - w % sf, :h - h % sf, ...]
"""
# --------------------------------------------
# anisotropic Gaussian kernels
# --------------------------------------------
"""
def analytic_kernel(k):
"""Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)"""
k_size = k.shape[0]
# Calculate the big kernels size
big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2))
# Loop over the small kernel to fill the big one
for r in range(k_size):
for c in range(k_size):
big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k
    # Crop the edges of the big kernel to ignore very small values and reduce the SR run time
crop = k_size // 2
cropped_big_k = big_k[crop:-crop, crop:-crop]
# Normalize to 1
return cropped_big_k / cropped_big_k.sum()
def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6):
""" generate an anisotropic Gaussian kernel
Args:
ksize : e.g., 15, kernel size
theta : [0, pi], rotation angle range
l1 : [0.1,50], scaling of eigenvalues
l2 : [0.1,l1], scaling of eigenvalues
If l1 = l2, will get an isotropic Gaussian kernel.
Returns:
k : kernel
"""
v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.]))
V = np.array([[v[0], v[1]], [v[1], -v[0]]])
D = np.array([[l1, 0], [0, l2]])
Sigma = np.dot(np.dot(V, D), np.linalg.inv(V))
k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize)
return k
def gm_blur_kernel(mean, cov, size=15):
center = size / 2.0 + 0.5
k = np.zeros([size, size])
for y in range(size):
for x in range(size):
cy = y - center + 1
cx = x - center + 1
k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov)
k = k / np.sum(k)
return k
def shift_pixel(x, sf, upper_left=True):
"""shift pixel for super-resolution with different scale factors
Args:
x: WxHxC or WxH
sf: scale factor
upper_left: shift direction
"""
h, w = x.shape[:2]
shift = (sf - 1) * 0.5
xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0)
if upper_left:
x1 = xv + shift
y1 = yv + shift
else:
x1 = xv - shift
y1 = yv - shift
x1 = np.clip(x1, 0, w - 1)
y1 = np.clip(y1, 0, h - 1)
if x.ndim == 2:
x = interp2d(xv, yv, x)(x1, y1)
if x.ndim == 3:
for i in range(x.shape[-1]):
x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1)
return x
def blur(x, k):
'''
x: image, NxcxHxW
k: kernel, Nx1xhxw
'''
n, c = x.shape[:2]
p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2
x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate')
k = k.repeat(1, c, 1, 1)
k = k.view(-1, 1, k.shape[2], k.shape[3])
x = x.view(1, -1, x.shape[2], x.shape[3])
x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c)
x = x.view(n, c, x.shape[2], x.shape[3])
return x
def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0):
""""
# modified version of https://github.com/assafshocher/BlindSR_dataset_generator
# Kai Zhang
# min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var
# max_var = 2.5 * sf
"""
# Set random eigen-vals (lambdas) and angle (theta) for COV matrix
lambda_1 = min_var + np.random.rand() * (max_var - min_var)
lambda_2 = min_var + np.random.rand() * (max_var - min_var)
theta = np.random.rand() * np.pi # random theta
noise = -noise_level + np.random.rand(*k_size) * noise_level * 2
# Set COV matrix using Lambdas and Theta
LAMBDA = np.diag([lambda_1, lambda_2])
Q = np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]])
SIGMA = Q @ LAMBDA @ Q.T
INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :]
# Set expectation position (shifting kernel for aligned image)
MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2)
MU = MU[None, None, :, None]
# Create meshgrid for Gaussian
[X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1]))
Z = np.stack([X, Y], 2)[:, :, :, None]
    # Calculate Gaussian for every pixel of the kernel
ZZ = Z - MU
ZZ_t = ZZ.transpose(0, 1, 3, 2)
raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise)
# shift the kernel so it will be centered
# raw_kernel_centered = kernel_shift(raw_kernel, scale_factor)
# Normalize the kernel and return
# kernel = raw_kernel_centered / np.sum(raw_kernel_centered)
kernel = raw_kernel / np.sum(raw_kernel)
return kernel
def fspecial_gaussian(hsize, sigma):
hsize = [hsize, hsize]
siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0]
std = sigma
[x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1))
arg = -(x * x + y * y) / (2 * std * std)
h = np.exp(arg)
    h[h < np.finfo(float).eps * h.max()] = 0
sumh = h.sum()
if sumh != 0:
h = h / sumh
return h
def fspecial_laplacian(alpha):
alpha = max([0, min([alpha, 1])])
h1 = alpha / (alpha + 1)
h2 = (1 - alpha) / (alpha + 1)
h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]]
h = np.array(h)
return h
def fspecial(filter_type, *args, **kwargs):
'''
python code from:
https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py
'''
if filter_type == 'gaussian':
return fspecial_gaussian(*args, **kwargs)
if filter_type == 'laplacian':
return fspecial_laplacian(*args, **kwargs)
"""
# --------------------------------------------
# degradation models
# --------------------------------------------
"""
def bicubic_degradation(x, sf=3):
'''
Args:
x: HxWxC image, [0, 1]
sf: down-scale factor
Return:
        bicubically downsampled LR image
'''
x = util.imresize_np(x, scale=1 / sf)
return x
def srmd_degradation(x, k, sf=3):
''' blur + bicubic downsampling
Args:
x: HxWxC image, [0, 1]
k: hxw, double
sf: down-scale factor
Return:
downsampled LR image
Reference:
@inproceedings{zhang2018learning,
title={Learning a single convolutional super-resolution network for multiple degradations},
author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
pages={3262--3271},
year={2018}
}
'''
x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror'
x = bicubic_degradation(x, sf=sf)
return x
def dpsr_degradation(x, k, sf=3):
''' bicubic downsampling + blur
Args:
x: HxWxC image, [0, 1]
k: hxw, double
sf: down-scale factor
Return:
downsampled LR image
Reference:
@inproceedings{zhang2019deep,
title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels},
author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei},
booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
pages={1671--1681},
year={2019}
}
'''
x = bicubic_degradation(x, sf=sf)
x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
return x
def classical_degradation(x, k, sf=3):
''' blur + downsampling
Args:
x: HxWxC image, [0, 1]/[0, 255]
k: hxw, double
sf: down-scale factor
Return:
downsampled LR image
'''
x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap')
# x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2))
st = 0
return x[st::sf, st::sf, ...]
def add_sharpening(img, weight=0.5, radius=50, threshold=10):
"""USM sharpening. borrowed from real-ESRGAN
Input image: I; Blurry image: B.
1. K = I + weight * (I - B)
2. Mask = 1 if abs(I - B) > threshold, else: 0
3. Blur mask:
4. Out = Mask * K + (1 - Mask) * I
Args:
img (Numpy array): Input image, HWC, BGR; float32, [0, 1].
weight (float): Sharp weight. Default: 1.
radius (float): Kernel size of Gaussian blur. Default: 50.
threshold (int):
"""
if radius % 2 == 0:
radius += 1
blur = cv2.GaussianBlur(img, (radius, radius), 0)
residual = img - blur
mask = np.abs(residual) * 255 > threshold
mask = mask.astype('float32')
soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
K = img + weight * residual
K = np.clip(K, 0, 1)
return soft_mask * K + (1 - soft_mask) * img
def add_blur(img, sf=4):
wd2 = 4.0 + sf
wd = 2.0 + 0.2 * sf
wd2 = wd2/4
wd = wd/4
if random.random() < 0.5:
l1 = wd2 * random.random()
l2 = wd2 * random.random()
k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2)
else:
k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random())
img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror')
return img
def add_resize(img, sf=4):
rnum = np.random.rand()
if rnum > 0.8: # up
sf1 = random.uniform(1, 2)
elif rnum < 0.7: # down
sf1 = random.uniform(0.5 / sf, 1)
else:
sf1 = 1.0
img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3]))
img = np.clip(img, 0.0, 1.0)
return img
# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
# noise_level = random.randint(noise_level1, noise_level2)
# rnum = np.random.rand()
# if rnum > 0.6: # add color Gaussian noise
# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
# elif rnum < 0.4: # add grayscale Gaussian noise
# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
# else: # add noise
# L = noise_level2 / 255.
# D = np.diag(np.random.rand(3))
# U = orth(np.random.rand(3, 3))
# conv = np.dot(np.dot(np.transpose(U), D), U)
# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
# img = np.clip(img, 0.0, 1.0)
# return img
def add_Gaussian_noise(img, noise_level1=2, noise_level2=25):
noise_level = random.randint(noise_level1, noise_level2)
rnum = np.random.rand()
if rnum > 0.6: # add color Gaussian noise
img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
elif rnum < 0.4: # add grayscale Gaussian noise
img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
else: # add noise
L = noise_level2 / 255.
D = np.diag(np.random.rand(3))
U = orth(np.random.rand(3, 3))
conv = np.dot(np.dot(np.transpose(U), D), U)
img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
img = np.clip(img, 0.0, 1.0)
return img
def add_speckle_noise(img, noise_level1=2, noise_level2=25):
noise_level = random.randint(noise_level1, noise_level2)
img = np.clip(img, 0.0, 1.0)
rnum = random.random()
if rnum > 0.6:
img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32)
elif rnum < 0.4:
img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32)
else:
L = noise_level2 / 255.
D = np.diag(np.random.rand(3))
U = orth(np.random.rand(3, 3))
conv = np.dot(np.dot(np.transpose(U), D), U)
img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32)
img = np.clip(img, 0.0, 1.0)
return img
def add_Poisson_noise(img):
img = np.clip((img * 255.0).round(), 0, 255) / 255.
vals = 10 ** (2 * random.random() + 2.0) # [2, 4]
if random.random() < 0.5:
img = np.random.poisson(img * vals).astype(np.float32) / vals
else:
img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114])
img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255.
noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray
img += noise_gray[:, :, np.newaxis]
img = np.clip(img, 0.0, 1.0)
return img
def add_JPEG_noise(img):
quality_factor = random.randint(80, 95)
img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR)
result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor])
img = cv2.imdecode(encimg, 1)
img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB)
return img
def random_crop(lq, hq, sf=4, lq_patchsize=64):
h, w = lq.shape[:2]
rnd_h = random.randint(0, h - lq_patchsize)
rnd_w = random.randint(0, w - lq_patchsize)
lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :]
rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf)
hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :]
return lq, hq
def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None):
"""
This is the degradation model of BSRGAN from the paper
"Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
----------
    img: HxWxC, [0, 1], its size should be larger than (lq_patchsize x sf) x (lq_patchsize x sf)
sf: scale factor
isp_model: camera ISP model
Returns
-------
img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
"""
isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
sf_ori = sf
h1, w1 = img.shape[:2]
img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop
h, w = img.shape[:2]
if h < lq_patchsize * sf or w < lq_patchsize * sf:
raise ValueError(f'img size ({h1}X{w1}) is too small!')
hq = img.copy()
if sf == 4 and random.random() < scale2_prob: # downsample1
if np.random.rand() < 0.5:
img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])),
interpolation=random.choice([1, 2, 3]))
else:
img = util.imresize_np(img, 1 / 2, True)
img = np.clip(img, 0.0, 1.0)
sf = 2
shuffle_order = random.sample(range(7), 7)
idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
if idx1 > idx2: # keep downsample3 last
shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
for i in shuffle_order:
if i == 0:
img = add_blur(img, sf=sf)
elif i == 1:
img = add_blur(img, sf=sf)
elif i == 2:
a, b = img.shape[1], img.shape[0]
# downsample2
if random.random() < 0.75:
sf1 = random.uniform(1, 2 * sf)
img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])),
interpolation=random.choice([1, 2, 3]))
else:
k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
k_shifted = shift_pixel(k, sf)
k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror')
img = img[0::sf, 0::sf, ...] # nearest downsampling
img = np.clip(img, 0.0, 1.0)
elif i == 3:
# downsample3
img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
img = np.clip(img, 0.0, 1.0)
elif i == 4:
# add Gaussian noise
img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8)
elif i == 5:
# add JPEG noise
if random.random() < jpeg_prob:
img = add_JPEG_noise(img)
elif i == 6:
# add processed camera sensor noise
if random.random() < isp_prob and isp_model is not None:
with torch.no_grad():
img, hq = isp_model.forward(img.copy(), hq)
# add final JPEG compression noise
img = add_JPEG_noise(img)
# random crop
img, hq = random_crop(img, hq, sf_ori, lq_patchsize)
return img, hq
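# Hedged usage sketch (added for illustration, not part of the original BSRGAN code):
# running the shuffled degradation pipeline above on a synthetic high-resolution image.
# Output shapes follow the docstring: the LQ patch is lq_patchsize**2 and the HQ patch
# is (lq_patchsize * sf)**2. The helper name below is ours.
def _example_degradation_bsrgan(sf=4, lq_patchsize=72):
    hr = np.random.rand(2 * lq_patchsize * sf, 2 * lq_patchsize * sf, 3).astype(np.float32)
    lq, hq = degradation_bsrgan(hr, sf=sf, lq_patchsize=lq_patchsize)
    return lq.shape, hq.shape  # ((72, 72, 3), (288, 288, 3))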
# todo no isp_model?
def degradation_bsrgan_variant(image, sf=4, isp_model=None):
"""
This is the degradation model of BSRGAN from the paper
"Designing a Practical Degradation Model for Deep Blind Image Super-Resolution"
----------
sf: scale factor
isp_model: camera ISP model
Returns
-------
img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1]
hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1]
"""
image = util.uint2single(image)
isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25
sf_ori = sf
h1, w1 = image.shape[:2]
    image = image.copy()[:h1 - h1 % sf, :w1 - w1 % sf, ...]  # mod crop (height first, then width)
h, w = image.shape[:2]
hq = image.copy()
if sf == 4 and random.random() < scale2_prob: # downsample1
if np.random.rand() < 0.5:
image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])),
interpolation=random.choice([1, 2, 3]))
else:
image = util.imresize_np(image, 1 / 2, True)
image = np.clip(image, 0.0, 1.0)
sf = 2
shuffle_order = random.sample(range(7), 7)
idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3)
if idx1 > idx2: # keep downsample3 last
shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1]
    for i in shuffle_order:
        if i == 0:
            image = add_blur(image, sf=sf)
        elif i == 1:
            pass  # the second blur pass is disabled in this light variant
        elif i == 2:
a, b = image.shape[1], image.shape[0]
# downsample2
if random.random() < 0.8:
sf1 = random.uniform(1, 2 * sf)
image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])),
interpolation=random.choice([1, 2, 3]))
else:
k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf))
k_shifted = shift_pixel(k, sf)
k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel
image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror')
image = image[0::sf, 0::sf, ...] # nearest downsampling
image = np.clip(image, 0.0, 1.0)
elif i == 3:
# downsample3
image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
image = np.clip(image, 0.0, 1.0)
elif i == 4:
# add Gaussian noise
image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)
elif i == 5:
# add JPEG noise
if random.random() < jpeg_prob:
image = add_JPEG_noise(image)
#
# elif i == 6:
# # add processed camera sensor noise
# if random.random() < isp_prob and isp_model is not None:
# with torch.no_grad():
# img, hq = isp_model.forward(img.copy(), hq)
# add final JPEG compression noise
image = add_JPEG_noise(image)
image = util.single2uint(image)
example = {"image": image}
return example
if __name__ == '__main__':
print("hey")
img = util.imread_uint('utils/test.png', 3)
img = img[:448, :448]
h = img.shape[0] // 4
print("resizing to", h)
sf = 4
deg_fn = partial(degradation_bsrgan_variant, sf=sf)
for i in range(20):
print(i)
img_hq = img
img_lq = deg_fn(img)["image"]
img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
print(img_lq)
img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
print(img_lq.shape)
print("bicubic", img_lq_bicubic.shape)
print(img_hq.shape)
lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
interpolation=0)
lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
(int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
interpolation=0)
img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
util.imsave(img_concat, str(i) + '.png')
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/image_degradation/bsrgan_light.py |
# adopted from
# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
# and
# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
# and
# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
#
# thanks!
import os
import math
import torch
import torch.nn as nn
import numpy as np
from einops import repeat
from ldm.util import instantiate_from_config
def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
if schedule == "linear":
betas = (
torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
)
elif schedule == "cosine":
timesteps = (
torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
)
alphas = timesteps / (1 + cosine_s) * np.pi / 2
alphas = torch.cos(alphas).pow(2)
alphas = alphas / alphas[0]
betas = 1 - alphas[1:] / alphas[:-1]
betas = np.clip(betas, a_min=0, a_max=0.999)
elif schedule == "sqrt_linear":
betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
elif schedule == "sqrt":
betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
else:
raise ValueError(f"schedule '{schedule}' unknown.")
return betas.numpy()
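# Hedged example (added for illustration): the betas returned above are typically turned
# into the cumulative alpha product used throughout the diffusion process. The helper
# below is not part of the original file.
def _example_alphas_cumprod(n_timestep=1000):
    betas = make_beta_schedule("linear", n_timestep)   # numpy array of shape (n_timestep,)
    alphas_cumprod = np.cumprod(1.0 - betas, axis=0)   # \bar{alpha}_t, monotonically decreasing
    return alphas_cumprod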
def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
if ddim_discr_method == 'uniform':
c = num_ddpm_timesteps // num_ddim_timesteps
ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
elif ddim_discr_method == 'quad':
ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
else:
raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"')
# assert ddim_timesteps.shape[0] == num_ddim_timesteps
# add one to get the final alpha values right (the ones from first scale to data during sampling)
steps_out = ddim_timesteps + 1
if verbose:
print(f'Selected timesteps for ddim sampler: {steps_out}')
return steps_out
def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
# select alphas for computing the variance schedule
alphas = alphacums[ddim_timesteps]
alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
    # according to the formula provided in https://arxiv.org/abs/2010.02502
sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
if verbose:
print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
print(f'For the chosen value of eta, which is {eta}, '
f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
return sigmas, alphas, alphas_prev
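# Hedged example (added for illustration): combining the helpers above to build a
# 50-step DDIM schedule from a 1000-step DDPM schedule; names are local to this sketch.
def _example_ddim_schedule(num_ddim_steps=50, num_ddpm_steps=1000, eta=0.0):
    alphacums = np.cumprod(1.0 - make_beta_schedule("linear", num_ddpm_steps), axis=0)
    ddim_timesteps = make_ddim_timesteps("uniform", num_ddim_steps, num_ddpm_steps, verbose=False)
    sigmas, alphas, alphas_prev = make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=False)
    return sigmas, alphas, alphas_prev  # eta=0 gives deterministic DDIM (all sigmas are 0)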
def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
"""
Create a beta schedule that discretizes the given alpha_t_bar function,
which defines the cumulative product of (1-beta) over time from t = [0,1].
:param num_diffusion_timesteps: the number of betas to produce.
:param alpha_bar: a lambda that takes an argument t from 0 to 1 and
produces the cumulative product of (1-beta) up to that
part of the diffusion process.
:param max_beta: the maximum beta to use; use values lower than 1 to
prevent singularities.
"""
betas = []
for i in range(num_diffusion_timesteps):
t1 = i / num_diffusion_timesteps
t2 = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
return np.array(betas)
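# Hedged example (added for illustration): the cosine schedule from "Improved Denoising
# Diffusion Probabilistic Models" expressed through betas_for_alpha_bar; the lambda below
# is that paper's alpha_bar function, reproduced here as an assumption.
def _example_cosine_betas(num_diffusion_timesteps=1000):
    return betas_for_alpha_bar(
        num_diffusion_timesteps,
        lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,
    )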
def extract_into_tensor(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t)
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
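# Hedged shape example (added for illustration): gathering one schedule coefficient per
# batch element and reshaping it so it broadcasts over an image-shaped tensor.
def _example_extract_into_tensor():
    a = torch.linspace(0, 1, 1000)             # e.g. a per-timestep coefficient table
    t = torch.randint(0, 1000, (4,))           # one timestep per batch element
    x = torch.randn(4, 3, 64, 64)
    coef = extract_into_tensor(a, t, x.shape)  # shape (4, 1, 1, 1), broadcasts against x
    return (coef * x).shape                    # torch.Size([4, 3, 64, 64])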
def checkpoint(func, inputs, params, flag):
"""
Evaluate a function without caching intermediate activations, allowing for
reduced memory at the expense of extra compute in the backward pass.
:param func: the function to evaluate.
:param inputs: the argument sequence to pass to `func`.
:param params: a sequence of parameters `func` depends on but does not
explicitly take as arguments.
:param flag: if False, disable gradient checkpointing.
"""
if flag:
args = tuple(inputs) + tuple(params)
return CheckpointFunction.apply(func, len(inputs), *args)
else:
return func(*inputs)
class CheckpointFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, run_function, length, *args):
ctx.run_function = run_function
ctx.input_tensors = list(args[:length])
ctx.input_params = list(args[length:])
with torch.no_grad():
output_tensors = ctx.run_function(*ctx.input_tensors)
return output_tensors
@staticmethod
def backward(ctx, *output_grads):
ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
with torch.enable_grad():
# Fixes a bug where the first op in run_function modifies the
# Tensor storage in place, which is not allowed for detach()'d
# Tensors.
shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
output_tensors = ctx.run_function(*shallow_copies)
input_grads = torch.autograd.grad(
output_tensors,
ctx.input_tensors + ctx.input_params,
output_grads,
allow_unused=True,
)
del ctx.input_tensors
del ctx.input_params
del output_tensors
return (None, None) + input_grads
def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
if not repeat_only:
half = dim // 2
freqs = torch.exp(
-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half
).to(device=timesteps.device)
args = timesteps[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
if dim % 2:
embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
else:
embedding = repeat(timesteps, 'b -> b d', d=dim)
return embedding
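# Hedged shape example (added for illustration): sinusoidal embeddings for a batch of
# timesteps, as consumed by the UNet's time-embedding MLP. Frequencies decrease
# geometrically across the feature dimension.
def _example_timestep_embedding(batch=8, dim=320):
    t = torch.randint(0, 1000, (batch,))
    emb = timestep_embedding(t, dim)
    return emb.shape  # torch.Size([8, 320])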
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
def scale_module(module, scale):
"""
Scale the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().mul_(scale)
return module
def mean_flat(tensor):
"""
Take the mean over all non-batch dimensions.
"""
return tensor.mean(dim=list(range(1, len(tensor.shape))))
def normalization(channels):
"""
Make a standard normalization layer.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNorm32(32, channels)
# PyTorch 1.7 has SiLU, but we support PyTorch 1.5.
class SiLU(nn.Module):
def forward(self, x):
return x * torch.sigmoid(x)
class GroupNorm32(nn.GroupNorm):
def forward(self, x):
return super().forward(x.float()).type(x.dtype)
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
def linear(*args, **kwargs):
"""
Create a linear module.
"""
return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1d(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2d(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
class HybridConditioner(nn.Module):
def __init__(self, c_concat_config, c_crossattn_config):
super().__init__()
self.concat_conditioner = instantiate_from_config(c_concat_config)
self.crossattn_conditioner = instantiate_from_config(c_crossattn_config)
def forward(self, c_concat, c_crossattn):
c_concat = self.concat_conditioner(c_concat)
c_crossattn = self.crossattn_conditioner(c_crossattn)
return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]}
def noise_like(shape, device, repeat=False):
repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
noise = lambda: torch.randn(shape, device=device)
return repeat_noise() if repeat else noise() | EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/diffusionmodules/util.py |
EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/diffusionmodules/__init__.py |
|
# pytorch_diffusion + derived encoder decoder
import math
import torch
import torch.nn as nn
import numpy as np
from einops import rearrange
from ldm.util import instantiate_from_config
from ldm.modules.attention import LinearAttention
from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
def get_timestep_embedding(timesteps, embedding_dim):
"""
    Build sinusoidal timestep embeddings.
    This matches the implementation in Denoising Diffusion Probabilistic Models
    (originally from Fairseq / tensor2tensor), but differs slightly from the
    description in Section 3.5 of "Attention Is All You Need".
"""
assert len(timesteps.shape) == 1
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb)
emb = emb.to(device=timesteps.device)
emb = timesteps.float()[:, None] * emb[None, :]
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
if embedding_dim % 2 == 1: # zero pad
emb = torch.nn.functional.pad(emb, (0,1,0,0))
return emb
def nonlinearity(x):
# swish
return x*torch.sigmoid(x)
def Normalize(in_channels, num_groups=32):
return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)
class Upsample(nn.Module):
def __init__(self, in_channels, with_conv):
super().__init__()
self.with_conv = with_conv
if self.with_conv:
self.conv = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x):
x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
if self.with_conv:
x = self.conv(x)
return x
class Downsample(nn.Module):
def __init__(self, in_channels, with_conv):
super().__init__()
self.with_conv = with_conv
if self.with_conv:
# no asymmetric padding in torch conv, must do it ourselves
self.conv = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=3,
stride=2,
padding=0)
def forward(self, x):
if self.with_conv:
pad = (0,1,0,1)
x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
x = self.conv(x)
else:
x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
return x
class ResnetBlock(nn.Module):
def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
dropout, temb_channels=512):
super().__init__()
self.in_channels = in_channels
out_channels = in_channels if out_channels is None else out_channels
self.out_channels = out_channels
self.use_conv_shortcut = conv_shortcut
self.norm1 = Normalize(in_channels)
self.conv1 = torch.nn.Conv2d(in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
if temb_channels > 0:
self.temb_proj = torch.nn.Linear(temb_channels,
out_channels)
self.norm2 = Normalize(out_channels)
self.dropout = torch.nn.Dropout(dropout)
self.conv2 = torch.nn.Conv2d(out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
if self.in_channels != self.out_channels:
if self.use_conv_shortcut:
self.conv_shortcut = torch.nn.Conv2d(in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
else:
self.nin_shortcut = torch.nn.Conv2d(in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=0)
def forward(self, x, temb):
h = x
h = self.norm1(h)
h = nonlinearity(h)
h = self.conv1(h)
if temb is not None:
h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None]
h = self.norm2(h)
h = nonlinearity(h)
h = self.dropout(h)
h = self.conv2(h)
if self.in_channels != self.out_channels:
if self.use_conv_shortcut:
x = self.conv_shortcut(x)
else:
x = self.nin_shortcut(x)
return x+h
class LinAttnBlock(LinearAttention):
"""to match AttnBlock usage"""
def __init__(self, in_channels):
super().__init__(dim=in_channels, heads=1, dim_head=in_channels)
class AttnBlock(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.in_channels = in_channels
self.norm = Normalize(in_channels)
self.q = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.k = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.v = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.proj_out = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
def forward(self, x):
h_ = x
h_ = self.norm(h_)
q = self.q(h_)
k = self.k(h_)
v = self.v(h_)
# compute attention
b,c,h,w = q.shape
q = q.reshape(b,c,h*w)
q = q.permute(0,2,1) # b,hw,c
k = k.reshape(b,c,h*w) # b,c,hw
w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
w_ = w_ * (int(c)**(-0.5))
w_ = torch.nn.functional.softmax(w_, dim=2)
# attend to values
v = v.reshape(b,c,h*w)
w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q)
h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
h_ = h_.reshape(b,c,h,w)
h_ = self.proj_out(h_)
return x+h_
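# Hedged shape example (added for illustration, not part of the original file): the
# vanilla attention block above is shape-preserving; every spatial position attends to
# every other position over the flattened h*w axis.
def _example_attn_block(channels=64):
    attn = AttnBlock(channels)
    x = torch.randn(1, channels, 16, 16)
    return attn(x).shape  # torch.Size([1, 64, 16, 16])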
def make_attn(in_channels, attn_type="vanilla"):
assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown'
print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
if attn_type == "vanilla":
return AttnBlock(in_channels)
elif attn_type == "none":
return nn.Identity(in_channels)
else:
return LinAttnBlock(in_channels)
class Model(nn.Module):
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"):
super().__init__()
if use_linear_attn: attn_type = "linear"
self.ch = ch
self.temb_ch = self.ch*4
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
self.use_timestep = use_timestep
if self.use_timestep:
# timestep embedding
self.temb = nn.Module()
self.temb.dense = nn.ModuleList([
torch.nn.Linear(self.ch,
self.temb_ch),
torch.nn.Linear(self.temb_ch,
self.temb_ch),
])
# downsampling
self.conv_in = torch.nn.Conv2d(in_channels,
self.ch,
kernel_size=3,
stride=1,
padding=1)
curr_res = resolution
in_ch_mult = (1,)+tuple(ch_mult)
self.down = nn.ModuleList()
for i_level in range(self.num_resolutions):
block = nn.ModuleList()
attn = nn.ModuleList()
block_in = ch*in_ch_mult[i_level]
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks):
block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(make_attn(block_in, attn_type=attn_type))
down = nn.Module()
down.block = block
down.attn = attn
if i_level != self.num_resolutions-1:
down.downsample = Downsample(block_in, resamp_with_conv)
curr_res = curr_res // 2
self.down.append(down)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
self.mid.block_2 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
# upsampling
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
block_out = ch*ch_mult[i_level]
skip_in = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks+1):
if i_block == self.num_res_blocks:
skip_in = ch*in_ch_mult[i_level]
block.append(ResnetBlock(in_channels=block_in+skip_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(make_attn(block_in, attn_type=attn_type))
up = nn.Module()
up.block = block
up.attn = attn
if i_level != 0:
up.upsample = Upsample(block_in, resamp_with_conv)
curr_res = curr_res * 2
self.up.insert(0, up) # prepend to get consistent order
# end
self.norm_out = Normalize(block_in)
self.conv_out = torch.nn.Conv2d(block_in,
out_ch,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x, t=None, context=None):
#assert x.shape[2] == x.shape[3] == self.resolution
if context is not None:
# assume aligned context, cat along channel axis
x = torch.cat((x, context), dim=1)
if self.use_timestep:
# timestep embedding
assert t is not None
temb = get_timestep_embedding(t, self.ch)
temb = self.temb.dense[0](temb)
temb = nonlinearity(temb)
temb = self.temb.dense[1](temb)
else:
temb = None
# downsampling
hs = [self.conv_in(x)]
for i_level in range(self.num_resolutions):
for i_block in range(self.num_res_blocks):
h = self.down[i_level].block[i_block](hs[-1], temb)
if len(self.down[i_level].attn) > 0:
h = self.down[i_level].attn[i_block](h)
hs.append(h)
if i_level != self.num_resolutions-1:
hs.append(self.down[i_level].downsample(hs[-1]))
# middle
h = hs[-1]
h = self.mid.block_1(h, temb)
h = self.mid.attn_1(h)
h = self.mid.block_2(h, temb)
# upsampling
for i_level in reversed(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks+1):
h = self.up[i_level].block[i_block](
torch.cat([h, hs.pop()], dim=1), temb)
if len(self.up[i_level].attn) > 0:
h = self.up[i_level].attn[i_block](h)
if i_level != 0:
h = self.up[i_level].upsample(h)
# end
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
return h
def get_last_layer(self):
return self.conv_out.weight
class Encoder(nn.Module):
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla",
**ignore_kwargs):
super().__init__()
if use_linear_attn: attn_type = "linear"
self.ch = ch
self.temb_ch = 0
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
# downsampling
self.conv_in = torch.nn.Conv2d(in_channels,
self.ch,
kernel_size=3,
stride=1,
padding=1)
curr_res = resolution
in_ch_mult = (1,)+tuple(ch_mult)
self.in_ch_mult = in_ch_mult
self.down = nn.ModuleList()
for i_level in range(self.num_resolutions):
block = nn.ModuleList()
attn = nn.ModuleList()
block_in = ch*in_ch_mult[i_level]
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks):
block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(make_attn(block_in, attn_type=attn_type))# vanilla attention
down = nn.Module()
down.block = block
down.attn = attn
if i_level != self.num_resolutions-1:
down.downsample = Downsample(block_in, resamp_with_conv)
curr_res = curr_res // 2
self.down.append(down)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
self.mid.block_2 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
# end
self.norm_out = Normalize(block_in)# GroupNorm
self.conv_out = torch.nn.Conv2d(block_in,
2*z_channels if double_z else z_channels,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x):
# timestep embedding
temb = None
# downsampling
hs = [self.conv_in(x)]
for i_level in range(self.num_resolutions):
for i_block in range(self.num_res_blocks):
h = self.down[i_level].block[i_block](hs[-1], temb)
if len(self.down[i_level].attn) > 0:
h = self.down[i_level].attn[i_block](h)
hs.append(h)
if i_level != self.num_resolutions-1:
hs.append(self.down[i_level].downsample(hs[-1]))
# middle
h = hs[-1]
h = self.mid.block_1(h, temb)
h = self.mid.attn_1(h)
h = self.mid.block_2(h, temb)
# end
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
return h
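# Hedged usage sketch (added for illustration): a tiny Encoder configuration. With a
# ch_mult of length 2 the input is downsampled once, and double_z doubles the output
# channels so they can parameterise a diagonal Gaussian posterior.
def _example_encoder():
    enc = Encoder(ch=32, out_ch=3, ch_mult=(1, 2), num_res_blocks=1, attn_resolutions=[],
                  in_channels=3, resolution=64, z_channels=4, double_z=True)
    x = torch.randn(1, 3, 64, 64)
    return enc(x).shape  # torch.Size([1, 8, 32, 32]) -> 2 * z_channels moments at 1/2 resolution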
class Decoder(nn.Module):
def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,
attn_type="vanilla", **ignorekwargs):
super().__init__()
if use_linear_attn: attn_type = "linear"
self.ch = ch
self.temb_ch = 0
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
self.resolution = resolution
self.in_channels = in_channels
self.give_pre_end = give_pre_end
self.tanh_out = tanh_out
# compute in_ch_mult, block_in and curr_res at lowest res
in_ch_mult = (1,)+tuple(ch_mult)
block_in = ch*ch_mult[self.num_resolutions-1]
curr_res = resolution // 2**(self.num_resolutions-1)
self.z_shape = (1,z_channels,curr_res,curr_res)
print("Working with z of shape {} = {} dimensions.".format(
self.z_shape, np.prod(self.z_shape)))
# z to block_in
self.conv_in = torch.nn.Conv2d(z_channels,
block_in,
kernel_size=3,
stride=1,
padding=1)
# middle
self.mid = nn.Module()
self.mid.block_1 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
self.mid.block_2 = ResnetBlock(in_channels=block_in,
out_channels=block_in,
temb_channels=self.temb_ch,
dropout=dropout)
# upsampling
self.up = nn.ModuleList()
for i_level in reversed(range(self.num_resolutions)):
block = nn.ModuleList()
attn = nn.ModuleList()
block_out = ch*ch_mult[i_level]
for i_block in range(self.num_res_blocks+1):
block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
if curr_res in attn_resolutions:
attn.append(make_attn(block_in, attn_type=attn_type))
up = nn.Module()
up.block = block
up.attn = attn
if i_level != 0:
up.upsample = Upsample(block_in, resamp_with_conv)
curr_res = curr_res * 2
self.up.insert(0, up) # prepend to get consistent order
# end
self.norm_out = Normalize(block_in)
self.conv_out = torch.nn.Conv2d(block_in,
out_ch,
kernel_size=3,
stride=1,
padding=1)
def forward(self, z):
#assert z.shape[1:] == self.z_shape[1:]
self.last_z_shape = z.shape
# timestep embedding
temb = None
# z to block_in
h = self.conv_in(z)
# middle
h = self.mid.block_1(h, temb)
h = self.mid.attn_1(h)
h = self.mid.block_2(h, temb)
# upsampling
for i_level in reversed(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks+1):
h = self.up[i_level].block[i_block](h, temb)
if len(self.up[i_level].attn) > 0:
h = self.up[i_level].attn[i_block](h)
if i_level != 0:
h = self.up[i_level].upsample(h)
# end
if self.give_pre_end:
return h
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
if self.tanh_out:
h = torch.tanh(h)
return h
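# Hedged usage sketch (added for illustration): the mirror of the Encoder example above,
# mapping a latent back to image space with the same toy configuration.
def _example_decoder():
    dec = Decoder(ch=32, out_ch=3, ch_mult=(1, 2), num_res_blocks=1, attn_resolutions=[],
                  in_channels=3, resolution=64, z_channels=4)
    z = torch.randn(1, 4, 32, 32)
    return dec(z).shape  # torch.Size([1, 3, 64, 64])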
class SimpleDecoder(nn.Module):
def __init__(self, in_channels, out_channels, *args, **kwargs):
super().__init__()
self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1),
ResnetBlock(in_channels=in_channels,
out_channels=2 * in_channels,
temb_channels=0, dropout=0.0),
ResnetBlock(in_channels=2 * in_channels,
out_channels=4 * in_channels,
temb_channels=0, dropout=0.0),
ResnetBlock(in_channels=4 * in_channels,
out_channels=2 * in_channels,
temb_channels=0, dropout=0.0),
nn.Conv2d(2*in_channels, in_channels, 1),
Upsample(in_channels, with_conv=True)])
# end
self.norm_out = Normalize(in_channels)
self.conv_out = torch.nn.Conv2d(in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x):
for i, layer in enumerate(self.model):
if i in [1,2,3]:
x = layer(x, None)
else:
x = layer(x)
h = self.norm_out(x)
h = nonlinearity(h)
x = self.conv_out(h)
return x
class UpsampleDecoder(nn.Module):
def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution,
ch_mult=(2,2), dropout=0.0):
super().__init__()
# upsampling
self.temb_ch = 0
self.num_resolutions = len(ch_mult)
self.num_res_blocks = num_res_blocks
block_in = in_channels
curr_res = resolution // 2 ** (self.num_resolutions - 1)
self.res_blocks = nn.ModuleList()
self.upsample_blocks = nn.ModuleList()
for i_level in range(self.num_resolutions):
res_block = []
block_out = ch * ch_mult[i_level]
for i_block in range(self.num_res_blocks + 1):
res_block.append(ResnetBlock(in_channels=block_in,
out_channels=block_out,
temb_channels=self.temb_ch,
dropout=dropout))
block_in = block_out
self.res_blocks.append(nn.ModuleList(res_block))
if i_level != self.num_resolutions - 1:
self.upsample_blocks.append(Upsample(block_in, True))
curr_res = curr_res * 2
# end
self.norm_out = Normalize(block_in)
self.conv_out = torch.nn.Conv2d(block_in,
out_channels,
kernel_size=3,
stride=1,
padding=1)
def forward(self, x):
# upsampling
h = x
for k, i_level in enumerate(range(self.num_resolutions)):
for i_block in range(self.num_res_blocks + 1):
h = self.res_blocks[i_level][i_block](h, None)
if i_level != self.num_resolutions - 1:
h = self.upsample_blocks[k](h)
h = self.norm_out(h)
h = nonlinearity(h)
h = self.conv_out(h)
return h
class LatentRescaler(nn.Module):
def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2):
super().__init__()
# residual block, interpolate, residual block
self.factor = factor
self.conv_in = nn.Conv2d(in_channels,
mid_channels,
kernel_size=3,
stride=1,
padding=1)
self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
out_channels=mid_channels,
temb_channels=0,
dropout=0.0) for _ in range(depth)])
self.attn = AttnBlock(mid_channels)
self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels,
out_channels=mid_channels,
temb_channels=0,
dropout=0.0) for _ in range(depth)])
self.conv_out = nn.Conv2d(mid_channels,
out_channels,
kernel_size=1,
)
def forward(self, x):
x = self.conv_in(x)
for block in self.res_block1:
x = block(x, None)
x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor))))
x = self.attn(x)
for block in self.res_block2:
x = block(x, None)
x = self.conv_out(x)
return x
class MergedRescaleEncoder(nn.Module):
def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks,
attn_resolutions, dropout=0.0, resamp_with_conv=True,
ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1):
super().__init__()
intermediate_chn = ch * ch_mult[-1]
self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult,
z_channels=intermediate_chn, double_z=False, resolution=resolution,
attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv,
out_ch=None)
self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn,
mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth)
def forward(self, x):
x = self.encoder(x)
x = self.rescaler(x)
return x
class MergedRescaleDecoder(nn.Module):
def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8),
dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1):
super().__init__()
tmp_chn = z_channels*ch_mult[-1]
self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout,
resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks,
ch_mult=ch_mult, resolution=resolution, ch=ch)
self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn,
out_channels=tmp_chn, depth=rescale_module_depth)
def forward(self, x):
x = self.rescaler(x)
x = self.decoder(x)
return x
class Upsampler(nn.Module):
def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2):
super().__init__()
assert out_size >= in_size
num_blocks = int(np.log2(out_size//in_size))+1
factor_up = 1.+ (out_size % in_size)
print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}")
self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels,
out_channels=in_channels)
self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2,
attn_resolutions=[], in_channels=None, ch=in_channels,
ch_mult=[ch_mult for _ in range(num_blocks)])
def forward(self, x):
x = self.rescaler(x)
x = self.decoder(x)
return x
class Resize(nn.Module):
def __init__(self, in_channels=None, learned=False, mode="bilinear"):
super().__init__()
self.with_conv = learned
self.mode = mode
if self.with_conv:
print(f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode")
raise NotImplementedError()
assert in_channels is not None
# no asymmetric padding in torch conv, must do it ourselves
self.conv = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=4,
stride=2,
padding=1)
def forward(self, x, scale_factor=1.0):
if scale_factor==1.0:
return x
else:
x = torch.nn.functional.interpolate(x, mode=self.mode, align_corners=False, scale_factor=scale_factor)
return x
class FirstStagePostProcessor(nn.Module):
def __init__(self, ch_mult:list, in_channels,
pretrained_model:nn.Module=None,
reshape=False,
n_channels=None,
dropout=0.,
pretrained_config=None):
super().__init__()
if pretrained_config is None:
assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
self.pretrained_model = pretrained_model
else:
assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None'
self.instantiate_pretrained(pretrained_config)
self.do_reshape = reshape
if n_channels is None:
n_channels = self.pretrained_model.encoder.ch
self.proj_norm = Normalize(in_channels,num_groups=in_channels//2)
self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3,
stride=1,padding=1)
blocks = []
downs = []
ch_in = n_channels
for m in ch_mult:
blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout))
ch_in = m * n_channels
downs.append(Downsample(ch_in, with_conv=False))
self.model = nn.ModuleList(blocks)
self.downsampler = nn.ModuleList(downs)
def instantiate_pretrained(self, config):
model = instantiate_from_config(config)
self.pretrained_model = model.eval()
# self.pretrained_model.train = False
for param in self.pretrained_model.parameters():
param.requires_grad = False
@torch.no_grad()
def encode_with_pretrained(self,x):
c = self.pretrained_model.encode(x)
if isinstance(c, DiagonalGaussianDistribution):
c = c.mode()
return c
def forward(self,x):
z_fs = self.encode_with_pretrained(x)
z = self.proj_norm(z_fs)
z = self.proj(z)
z = nonlinearity(z)
for submodel, downmodel in zip(self.model,self.downsampler):
z = submodel(z,temb=None)
z = downmodel(z)
if self.do_reshape:
z = rearrange(z,'b c h w -> b (h w) c')
return z
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/diffusionmodules/model.py |
from abc import abstractmethod
from functools import partial
import math
from typing import Iterable
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from ldm.modules.diffusionmodules.util import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
from ldm.modules.attention import SpatialTransformer
from ldm.modules.diffusionmodules.openaimodel import convert_module_to_f16, convert_module_to_f32, AttentionPool2d, \
TimestepBlock, TimestepEmbedSequential, Upsample, TransposedUpsample, Downsample, ResBlock, AttentionBlock, count_flops_attn, \
QKVAttentionLegacy, QKVAttention
class UNetModel(nn.Module):
"""
The full UNet model with attention and timestep embedding.
:param in_channels: channels in the input Tensor.
:param model_channels: base channel count for the model.
:param out_channels: channels in the output Tensor.
:param num_res_blocks: number of residual blocks per downsample.
:param attention_resolutions: a collection of downsample rates at which
attention will take place. May be a set, list, or tuple.
For example, if this contains 4, then at 4x downsampling, attention
will be used.
:param dropout: the dropout probability.
:param channel_mult: channel multiplier for each level of the UNet.
:param conv_resample: if True, use learned convolutions for upsampling and
downsampling.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param num_classes: if specified (as an int), then this model will be
class-conditional with `num_classes` classes.
:param use_checkpoint: use gradient checkpointing to reduce memory usage.
:param num_heads: the number of attention heads in each attention layer.
    :param num_head_channels: if specified, ignore num_heads and instead use
a fixed channel width per attention head.
:param num_heads_upsample: works with num_heads to set a different number
of heads for upsampling. Deprecated.
:param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
:param resblock_updown: use residual blocks for up/downsampling.
:param use_new_attention_order: use a different attention pattern for potentially
increased efficiency.
"""
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
num_classes=None,
use_checkpoint=False,
use_fp16=False,
num_heads=-1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
use_spatial_transformer=False, # custom transformer support
transformer_depth=1, # custom transformer support
context_dim=None, # custom transformer support
n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
legacy=True,
use_context_project=False, # custom text to audio support
use_context_attn=True # custom text to audio support
):
super().__init__()
if use_spatial_transformer:
assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
if context_dim is not None and not use_context_project:
assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
from omegaconf.listconfig import ListConfig
if type(context_dim) == ListConfig:
context_dim = list(context_dim)
if num_heads_upsample == -1:
num_heads_upsample = num_heads
if num_heads == -1:
assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
if num_head_channels == -1:
assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.num_classes = num_classes
self.use_checkpoint = use_checkpoint
self.dtype = th.float16 if use_fp16 else th.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
self.predict_codebook_ids = n_embed is not None
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
if self.num_classes is not None:
self.label_emb = nn.Embedding(num_classes, time_embed_dim)
self.input_blocks = nn.ModuleList(
[
TimestepEmbedSequential(
conv_nd(dims, in_channels, model_channels, 3, padding=1)
)
]
)
self._feature_size = model_channels
input_block_chans = [model_channels]
ch = model_channels
ds = 1
for level, mult in enumerate(channel_mult):
for _ in range(num_res_blocks):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=mult * model_channels,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = mult * model_channels
if ds in attention_resolutions:
if num_head_channels == -1:
dim_head = ch // num_heads
else:
num_heads = ch // num_head_channels
dim_head = num_head_channels
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer(
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
out_ch = ch
self.input_blocks.append(
TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
)
if resblock_updown
else Downsample(
ch, conv_resample, dims=dims, out_channels=out_ch
)
)
)
ch = out_ch
input_block_chans.append(ch)
ds *= 2
self._feature_size += ch
if num_head_channels == -1:
dim_head = ch // num_heads
else:
num_heads = ch // num_head_channels
dim_head = num_head_channels
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer(
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
self.output_blocks = nn.ModuleList([])
for level, mult in list(enumerate(channel_mult))[::-1]:
for i in range(num_res_blocks + 1):
ich = input_block_chans.pop()
layers = [
ResBlock(
ch + ich,
time_embed_dim,
dropout,
out_channels=model_channels * mult,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = model_channels * mult
if ds in attention_resolutions:
if num_head_channels == -1:
dim_head = ch // num_heads
else:
num_heads = ch // num_head_channels
dim_head = num_head_channels
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer(
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
)
)
if level and i == num_res_blocks:
out_ch = ch
layers.append(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
up=True,
)
if resblock_updown
else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
)
ds //= 2
self.output_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
)
if self.predict_codebook_ids:
self.id_predictor = nn.Sequential(
normalization(ch),
conv_nd(dims, model_channels, n_embed, 1),
#nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits
)
self.use_context_project = use_context_project
if use_context_project:
self.context_project = linear(context_dim, time_embed_dim)
self.use_context_attn = use_context_attn
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
self.output_blocks.apply(convert_module_to_f16)
def convert_to_fp32(self):
"""
Convert the torso of the model to float32.
"""
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
self.output_blocks.apply(convert_module_to_f32)
def forward(self, x, timesteps=None, context=None, y=None,**kwargs):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:param context: conditioning plugged in via crossattn
:param y: an [N] Tensor of labels, if class-conditional.
:return: an [N x C x ...] Tensor of outputs.
"""
assert (y is not None) == (
self.num_classes is not None
), "must specify y if and only if the model is class-conditional"
hs = []
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
emb = self.time_embed(t_emb)
if self.num_classes is not None:
assert y.shape == (x.shape[0],)
emb = emb + self.label_emb(y)
# For text-to-audio using global CLIP
if self.use_context_project:
context = self.context_project(context)
emb = emb + context.squeeze(1)
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb, context if self.use_context_attn else None)
hs.append(h)
h = self.middle_block(h, emb, context if self.use_context_attn else None)
for module in self.output_blocks:
h = th.cat([h, hs.pop()], dim=1)
h = module(h, emb, context if self.use_context_attn else None)
h = h.type(x.dtype)
if self.predict_codebook_ids:
return self.id_predictor(h)
else:
return self.out(h)
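# Hedged sketch (added for illustration, not part of the original file): the
# use_context_project path above folds a single global text embedding into the timestep
# embedding instead of relying only on cross-attention. A minimal standalone version of
# that mixing step, assuming a [B, 1, context_dim] context tensor:
def _example_global_context_mixing(batch=2, context_dim=512, time_embed_dim=1024):
    project = linear(context_dim, time_embed_dim)
    emb = th.randn(batch, time_embed_dim)          # stands in for the output of time_embed
    context = th.randn(batch, 1, context_dim)      # e.g. a pooled text embedding
    return (emb + project(context).squeeze(1)).shape  # torch.Size([2, 1024])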
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/diffusionmodules/custom_openaimodel.py |
from abc import abstractmethod
from functools import partial
import math
from typing import Iterable
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from ldm.modules.diffusionmodules.util import (
checkpoint,
conv_nd,
linear,
avg_pool_nd,
zero_module,
normalization,
timestep_embedding,
)
from ldm.modules.attention import SpatialTransformer
# dummy replace
def convert_module_to_f16(x):
pass
def convert_module_to_f32(x):
pass
## go
class AttentionPool2d(nn.Module):
"""
Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py
"""
def __init__(
self,
spacial_dim: int,
embed_dim: int,
num_heads_channels: int,
output_dim: int = None,
):
super().__init__()
self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5)
self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1)
self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1)
self.num_heads = embed_dim // num_heads_channels
self.attention = QKVAttention(self.num_heads)
def forward(self, x):
b, c, *_spatial = x.shape
x = x.reshape(b, c, -1) # NC(HW)
x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1)
x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1)
x = self.qkv_proj(x)
x = self.attention(x)
x = self.c_proj(x)
return x[:, :, 0]
class TimestepBlock(nn.Module):
"""
Any module where forward() takes timestep embeddings as a second argument.
"""
@abstractmethod
def forward(self, x, emb):
"""
Apply the module to `x` given `emb` timestep embeddings.
"""
class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
"""
A sequential module that passes timestep embeddings to the children that
support it as an extra input.
"""
def forward(self, x, emb, context=None):
for layer in self:
if isinstance(layer, TimestepBlock):
x = layer(x, emb)
elif isinstance(layer, SpatialTransformer):
x = layer(x, context)
else:
x = layer(x)
return x
class Upsample(nn.Module):
"""
An upsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
upsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
if use_conv:
self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)
def forward(self, x):
assert x.shape[1] == self.channels
if self.dims == 3:
x = F.interpolate(
x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest"
)
else:
x = F.interpolate(x, scale_factor=2, mode="nearest")
if self.use_conv:
x = self.conv(x)
return x
class TransposedUpsample(nn.Module):
'Learned 2x upsampling without padding'
def __init__(self, channels, out_channels=None, ks=5):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2)
def forward(self,x):
return self.up(x)
class Downsample(nn.Module):
"""
A downsampling layer with an optional convolution.
:param channels: channels in the inputs and outputs.
:param use_conv: a bool determining if a convolution is applied.
:param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
downsampling occurs in the inner-two dimensions.
"""
def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.dims = dims
stride = 2 if dims != 3 else (1, 2, 2)
if use_conv:
self.op = conv_nd(
dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
)
else:
assert self.channels == self.out_channels
self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
def forward(self, x):
assert x.shape[1] == self.channels
return self.op(x)
class ResBlock(TimestepBlock):
"""
A residual block that can optionally change the number of channels.
:param channels: the number of input channels.
:param emb_channels: the number of timestep embedding channels.
:param dropout: the rate of dropout.
:param out_channels: if specified, the number of out channels.
:param use_conv: if True and out_channels is specified, use a spatial
convolution instead of a smaller 1x1 convolution to change the
channels in the skip connection.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param use_checkpoint: if True, use gradient checkpointing on this module.
:param up: if True, use this block for upsampling.
:param down: if True, use this block for downsampling.
"""
def __init__(
self,
channels,
emb_channels,
dropout,
out_channels=None,
use_conv=False,
use_scale_shift_norm=False,
dims=2,
use_checkpoint=False,
up=False,
down=False,
):
super().__init__()
self.channels = channels
self.emb_channels = emb_channels
self.dropout = dropout
self.out_channels = out_channels or channels
self.use_conv = use_conv
self.use_checkpoint = use_checkpoint
self.use_scale_shift_norm = use_scale_shift_norm
self.in_layers = nn.Sequential(
normalization(channels),
nn.SiLU(),
conv_nd(dims, channels, self.out_channels, 3, padding=1),
)
self.updown = up or down
if up:
self.h_upd = Upsample(channels, False, dims)
self.x_upd = Upsample(channels, False, dims)
elif down:
self.h_upd = Downsample(channels, False, dims)
self.x_upd = Downsample(channels, False, dims)
else:
self.h_upd = self.x_upd = nn.Identity()
self.emb_layers = nn.Sequential(
nn.SiLU(),
linear(
emb_channels,
2 * self.out_channels if use_scale_shift_norm else self.out_channels,
),
)
self.out_layers = nn.Sequential(
normalization(self.out_channels),
nn.SiLU(),
nn.Dropout(p=dropout),
zero_module(
conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
),
)
if self.out_channels == channels:
self.skip_connection = nn.Identity()
elif use_conv:
self.skip_connection = conv_nd(
dims, channels, self.out_channels, 3, padding=1
)
else:
self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
def forward(self, x, emb):
"""
Apply the block to a Tensor, conditioned on a timestep embedding.
:param x: an [N x C x ...] Tensor of features.
:param emb: an [N x emb_channels] Tensor of timestep embeddings.
:return: an [N x C x ...] Tensor of outputs.
"""
return checkpoint(
self._forward, (x, emb), self.parameters(), self.use_checkpoint
)
def _forward(self, x, emb):
if self.updown:
in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
h = in_rest(x)
h = self.h_upd(h)
x = self.x_upd(x)
h = in_conv(h)
else:
h = self.in_layers(x)
emb_out = self.emb_layers(emb).type(h.dtype)
while len(emb_out.shape) < len(h.shape):
emb_out = emb_out[..., None]
if self.use_scale_shift_norm:
out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
scale, shift = th.chunk(emb_out, 2, dim=1)
h = out_norm(h) * (1 + scale) + shift
h = out_rest(h)
else:
h = h + emb_out
h = self.out_layers(h)
return self.skip_connection(x) + h
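# Hedged usage sketch (added for illustration): with use_scale_shift_norm=True the
# timestep embedding is split into (scale, shift) and applied as
# h = norm(h) * (1 + scale) + shift, i.e. FiLM-style conditioning.
def _example_resblock_scale_shift(channels=64, emb_channels=128):
    block = ResBlock(channels, emb_channels, dropout=0.0, use_scale_shift_norm=True)
    x = th.randn(2, channels, 16, 16)
    emb = th.randn(2, emb_channels)
    return block(x, emb).shape  # torch.Size([2, 64, 16, 16])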
class AttentionBlock(nn.Module):
"""
An attention block that allows spatial positions to attend to each other.
Originally ported from here, but adapted to the N-d case.
https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.
"""
def __init__(
self,
channels,
num_heads=1,
num_head_channels=-1,
use_checkpoint=False,
use_new_attention_order=False,
):
super().__init__()
self.channels = channels
if num_head_channels == -1:
self.num_heads = num_heads
else:
assert (
channels % num_head_channels == 0
), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
self.num_heads = channels // num_head_channels
self.use_checkpoint = use_checkpoint
self.norm = normalization(channels)
self.qkv = conv_nd(1, channels, channels * 3, 1)
if use_new_attention_order:
# split qkv before split heads
self.attention = QKVAttention(self.num_heads)
else:
# split heads before split qkv
self.attention = QKVAttentionLegacy(self.num_heads)
self.proj_out = zero_module(conv_nd(1, channels, channels, 1))
def forward(self, x):
return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!
#return pt_checkpoint(self._forward, x) # pytorch
def _forward(self, x):
b, c, *spatial = x.shape
x = x.reshape(b, c, -1)
qkv = self.qkv(self.norm(x))
h = self.attention(qkv)
h = self.proj_out(h)
return (x + h).reshape(b, c, *spatial)
def count_flops_attn(model, _x, y):
"""
A counter for the `thop` package to count the operations in an
attention operation.
Meant to be used like:
macs, params = thop.profile(
model,
inputs=(inputs, timestamps),
custom_ops={QKVAttention: QKVAttention.count_flops},
)
"""
b, c, *spatial = y[0].shape
num_spatial = int(np.prod(spatial))
# We perform two matmuls with the same number of ops.
# The first computes the weight matrix, the second computes
# the combination of the value vectors.
matmul_ops = 2 * b * (num_spatial ** 2) * c
model.total_ops += th.DoubleTensor([matmul_ops])
class QKVAttentionLegacy(nn.Module):
"""
    A module which performs QKV attention. Matches legacy QKVAttention + input/output heads shaping
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum(
"bct,bcs->bts", q * scale, k * scale
) # More stable with f16 than dividing afterwards
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum("bts,bcs->bct", weight, v)
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
class QKVAttention(nn.Module):
"""
A module which performs QKV attention and splits in a different order.
"""
def __init__(self, n_heads):
super().__init__()
self.n_heads = n_heads
def forward(self, qkv):
"""
Apply QKV attention.
:param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
:return: an [N x (H * C) x T] tensor after attention.
"""
bs, width, length = qkv.shape
assert width % (3 * self.n_heads) == 0
ch = width // (3 * self.n_heads)
q, k, v = qkv.chunk(3, dim=1)
scale = 1 / math.sqrt(math.sqrt(ch))
weight = th.einsum(
"bct,bcs->bts",
(q * scale).view(bs * self.n_heads, ch, length),
(k * scale).view(bs * self.n_heads, ch, length),
) # More stable with f16 than dividing afterwards
weight = th.softmax(weight.float(), dim=-1).type(weight.dtype)
a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length))
return a.reshape(bs, -1, length)
@staticmethod
def count_flops(model, _x, y):
return count_flops_attn(model, _x, y)
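# Editor's sketch (illustrative, not part of the original file): both attention
# variants consume a packed tensor of shape [N, 3 * H * C, T] and return
# [N, H * C, T]. With assumed sizes N=2, H=4, C=16, T=64:
#
#     attn = QKVAttention(n_heads=4)
#     qkv = th.randn(2, 3 * 4 * 16, 64)
#     out = attn(qkv)  # -> torch.Size([2, 64, 64])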
class UNetModel(nn.Module):
"""
The full UNet model with attention and timestep embedding.
:param in_channels: channels in the input Tensor.
:param model_channels: base channel count for the model.
:param out_channels: channels in the output Tensor.
:param num_res_blocks: number of residual blocks per downsample.
:param attention_resolutions: a collection of downsample rates at which
attention will take place. May be a set, list, or tuple.
For example, if this contains 4, then at 4x downsampling, attention
will be used.
:param dropout: the dropout probability.
:param channel_mult: channel multiplier for each level of the UNet.
:param conv_resample: if True, use learned convolutions for upsampling and
downsampling.
:param dims: determines if the signal is 1D, 2D, or 3D.
:param num_classes: if specified (as an int), then this model will be
class-conditional with `num_classes` classes.
:param use_checkpoint: use gradient checkpointing to reduce memory usage.
:param num_heads: the number of attention heads in each attention layer.
    :param num_head_channels: if specified, ignore num_heads and instead use
a fixed channel width per attention head.
:param num_heads_upsample: works with num_heads to set a different number
of heads for upsampling. Deprecated.
:param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
:param resblock_updown: use residual blocks for up/downsampling.
:param use_new_attention_order: use a different attention pattern for potentially
increased efficiency.
"""
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
num_classes=None,
use_checkpoint=False,
use_fp16=False,
num_heads=-1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
use_spatial_transformer=False, # custom transformer support
transformer_depth=1, # custom transformer support
context_dim=None, # custom transformer support
n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model
legacy=True,
):
super().__init__()
if use_spatial_transformer:
assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
if context_dim is not None:
assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
from omegaconf.listconfig import ListConfig
if type(context_dim) == ListConfig:
context_dim = list(context_dim)
if num_heads_upsample == -1:
num_heads_upsample = num_heads
if num_heads == -1:
assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
if num_head_channels == -1:
assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
self.image_size = image_size
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.num_classes = num_classes
self.use_checkpoint = use_checkpoint
self.dtype = th.float16 if use_fp16 else th.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
self.predict_codebook_ids = n_embed is not None
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
if self.num_classes is not None:
self.label_emb = nn.Embedding(num_classes, time_embed_dim)
self.input_blocks = nn.ModuleList(
[
TimestepEmbedSequential(
conv_nd(dims, in_channels, model_channels, 3, padding=1)# conv2d for txt2img/audio
)
]
)
self._feature_size = model_channels
input_block_chans = [model_channels]
ch = model_channels
ds = 1
# downsample blocks
for level, mult in enumerate(channel_mult):
for _ in range(num_res_blocks):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=mult * model_channels,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = mult * model_channels
if ds in attention_resolutions:
if num_head_channels == -1:
dim_head = ch // num_heads
else:
num_heads = ch // num_head_channels
dim_head = num_head_channels
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer(# transformer_depth is 1
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
out_ch = ch
self.input_blocks.append(
TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
)
if resblock_updown
else Downsample(
ch, conv_resample, dims=dims, out_channels=out_ch
)
)
)
ch = out_ch
input_block_chans.append(ch)
ds *= 2
self._feature_size += ch
if num_head_channels == -1:
dim_head = ch // num_heads
else:
num_heads = ch // num_head_channels
dim_head = num_head_channels
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer(
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
# upsample blocks
self.output_blocks = nn.ModuleList([])
for level, mult in list(enumerate(channel_mult))[::-1]:
for i in range(num_res_blocks + 1):
ich = input_block_chans.pop()
layers = [
ResBlock(
ch + ich,
time_embed_dim,
dropout,
out_channels=model_channels * mult,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = model_channels * mult
if ds in attention_resolutions:
if num_head_channels == -1:
dim_head = ch // num_heads
else:
num_heads = ch // num_head_channels
dim_head = num_head_channels
if legacy:
#num_heads = 1
dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads_upsample,
num_head_channels=dim_head,
use_new_attention_order=use_new_attention_order,
) if not use_spatial_transformer else SpatialTransformer(
ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
)
)
if level and i == num_res_blocks:
out_ch = ch
layers.append(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
up=True,
)
if resblock_updown
else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
)
ds //= 2
self.output_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),
)
if self.predict_codebook_ids:
self.id_predictor = nn.Sequential(
normalization(ch),
conv_nd(dims, model_channels, n_embed, 1),
#nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits
)
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
self.output_blocks.apply(convert_module_to_f16)
def convert_to_fp32(self):
"""
Convert the torso of the model to float32.
"""
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
self.output_blocks.apply(convert_module_to_f32)
def forward(self, x, timesteps=None, context=None, y=None,**kwargs):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps, shape [N].
        :param context: conditioning plugged in via cross-attention; for txt2img the shape is [N, 77, context_dim].
:param y: an [N] Tensor of labels, if class-conditional.
:return: an [N x C x ...] Tensor of outputs.
"""
# print(f"in unet {x.shape}")
assert (y is not None) == (
self.num_classes is not None
), "must specify y if and only if the model is class-conditional"
hs = []
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)# shape [N,self.model_channels]
        emb = self.time_embed(t_emb)  # shape [N, time_embed_dim] = [N, 4 * model_channels]
if self.num_classes is not None:# only for class label
assert y.shape == (x.shape[0],)
emb = emb + self.label_emb(y)
h = x.type(self.dtype)# [N,C,10,106]
for module in self.input_blocks:
h = module(h, emb, context)# 0:[N,self.model_channels,10,106],1:[N,self.model_channels,10,106],2:[N,self.model_channels,10,106] 3:[N,self.model_channels,5,53] 4:[N,self.model_channels,5,53] 5:[N,self.model_channels*2,5,53]
hs.append(h)
h = self.middle_block(h, emb, context)# no shape change
for module in self.output_blocks:
            h = th.cat([h, hs.pop()], dim=1)  # channel dim grows by the skip connection's channels; other dims unchanged
            h = module(h, emb, context)  # channel dim is reduced back; height/width unchanged or doubled by upsampling
        h = h.type(x.dtype)  # at this point h has the same shape as the input x
if self.predict_codebook_ids:
return self.id_predictor(h)
else:
return self.out(h)
class EncoderUNetModel(nn.Module):
"""
The half UNet model with attention and timestep embedding.
For usage, see UNet.
"""
def __init__(
self,
image_size,
in_channels,
model_channels,
out_channels,
num_res_blocks,
attention_resolutions,
dropout=0,
channel_mult=(1, 2, 4, 8),
conv_resample=True,
dims=2,
use_checkpoint=False,
use_fp16=False,
num_heads=1,
num_head_channels=-1,
num_heads_upsample=-1,
use_scale_shift_norm=False,
resblock_updown=False,
use_new_attention_order=False,
pool="adaptive",
*args,
**kwargs
):
super().__init__()
if num_heads_upsample == -1:
num_heads_upsample = num_heads
self.in_channels = in_channels
self.model_channels = model_channels
self.out_channels = out_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.dropout = dropout
self.channel_mult = channel_mult
self.conv_resample = conv_resample
self.use_checkpoint = use_checkpoint
self.dtype = th.float16 if use_fp16 else th.float32
self.num_heads = num_heads
self.num_head_channels = num_head_channels
self.num_heads_upsample = num_heads_upsample
time_embed_dim = model_channels * 4
self.time_embed = nn.Sequential(
linear(model_channels, time_embed_dim),
nn.SiLU(),
linear(time_embed_dim, time_embed_dim),
)
self.input_blocks = nn.ModuleList(
[
TimestepEmbedSequential(
conv_nd(dims, in_channels, model_channels, 3, padding=1)
)
]
)
self._feature_size = model_channels
input_block_chans = [model_channels]
ch = model_channels
ds = 1
for level, mult in enumerate(channel_mult):
for _ in range(num_res_blocks):
layers = [
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=mult * model_channels,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
)
]
ch = mult * model_channels
if ds in attention_resolutions:
layers.append(
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
)
)
self.input_blocks.append(TimestepEmbedSequential(*layers))
self._feature_size += ch
input_block_chans.append(ch)
if level != len(channel_mult) - 1:
out_ch = ch
self.input_blocks.append(
TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
out_channels=out_ch,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
down=True,
)
if resblock_updown
else Downsample(
ch, conv_resample, dims=dims, out_channels=out_ch
)
)
)
ch = out_ch
input_block_chans.append(ch)
ds *= 2
self._feature_size += ch
self.middle_block = TimestepEmbedSequential(
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
AttentionBlock(
ch,
use_checkpoint=use_checkpoint,
num_heads=num_heads,
num_head_channels=num_head_channels,
use_new_attention_order=use_new_attention_order,
),
ResBlock(
ch,
time_embed_dim,
dropout,
dims=dims,
use_checkpoint=use_checkpoint,
use_scale_shift_norm=use_scale_shift_norm,
),
)
self._feature_size += ch
self.pool = pool
if pool == "adaptive":
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
nn.AdaptiveAvgPool2d((1, 1)),
zero_module(conv_nd(dims, ch, out_channels, 1)),
nn.Flatten(),
)
elif pool == "attention":
assert num_head_channels != -1
self.out = nn.Sequential(
normalization(ch),
nn.SiLU(),
AttentionPool2d(
(image_size // ds), ch, num_head_channels, out_channels
),
)
elif pool == "spatial":
self.out = nn.Sequential(
nn.Linear(self._feature_size, 2048),
nn.ReLU(),
nn.Linear(2048, self.out_channels),
)
elif pool == "spatial_v2":
self.out = nn.Sequential(
nn.Linear(self._feature_size, 2048),
normalization(2048),
nn.SiLU(),
nn.Linear(2048, self.out_channels),
)
else:
raise NotImplementedError(f"Unexpected {pool} pooling")
def convert_to_fp16(self):
"""
Convert the torso of the model to float16.
"""
self.input_blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
def convert_to_fp32(self):
"""
Convert the torso of the model to float32.
"""
self.input_blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
def forward(self, x, timesteps):
"""
Apply the model to an input batch.
:param x: an [N x C x ...] Tensor of inputs.
:param timesteps: a 1-D batch of timesteps.
:return: an [N x K] Tensor of outputs.
"""
emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))
results = []
h = x.type(self.dtype)
for module in self.input_blocks:
h = module(h, emb)
if self.pool.startswith("spatial"):
results.append(h.type(x.dtype).mean(dim=(2, 3)))
h = self.middle_block(h, emb)
if self.pool.startswith("spatial"):
results.append(h.type(x.dtype).mean(dim=(2, 3)))
h = th.cat(results, axis=-1)
return self.out(h)
else:
h = h.type(x.dtype)
return self.out(h)
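# Editor's note: minimal smoke-test sketch for UNetModel with assumed (toy)
# hyperparameters; this is not the configuration used elsewhere in the project
# and only runs when this file is executed directly.
if __name__ == "__main__":
    _model = UNetModel(
        image_size=32, in_channels=4, model_channels=32, out_channels=4,
        num_res_blocks=1, attention_resolutions=(4,), channel_mult=(1, 2, 4),
        num_heads=4,
    )
    _x = th.randn(2, 4, 32, 32)
    _t = th.randint(0, 1000, (2,))
    print(_model(_x, _t).shape)  # expected: torch.Size([2, 4, 32, 32])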
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/diffusionmodules/openaimodel.py |
EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/distributions/__init__.py |
|
import torch
import numpy as np
class AbstractDistribution:
def sample(self):
raise NotImplementedError()
def mode(self):
raise NotImplementedError()
class DiracDistribution(AbstractDistribution):
def __init__(self, value):
self.value = value
def sample(self):
return self.value
def mode(self):
return self.value
class DiagonalGaussianDistribution(object):
def __init__(self, parameters, deterministic=False):
self.parameters = parameters
self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
self.deterministic = deterministic
self.std = torch.exp(0.5 * self.logvar)
self.var = torch.exp(self.logvar)
if self.deterministic:
self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)
def sample(self):
x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)
return x
def kl(self, other=None):
if self.deterministic:
return torch.Tensor([0.])
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean, 2)
+ self.var - 1.0 - self.logvar,
dim=[1, 2, 3])
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean, 2) / other.var
+ self.var / other.var - 1.0 - self.logvar + other.logvar,
dim=[1, 2, 3])
def nll(self, sample, dims=[1,2,3]):
if self.deterministic:
return torch.Tensor([0.])
logtwopi = np.log(2.0 * np.pi)
return 0.5 * torch.sum(
logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,
dim=dims)
def mode(self):
return self.mean
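# Editor's sketch (illustrative, not part of the original file): the parameter
# tensor packs mean and log-variance along dim=1, so a latent with 8 channels
# needs 16 parameter channels.
#
#     params = torch.randn(4, 16, 10, 10)
#     dist = DiagonalGaussianDistribution(params)
#     z = dist.sample()   # [4, 8, 10, 10], reparameterized draw
#     kl = dist.kl()      # [4], KL to a standard normal per sample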
def normal_kl(mean1, logvar1, mean2, logvar2):
"""
source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12
Compute the KL divergence between two gaussians.
Shapes are automatically broadcasted, so batches can be compared to
scalars, among other use cases.
"""
tensor = None
for obj in (mean1, logvar1, mean2, logvar2):
if isinstance(obj, torch.Tensor):
tensor = obj
break
assert tensor is not None, "at least one argument must be a Tensor"
# Force variances to be Tensors. Broadcasting helps convert scalars to
# Tensors, but it does not work for torch.exp().
logvar1, logvar2 = [
x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)
for x in (logvar1, logvar2)
]
return 0.5 * (
-1.0
+ logvar2
- logvar1
+ torch.exp(logvar1 - logvar2)
+ ((mean1 - mean2) ** 2) * torch.exp(-logvar2)
)
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/distributions/distributions.py |
import sys
import os
import librosa
import numpy as np
import torch
import audio_to_text.captioning.models
import audio_to_text.captioning.models.encoder
import audio_to_text.captioning.models.decoder
import audio_to_text.captioning.utils.train_util as train_util
def load_model(config, checkpoint):
ckpt = torch.load(checkpoint, "cpu")
encoder_cfg = config["model"]["encoder"]
encoder = train_util.init_obj(
audio_to_text.captioning.models.encoder,
encoder_cfg
)
if "pretrained" in encoder_cfg:
pretrained = encoder_cfg["pretrained"]
train_util.load_pretrained_model(encoder,
pretrained,
sys.stdout.write)
decoder_cfg = config["model"]["decoder"]
if "vocab_size" not in decoder_cfg["args"]:
decoder_cfg["args"]["vocab_size"] = len(ckpt["vocabulary"])
decoder = train_util.init_obj(
audio_to_text.captioning.models.decoder,
decoder_cfg
)
if "word_embedding" in decoder_cfg:
decoder.load_word_embedding(**decoder_cfg["word_embedding"])
if "pretrained" in decoder_cfg:
pretrained = decoder_cfg["pretrained"]
train_util.load_pretrained_model(decoder,
pretrained,
sys.stdout.write)
model = train_util.init_obj(audio_to_text.captioning.models, config["model"],
encoder=encoder, decoder=decoder)
train_util.load_pretrained_model(model, ckpt)
model.eval()
return {
"model": model,
"vocabulary": ckpt["vocabulary"]
}
def decode_caption(word_ids, vocabulary):
candidate = []
for word_id in word_ids:
word = vocabulary[word_id]
if word == "<end>":
break
elif word == "<start>":
continue
candidate.append(word)
candidate = " ".join(candidate)
return candidate
class AudioCapModel(object):
def __init__(self,weight_dir,device='cuda'):
config = os.path.join(weight_dir,'config.yaml')
self.config = train_util.parse_config_or_kwargs(config)
checkpoint = os.path.join(weight_dir,'swa.pth')
resumed = load_model(self.config, checkpoint)
model = resumed["model"]
self.vocabulary = resumed["vocabulary"]
self.model = model.to(device)
self.device = device
def caption(self,audio_list):
if isinstance(audio_list,np.ndarray):
audio_list = [audio_list]
elif isinstance(audio_list,str):
audio_list = [librosa.load(audio_list,sr=32000)[0]]
captions = []
for wav in audio_list:
inputwav = torch.as_tensor(wav).float().unsqueeze(0).to(self.device)
wav_len = torch.LongTensor([len(wav)])
input_dict = {
"mode": "inference",
"wav": inputwav,
"wav_len": wav_len,
"specaug": False,
"sample_method": "beam",
}
print(input_dict)
out_dict = self.model(input_dict)
caption_batch = [decode_caption(seq, self.vocabulary) for seq in \
out_dict["seq"].cpu().numpy()]
captions.extend(caption_batch)
return captions
def __call__(self, audio_list):
return self.caption(audio_list)
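# Editor's sketch of typical usage (the weight directory and audio path are
# placeholders, not taken from the original repository):
#
#     captioner = AudioCapModel("path/to/caption_weights", device="cpu")
#     captions = captioner("example.wav")  # also accepts a 32 kHz numpy waveform
#     print(captions[0])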
| EXA-1-master | exa/models/AudioGPT/audio_to_text/inference_waveform.py |
EXA-1-master | exa/models/AudioGPT/audio_to_text/__init__.py |
|
EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/__init__.py |
|
import math
import torch
class ExponentialDecayScheduler(torch.optim.lr_scheduler._LRScheduler):
def __init__(self, optimizer, total_iters, final_lrs,
warmup_iters=3000, last_epoch=-1, verbose=False):
self.total_iters = total_iters
self.final_lrs = final_lrs
if not isinstance(self.final_lrs, list) and not isinstance(
self.final_lrs, tuple):
self.final_lrs = [self.final_lrs] * len(optimizer.param_groups)
self.warmup_iters = warmup_iters
self.bases = [0.0,] * len(optimizer.param_groups)
super().__init__(optimizer, last_epoch, verbose)
for i, (base_lr, final_lr) in enumerate(zip(self.base_lrs, self.final_lrs)):
base = (final_lr / base_lr) ** (1 / (
self.total_iters - self.warmup_iters))
self.bases[i] = base
def _get_closed_form_lr(self):
warmup_coeff = 1.0
current_iter = self._step_count
if current_iter < self.warmup_iters:
warmup_coeff = current_iter / self.warmup_iters
current_lrs = []
# if not self.linear_warmup:
# for base_lr, final_lr, base in zip(self.base_lrs, self.final_lrs, self.bases):
# # current_lr = warmup_coeff * base_lr * math.exp(((current_iter - self.warmup_iters) / self.total_iters) * math.log(final_lr / base_lr))
# current_lr = warmup_coeff * base_lr * (base ** (current_iter - self.warmup_iters))
# current_lrs.append(current_lr)
# else:
for base_lr, final_lr, base in zip(self.base_lrs, self.final_lrs,
self.bases):
if current_iter <= self.warmup_iters:
current_lr = warmup_coeff * base_lr
else:
# current_lr = warmup_coeff * base_lr * math.exp(((current_iter - self.warmup_iters) / self.total_iters) * math.log(final_lr / base_lr))
current_lr = base_lr * (base ** (current_iter - self.warmup_iters))
current_lrs.append(current_lr)
return current_lrs
def get_lr(self):
return self._get_closed_form_lr()
class NoamScheduler(torch.optim.lr_scheduler._LRScheduler):
def __init__(self, optimizer, model_size=512, factor=1, warmup_iters=3000,
last_epoch=-1, verbose=False):
self.model_size = model_size
self.warmup_iters = warmup_iters
# self.factors = [group["lr"] / (self.model_size ** (-0.5) * self.warmup_iters ** (-0.5)) for group in optimizer.param_groups]
self.factor = factor
super().__init__(optimizer, last_epoch, verbose)
def _get_closed_form_lr(self):
current_iter = self._step_count
current_lrs = []
for _ in self.base_lrs:
current_lr = self.factor * \
(self.model_size ** (-0.5) * min(current_iter ** (-0.5),
current_iter * self.warmup_iters ** (-1.5)))
current_lrs.append(current_lr)
return current_lrs
def get_lr(self):
return self._get_closed_form_lr()
class CosineWithWarmup(torch.optim.lr_scheduler._LRScheduler):
def __init__(self, optimizer, total_iters, warmup_iters,
num_cycles=0.5, last_epoch=-1, verbose=False):
self.total_iters = total_iters
self.warmup_iters = warmup_iters
self.num_cycles = num_cycles
super().__init__(optimizer, last_epoch, verbose)
def lr_lambda(self, iteration):
if iteration < self.warmup_iters:
return float(iteration) / float(max(1, self.warmup_iters))
progress = float(iteration - self.warmup_iters) / float(max(1,
self.total_iters - self.warmup_iters))
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(
self.num_cycles) * 2.0 * progress)))
def _get_closed_form_lr(self):
current_iter = self._step_count
current_lrs = []
for base_lr in self.base_lrs:
current_lr = base_lr * self.lr_lambda(current_iter)
current_lrs.append(current_lr)
return current_lrs
def get_lr(self):
return self._get_closed_form_lr()
if __name__ == "__main__":
model = torch.nn.Linear(10, 5)
optimizer = torch.optim.Adam(model.parameters(), 5e-4)
epochs = 25
iters = 600
scheduler = CosineWithWarmup(optimizer, 600 * 25, 600 * 5,)
# scheduler = ExponentialDecayScheduler(optimizer, 600 * 25, 5e-7, 600 * 5)
criterion = torch.nn.MSELoss()
lrs = []
for epoch in range(1, epochs + 1):
for iteration in range(1, iters + 1):
optimizer.zero_grad()
x = torch.randn(4, 10)
y = torch.randn(4, 5)
loss = criterion(model(x), y)
loss.backward()
optimizer.step()
scheduler.step()
# print(f"lr: {scheduler.get_last_lr()}")
# lrs.append(scheduler.get_last_lr())
lrs.append(optimizer.param_groups[0]["lr"])
import matplotlib.pyplot as plt
plt.plot(list(range(1, len(lrs) + 1)), lrs, '-o', markersize=1)
# plt.legend(loc="best")
plt.xlabel("Iteration")
plt.ylabel("LR")
plt.savefig("lr_curve.png", dpi=100)
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/utils/lr_scheduler.py |
import json
from tqdm import tqdm
import logging
import pickle
from collections import Counter
import re
import fire
class Vocabulary(object):
"""Simple vocabulary wrapper."""
def __init__(self):
self.word2idx = {}
self.idx2word = {}
self.idx = 0
def add_word(self, word):
if not word in self.word2idx:
self.word2idx[word] = self.idx
self.idx2word[self.idx] = word
self.idx += 1
def __call__(self, word):
if not word in self.word2idx:
return self.word2idx["<unk>"]
return self.word2idx[word]
def __len__(self):
return len(self.word2idx)
def build_vocab(input_json: str,
output_json: str,
threshold: int,
keep_punctuation: bool,
character_level: bool = False,
                zh: bool = True ):
    """Build vocabulary from a preprocessed json annotation file, dropping all words with counts < threshold.
    Args:
      input_json(string): Preprocessed json file. Structure like this:
{
'audios': [
{
'audio_id': 'xxx',
'captions': [
{
'caption': 'xxx',
'cap_id': 'xxx'
}
]
},
...
]
}
threshold (int): Threshold to drop all words with counts < threshold
keep_punctuation (bool): Includes or excludes punctuation.
Returns:
vocab (Vocab): Object with the processed vocabulary
"""
data = json.load(open(input_json, "r"))["audios"]
counter = Counter()
pretokenized = "tokens" in data[0]["captions"][0]
if zh:
from ltp import LTP
from zhon.hanzi import punctuation
if not pretokenized:
parser = LTP("base")
for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
for cap_idx in range(len(data[audio_idx]["captions"])):
if pretokenized:
tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split()
else:
caption = data[audio_idx]["captions"][cap_idx]["caption"]
if character_level:
tokens = list(caption)
else:
tokens, _ = parser.seg([caption])
tokens = tokens[0]
# Remove all punctuations
if not keep_punctuation:
tokens = [token for token in tokens if token not in punctuation]
data[audio_idx]["captions"][cap_idx]["tokens"] = " ".join(tokens)
counter.update(tokens)
else:
if pretokenized:
for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
for cap_idx in range(len(data[audio_idx]["captions"])):
tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split()
counter.update(tokens)
else:
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
captions = {}
for audio_idx in range(len(data)):
audio_id = data[audio_idx]["audio_id"]
captions[audio_id] = []
for cap_idx in range(len(data[audio_idx]["captions"])):
caption = data[audio_idx]["captions"][cap_idx]["caption"]
captions[audio_id].append({
"audio_id": audio_id,
"id": cap_idx,
"caption": caption
})
tokenizer = PTBTokenizer()
captions = tokenizer.tokenize(captions)
for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
audio_id = data[audio_idx]["audio_id"]
for cap_idx in range(len(data[audio_idx]["captions"])):
tokens = captions[audio_id][cap_idx]
data[audio_idx]["captions"][cap_idx]["tokens"] = tokens
counter.update(tokens.split(" "))
if not pretokenized:
if output_json is None:
output_json = input_json
json.dump({ "audios": data }, open(output_json, "w"), indent=4, ensure_ascii=not zh)
words = [word for word, cnt in counter.items() if cnt >= threshold]
# Create a vocab wrapper and add some special tokens.
vocab = Vocabulary()
vocab.add_word("<pad>")
vocab.add_word("<start>")
vocab.add_word("<end>")
vocab.add_word("<unk>")
# Add the words to the vocabulary.
for word in words:
vocab.add_word(word)
return vocab
def process(input_json: str,
output_file: str,
output_json: str = None,
threshold: int = 1,
keep_punctuation: bool = False,
character_level: bool = False,
zh: bool = True):
logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=logfmt)
logging.info("Build Vocab")
vocabulary = build_vocab(
input_json=input_json, output_json=output_json, threshold=threshold,
keep_punctuation=keep_punctuation, character_level=character_level, zh=zh)
pickle.dump(vocabulary, open(output_file, "wb"))
logging.info("Total vocabulary size: {}".format(len(vocabulary)))
logging.info("Saved vocab to '{}'".format(output_file))
if __name__ == '__main__':
fire.Fire(process)
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/utils/build_vocab_ltp.py |
import json
from tqdm import tqdm
import logging
import pickle
from collections import Counter
import re
import fire
class Vocabulary(object):
"""Simple vocabulary wrapper."""
def __init__(self):
self.word2idx = {}
self.idx2word = {}
self.idx = 0
def add_word(self, word):
if not word in self.word2idx:
self.word2idx[word] = self.idx
self.idx2word[self.idx] = word
self.idx += 1
def __call__(self, word):
if not word in self.word2idx:
return self.word2idx["<unk>"]
return self.word2idx[word]
def __len__(self):
return len(self.word2idx)
def build_vocab(input_json: str,
output_json: str,
threshold: int,
keep_punctuation: bool,
host_address: str,
character_level: bool = False,
retokenize: bool = True,
                zh: bool = True ):
    """Build vocabulary from a preprocessed json annotation file, dropping all words with counts < threshold.
    Args:
      input_json(string): Preprocessed json file. Structure like this:
{
'audios': [
{
'audio_id': 'xxx',
'captions': [
{
'caption': 'xxx',
'cap_id': 'xxx'
}
]
},
...
]
}
threshold (int): Threshold to drop all words with counts < threshold
keep_punctuation (bool): Includes or excludes punctuation.
Returns:
vocab (Vocab): Object with the processed vocabulary
"""
data = json.load(open(input_json, "r"))["audios"]
counter = Counter()
if retokenize:
pretokenized = False
else:
pretokenized = "tokens" in data[0]["captions"][0]
if zh:
from nltk.parse.corenlp import CoreNLPParser
from zhon.hanzi import punctuation
if not pretokenized:
parser = CoreNLPParser(host_address)
for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
for cap_idx in range(len(data[audio_idx]["captions"])):
if pretokenized:
tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split()
else:
caption = data[audio_idx]["captions"][cap_idx]["caption"]
# Remove all punctuations
if not keep_punctuation:
caption = re.sub("[{}]".format(punctuation), "", caption)
if character_level:
tokens = list(caption)
else:
tokens = list(parser.tokenize(caption))
data[audio_idx]["captions"][cap_idx]["tokens"] = " ".join(tokens)
counter.update(tokens)
else:
if pretokenized:
for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
for cap_idx in range(len(data[audio_idx]["captions"])):
tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split()
counter.update(tokens)
else:
import spacy
tokenizer = spacy.load("en_core_web_sm", disable=["parser", "ner"])
for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
captions = data[audio_idx]["captions"]
for cap_idx in range(len(captions)):
caption = captions[cap_idx]["caption"]
doc = tokenizer(caption)
tokens = " ".join([str(token).lower() for token in doc])
data[audio_idx]["captions"][cap_idx]["tokens"] = tokens
counter.update(tokens.split(" "))
if not pretokenized:
if output_json is None:
json.dump({ "audios": data }, open(input_json, "w"),
indent=4, ensure_ascii=not zh)
else:
json.dump({ "audios": data }, open(output_json, "w"),
indent=4, ensure_ascii=not zh)
words = [word for word, cnt in counter.items() if cnt >= threshold]
# Create a vocab wrapper and add some special tokens.
vocab = Vocabulary()
vocab.add_word("<pad>")
vocab.add_word("<start>")
vocab.add_word("<end>")
vocab.add_word("<unk>")
# Add the words to the vocabulary.
for word in words:
vocab.add_word(word)
return vocab
def process(input_json: str,
output_file: str,
output_json: str = None,
threshold: int = 1,
keep_punctuation: bool = False,
character_level: bool = False,
retokenize: bool = False,
host_address: str = "http://localhost:9000",
zh: bool = True):
logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=logfmt)
logging.info("Build Vocab")
vocabulary = build_vocab(
input_json=input_json, output_json=output_json, threshold=threshold,
keep_punctuation=keep_punctuation, host_address=host_address,
character_level=character_level, retokenize=retokenize, zh=zh)
pickle.dump(vocabulary, open(output_file, "wb"))
logging.info("Total vocabulary size: {}".format(len(vocabulary)))
logging.info("Saved vocab to '{}'".format(output_file))
if __name__ == '__main__':
fire.Fire(process)
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/utils/build_vocab_spacy.py |
import json
from tqdm import tqdm
import logging
import pickle
from collections import Counter
import re
import fire
class Vocabulary(object):
"""Simple vocabulary wrapper."""
def __init__(self):
self.word2idx = {}
self.idx2word = {}
self.idx = 0
def add_word(self, word):
if not word in self.word2idx:
self.word2idx[word] = self.idx
self.idx2word[self.idx] = word
self.idx += 1
def __call__(self, word):
if not word in self.word2idx:
return self.word2idx["<unk>"]
return self.word2idx[word]
def __getitem__(self, word_id):
return self.idx2word[word_id]
def __len__(self):
return len(self.word2idx)
def build_vocab(input_json: str,
threshold: int,
keep_punctuation: bool,
host_address: str,
character_level: bool = False,
                zh: bool = True ):
    """Build vocabulary from a preprocessed json annotation file, dropping all words with counts < threshold.
    Args:
      input_json(string): Preprocessed json file. Structure like this:
{
'audios': [
{
'audio_id': 'xxx',
'captions': [
{
'caption': 'xxx',
'cap_id': 'xxx'
}
]
},
...
]
}
threshold (int): Threshold to drop all words with counts < threshold
keep_punctuation (bool): Includes or excludes punctuation.
Returns:
vocab (Vocab): Object with the processed vocabulary
"""
data = json.load(open(input_json, "r"))["audios"]
counter = Counter()
pretokenized = "tokens" in data[0]["captions"][0]
if zh:
from nltk.parse.corenlp import CoreNLPParser
from zhon.hanzi import punctuation
if not pretokenized:
parser = CoreNLPParser(host_address)
for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
for cap_idx in range(len(data[audio_idx]["captions"])):
if pretokenized:
tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split()
else:
caption = data[audio_idx]["captions"][cap_idx]["caption"]
# Remove all punctuations
if not keep_punctuation:
caption = re.sub("[{}]".format(punctuation), "", caption)
if character_level:
tokens = list(caption)
else:
tokens = list(parser.tokenize(caption))
data[audio_idx]["captions"][cap_idx]["tokens"] = " ".join(tokens)
counter.update(tokens)
else:
if pretokenized:
for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
for cap_idx in range(len(data[audio_idx]["captions"])):
tokens = data[audio_idx]["captions"][cap_idx]["tokens"].split()
counter.update(tokens)
else:
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
captions = {}
for audio_idx in range(len(data)):
audio_id = data[audio_idx]["audio_id"]
captions[audio_id] = []
for cap_idx in range(len(data[audio_idx]["captions"])):
caption = data[audio_idx]["captions"][cap_idx]["caption"]
captions[audio_id].append({
"audio_id": audio_id,
"id": cap_idx,
"caption": caption
})
tokenizer = PTBTokenizer()
captions = tokenizer.tokenize(captions)
for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
audio_id = data[audio_idx]["audio_id"]
for cap_idx in range(len(data[audio_idx]["captions"])):
tokens = captions[audio_id][cap_idx]
data[audio_idx]["captions"][cap_idx]["tokens"] = tokens
counter.update(tokens.split(" "))
if not pretokenized:
json.dump({ "audios": data }, open(input_json, "w"), indent=4, ensure_ascii=not zh)
words = [word for word, cnt in counter.items() if cnt >= threshold]
# Create a vocab wrapper and add some special tokens.
vocab = Vocabulary()
vocab.add_word("<pad>")
vocab.add_word("<start>")
vocab.add_word("<end>")
vocab.add_word("<unk>")
# Add the words to the vocabulary.
for word in words:
vocab.add_word(word)
return vocab
def process(input_json: str,
output_file: str,
threshold: int = 1,
keep_punctuation: bool = False,
character_level: bool = False,
host_address: str = "http://localhost:9000",
zh: bool = False):
logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=logfmt)
logging.info("Build Vocab")
vocabulary = build_vocab(
input_json=input_json, threshold=threshold, keep_punctuation=keep_punctuation,
host_address=host_address, character_level=character_level, zh=zh)
pickle.dump(vocabulary, open(output_file, "wb"))
logging.info("Total vocabulary size: {}".format(len(vocabulary)))
logging.info("Saved vocab to '{}'".format(output_file))
if __name__ == '__main__':
fire.Fire(process)
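# Editor's note: example invocation (file names are placeholders, not from the
# original repository):
#
#     python build_vocab.py data/train_annotation.json data/vocab.pkl --threshold 1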
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/utils/build_vocab.py |
import argparse
import torch
def main(checkpoint):
state_dict = torch.load(checkpoint, map_location="cpu")
if "optimizer" in state_dict:
del state_dict["optimizer"]
if "lr_scheduler" in state_dict:
del state_dict["lr_scheduler"]
torch.save(state_dict, checkpoint)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("checkpoint", type=str)
args = parser.parse_args()
main(args.checkpoint)
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/utils/remove_optimizer.py |
EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/utils/__init__.py |
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python3
import os
import sys
import logging
from typing import Callable, Dict, Union
import yaml
import torch
from torch.optim.swa_utils import AveragedModel as torch_average_model
import numpy as np
import pandas as pd
from pprint import pformat
def load_dict_from_csv(csv, cols):
df = pd.read_csv(csv, sep="\t")
output = dict(zip(df[cols[0]], df[cols[1]]))
return output
def init_logger(filename, level="INFO"):
formatter = logging.Formatter(
"[ %(levelname)s : %(asctime)s ] - %(message)s")
logger = logging.getLogger(__name__ + "." + filename)
logger.setLevel(getattr(logging, level))
# Log results to std
# stdhandler = logging.StreamHandler(sys.stdout)
# stdhandler.setFormatter(formatter)
# Dump log to file
filehandler = logging.FileHandler(filename)
filehandler.setFormatter(formatter)
logger.addHandler(filehandler)
# logger.addHandler(stdhandler)
return logger
def init_obj(module, config, **kwargs):  # e.g. module = captioning.models.encoder, config = {"type": ..., "args": {...}}
obj_args = config["args"].copy()
obj_args.update(kwargs)
return getattr(module, config["type"])(**obj_args)
def pprint_dict(in_dict, outputfun=sys.stdout.write, formatter='yaml'):
"""pprint_dict
:param outputfun: function to use, defaults to sys.stdout
:param in_dict: dict to print
"""
if formatter == 'yaml':
format_fun = yaml.dump
elif formatter == 'pretty':
format_fun = pformat
for line in format_fun(in_dict).split('\n'):
outputfun(line)
def merge_a_into_b(a, b):
# merge dict a into dict b. values in a will overwrite b.
for k, v in a.items():
if isinstance(v, dict) and k in b:
assert isinstance(
b[k], dict
), "Cannot inherit key '{}' from base!".format(k)
merge_a_into_b(v, b[k])
else:
b[k] = v
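# Editor's sketch of the override semantics (illustrative values): values from
# `a` win, and nested dicts are merged recursively.
#
#     base = {"optimizer": {"lr": 1e-3, "type": "Adam"}, "epochs": 25}
#     override = {"optimizer": {"lr": 5e-4}}
#     merge_a_into_b(override, base)
#     # base == {"optimizer": {"lr": 5e-4, "type": "Adam"}, "epochs": 25}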
def load_config(config_file):
with open(config_file, "r") as reader:
config = yaml.load(reader, Loader=yaml.FullLoader)
if "inherit_from" in config:
base_config_file = config["inherit_from"]
base_config_file = os.path.join(
os.path.dirname(config_file), base_config_file
)
assert not os.path.samefile(config_file, base_config_file), \
"inherit from itself"
base_config = load_config(base_config_file)
del config["inherit_from"]
merge_a_into_b(config, base_config)
return base_config
return config
def parse_config_or_kwargs(config_file, **kwargs):
yaml_config = load_config(config_file)
# passed kwargs will override yaml config
args = dict(yaml_config, **kwargs)
return args
def store_yaml(config, config_file):
with open(config_file, "w") as con_writer:
yaml.dump(config, con_writer, indent=4, default_flow_style=False)
class MetricImprover:
def __init__(self, mode):
assert mode in ("min", "max")
self.mode = mode
# min: lower -> better; max: higher -> better
self.best_value = np.inf if mode == "min" else -np.inf
def compare(self, x, best_x):
return x < best_x if self.mode == "min" else x > best_x
def __call__(self, x):
if self.compare(x, self.best_value):
self.best_value = x
return True
return False
def state_dict(self):
return self.__dict__
def load_state_dict(self, state_dict):
self.__dict__.update(state_dict)
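# Editor's sketch (illustrative): MetricImprover reports whether a new metric
# value beats the best seen so far and updates the best value when it does.
#
#     improver = MetricImprover("max")
#     improver(0.30)  # True  (first value always improves on -inf)
#     improver(0.25)  # False
#     improver(0.35)  # True  (best_value is now 0.35)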
def fix_batchnorm(model: torch.nn.Module):
def inner(module):
class_name = module.__class__.__name__
if class_name.find("BatchNorm") != -1:
module.eval()
model.apply(inner)
def load_pretrained_model(model: torch.nn.Module,
pretrained: Union[str, Dict],
output_fn: Callable = sys.stdout.write):
if not isinstance(pretrained, dict) and not os.path.exists(pretrained):
output_fn(f"pretrained {pretrained} not exist!")
return
if hasattr(model, "load_pretrained"):
model.load_pretrained(pretrained)
return
if isinstance(pretrained, dict):
state_dict = pretrained
else:
state_dict = torch.load(pretrained, map_location="cpu")
if "model" in state_dict:
state_dict = state_dict["model"]
model_dict = model.state_dict()
pretrained_dict = {
k: v for k, v in state_dict.items() if (k in model_dict) and (
model_dict[k].shape == v.shape)
}
output_fn(f"Loading pretrained keys {pretrained_dict.keys()}")
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict, strict=True)
class AveragedModel(torch_average_model):
def update_parameters(self, model):
for p_swa, p_model in zip(self.parameters(), model.parameters()):
device = p_swa.device
p_model_ = p_model.detach().to(device)
if self.n_averaged == 0:
p_swa.detach().copy_(p_model_)
else:
p_swa.detach().copy_(self.avg_fn(p_swa.detach(), p_model_,
self.n_averaged.to(device)))
for b_swa, b_model in zip(list(self.buffers())[1:], model.buffers()):
device = b_swa.device
b_model_ = b_model.detach().to(device)
if self.n_averaged == 0:
b_swa.detach().copy_(b_model_)
else:
b_swa.detach().copy_(self.avg_fn(b_swa.detach(), b_model_,
self.n_averaged.to(device)))
self.n_averaged += 1
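# Editor's sketch (illustrative; names such as `train_one_epoch`, `num_epochs`
# and `swa_start` are placeholders): this subclass averages parameters and
# running buffers (e.g. BatchNorm statistics) for stochastic weight averaging.
#
#     swa_model = AveragedModel(model)
#     for epoch in range(num_epochs):
#         train_one_epoch(model)
#         if epoch >= swa_start:
#             swa_model.update_parameters(model)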
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/utils/train_util.py |
import json
from tqdm import tqdm
import re
import fire
def tokenize_caption(input_json: str,
keep_punctuation: bool = False,
host_address: str = None,
character_level: bool = False,
zh: bool = True,
                     output_json: str = None):
    """Tokenize the captions in a preprocessed json annotation file and write the tokens back to json.
    Args:
        input_json(string): Preprocessed json file. Structure like this:
{
'audios': [
{
'audio_id': 'xxx',
'captions': [
{
'caption': 'xxx',
'cap_id': 'xxx'
}
]
},
...
]
}
        keep_punctuation (bool): Includes or excludes punctuation (Chinese only).
        host_address (string): CoreNLP server address used for Chinese tokenization.
        character_level (bool): Tokenize at the character level (Chinese only).
        zh (bool): Whether the captions are Chinese; if False, the PTB tokenizer is used.
        output_json (string): Optional output path; if omitted, input_json is overwritten.
    """
data = json.load(open(input_json, "r"))["audios"]
if zh:
from nltk.parse.corenlp import CoreNLPParser
from zhon.hanzi import punctuation
parser = CoreNLPParser(host_address)
for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
for cap_idx in range(len(data[audio_idx]["captions"])):
caption = data[audio_idx]["captions"][cap_idx]["caption"]
# Remove all punctuations
if not keep_punctuation:
caption = re.sub("[{}]".format(punctuation), "", caption)
if character_level:
tokens = list(caption)
else:
tokens = list(parser.tokenize(caption))
data[audio_idx]["captions"][cap_idx]["tokens"] = " ".join(tokens)
else:
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
captions = {}
for audio_idx in range(len(data)):
audio_id = data[audio_idx]["audio_id"]
captions[audio_id] = []
for cap_idx in range(len(data[audio_idx]["captions"])):
caption = data[audio_idx]["captions"][cap_idx]["caption"]
captions[audio_id].append({
"audio_id": audio_id,
"id": cap_idx,
"caption": caption
})
tokenizer = PTBTokenizer()
captions = tokenizer.tokenize(captions)
for audio_idx in tqdm(range(len(data)), leave=False, ascii=True):
audio_id = data[audio_idx]["audio_id"]
for cap_idx in range(len(data[audio_idx]["captions"])):
tokens = captions[audio_id][cap_idx]
data[audio_idx]["captions"][cap_idx]["tokens"] = tokens
if output_json:
json.dump(
{ "audios": data }, open(output_json, "w"),
indent=4, ensure_ascii=not zh)
else:
json.dump(
{ "audios": data }, open(input_json, "w"),
indent=4, ensure_ascii=not zh)
if __name__ == "__main__":
fire.Fire(tokenize_caption)
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/utils/tokenize_caption.py |
import json
import random
import argparse
import numpy as np
from tqdm import tqdm
from h5py import File
import sklearn.metrics
random.seed(1)
parser = argparse.ArgumentParser()
parser.add_argument("train_feature", type=str)
parser.add_argument("train_corpus", type=str)
parser.add_argument("pred_feature", type=str)
parser.add_argument("output_json", type=str)
args = parser.parse_args()
train_embs = []
train_idx_to_audioid = []
with File(args.train_feature, "r") as store:
for audio_id, embedding in tqdm(store.items(), ascii=True):
train_embs.append(embedding[()])
train_idx_to_audioid.append(audio_id)
train_annotation = json.load(open(args.train_corpus, "r"))["audios"]
train_audioid_to_tokens = {}
for item in train_annotation:
audio_id = item["audio_id"]
train_audioid_to_tokens[audio_id] = [cap_item["tokens"] for cap_item in item["captions"]]
train_embs = np.stack(train_embs)
pred_data = []
pred_embs = []
pred_idx_to_audioids = []
with File(args.pred_feature, "r") as store:
for audio_id, embedding in tqdm(store.items(), ascii=True):
pred_embs.append(embedding[()])
pred_idx_to_audioids.append(audio_id)
pred_embs = np.stack(pred_embs)
similarity = sklearn.metrics.pairwise.cosine_similarity(pred_embs, train_embs)
for idx, audio_id in enumerate(pred_idx_to_audioids):
train_idx = similarity[idx].argmax()
pred_data.append({
"filename": audio_id,
"tokens": random.choice(train_audioid_to_tokens[train_idx_to_audioid[train_idx]])
})
json.dump({"predictions": pred_data}, open(args.output_json, "w"), ensure_ascii=False, indent=4)
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/utils/predict_nn.py |
from pathlib import Path
import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument("--input", help="input filename", type=str, nargs="+")
parser.add_argument("--output", help="output result file", default=None)
args = parser.parse_args()
scores = {}
for path in args.input:
with open(path, "r") as reader:
for line in reader.readlines():
metric, score = line.strip().split(": ")
score = float(score)
if metric not in scores:
scores[metric] = []
scores[metric].append(score)
if len(scores) == 0:
print("No experiment directory found, wrong path?")
exit(1)
with open(args.output, "w") as writer:
print("Average results: ", file=writer)
for metric, score in scores.items():
score = np.array(score)
mean = np.mean(score)
std = np.std(score)
print(f"{metric}: {mean:.3f} (±{std:.3f})", file=writer)
print("", file=writer)
print("Best results: ", file=writer)
for metric, score in scores.items():
score = np.max(score)
print(f"{metric}: {score:.3f}", file=writer)
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/utils/report_results.py |
import copy
import json
import numpy as np
import fire
def evaluate_annotation(key2refs, scorer):
if scorer.method() == "Bleu":
scores = np.array([ 0.0 for n in range(4) ])
else:
scores = 0
num_cap_per_audio = len(next(iter(key2refs.values())))
for i in range(num_cap_per_audio):
if i > 0:
for key in key2refs:
key2refs[key].insert(0, res[key][0])
res = { key: [refs.pop(),] for key, refs in key2refs.items() }
score, _ = scorer.compute_score(key2refs, res)
if scorer.method() == "Bleu":
scores += np.array(score)
else:
scores += score
score = scores / num_cap_per_audio
return score
def evaluate_prediction(key2pred, key2refs, scorer):
if scorer.method() == "Bleu":
scores = np.array([ 0.0 for n in range(4) ])
else:
scores = 0
num_cap_per_audio = len(next(iter(key2refs.values())))
for i in range(num_cap_per_audio):
key2refs_i = {}
for key, refs in key2refs.items():
key2refs_i[key] = refs[:i] + refs[i+1:]
score, _ = scorer.compute_score(key2refs_i, key2pred)
if scorer.method() == "Bleu":
scores += np.array(score)
else:
scores += score
score = scores / num_cap_per_audio
return score
class Evaluator(object):
def eval_annotation(self, annotation, output):
captions = json.load(open(annotation, "r"))["audios"]
key2refs = {}
for audio_idx in range(len(captions)):
audio_id = captions[audio_idx]["audio_id"]
key2refs[audio_id] = []
for caption in captions[audio_idx]["captions"]:
key2refs[audio_id].append(caption["caption"])
from fense.fense import Fense
scores = {}
scorer = Fense()
scores[scorer.method()] = evaluate_annotation(copy.deepcopy(key2refs), scorer)
refs4eval = {}
for key, refs in key2refs.items():
refs4eval[key] = []
for idx, ref in enumerate(refs):
refs4eval[key].append({
"audio_id": key,
"id": idx,
"caption": ref
})
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
tokenizer = PTBTokenizer()
key2refs = tokenizer.tokenize(refs4eval)
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.cider.cider import Cider
from pycocoevalcap.rouge.rouge import Rouge
from pycocoevalcap.meteor.meteor import Meteor
from pycocoevalcap.spice.spice import Spice
scorers = [Bleu(), Rouge(), Cider(), Meteor(), Spice()]
for scorer in scorers:
scores[scorer.method()] = evaluate_annotation(copy.deepcopy(key2refs), scorer)
spider = 0
with open(output, "w") as f:
for name, score in scores.items():
if name == "Bleu":
for n in range(4):
f.write("Bleu-{}: {:6.3f}\n".format(n + 1, score[n]))
else:
f.write("{}: {:6.3f}\n".format(name, score))
if name in ["CIDEr", "SPICE"]:
spider += score
f.write("SPIDEr: {:6.3f}\n".format(spider / 2))
def eval_prediction(self, prediction, annotation, output):
ref_captions = json.load(open(annotation, "r"))["audios"]
key2refs = {}
for audio_idx in range(len(ref_captions)):
audio_id = ref_captions[audio_idx]["audio_id"]
key2refs[audio_id] = []
for caption in ref_captions[audio_idx]["captions"]:
key2refs[audio_id].append(caption["caption"])
pred_captions = json.load(open(prediction, "r"))["predictions"]
key2pred = {}
for audio_idx in range(len(pred_captions)):
item = pred_captions[audio_idx]
audio_id = item["filename"]
key2pred[audio_id] = [item["tokens"]]
from fense.fense import Fense
scores = {}
scorer = Fense()
scores[scorer.method()] = evaluate_prediction(key2pred, key2refs, scorer)
refs4eval = {}
for key, refs in key2refs.items():
refs4eval[key] = []
for idx, ref in enumerate(refs):
refs4eval[key].append({
"audio_id": key,
"id": idx,
"caption": ref
})
preds4eval = {}
for key, preds in key2pred.items():
preds4eval[key] = []
for idx, pred in enumerate(preds):
preds4eval[key].append({
"audio_id": key,
"id": idx,
"caption": pred
})
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
tokenizer = PTBTokenizer()
key2refs = tokenizer.tokenize(refs4eval)
key2pred = tokenizer.tokenize(preds4eval)
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.cider.cider import Cider
from pycocoevalcap.rouge.rouge import Rouge
from pycocoevalcap.meteor.meteor import Meteor
from pycocoevalcap.spice.spice import Spice
scorers = [Bleu(), Rouge(), Cider(), Meteor(), Spice()]
for scorer in scorers:
scores[scorer.method()] = evaluate_prediction(key2pred, key2refs, scorer)
spider = 0
with open(output, "w") as f:
for name, score in scores.items():
if name == "Bleu":
for n in range(4):
f.write("Bleu-{}: {:6.3f}\n".format(n + 1, score[n]))
else:
f.write("{}: {:6.3f}\n".format(name, score))
if name in ["CIDEr", "SPICE"]:
spider += score
f.write("SPIDEr: {:6.3f}\n".format(spider / 2))
if __name__ == "__main__":
fire.Fire(Evaluator)
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/utils/eval_round_robin.py |
import os
import sys
import copy
import pickle
import numpy as np
import pandas as pd
import fire
sys.path.append(os.getcwd())
def coco_score(refs, pred, scorer):
if scorer.method() == "Bleu":
scores = np.array([ 0.0 for n in range(4) ])
else:
scores = 0
num_cap_per_audio = len(refs[list(refs.keys())[0]])
for i in range(num_cap_per_audio):
if i > 0:
for key in refs:
refs[key].insert(0, res[key][0])
res = {key: [refs[key].pop(),] for key in refs}
score, _ = scorer.compute_score(refs, pred)
if scorer.method() == "Bleu":
scores += np.array(score)
else:
scores += score
score = scores / num_cap_per_audio
for key in refs:
refs[key].insert(0, res[key][0])
score_allref, _ = scorer.compute_score(refs, pred)
diff = score_allref - score
return diff
def embedding_score(refs, pred, scorer):
num_cap_per_audio = len(refs[list(refs.keys())[0]])
scores = 0
for i in range(num_cap_per_audio):
res = {key: [refs[key][i],] for key in refs.keys() if len(refs[key]) == num_cap_per_audio}
refs_i = {key: np.concatenate([refs[key][:i], refs[key][i+1:]]) for key in refs.keys() if len(refs[key]) == num_cap_per_audio}
score, _ = scorer.compute_score(refs_i, pred)
scores += score
score = scores / num_cap_per_audio
score_allref, _ = scorer.compute_score(refs, pred)
diff = score_allref - score
return diff
def main(output_file, eval_caption_file, eval_embedding_file, output, zh=False):
output_df = pd.read_json(output_file)
output_df["key"] = output_df["filename"].apply(lambda x: os.path.splitext(os.path.basename(x))[0])
pred = output_df.groupby("key")["tokens"].apply(list).to_dict()
label_df = pd.read_json(eval_caption_file)
if zh:
refs = label_df.groupby("key")["tokens"].apply(list).to_dict()
else:
refs = label_df.groupby("key")["caption"].apply(list).to_dict()
from pycocoevalcap.bleu.bleu import Bleu
from pycocoevalcap.cider.cider import Cider
from pycocoevalcap.rouge.rouge import Rouge
scorer = Bleu(zh=zh)
bleu_scores = coco_score(copy.deepcopy(refs), pred, scorer)
scorer = Cider(zh=zh)
cider_score = coco_score(copy.deepcopy(refs), pred, scorer)
scorer = Rouge(zh=zh)
rouge_score = coco_score(copy.deepcopy(refs), pred, scorer)
if not zh:
from pycocoevalcap.meteor.meteor import Meteor
scorer = Meteor()
meteor_score = coco_score(copy.deepcopy(refs), pred, scorer)
from pycocoevalcap.spice.spice import Spice
scorer = Spice()
spice_score = coco_score(copy.deepcopy(refs), pred, scorer)
# from audiocaptioneval.sentbert.sentencebert import SentenceBert
# scorer = SentenceBert(zh=zh)
# with open(eval_embedding_file, "rb") as f:
# ref_embeddings = pickle.load(f)
# sent_bert = embedding_score(ref_embeddings, pred, scorer)
with open(output, "w") as f:
f.write("Diff:\n")
for n in range(4):
f.write("BLEU-{}: {:6.3f}\n".format(n+1, bleu_scores[n]))
f.write("CIDEr: {:6.3f}\n".format(cider_score))
f.write("ROUGE: {:6.3f}\n".format(rouge_score))
if not zh:
f.write("Meteor: {:6.3f}\n".format(meteor_score))
f.write("SPICE: {:6.3f}\n".format(spice_score))
# f.write("SentenceBert: {:6.3f}\n".format(sent_bert))
if __name__ == "__main__":
fire.Fire(main)
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/utils/model_eval_diff.py |
# coding=utf-8
#!/usr/bin/env python3
import numpy as np
import pandas as pd
import torch
import gensim
from gensim.models import Word2Vec
from tqdm import tqdm
import fire
import sys
import os
sys.path.append(os.getcwd())
from utils.build_vocab import Vocabulary
def create_embedding(vocab_file: str,
embed_size: int,
output: str,
caption_file: str = None,
pretrained_weights_path: str = None,
**word2vec_kwargs):
vocabulary = torch.load(vocab_file, map_location="cpu")
if pretrained_weights_path:
model = gensim.models.KeyedVectors.load_word2vec_format(
fname=pretrained_weights_path,
binary=True,
)
if model.vector_size != embed_size:
            assert embed_size < model.vector_size, f"can only reduce dimension, cannot expand from {model.vector_size} to {embed_size}"
from sklearn.decomposition import PCA
pca = PCA(n_components=embed_size)
model.vectors = pca.fit_transform(model.vectors)
else:
caption_df = pd.read_json(caption_file)
caption_df["tokens"] = caption_df["tokens"].apply(lambda x: ["<start>"] + [token for token in x] + ["<end>"])
sentences = list(caption_df["tokens"].values)
epochs = word2vec_kwargs.get("epochs", 10)
if "epochs" in word2vec_kwargs:
del word2vec_kwargs["epochs"]
model = Word2Vec(size=embed_size, min_count=1, **word2vec_kwargs)
model.build_vocab(sentences=sentences)
model.train(sentences=sentences, total_examples=len(sentences), epochs=epochs)
word_embeddings = np.random.randn(len(vocabulary), embed_size)
if isinstance(model, gensim.models.word2vec.Word2Vec):
model = model.wv
with tqdm(total=len(vocabulary), ascii=True) as pbar:
for word, idx in vocabulary.word2idx.items():
try:
word_embeddings[idx] = model.get_vector(word)
except KeyError:
print(f"word {word} not found in word2vec model, it is random initialized!")
pbar.update()
np.save(output, word_embeddings)
print("Finish writing word2vec embeddings to " + output)
if __name__ == "__main__":
fire.Fire(create_embedding)
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/utils/word2vec/create_word_embedding.py |
#!/usr/bin/env python3
# coding=utf-8
import numpy as np
import pandas as pd
import torch
from gensim.models import FastText
from tqdm import tqdm
import fire
import sys
import os
sys.path.append(os.getcwd())
from utils.build_vocab import Vocabulary
def create_embedding(caption_file: str,
vocab_file: str,
embed_size: int,
output: str,
**fasttext_kwargs):
caption_df = pd.read_json(caption_file)
caption_df["tokens"] = caption_df["tokens"].apply(lambda x: ["<start>"] + [token for token in x] + ["<end>"])
sentences = list(caption_df["tokens"].values)
vocabulary = torch.load(vocab_file, map_location="cpu")
epochs = fasttext_kwargs.get("epochs", 10)
model = FastText(size=embed_size, min_count=1, **fasttext_kwargs)
model.build_vocab(sentences=sentences)
model.train(sentences=sentences, total_examples=len(sentences), epochs=epochs)
word_embeddings = np.zeros((len(vocabulary), embed_size))
with tqdm(total=len(vocabulary), ascii=True) as pbar:
for word, idx in vocabulary.word2idx.items():
if word == "<pad>" or word == "<unk>":
continue
word_embeddings[idx] = model.wv[word]
pbar.update()
np.save(output, word_embeddings)
print("Finish writing fasttext embeddings to " + output)
if __name__ == "__main__":
fire.Fire(create_embedding)
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/utils/fasttext/create_word_embedding.py |
import pickle
import fire
import numpy as np
import pandas as pd
from tqdm import tqdm
class EmbeddingExtractor(object):
def extract_sentbert(self, caption_file: str, output: str, dev: bool=True, zh: bool=False):
from sentence_transformers import SentenceTransformer
lang2model = {
"zh": "distiluse-base-multilingual-cased",
"en": "bert-base-nli-mean-tokens"
}
lang = "zh" if zh else "en"
model = SentenceTransformer(lang2model[lang])
self.extract(caption_file, model, output, dev)
def extract_originbert(self, caption_file: str, output: str, dev: bool=True, ip="localhost"):
from bert_serving.client import BertClient
client = BertClient(ip)
self.extract(caption_file, client, output, dev)
def extract(self, caption_file: str, model, output, dev: bool):
caption_df = pd.read_json(caption_file, dtype={"key": str})
embeddings = {}
if dev:
with tqdm(total=caption_df.shape[0], ascii=True) as pbar:
for idx, row in caption_df.iterrows():
caption = row["caption"]
key = row["key"]
cap_idx = row["caption_index"]
embedding = model.encode([caption])
embedding = np.array(embedding).reshape(-1)
embeddings[f"{key}_{cap_idx}"] = embedding
pbar.update()
else:
dump = {}
with tqdm(total=caption_df.shape[0], ascii=True) as pbar:
for idx, row in caption_df.iterrows():
key = row["key"]
caption = row["caption"]
value = np.array(model.encode([caption])).reshape(-1)
if key not in embeddings.keys():
embeddings[key] = [value]
else:
embeddings[key].append(value)
pbar.update()
for key in embeddings:
dump[key] = np.stack(embeddings[key])
embeddings = dump
with open(output, "wb") as f:
pickle.dump(embeddings, f)
def extract_sbert(self,
input_json: str,
output: str):
from sentence_transformers import SentenceTransformer
import json
import torch
from h5py import File
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = SentenceTransformer("paraphrase-MiniLM-L6-v2")
model = model.to(device)
model.eval()
data = json.load(open(input_json))["audios"]
with torch.no_grad(), tqdm(total=len(data), ascii=True) as pbar, File(output, "w") as store:
for sample in data:
audio_id = sample["audio_id"]
for cap in sample["captions"]:
cap_id = cap["cap_id"]
store[f"{audio_id}_{cap_id}"] = model.encode(cap["caption"])
pbar.update()
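# Example invocations via fire (file names are hypothetical):
#   python create_sent_embedding.py extract_sentbert --caption_file dev.json --output sent_emb.pkl
#   python create_sent_embedding.py extract_sbert --input_json data/caps.json --output sbert.hdf5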
if __name__ == "__main__":
fire.Fire(EmbeddingExtractor)
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/utils/bert/create_sent_embedding.py |
# -*- coding: utf-8 -*-
import sys
import os
from bert_serving.client import BertClient
import numpy as np
from tqdm import tqdm
import fire
import torch
sys.path.append(os.getcwd())
from utils.build_vocab import Vocabulary
def main(vocab_file: str, output: str, server_hostname: str):
client = BertClient(ip=server_hostname)
vocabulary = torch.load(vocab_file)
vocab_size = len(vocabulary)
fake_embedding = client.encode(["test"]).reshape(-1)
embed_size = fake_embedding.shape[0]
print("Encoding words into embeddings with size: ", embed_size)
embeddings = np.empty((vocab_size, embed_size))
for i in tqdm(range(len(embeddings)), ascii=True):
embeddings[i] = client.encode([vocabulary.idx2word[i]])
np.save(output, embeddings)
if __name__ == '__main__':
fire.Fire(main)
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/utils/bert/create_word_embedding.py |
# -*- coding: utf-8 -*-
import math
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from .utils import generate_length_mask, init, PositionalEncoding
class BaseDecoder(nn.Module):
"""
Take word/audio embeddings and output the next word probs
Base decoder, cannot be called directly
All decoders should inherit from this class
"""
def __init__(self, emb_dim, vocab_size, fc_emb_dim,
attn_emb_dim, dropout=0.2):
super().__init__()
self.emb_dim = emb_dim
self.vocab_size = vocab_size
self.fc_emb_dim = fc_emb_dim
self.attn_emb_dim = attn_emb_dim
self.word_embedding = nn.Embedding(vocab_size, emb_dim)
self.in_dropout = nn.Dropout(dropout)
def forward(self, x):
raise NotImplementedError
def load_word_embedding(self, weight, freeze=True):
embedding = np.load(weight)
assert embedding.shape[0] == self.vocab_size, "vocabulary size mismatch"
assert embedding.shape[1] == self.emb_dim, "embed size mismatch"
        embedding = torch.as_tensor(embedding).float()
        self.word_embedding = nn.Embedding.from_pretrained(embedding,
                                                           freeze=freeze)
class RnnDecoder(BaseDecoder):
def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout, d_model, **kwargs):
super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout,)
self.d_model = d_model
self.num_layers = kwargs.get('num_layers', 1)
self.bidirectional = kwargs.get('bidirectional', False)
self.rnn_type = kwargs.get('rnn_type', "GRU")
self.classifier = nn.Linear(
self.d_model * (self.bidirectional + 1), vocab_size)
def forward(self, x):
raise NotImplementedError
def init_hidden(self, bs, device):
num_dire = self.bidirectional + 1
n_layer = self.num_layers
hid_dim = self.d_model
if self.rnn_type == "LSTM":
return (torch.zeros(num_dire * n_layer, bs, hid_dim).to(device),
torch.zeros(num_dire * n_layer, bs, hid_dim).to(device))
else:
return torch.zeros(num_dire * n_layer, bs, hid_dim).to(device)
class RnnFcDecoder(RnnDecoder):
def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim, dropout, d_model, **kwargs):
super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim, dropout, d_model, **kwargs)
self.model = getattr(nn, self.rnn_type)(
input_size=self.emb_dim * 2,
hidden_size=self.d_model,
batch_first=True,
num_layers=self.num_layers,
bidirectional=self.bidirectional)
self.fc_proj = nn.Linear(self.fc_emb_dim, self.emb_dim)
self.apply(init)
def forward(self, input_dict):
word = input_dict["word"]
state = input_dict.get("state", None)
fc_emb = input_dict["fc_emb"]
word = word.to(fc_emb.device)
embed = self.in_dropout(self.word_embedding(word))
p_fc_emb = self.fc_proj(fc_emb)
# embed: [N, T, embed_size]
embed = torch.cat((embed, p_fc_emb), dim=-1)
out, state = self.model(embed, state)
# out: [N, T, hs], states: [num_layers * num_dire, N, hs]
logits = self.classifier(out)
output = {
"state": state,
"embeds": out,
"logits": logits
}
return output
class Seq2SeqAttention(nn.Module):
def __init__(self, hs_enc, hs_dec, attn_size):
"""
Args:
hs_enc: encoder hidden size
hs_dec: decoder hidden size
attn_size: attention vector size
"""
super(Seq2SeqAttention, self).__init__()
self.h2attn = nn.Linear(hs_enc + hs_dec, attn_size)
self.v = nn.Parameter(torch.randn(attn_size))
self.apply(init)
def forward(self, h_dec, h_enc, src_lens):
"""
Args:
h_dec: decoder hidden (query), [N, hs_dec]
h_enc: encoder memory (key/value), [N, src_max_len, hs_enc]
src_lens: source (encoder memory) lengths, [N, ]
"""
N = h_enc.size(0)
src_max_len = h_enc.size(1)
h_dec = h_dec.unsqueeze(1).repeat(1, src_max_len, 1) # [N, src_max_len, hs_dec]
attn_input = torch.cat((h_dec, h_enc), dim=-1)
attn_out = torch.tanh(self.h2attn(attn_input)) # [N, src_max_len, attn_size]
v = self.v.repeat(N, 1).unsqueeze(1) # [N, 1, attn_size]
score = torch.bmm(v, attn_out.transpose(1, 2)).squeeze(1) # [N, src_max_len]
idxs = torch.arange(src_max_len).repeat(N).view(N, src_max_len)
mask = (idxs < src_lens.view(-1, 1)).to(h_dec.device)
score = score.masked_fill(mask == 0, -1e10)
weights = torch.softmax(score, dim=-1) # [N, src_max_len]
ctx = torch.bmm(weights.unsqueeze(1), h_enc).squeeze(1) # [N, hs_enc]
return ctx, weights
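# Shape sketch for Seq2SeqAttention (values below are illustrative only):
#   attn = Seq2SeqAttention(hs_enc=512, hs_dec=512, attn_size=256)
#   ctx, w = attn(torch.randn(4, 512),            # decoder query
#                 torch.randn(4, 30, 512),        # encoder memory
#                 torch.tensor([30, 25, 10, 5]))  # memory lengths
#   # ctx: [4, 512] context vectors, w: [4, 30] attention weights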
class AttentionProj(nn.Module):
def __init__(self, hs_enc, hs_dec, embed_dim, attn_size):
        super().__init__()
        self.q_proj = nn.Linear(hs_dec, embed_dim)
self.kv_proj = nn.Linear(hs_enc, embed_dim)
self.h2attn = nn.Linear(embed_dim * 2, attn_size)
self.v = nn.Parameter(torch.randn(attn_size))
self.apply(init)
def init(self, m):
if isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def forward(self, h_dec, h_enc, src_lens):
"""
Args:
h_dec: decoder hidden (query), [N, hs_dec]
h_enc: encoder memory (key/value), [N, src_max_len, hs_enc]
src_lens: source (encoder memory) lengths, [N, ]
"""
h_enc = self.kv_proj(h_enc) # [N, src_max_len, embed_dim]
h_dec = self.q_proj(h_dec) # [N, embed_dim]
N = h_enc.size(0)
src_max_len = h_enc.size(1)
h_dec = h_dec.unsqueeze(1).repeat(1, src_max_len, 1) # [N, src_max_len, hs_dec]
attn_input = torch.cat((h_dec, h_enc), dim=-1)
attn_out = torch.tanh(self.h2attn(attn_input)) # [N, src_max_len, attn_size]
v = self.v.repeat(N, 1).unsqueeze(1) # [N, 1, attn_size]
score = torch.bmm(v, attn_out.transpose(1, 2)).squeeze(1) # [N, src_max_len]
idxs = torch.arange(src_max_len).repeat(N).view(N, src_max_len)
mask = (idxs < src_lens.view(-1, 1)).to(h_dec.device)
score = score.masked_fill(mask == 0, -1e10)
weights = torch.softmax(score, dim=-1) # [N, src_max_len]
ctx = torch.bmm(weights.unsqueeze(1), h_enc).squeeze(1) # [N, hs_enc]
return ctx, weights
class BahAttnDecoder(RnnDecoder):
def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout, d_model, **kwargs):
"""
concatenate fc, attn, word to feed to the rnn
"""
super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout, d_model, **kwargs)
attn_size = kwargs.get("attn_size", self.d_model)
self.model = getattr(nn, self.rnn_type)(
input_size=self.emb_dim * 3,
hidden_size=self.d_model,
batch_first=True,
num_layers=self.num_layers,
bidirectional=self.bidirectional)
self.attn = Seq2SeqAttention(self.attn_emb_dim,
self.d_model * (self.bidirectional + 1) * \
self.num_layers,
attn_size)
self.fc_proj = nn.Linear(self.fc_emb_dim, self.emb_dim)
self.ctx_proj = nn.Linear(self.attn_emb_dim, self.emb_dim)
self.apply(init)
def forward(self, input_dict):
word = input_dict["word"]
state = input_dict.get("state", None) # [n_layer * n_dire, bs, d_model]
fc_emb = input_dict["fc_emb"]
attn_emb = input_dict["attn_emb"]
attn_emb_len = input_dict["attn_emb_len"]
word = word.to(fc_emb.device)
embed = self.in_dropout(self.word_embedding(word))
# embed: [N, 1, embed_size]
if state is None:
state = self.init_hidden(word.size(0), fc_emb.device)
if self.rnn_type == "LSTM":
query = state[0].transpose(0, 1).flatten(1)
else:
query = state.transpose(0, 1).flatten(1)
c, attn_weight = self.attn(query, attn_emb, attn_emb_len)
p_fc_emb = self.fc_proj(fc_emb)
p_ctx = self.ctx_proj(c)
rnn_input = torch.cat((embed, p_ctx.unsqueeze(1), p_fc_emb.unsqueeze(1)),
dim=-1)
out, state = self.model(rnn_input, state)
output = {
"state": state,
"embed": out,
"logit": self.classifier(out),
"attn_weight": attn_weight
}
return output
class BahAttnDecoder2(RnnDecoder):
def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout, d_model, **kwargs):
"""
add fc, attn, word together to feed to the rnn
"""
super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout, d_model, **kwargs)
attn_size = kwargs.get("attn_size", self.d_model)
self.model = getattr(nn, self.rnn_type)(
input_size=self.emb_dim,
hidden_size=self.d_model,
batch_first=True,
num_layers=self.num_layers,
bidirectional=self.bidirectional)
self.attn = Seq2SeqAttention(self.emb_dim,
self.d_model * (self.bidirectional + 1) * \
self.num_layers,
attn_size)
self.fc_proj = nn.Linear(self.fc_emb_dim, self.emb_dim)
self.attn_proj = nn.Linear(self.attn_emb_dim, self.emb_dim)
self.apply(partial(init, method="xavier"))
def forward(self, input_dict):
word = input_dict["word"]
state = input_dict.get("state", None) # [n_layer * n_dire, bs, d_model]
fc_emb = input_dict["fc_emb"]
attn_emb = input_dict["attn_emb"]
attn_emb_len = input_dict["attn_emb_len"]
word = word.to(fc_emb.device)
embed = self.in_dropout(self.word_embedding(word))
p_attn_emb = self.attn_proj(attn_emb)
# embed: [N, 1, embed_size]
if state is None:
state = self.init_hidden(word.size(0), fc_emb.device)
if self.rnn_type == "LSTM":
query = state[0].transpose(0, 1).flatten(1)
else:
query = state.transpose(0, 1).flatten(1)
c, attn_weight = self.attn(query, p_attn_emb, attn_emb_len)
p_fc_emb = self.fc_proj(fc_emb)
rnn_input = embed + c.unsqueeze(1) + p_fc_emb.unsqueeze(1)
out, state = self.model(rnn_input, state)
output = {
"state": state,
"embed": out,
"logit": self.classifier(out),
"attn_weight": attn_weight
}
return output
class ConditionalBahAttnDecoder(RnnDecoder):
def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout, d_model, **kwargs):
"""
concatenate fc, attn, word to feed to the rnn
"""
super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout, d_model, **kwargs)
attn_size = kwargs.get("attn_size", self.d_model)
self.model = getattr(nn, self.rnn_type)(
input_size=self.emb_dim * 3,
hidden_size=self.d_model,
batch_first=True,
num_layers=self.num_layers,
bidirectional=self.bidirectional)
self.attn = Seq2SeqAttention(self.attn_emb_dim,
self.d_model * (self.bidirectional + 1) * \
self.num_layers,
attn_size)
self.ctx_proj = nn.Linear(self.attn_emb_dim, self.emb_dim)
self.condition_embedding = nn.Embedding(2, emb_dim)
self.apply(init)
def forward(self, input_dict):
word = input_dict["word"]
state = input_dict.get("state", None) # [n_layer * n_dire, bs, d_model]
fc_emb = input_dict["fc_emb"]
attn_emb = input_dict["attn_emb"]
attn_emb_len = input_dict["attn_emb_len"]
condition = input_dict["condition"]
word = word.to(fc_emb.device)
embed = self.in_dropout(self.word_embedding(word))
condition = torch.as_tensor([[1 - c, c] for c in condition]).to(fc_emb.device)
condition_emb = torch.matmul(condition, self.condition_embedding.weight)
# condition_embs: [N, emb_dim]
# embed: [N, 1, embed_size]
if state is None:
state = self.init_hidden(word.size(0), fc_emb.device)
if self.rnn_type == "LSTM":
query = state[0].transpose(0, 1).flatten(1)
else:
query = state.transpose(0, 1).flatten(1)
c, attn_weight = self.attn(query, attn_emb, attn_emb_len)
p_ctx = self.ctx_proj(c)
rnn_input = torch.cat((embed, p_ctx.unsqueeze(1), condition_emb.unsqueeze(1)),
dim=-1)
out, state = self.model(rnn_input, state)
output = {
"state": state,
"embed": out,
"logit": self.classifier(out),
"attn_weight": attn_weight
}
return output
class StructBahAttnDecoder(RnnDecoder):
def __init__(self, emb_dim, vocab_size, fc_emb_dim, struct_vocab_size,
attn_emb_dim, dropout, d_model, **kwargs):
"""
concatenate fc, attn, word to feed to the rnn
"""
super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout, d_model, **kwargs)
attn_size = kwargs.get("attn_size", self.d_model)
self.model = getattr(nn, self.rnn_type)(
input_size=self.emb_dim * 3,
hidden_size=self.d_model,
batch_first=True,
num_layers=self.num_layers,
bidirectional=self.bidirectional)
self.attn = Seq2SeqAttention(self.attn_emb_dim,
self.d_model * (self.bidirectional + 1) * \
self.num_layers,
attn_size)
self.ctx_proj = nn.Linear(self.attn_emb_dim, self.emb_dim)
self.struct_embedding = nn.Embedding(struct_vocab_size, emb_dim)
self.apply(init)
def forward(self, input_dict):
word = input_dict["word"]
state = input_dict.get("state", None) # [n_layer * n_dire, bs, d_model]
fc_emb = input_dict["fc_emb"]
attn_emb = input_dict["attn_emb"]
attn_emb_len = input_dict["attn_emb_len"]
structure = input_dict["structure"]
word = word.to(fc_emb.device)
embed = self.in_dropout(self.word_embedding(word))
struct_emb = self.struct_embedding(structure)
# struct_embs: [N, emb_dim]
# embed: [N, 1, embed_size]
if state is None:
state = self.init_hidden(word.size(0), fc_emb.device)
if self.rnn_type == "LSTM":
query = state[0].transpose(0, 1).flatten(1)
else:
query = state.transpose(0, 1).flatten(1)
c, attn_weight = self.attn(query, attn_emb, attn_emb_len)
p_ctx = self.ctx_proj(c)
rnn_input = torch.cat((embed, p_ctx.unsqueeze(1), struct_emb.unsqueeze(1)), dim=-1)
out, state = self.model(rnn_input, state)
output = {
"state": state,
"embed": out,
"logit": self.classifier(out),
"attn_weight": attn_weight
}
return output
class StyleBahAttnDecoder(RnnDecoder):
def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout, d_model, **kwargs):
"""
concatenate fc, attn, word to feed to the rnn
"""
super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout, d_model, **kwargs)
attn_size = kwargs.get("attn_size", self.d_model)
self.model = getattr(nn, self.rnn_type)(
input_size=self.emb_dim * 3,
hidden_size=self.d_model,
batch_first=True,
num_layers=self.num_layers,
bidirectional=self.bidirectional)
self.attn = Seq2SeqAttention(self.attn_emb_dim,
self.d_model * (self.bidirectional + 1) * \
self.num_layers,
attn_size)
self.ctx_proj = nn.Linear(self.attn_emb_dim, self.emb_dim)
self.apply(init)
def forward(self, input_dict):
word = input_dict["word"]
state = input_dict.get("state", None) # [n_layer * n_dire, bs, d_model]
fc_emb = input_dict["fc_emb"]
attn_emb = input_dict["attn_emb"]
attn_emb_len = input_dict["attn_emb_len"]
style = input_dict["style"]
word = word.to(fc_emb.device)
embed = self.in_dropout(self.word_embedding(word))
# embed: [N, 1, embed_size]
if state is None:
state = self.init_hidden(word.size(0), fc_emb.device)
if self.rnn_type == "LSTM":
query = state[0].transpose(0, 1).flatten(1)
else:
query = state.transpose(0, 1).flatten(1)
c, attn_weight = self.attn(query, attn_emb, attn_emb_len)
p_ctx = self.ctx_proj(c)
rnn_input = torch.cat((embed, p_ctx.unsqueeze(1), style.unsqueeze(1)),
dim=-1)
out, state = self.model(rnn_input, state)
output = {
"state": state,
"embed": out,
"logit": self.classifier(out),
"attn_weight": attn_weight
}
return output
class BahAttnDecoder3(RnnDecoder):
def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout, d_model, **kwargs):
"""
concatenate fc, attn, word to feed to the rnn
"""
super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout, d_model, **kwargs)
attn_size = kwargs.get("attn_size", self.d_model)
self.model = getattr(nn, self.rnn_type)(
input_size=self.emb_dim + attn_emb_dim,
hidden_size=self.d_model,
batch_first=True,
num_layers=self.num_layers,
bidirectional=self.bidirectional)
self.attn = Seq2SeqAttention(self.attn_emb_dim,
self.d_model * (self.bidirectional + 1) * \
self.num_layers,
attn_size)
self.ctx_proj = lambda x: x
self.apply(init)
def forward(self, input_dict):
word = input_dict["word"]
state = input_dict.get("state", None) # [n_layer * n_dire, bs, d_model]
fc_emb = input_dict["fc_emb"]
attn_emb = input_dict["attn_emb"]
attn_emb_len = input_dict["attn_emb_len"]
if word.size(-1) == self.fc_emb_dim: # fc_emb
embed = word.unsqueeze(1)
elif word.size(-1) == 1: # word
word = word.to(fc_emb.device)
embed = self.in_dropout(self.word_embedding(word))
else:
raise Exception(f"problem with word input size {word.size()}")
# embed: [N, 1, embed_size]
if state is None:
state = self.init_hidden(word.size(0), fc_emb.device)
if self.rnn_type == "LSTM":
query = state[0].transpose(0, 1).flatten(1)
else:
query = state.transpose(0, 1).flatten(1)
c, attn_weight = self.attn(query, attn_emb, attn_emb_len)
p_ctx = self.ctx_proj(c)
rnn_input = torch.cat((embed, p_ctx.unsqueeze(1)), dim=-1)
out, state = self.model(rnn_input, state)
output = {
"state": state,
"embed": out,
"logit": self.classifier(out),
"attn_weight": attn_weight
}
return output
class SpecificityBahAttnDecoder(RnnDecoder):
def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout, d_model, **kwargs):
"""
concatenate fc, attn, word to feed to the rnn
"""
super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout, d_model, **kwargs)
attn_size = kwargs.get("attn_size", self.d_model)
self.model = getattr(nn, self.rnn_type)(
input_size=self.emb_dim + attn_emb_dim + 1,
hidden_size=self.d_model,
batch_first=True,
num_layers=self.num_layers,
bidirectional=self.bidirectional)
self.attn = Seq2SeqAttention(self.attn_emb_dim,
self.d_model * (self.bidirectional + 1) * \
self.num_layers,
attn_size)
self.ctx_proj = lambda x: x
self.apply(init)
def forward(self, input_dict):
word = input_dict["word"]
state = input_dict.get("state", None) # [n_layer * n_dire, bs, d_model]
fc_emb = input_dict["fc_emb"]
attn_emb = input_dict["attn_emb"]
attn_emb_len = input_dict["attn_emb_len"]
condition = input_dict["condition"] # [N,]
word = word.to(fc_emb.device)
embed = self.in_dropout(self.word_embedding(word))
# embed: [N, 1, embed_size]
if state is None:
state = self.init_hidden(word.size(0), fc_emb.device)
if self.rnn_type == "LSTM":
query = state[0].transpose(0, 1).flatten(1)
else:
query = state.transpose(0, 1).flatten(1)
c, attn_weight = self.attn(query, attn_emb, attn_emb_len)
p_ctx = self.ctx_proj(c)
rnn_input = torch.cat(
(embed, p_ctx.unsqueeze(1), condition.reshape(-1, 1, 1)),
dim=-1)
out, state = self.model(rnn_input, state)
output = {
"state": state,
"embed": out,
"logit": self.classifier(out),
"attn_weight": attn_weight
}
return output
class TransformerDecoder(BaseDecoder):
def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim, dropout, **kwargs):
super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout=dropout,)
self.d_model = emb_dim
self.nhead = kwargs.get("nhead", self.d_model // 64)
self.nlayers = kwargs.get("nlayers", 2)
self.dim_feedforward = kwargs.get("dim_feedforward", self.d_model * 4)
self.pos_encoder = PositionalEncoding(self.d_model, dropout)
layer = nn.TransformerDecoderLayer(d_model=self.d_model,
nhead=self.nhead,
dim_feedforward=self.dim_feedforward,
dropout=dropout)
self.model = nn.TransformerDecoder(layer, self.nlayers)
self.classifier = nn.Linear(self.d_model, vocab_size)
self.attn_proj = nn.Sequential(
nn.Linear(self.attn_emb_dim, self.d_model),
nn.ReLU(),
nn.Dropout(dropout),
nn.LayerNorm(self.d_model)
)
# self.attn_proj = lambda x: x
self.init_params()
def init_params(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def generate_square_subsequent_mask(self, max_length):
mask = (torch.triu(torch.ones(max_length, max_length)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
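    # For max_length = 3 the mask above is
    #   [[0., -inf, -inf],
    #    [0.,   0., -inf],
    #    [0.,   0.,   0.]],
    # i.e. position t may only attend to positions <= t (causal decoding).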
def forward(self, input_dict):
word = input_dict["word"]
attn_emb = input_dict["attn_emb"]
attn_emb_len = input_dict["attn_emb_len"]
cap_padding_mask = input_dict["cap_padding_mask"]
p_attn_emb = self.attn_proj(attn_emb)
p_attn_emb = p_attn_emb.transpose(0, 1) # [T_src, N, emb_dim]
word = word.to(attn_emb.device)
embed = self.in_dropout(self.word_embedding(word)) * math.sqrt(self.emb_dim) # [N, T, emb_dim]
embed = embed.transpose(0, 1) # [T, N, emb_dim]
embed = self.pos_encoder(embed)
tgt_mask = self.generate_square_subsequent_mask(embed.size(0)).to(attn_emb.device)
memory_key_padding_mask = ~generate_length_mask(attn_emb_len, attn_emb.size(1)).to(attn_emb.device)
output = self.model(embed, p_attn_emb, tgt_mask=tgt_mask,
tgt_key_padding_mask=cap_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
output = output.transpose(0, 1)
output = {
"embed": output,
"logit": self.classifier(output),
}
return output
class EventTransformerDecoder(TransformerDecoder):
def forward(self, input_dict):
word = input_dict["word"] # index of word embeddings
attn_emb = input_dict["attn_emb"]
attn_emb_len = input_dict["attn_emb_len"]
cap_padding_mask = input_dict["cap_padding_mask"]
event_emb = input_dict["event"] # [N, emb_dim]
p_attn_emb = self.attn_proj(attn_emb)
p_attn_emb = p_attn_emb.transpose(0, 1) # [T_src, N, emb_dim]
word = word.to(attn_emb.device)
embed = self.in_dropout(self.word_embedding(word)) * math.sqrt(self.emb_dim) # [N, T, emb_dim]
embed = embed.transpose(0, 1) # [T, N, emb_dim]
embed += event_emb
embed = self.pos_encoder(embed)
tgt_mask = self.generate_square_subsequent_mask(embed.size(0)).to(attn_emb.device)
memory_key_padding_mask = ~generate_length_mask(attn_emb_len, attn_emb.size(1)).to(attn_emb.device)
output = self.model(embed, p_attn_emb, tgt_mask=tgt_mask,
tgt_key_padding_mask=cap_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
output = output.transpose(0, 1)
output = {
"embed": output,
"logit": self.classifier(output),
}
return output
class KeywordProbTransformerDecoder(TransformerDecoder):
def __init__(self, emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout, keyword_classes_num, **kwargs):
super().__init__(emb_dim, vocab_size, fc_emb_dim, attn_emb_dim,
dropout, **kwargs)
self.keyword_proj = nn.Linear(keyword_classes_num, self.d_model)
self.word_keyword_norm = nn.LayerNorm(self.d_model)
def forward(self, input_dict):
word = input_dict["word"] # index of word embeddings
attn_emb = input_dict["attn_emb"]
attn_emb_len = input_dict["attn_emb_len"]
cap_padding_mask = input_dict["cap_padding_mask"]
keyword = input_dict["keyword"] # [N, keyword_classes_num]
p_attn_emb = self.attn_proj(attn_emb)
p_attn_emb = p_attn_emb.transpose(0, 1) # [T_src, N, emb_dim]
word = word.to(attn_emb.device)
embed = self.in_dropout(self.word_embedding(word)) * math.sqrt(self.emb_dim) # [N, T, emb_dim]
embed = embed.transpose(0, 1) # [T, N, emb_dim]
embed += self.keyword_proj(keyword)
embed = self.word_keyword_norm(embed)
embed = self.pos_encoder(embed)
tgt_mask = self.generate_square_subsequent_mask(embed.size(0)).to(attn_emb.device)
memory_key_padding_mask = ~generate_length_mask(attn_emb_len, attn_emb.size(1)).to(attn_emb.device)
output = self.model(embed, p_attn_emb, tgt_mask=tgt_mask,
tgt_key_padding_mask=cap_padding_mask,
memory_key_padding_mask=memory_key_padding_mask)
output = output.transpose(0, 1)
output = {
"embed": output,
"logit": self.classifier(output),
}
return output
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/models/decoder.py |
# -*- coding: utf-8 -*-
import random
import torch
import torch.nn as nn
from .base_model import CaptionModel
from .utils import repeat_tensor
import audio_to_text.captioning.models.decoder
import audio_to_text.captioning.models.encoder
class TransformerModel(CaptionModel):
def __init__(self, encoder: nn.Module, decoder: nn.Module, **kwargs):
if not hasattr(self, "compatible_decoders"):
self.compatible_decoders = (
audio_to_text.captioning.models.decoder.TransformerDecoder,
)
super().__init__(encoder, decoder, **kwargs)
def seq_forward(self, input_dict):
cap = input_dict["cap"]
cap_padding_mask = (cap == self.pad_idx).to(cap.device)
cap_padding_mask = cap_padding_mask[:, :-1]
output = self.decoder(
{
"word": cap[:, :-1],
"attn_emb": input_dict["attn_emb"],
"attn_emb_len": input_dict["attn_emb_len"],
"cap_padding_mask": cap_padding_mask
}
)
return output
def prepare_decoder_input(self, input_dict, output):
decoder_input = {
"attn_emb": input_dict["attn_emb"],
"attn_emb_len": input_dict["attn_emb_len"]
}
t = input_dict["t"]
###############
# determine input word
################
if input_dict["mode"] == "train" and random.random() < input_dict["ss_ratio"]: # training, scheduled sampling
word = input_dict["cap"][:, :t+1]
else:
start_word = torch.tensor([self.start_idx,] * input_dict["attn_emb"].size(0)).unsqueeze(1).long()
if t == 0:
word = start_word
else:
word = torch.cat((start_word, output["seq"][:, :t]), dim=-1)
# word: [N, T]
decoder_input["word"] = word
cap_padding_mask = (word == self.pad_idx).to(input_dict["attn_emb"].device)
decoder_input["cap_padding_mask"] = cap_padding_mask
return decoder_input
def prepare_beamsearch_decoder_input(self, input_dict, output_i):
decoder_input = {}
t = input_dict["t"]
i = input_dict["sample_idx"]
beam_size = input_dict["beam_size"]
###############
# prepare attn embeds
################
if t == 0:
attn_emb = repeat_tensor(input_dict["attn_emb"][i], beam_size)
attn_emb_len = repeat_tensor(input_dict["attn_emb_len"][i], beam_size)
output_i["attn_emb"] = attn_emb
output_i["attn_emb_len"] = attn_emb_len
decoder_input["attn_emb"] = output_i["attn_emb"]
decoder_input["attn_emb_len"] = output_i["attn_emb_len"]
###############
# determine input word
################
start_word = torch.tensor([self.start_idx,] * beam_size).unsqueeze(1).long()
if t == 0:
word = start_word
else:
word = torch.cat((start_word, output_i["seq"]), dim=-1)
decoder_input["word"] = word
cap_padding_mask = (word == self.pad_idx).to(input_dict["attn_emb"].device)
decoder_input["cap_padding_mask"] = cap_padding_mask
return decoder_input
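    # Note: the encoder memory is expanded to beam_size copies only once, at
    # t == 0, via repeat_tensor; later steps reuse output_i["attn_emb"] and
    # simply append the newly sampled words to the partial sequence.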
class M2TransformerModel(CaptionModel):
def __init__(self, encoder: nn.Module, decoder: nn.Module, **kwargs):
if not hasattr(self, "compatible_decoders"):
self.compatible_decoders = (
                audio_to_text.captioning.models.decoder.M2TransformerDecoder,
)
super().__init__(encoder, decoder, **kwargs)
self.check_encoder_compatibility()
def check_encoder_compatibility(self):
        assert isinstance(self.encoder, audio_to_text.captioning.models.encoder.M2TransformerEncoder), \
            f"only M2TransformerEncoder is compatible with {self.__class__.__name__}"
def seq_forward(self, input_dict):
cap = input_dict["cap"]
output = self.decoder(
{
"word": cap[:, :-1],
"attn_emb": input_dict["attn_emb"],
"attn_emb_mask": input_dict["attn_emb_mask"],
}
)
return output
def prepare_decoder_input(self, input_dict, output):
decoder_input = {
"attn_emb": input_dict["attn_emb"],
"attn_emb_mask": input_dict["attn_emb_mask"]
}
t = input_dict["t"]
###############
# determine input word
################
if input_dict["mode"] == "train" and random.random() < input_dict["ss_ratio"]: # training, scheduled sampling
word = input_dict["cap"][:, :t+1]
else:
start_word = torch.tensor([self.start_idx,] * input_dict["attn_emb"].size(0)).unsqueeze(1).long()
if t == 0:
word = start_word
else:
word = torch.cat((start_word, output["seq"][:, :t]), dim=-1)
# word: [N, T]
decoder_input["word"] = word
return decoder_input
def prepare_beamsearch_decoder_input(self, input_dict, output_i):
decoder_input = {}
t = input_dict["t"]
i = input_dict["sample_idx"]
beam_size = input_dict["beam_size"]
###############
# prepare attn embeds
################
if t == 0:
attn_emb = repeat_tensor(input_dict["attn_emb"][i], beam_size)
attn_emb_mask = repeat_tensor(input_dict["attn_emb_mask"][i], beam_size)
output_i["attn_emb"] = attn_emb
output_i["attn_emb_mask"] = attn_emb_mask
decoder_input["attn_emb"] = output_i["attn_emb"]
decoder_input["attn_emb_mask"] = output_i["attn_emb_mask"]
###############
# determine input word
################
start_word = torch.tensor([self.start_idx,] * beam_size).unsqueeze(1).long()
if t == 0:
word = start_word
else:
word = torch.cat((start_word, output_i["seq"]), dim=-1)
decoder_input["word"] = word
return decoder_input
class EventEncoder(nn.Module):
"""
Encode the Label information in AudioCaps and AudioSet
"""
def __init__(self, emb_dim, vocab_size=527):
super(EventEncoder, self).__init__()
self.label_embedding = nn.Parameter(
torch.randn((vocab_size, emb_dim)), requires_grad=True)
def forward(self, word_idxs):
indices = word_idxs / word_idxs.sum(dim=1, keepdim=True)
embeddings = indices @ self.label_embedding
return embeddings
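# EventEncoder sketch: word_idxs is a multi-hot (or soft) label vector that is
# normalized to sum to 1, so the output is a weighted average of rows of the
# label embedding table; e.g. [[1., 0., 1.]] yields
# 0.5 * (label_embedding[0] + label_embedding[2]) with shape [1, emb_dim].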
class EventCondTransformerModel(TransformerModel):
def __init__(self, encoder: nn.Module, decoder: nn.Module, **kwargs):
if not hasattr(self, "compatible_decoders"):
self.compatible_decoders = (
                audio_to_text.captioning.models.decoder.EventTransformerDecoder,
)
super().__init__(encoder, decoder, **kwargs)
self.label_encoder = EventEncoder(decoder.emb_dim, 527)
self.train_forward_keys += ["events"]
self.inference_forward_keys += ["events"]
# def seq_forward(self, input_dict):
# cap = input_dict["cap"]
# cap_padding_mask = (cap == self.pad_idx).to(cap.device)
# cap_padding_mask = cap_padding_mask[:, :-1]
# output = self.decoder(
# {
# "word": cap[:, :-1],
# "attn_emb": input_dict["attn_emb"],
# "attn_emb_len": input_dict["attn_emb_len"],
# "cap_padding_mask": cap_padding_mask
# }
# )
# return output
def prepare_decoder_input(self, input_dict, output):
decoder_input = super().prepare_decoder_input(input_dict, output)
decoder_input["events"] = self.label_encoder(input_dict["events"])
return decoder_input
def prepare_beamsearch_decoder_input(self, input_dict, output_i):
decoder_input = super().prepare_beamsearch_decoder_input(input_dict, output_i)
t = input_dict["t"]
i = input_dict["sample_idx"]
beam_size = input_dict["beam_size"]
if t == 0:
output_i["events"] = repeat_tensor(self.label_encoder(input_dict["events"])[i], beam_size)
decoder_input["events"] = output_i["events"]
return decoder_input
class KeywordCondTransformerModel(TransformerModel):
def __init__(self, encoder: nn.Module, decoder: nn.Module, **kwargs):
if not hasattr(self, "compatible_decoders"):
self.compatible_decoders = (
                audio_to_text.captioning.models.decoder.KeywordProbTransformerDecoder,
)
super().__init__(encoder, decoder, **kwargs)
self.train_forward_keys += ["keyword"]
self.inference_forward_keys += ["keyword"]
def seq_forward(self, input_dict):
cap = input_dict["cap"]
cap_padding_mask = (cap == self.pad_idx).to(cap.device)
cap_padding_mask = cap_padding_mask[:, :-1]
keyword = input_dict["keyword"]
output = self.decoder(
{
"word": cap[:, :-1],
"attn_emb": input_dict["attn_emb"],
"attn_emb_len": input_dict["attn_emb_len"],
"keyword": keyword,
"cap_padding_mask": cap_padding_mask
}
)
return output
def prepare_decoder_input(self, input_dict, output):
decoder_input = super().prepare_decoder_input(input_dict, output)
decoder_input["keyword"] = input_dict["keyword"]
return decoder_input
def prepare_beamsearch_decoder_input(self, input_dict, output_i):
decoder_input = super().prepare_beamsearch_decoder_input(input_dict, output_i)
t = input_dict["t"]
i = input_dict["sample_idx"]
beam_size = input_dict["beam_size"]
if t == 0:
output_i["keyword"] = repeat_tensor(input_dict["keyword"][i],
beam_size)
decoder_input["keyword"] = output_i["keyword"]
return decoder_input
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/models/transformer_model.py |
from .base_model import *
from .transformer_model import *
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/models/__init__.py |
# -*- coding: utf-8 -*-
import math
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchaudio import transforms
from torchlibrosa.augmentation import SpecAugmentation
from .utils import mean_with_lens, max_with_lens, \
init, pack_wrapper, generate_length_mask, PositionalEncoding
def init_layer(layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
"""Initialize a Batchnorm layer. """
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.)
class BaseEncoder(nn.Module):
"""
Encode the given audio into embedding
Base encoder class, cannot be called directly
All encoders should inherit from this class
"""
def __init__(self, spec_dim, fc_feat_dim, attn_feat_dim):
super(BaseEncoder, self).__init__()
self.spec_dim = spec_dim
self.fc_feat_dim = fc_feat_dim
self.attn_feat_dim = attn_feat_dim
def forward(self, x):
#########################
# an encoder first encodes audio feature into embedding, obtaining
# `encoded`: {
# fc_embs: [N, fc_emb_dim],
# attn_embs: [N, attn_max_len, attn_emb_dim],
# attn_emb_lens: [N,]
# }
#########################
raise NotImplementedError
class Block2D(nn.Module):
def __init__(self, cin, cout, kernel_size=3, padding=1):
super().__init__()
self.block = nn.Sequential(
nn.BatchNorm2d(cin),
nn.Conv2d(cin,
cout,
kernel_size=kernel_size,
padding=padding,
bias=False),
nn.LeakyReLU(inplace=True, negative_slope=0.1))
def forward(self, x):
return self.block(x)
class LinearSoftPool(nn.Module):
"""LinearSoftPool
    Linear softmax pooling: aggregates frame-level probabilities into a
    clip-level probability that is close to the actual maximum value.
Taken from the paper:
A Comparison of Five Multiple Instance Learning Pooling Functions for Sound Event Detection with Weak Labeling
https://arxiv.org/abs/1810.09050
"""
def __init__(self, pooldim=1):
super().__init__()
self.pooldim = pooldim
def forward(self, logits, time_decision):
return (time_decision**2).sum(self.pooldim) / time_decision.sum(
self.pooldim)
class MeanPool(nn.Module):
def __init__(self, pooldim=1):
super().__init__()
self.pooldim = pooldim
def forward(self, logits, decision):
return torch.mean(decision, dim=self.pooldim)
class AttentionPool(nn.Module):
"""docstring for AttentionPool"""
def __init__(self, inputdim, outputdim=10, pooldim=1, **kwargs):
super().__init__()
self.inputdim = inputdim
self.outputdim = outputdim
self.pooldim = pooldim
self.transform = nn.Linear(inputdim, outputdim)
self.activ = nn.Softmax(dim=self.pooldim)
self.eps = 1e-7
def forward(self, logits, decision):
# Input is (B, T, D)
# B, T, D
w = self.activ(torch.clamp(self.transform(logits), -15, 15))
detect = (decision * w).sum(
self.pooldim) / (w.sum(self.pooldim) + self.eps)
# B, T, D
return detect
class MMPool(nn.Module):
def __init__(self, dims):
super().__init__()
self.avgpool = nn.AvgPool2d(dims)
self.maxpool = nn.MaxPool2d(dims)
def forward(self, x):
return self.avgpool(x) + self.maxpool(x)
def parse_poolingfunction(poolingfunction_name='mean', **kwargs):
"""parse_poolingfunction
A heler function to parse any temporal pooling
Pooling is done on dimension 1
:param poolingfunction_name:
:param **kwargs:
"""
poolingfunction_name = poolingfunction_name.lower()
if poolingfunction_name == 'mean':
return MeanPool(pooldim=1)
elif poolingfunction_name == 'linear':
return LinearSoftPool(pooldim=1)
    elif poolingfunction_name == 'attention':
        return AttentionPool(inputdim=kwargs['inputdim'],
                             outputdim=kwargs['outputdim'])
    else:
        raise ValueError(
            f"Unsupported pooling function: {poolingfunction_name}")
def embedding_pooling(x, lens, pooling="mean"):
if pooling == "max":
fc_embs = max_with_lens(x, lens)
elif pooling == "mean":
fc_embs = mean_with_lens(x, lens)
elif pooling == "mean+max":
x_mean = mean_with_lens(x, lens)
x_max = max_with_lens(x, lens)
fc_embs = x_mean + x_max
elif pooling == "last":
indices = (lens - 1).reshape(-1, 1, 1).repeat(1, 1, x.size(-1))
# indices: [N, 1, hidden]
fc_embs = torch.gather(x, 1, indices).squeeze(1)
else:
raise Exception(f"pooling method {pooling} not support")
return fc_embs
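# embedding_pooling reduces x of shape [N, T, D] to clip embeddings of shape
# [N, D] using only the first lens[n] frames of each sample: "mean+max" adds
# the length-aware mean and max, while "last" gathers the state at lens - 1.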
class Cdur5Encoder(BaseEncoder):
def __init__(self, spec_dim, fc_feat_dim, attn_feat_dim, pooling="mean"):
super().__init__(spec_dim, fc_feat_dim, attn_feat_dim)
self.pooling = pooling
self.features = nn.Sequential(
Block2D(1, 32),
nn.LPPool2d(4, (2, 4)),
Block2D(32, 128),
Block2D(128, 128),
nn.LPPool2d(4, (2, 4)),
Block2D(128, 128),
Block2D(128, 128),
nn.LPPool2d(4, (1, 4)),
nn.Dropout(0.3),
)
with torch.no_grad():
rnn_input_dim = self.features(
torch.randn(1, 1, 500, spec_dim)).shape
rnn_input_dim = rnn_input_dim[1] * rnn_input_dim[-1]
self.gru = nn.GRU(rnn_input_dim,
128,
bidirectional=True,
batch_first=True)
self.apply(init)
def forward(self, input_dict):
x = input_dict["spec"]
lens = input_dict["spec_len"]
if "upsample" not in input_dict:
input_dict["upsample"] = False
lens = torch.as_tensor(copy.deepcopy(lens))
N, T, _ = x.shape
x = x.unsqueeze(1)
x = self.features(x)
x = x.transpose(1, 2).contiguous().flatten(-2)
x, _ = self.gru(x)
if input_dict["upsample"]:
x = nn.functional.interpolate(
x.transpose(1, 2),
T,
mode='linear',
align_corners=False).transpose(1, 2)
else:
lens //= 4
attn_emb = x
fc_emb = embedding_pooling(x, lens, self.pooling)
return {
"attn_emb": attn_emb,
"fc_emb": fc_emb,
"attn_emb_len": lens
}
def conv_conv_block(in_channel, out_channel):
return nn.Sequential(
nn.Conv2d(in_channel,
out_channel,
kernel_size=3,
bias=False,
padding=1),
nn.BatchNorm2d(out_channel),
nn.ReLU(True),
nn.Conv2d(out_channel,
out_channel,
kernel_size=3,
bias=False,
padding=1),
nn.BatchNorm2d(out_channel),
nn.ReLU(True)
)
class Cdur8Encoder(BaseEncoder):
def __init__(self, spec_dim, fc_feat_dim, attn_feat_dim, pooling="mean"):
super().__init__(spec_dim, fc_feat_dim, attn_feat_dim)
self.pooling = pooling
self.features = nn.Sequential(
conv_conv_block(1, 64),
MMPool((2, 2)),
nn.Dropout(0.2, True),
conv_conv_block(64, 128),
MMPool((2, 2)),
nn.Dropout(0.2, True),
conv_conv_block(128, 256),
MMPool((1, 2)),
nn.Dropout(0.2, True),
conv_conv_block(256, 512),
MMPool((1, 2)),
nn.Dropout(0.2, True),
nn.AdaptiveAvgPool2d((None, 1)),
)
self.init_bn = nn.BatchNorm2d(spec_dim)
self.embedding = nn.Linear(512, 512)
self.gru = nn.GRU(512, 256, bidirectional=True, batch_first=True)
self.apply(init)
def forward(self, input_dict):
x = input_dict["spec"]
lens = input_dict["spec_len"]
lens = torch.as_tensor(copy.deepcopy(lens))
x = x.unsqueeze(1) # B x 1 x T x D
x = x.transpose(1, 3)
x = self.init_bn(x)
x = x.transpose(1, 3)
x = self.features(x)
x = x.transpose(1, 2).contiguous().flatten(-2)
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu_(self.embedding(x))
x, _ = self.gru(x)
attn_emb = x
lens //= 4
fc_emb = embedding_pooling(x, lens, self.pooling)
return {
"attn_emb": attn_emb,
"fc_emb": fc_emb,
"attn_emb_len": lens
}
class Cnn10Encoder(BaseEncoder):
def __init__(self, spec_dim, fc_feat_dim, attn_feat_dim):
super().__init__(spec_dim, fc_feat_dim, attn_feat_dim)
self.features = nn.Sequential(
conv_conv_block(1, 64),
nn.AvgPool2d((2, 2)),
nn.Dropout(0.2, True),
conv_conv_block(64, 128),
nn.AvgPool2d((2, 2)),
nn.Dropout(0.2, True),
conv_conv_block(128, 256),
nn.AvgPool2d((2, 2)),
nn.Dropout(0.2, True),
conv_conv_block(256, 512),
nn.AvgPool2d((2, 2)),
nn.Dropout(0.2, True),
nn.AdaptiveAvgPool2d((None, 1)),
)
self.init_bn = nn.BatchNorm2d(spec_dim)
self.embedding = nn.Linear(512, 512)
self.apply(init)
def forward(self, input_dict):
x = input_dict["spec"]
lens = input_dict["spec_len"]
lens = torch.as_tensor(copy.deepcopy(lens))
x = x.unsqueeze(1) # [N, 1, T, D]
x = x.transpose(1, 3)
x = self.init_bn(x)
x = x.transpose(1, 3)
x = self.features(x) # [N, 512, T/16, 1]
x = x.transpose(1, 2).contiguous().flatten(-2) # [N, T/16, 512]
attn_emb = x
lens //= 16
fc_emb = embedding_pooling(x, lens, "mean+max")
fc_emb = F.dropout(fc_emb, p=0.5, training=self.training)
fc_emb = self.embedding(fc_emb)
fc_emb = F.relu_(fc_emb)
return {
"attn_emb": attn_emb,
"fc_emb": fc_emb,
"attn_emb_len": lens
}
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.conv2 = nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.init_weight()
def init_weight(self):
init_layer(self.conv1)
init_layer(self.conv2)
init_bn(self.bn1)
init_bn(self.bn2)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception('Incorrect argument!')
return x
class Cnn14Encoder(nn.Module):
def __init__(self, sample_rate=32000):
super().__init__()
sr_to_fmax = {
32000: 14000,
16000: 8000
}
# Logmel spectrogram extractor
self.melspec_extractor = transforms.MelSpectrogram(
sample_rate=sample_rate,
n_fft=32 * sample_rate // 1000,
win_length=32 * sample_rate // 1000,
hop_length=10 * sample_rate // 1000,
f_min=50,
f_max=sr_to_fmax[sample_rate],
n_mels=64,
norm="slaney",
mel_scale="slaney"
)
self.hop_length = 10 * sample_rate // 1000
self.db_transform = transforms.AmplitudeToDB()
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64,
time_stripes_num=2, freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
self.downsample_ratio = 32
self.fc1 = nn.Linear(2048, 2048, bias=True)
self.init_weight()
def init_weight(self):
init_bn(self.bn0)
init_layer(self.fc1)
def load_pretrained(self, pretrained):
checkpoint = torch.load(pretrained, map_location="cpu")
if "model" in checkpoint:
state_keys = checkpoint["model"].keys()
backbone = False
for key in state_keys:
if key.startswith("backbone."):
backbone = True
break
if backbone: # COLA
state_dict = {}
for key, value in checkpoint["model"].items():
if key.startswith("backbone."):
model_key = key.replace("backbone.", "")
state_dict[model_key] = value
else: # PANNs
state_dict = checkpoint["model"]
elif "state_dict" in checkpoint: # CLAP
state_dict = checkpoint["state_dict"]
state_dict_keys = list(filter(
lambda x: "audio_encoder" in x, state_dict.keys()))
state_dict = {
key.replace('audio_encoder.', ''): state_dict[key]
for key in state_dict_keys
}
else:
raise Exception("Unkown checkpoint format")
model_dict = self.state_dict()
pretrained_dict = {
k: v for k, v in state_dict.items() if (k in model_dict) and (
model_dict[k].shape == v.shape)
}
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict, strict=True)
def forward(self, input_dict):
"""
Input: (batch_size, n_samples)"""
waveform = input_dict["wav"]
wave_length = input_dict["wav_len"]
specaug = input_dict["specaug"]
x = self.melspec_extractor(waveform)
x = self.db_transform(x) # (batch_size, mel_bins, time_steps)
x = x.transpose(1, 2)
x = x.unsqueeze(1) # (batch_size, 1, time_steps, mel_bins)
# SpecAugment
if self.training and specaug:
x = self.spec_augmenter(x)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block6(x, pool_size=(1, 1), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = torch.mean(x, dim=3)
attn_emb = x.transpose(1, 2)
wave_length = torch.as_tensor(wave_length)
feat_length = torch.div(wave_length, self.hop_length,
rounding_mode="floor") + 1
feat_length = torch.div(feat_length, self.downsample_ratio,
rounding_mode="floor")
x_max = max_with_lens(attn_emb, feat_length)
x_mean = mean_with_lens(attn_emb, feat_length)
x = x_max + x_mean
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu_(self.fc1(x))
fc_emb = F.dropout(x, p=0.5, training=self.training)
output_dict = {
'fc_emb': fc_emb,
'attn_emb': attn_emb,
'attn_emb_len': feat_length
}
return output_dict
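# Example of the length bookkeeping above: a 10 s clip at 32 kHz (320000
# samples, hop 320) gives 1000 + 1 = 1001 mel frames; with downsample_ratio
# of 32 the attention embedding then has 1001 // 32 = 31 time steps, which is
# the value stored in attn_emb_len.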
class RnnEncoder(BaseEncoder):
def __init__(self, spec_dim, fc_feat_dim, attn_feat_dim,
pooling="mean", **kwargs):
super().__init__(spec_dim, fc_feat_dim, attn_feat_dim)
self.pooling = pooling
self.hidden_size = kwargs.get('hidden_size', 512)
self.bidirectional = kwargs.get('bidirectional', False)
self.num_layers = kwargs.get('num_layers', 1)
self.dropout = kwargs.get('dropout', 0.2)
self.rnn_type = kwargs.get('rnn_type', "GRU")
self.in_bn = kwargs.get('in_bn', False)
self.embed_dim = self.hidden_size * (self.bidirectional + 1)
self.network = getattr(nn, self.rnn_type)(
attn_feat_dim,
self.hidden_size,
num_layers=self.num_layers,
bidirectional=self.bidirectional,
dropout=self.dropout,
batch_first=True)
if self.in_bn:
self.bn = nn.BatchNorm1d(self.embed_dim)
self.apply(init)
def forward(self, input_dict):
x = input_dict["attn"]
lens = input_dict["attn_len"]
lens = torch.as_tensor(lens)
# x: [N, T, E]
if self.in_bn:
x = pack_wrapper(self.bn, x, lens)
out = pack_wrapper(self.network, x, lens)
# out: [N, T, hidden]
attn_emb = out
fc_emb = embedding_pooling(out, lens, self.pooling)
return {
"attn_emb": attn_emb,
"fc_emb": fc_emb,
"attn_emb_len": lens
}
class Cnn14RnnEncoder(nn.Module):
def __init__(self, sample_rate=32000, pretrained=None,
freeze_cnn=False, freeze_cnn_bn=False,
pooling="mean", **kwargs):
super().__init__()
self.cnn = Cnn14Encoder(sample_rate)
self.rnn = RnnEncoder(64, 2048, 2048, pooling, **kwargs)
if pretrained is not None:
self.cnn.load_pretrained(pretrained)
if freeze_cnn:
assert pretrained is not None, "cnn is not pretrained but frozen"
for param in self.cnn.parameters():
param.requires_grad = False
self.freeze_cnn_bn = freeze_cnn_bn
def train(self, mode):
super().train(mode=mode)
if self.freeze_cnn_bn:
def bn_eval(module):
class_name = module.__class__.__name__
if class_name.find("BatchNorm") != -1:
module.eval()
self.cnn.apply(bn_eval)
return self
def forward(self, input_dict):
output_dict = self.cnn(input_dict)
output_dict["attn"] = output_dict["attn_emb"]
output_dict["attn_len"] = output_dict["attn_emb_len"]
del output_dict["attn_emb"], output_dict["attn_emb_len"]
output_dict = self.rnn(output_dict)
return output_dict
class TransformerEncoder(BaseEncoder):
def __init__(self, spec_dim, fc_feat_dim, attn_feat_dim, d_model, **kwargs):
super().__init__(spec_dim, fc_feat_dim, attn_feat_dim)
self.d_model = d_model
dropout = kwargs.get("dropout", 0.2)
self.nhead = kwargs.get("nhead", self.d_model // 64)
self.nlayers = kwargs.get("nlayers", 2)
self.dim_feedforward = kwargs.get("dim_feedforward", self.d_model * 4)
self.attn_proj = nn.Sequential(
nn.Linear(attn_feat_dim, self.d_model),
nn.ReLU(),
nn.Dropout(dropout),
nn.LayerNorm(self.d_model)
)
layer = nn.TransformerEncoderLayer(d_model=self.d_model,
nhead=self.nhead,
dim_feedforward=self.dim_feedforward,
dropout=dropout)
self.model = nn.TransformerEncoder(layer, self.nlayers)
self.cls_token = nn.Parameter(torch.zeros(d_model))
self.init_params()
def init_params(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, input_dict):
attn_feat = input_dict["attn"]
attn_feat_len = input_dict["attn_len"]
attn_feat_len = torch.as_tensor(attn_feat_len)
attn_feat = self.attn_proj(attn_feat) # [bs, T, d_model]
cls_emb = self.cls_token.reshape(1, 1, self.d_model).repeat(
attn_feat.size(0), 1, 1)
attn_feat = torch.cat((cls_emb, attn_feat), dim=1)
attn_feat = attn_feat.transpose(0, 1)
attn_feat_len += 1
src_key_padding_mask = ~generate_length_mask(
attn_feat_len, attn_feat.size(0)).to(attn_feat.device)
output = self.model(attn_feat, src_key_padding_mask=src_key_padding_mask)
attn_emb = output.transpose(0, 1)
fc_emb = attn_emb[:, 0]
return {
"attn_emb": attn_emb,
"fc_emb": fc_emb,
"attn_emb_len": attn_feat_len
}
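# The learnable CLS token prepended in forward() provides the clip-level
# embedding: fc_emb is the transformer output at position 0, while the full
# sequence (including CLS) is returned as attn_emb for cross-attention, with
# attn_emb_len incremented by one to account for the extra token.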
class Cnn14TransformerEncoder(nn.Module):
def __init__(self, sample_rate=32000, pretrained=None,
freeze_cnn=False, freeze_cnn_bn=False,
d_model="mean", **kwargs):
super().__init__()
self.cnn = Cnn14Encoder(sample_rate)
self.trm = TransformerEncoder(64, 2048, 2048, d_model, **kwargs)
if pretrained is not None:
self.cnn.load_pretrained(pretrained)
if freeze_cnn:
assert pretrained is not None, "cnn is not pretrained but frozen"
for param in self.cnn.parameters():
param.requires_grad = False
self.freeze_cnn_bn = freeze_cnn_bn
def train(self, mode):
super().train(mode=mode)
if self.freeze_cnn_bn:
def bn_eval(module):
class_name = module.__class__.__name__
if class_name.find("BatchNorm") != -1:
module.eval()
self.cnn.apply(bn_eval)
return self
def forward(self, input_dict):
output_dict = self.cnn(input_dict)
output_dict["attn"] = output_dict["attn_emb"]
output_dict["attn_len"] = output_dict["attn_emb_len"]
del output_dict["attn_emb"], output_dict["attn_emb_len"]
output_dict = self.trm(output_dict)
return output_dict
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/models/encoder.py |
# -*- coding: utf-8 -*-
from typing import Dict
import torch
import torch.nn as nn
from .utils import mean_with_lens, repeat_tensor
class CaptionModel(nn.Module):
"""
Encoder-decoder captioning model.
"""
pad_idx = 0
start_idx = 1
end_idx = 2
max_length = 20
def __init__(self, encoder: nn.Module, decoder: nn.Module, **kwargs):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.vocab_size = decoder.vocab_size
self.train_forward_keys = ["cap", "cap_len", "ss_ratio"]
self.inference_forward_keys = ["sample_method", "max_length", "temp"]
freeze_encoder = kwargs.get("freeze_encoder", False)
if freeze_encoder:
for param in self.encoder.parameters():
param.requires_grad = False
self.check_decoder_compatibility()
def check_decoder_compatibility(self):
compatible_decoders = [x.__class__.__name__ for x in self.compatible_decoders]
assert isinstance(self.decoder, self.compatible_decoders), \
f"{self.decoder.__class__.__name__} is incompatible with " \
f"{self.__class__.__name__}, please use decoder in {compatible_decoders} "
@classmethod
def set_index(cls, start_idx, end_idx):
cls.start_idx = start_idx
cls.end_idx = end_idx
def forward(self, input_dict: Dict):
"""
input_dict: {
(required)
mode: train/inference,
spec,
spec_len,
fc,
attn,
attn_len,
[sample_method: greedy],
[temp: 1.0] (in case of no teacher forcing)
(optional, mode=train)
cap,
cap_len,
ss_ratio,
(optional, mode=inference)
sample_method: greedy/beam,
max_length,
temp,
beam_size (optional, sample_method=beam),
n_best (optional, sample_method=beam),
}
"""
# encoder_input_keys = ["spec", "spec_len", "fc", "attn", "attn_len"]
# encoder_input = { key: input_dict[key] for key in encoder_input_keys }
encoder_output_dict = self.encoder(input_dict)
if input_dict["mode"] == "train":
forward_dict = {
"mode": "train", "sample_method": "greedy", "temp": 1.0
}
for key in self.train_forward_keys:
forward_dict[key] = input_dict[key]
forward_dict.update(encoder_output_dict)
output = self.train_forward(forward_dict)
elif input_dict["mode"] == "inference":
forward_dict = {"mode": "inference"}
default_args = { "sample_method": "greedy", "max_length": self.max_length, "temp": 1.0 }
for key in self.inference_forward_keys:
if key in input_dict:
forward_dict[key] = input_dict[key]
else:
forward_dict[key] = default_args[key]
if forward_dict["sample_method"] == "beam":
forward_dict["beam_size"] = input_dict.get("beam_size", 3)
forward_dict["n_best"] = input_dict.get("n_best", False)
forward_dict["n_best_size"] = input_dict.get("n_best_size", forward_dict["beam_size"])
elif forward_dict["sample_method"] == "dbs":
forward_dict["beam_size"] = input_dict.get("beam_size", 6)
forward_dict["group_size"] = input_dict.get("group_size", 3)
forward_dict["diversity_lambda"] = input_dict.get("diversity_lambda", 0.5)
forward_dict["group_nbest"] = input_dict.get("group_nbest", True)
forward_dict.update(encoder_output_dict)
output = self.inference_forward(forward_dict)
else:
raise Exception("mode should be either 'train' or 'inference'")
return output
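    # A minimal inference-mode call sketch (keys are illustrative and depend
    # on the configured encoder; this assumes the Cnn14 waveform encoder):
    #   output = model({
    #       "mode": "inference",
    #       "wav": torch.randn(2, 320000),
    #       "wav_len": [320000, 280000],
    #       "specaug": False,
    #       "sample_method": "beam",
    #       "beam_size": 3,
    #   })
    #   output["seq"]  # [2, max_length] decoded token indices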
def prepare_output(self, input_dict):
output = {}
batch_size = input_dict["fc_emb"].size(0)
if input_dict["mode"] == "train":
max_length = input_dict["cap"].size(1) - 1
elif input_dict["mode"] == "inference":
max_length = input_dict["max_length"]
else:
raise Exception("mode should be either 'train' or 'inference'")
device = input_dict["fc_emb"].device
output["seq"] = torch.full((batch_size, max_length), self.end_idx,
dtype=torch.long)
output["logit"] = torch.empty(batch_size, max_length,
self.vocab_size).to(device)
output["sampled_logprob"] = torch.zeros(batch_size, max_length)
output["embed"] = torch.empty(batch_size, max_length,
self.decoder.d_model).to(device)
return output
def train_forward(self, input_dict):
if input_dict["ss_ratio"] != 1: # scheduled sampling training
input_dict["mode"] = "train"
return self.stepwise_forward(input_dict)
output = self.seq_forward(input_dict)
self.train_process(output, input_dict)
return output
def seq_forward(self, input_dict):
raise NotImplementedError
def train_process(self, output, input_dict):
pass
def inference_forward(self, input_dict):
if input_dict["sample_method"] == "beam":
return self.beam_search(input_dict)
elif input_dict["sample_method"] == "dbs":
return self.diverse_beam_search(input_dict)
return self.stepwise_forward(input_dict)
def stepwise_forward(self, input_dict):
"""Step-by-step decoding"""
output = self.prepare_output(input_dict)
max_length = output["seq"].size(1)
# start sampling
for t in range(max_length):
input_dict["t"] = t
self.decode_step(input_dict, output)
if input_dict["mode"] == "inference": # decide whether to stop when sampling
unfinished_t = output["seq"][:, t] != self.end_idx
if t == 0:
unfinished = unfinished_t
else:
unfinished *= unfinished_t
output["seq"][:, t][~unfinished] = self.end_idx
if unfinished.sum() == 0:
break
self.stepwise_process(output)
return output
def decode_step(self, input_dict, output):
"""Decoding operation of timestep t"""
decoder_input = self.prepare_decoder_input(input_dict, output)
# feed to the decoder to get logit
output_t = self.decoder(decoder_input)
logit_t = output_t["logit"]
# assert logit_t.ndim == 3
if logit_t.size(1) == 1:
logit_t = logit_t.squeeze(1)
embed_t = output_t["embed"].squeeze(1)
elif logit_t.size(1) > 1:
logit_t = logit_t[:, -1, :]
embed_t = output_t["embed"][:, -1, :]
else:
raise Exception("no logit output")
# sample the next input word and get the corresponding logit
sampled = self.sample_next_word(logit_t,
method=input_dict["sample_method"],
temp=input_dict["temp"])
output_t.update(sampled)
output_t["t"] = input_dict["t"]
output_t["logit"] = logit_t
output_t["embed"] = embed_t
self.stepwise_process_step(output, output_t)
def prepare_decoder_input(self, input_dict, output):
"""Prepare the inp ut dict for the decoder"""
raise NotImplementedError
def stepwise_process_step(self, output, output_t):
"""Postprocessing (save output values) after each timestep t"""
t = output_t["t"]
output["logit"][:, t, :] = output_t["logit"]
output["seq"][:, t] = output_t["word"]
output["sampled_logprob"][:, t] = output_t["probs"]
output["embed"][:, t, :] = output_t["embed"]
def stepwise_process(self, output):
"""Postprocessing after the whole step-by-step autoregressive decoding"""
pass
def sample_next_word(self, logit, method, temp):
"""Sample the next word, given probs output by the decoder"""
logprob = torch.log_softmax(logit, dim=1)
if method == "greedy":
sampled_logprob, word = torch.max(logprob.detach(), 1)
elif method == "gumbel":
def sample_gumbel(shape, eps=1e-20):
U = torch.rand(shape).to(logprob.device)
return -torch.log(-torch.log(U + eps) + eps)
def gumbel_softmax_sample(logit, temperature):
y = logit + sample_gumbel(logit.size())
return torch.log_softmax(y / temperature, dim=-1)
_logprob = gumbel_softmax_sample(logprob, temp)
_, word = torch.max(_logprob.data, 1)
sampled_logprob = logprob.gather(1, word.unsqueeze(-1))
else:
logprob = logprob / temp
if method.startswith("top"):
top_num = float(method[3:])
if 0 < top_num < 1: # top-p sampling
probs = torch.softmax(logit, dim=1)
sorted_probs, sorted_indices = torch.sort(probs, descending=True, dim=1)
_cumsum = sorted_probs.cumsum(1)
mask = _cumsum < top_num
mask = torch.cat([torch.ones_like(mask[:,:1]), mask[:,:-1]], 1)
sorted_probs = sorted_probs * mask.to(sorted_probs)
sorted_probs = sorted_probs / sorted_probs.sum(1, keepdim=True)
logprob.scatter_(1, sorted_indices, sorted_probs.log())
else: # top-k sampling
k = int(top_num)
tmp = torch.empty_like(logprob).fill_(float('-inf'))
topk, indices = torch.topk(logprob, k, dim=1)
tmp = tmp.scatter(1, indices, topk)
logprob = tmp
word = torch.distributions.Categorical(logits=logprob.detach()).sample()
sampled_logprob = logprob.gather(1, word.unsqueeze(-1)).squeeze(1)
word = word.detach().long()
# sampled_logprob: [N,], word: [N,]
return {"word": word, "probs": sampled_logprob}
def beam_search(self, input_dict):
output = self.prepare_output(input_dict)
max_length = input_dict["max_length"]
beam_size = input_dict["beam_size"]
if input_dict["n_best"]:
n_best_size = input_dict["n_best_size"]
batch_size, max_length = output["seq"].size()
output["seq"] = torch.full((batch_size, n_best_size, max_length),
self.end_idx, dtype=torch.long)
temp = input_dict["temp"]
        # instance-by-instance beam search
for i in range(output["seq"].size(0)):
output_i = self.prepare_beamsearch_output(input_dict)
input_dict["sample_idx"] = i
for t in range(max_length):
input_dict["t"] = t
output_t = self.beamsearch_step(input_dict, output_i)
#######################################
# merge with previous beam and select the current max prob beam
#######################################
logit_t = output_t["logit"]
if logit_t.size(1) == 1:
logit_t = logit_t.squeeze(1)
elif logit_t.size(1) > 1:
logit_t = logit_t[:, -1, :]
else:
raise Exception("no logit output")
logprob_t = torch.log_softmax(logit_t, dim=1)
logprob_t = torch.log_softmax(logprob_t / temp, dim=1)
logprob_t = output_i["topk_logprob"].unsqueeze(1) + logprob_t
if t == 0: # for the first step, all k seq will have the same probs
topk_logprob, topk_words = logprob_t[0].topk(
beam_size, 0, True, True)
else: # unroll and find top logprob, and their unrolled indices
topk_logprob, topk_words = logprob_t.view(-1).topk(
beam_size, 0, True, True)
topk_words = topk_words.cpu()
output_i["topk_logprob"] = topk_logprob
# output_i["prev_words_beam"] = topk_words // self.vocab_size # [beam_size,]
output_i["prev_words_beam"] = torch.div(topk_words, self.vocab_size,
rounding_mode='trunc')
output_i["next_word"] = topk_words % self.vocab_size # [beam_size,]
if t == 0:
output_i["seq"] = output_i["next_word"].unsqueeze(1)
else:
output_i["seq"] = torch.cat([
output_i["seq"][output_i["prev_words_beam"]],
output_i["next_word"].unsqueeze(1)], dim=1)
# add finished beams to results
is_end = output_i["next_word"] == self.end_idx
if t == max_length - 1:
is_end.fill_(1)
for beam_idx in range(beam_size):
if is_end[beam_idx]:
final_beam = {
"seq": output_i["seq"][beam_idx].clone(),
"score": output_i["topk_logprob"][beam_idx].item()
}
final_beam["score"] = final_beam["score"] / (t + 1)
output_i["done_beams"].append(final_beam)
output_i["topk_logprob"][is_end] -= 1000
self.beamsearch_process_step(output_i, output_t)
self.beamsearch_process(output, output_i, input_dict)
return output
def prepare_beamsearch_output(self, input_dict):
beam_size = input_dict["beam_size"]
device = input_dict["fc_emb"].device
output = {
"topk_logprob": torch.zeros(beam_size).to(device),
"seq": None,
"prev_words_beam": None,
"next_word": None,
"done_beams": [],
}
return output
def beamsearch_step(self, input_dict, output_i):
decoder_input = self.prepare_beamsearch_decoder_input(input_dict, output_i)
output_t = self.decoder(decoder_input)
output_t["t"] = input_dict["t"]
return output_t
def prepare_beamsearch_decoder_input(self, input_dict, output_i):
raise NotImplementedError
def beamsearch_process_step(self, output_i, output_t):
pass
def beamsearch_process(self, output, output_i, input_dict):
i = input_dict["sample_idx"]
done_beams = sorted(output_i["done_beams"], key=lambda x: -x["score"])
if input_dict["n_best"]:
done_beams = done_beams[:input_dict["n_best_size"]]
for out_idx, done_beam in enumerate(done_beams):
seq = done_beam["seq"]
output["seq"][i][out_idx, :len(seq)] = seq
else:
seq = done_beams[0]["seq"]
output["seq"][i][:len(seq)] = seq
def diverse_beam_search(self, input_dict):
def add_diversity(seq_table, logprob, t, divm, diversity_lambda, bdash):
local_time = t - divm
unaug_logprob = logprob.clone()
if divm > 0:
change = torch.zeros(logprob.size(-1))
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][..., local_time]
for prev_labels in range(bdash):
change.scatter_add_(0, prev_decisions[prev_labels], change.new_ones(1))
change = change.to(logprob.device)
logprob = logprob - repeat_tensor(change, bdash) * diversity_lambda
return logprob, unaug_logprob
output = self.prepare_output(input_dict)
group_size = input_dict["group_size"]
batch_size = output["seq"].size(0)
beam_size = input_dict["beam_size"]
bdash = beam_size // group_size
input_dict["bdash"] = bdash
diversity_lambda = input_dict["diversity_lambda"]
device = input_dict["fc_emb"].device
max_length = input_dict["max_length"]
temp = input_dict["temp"]
group_nbest = input_dict["group_nbest"]
batch_size, max_length = output["seq"].size()
if group_nbest:
output["seq"] = torch.full((batch_size, beam_size, max_length),
self.end_idx, dtype=torch.long)
else:
output["seq"] = torch.full((batch_size, group_size, max_length),
self.end_idx, dtype=torch.long)
for i in range(batch_size):
input_dict["sample_idx"] = i
seq_table = [torch.LongTensor(bdash, 0) for _ in range(group_size)] # group_size x [bdash, 0]
logprob_table = [torch.zeros(bdash).to(device) for _ in range(group_size)]
done_beams_table = [[] for _ in range(group_size)]
output_i = {
"prev_words_beam": [None for _ in range(group_size)],
"next_word": [None for _ in range(group_size)],
"state": [None for _ in range(group_size)]
}
for t in range(max_length + group_size - 1):
input_dict["t"] = t
for divm in range(group_size):
input_dict["divm"] = divm
if t >= divm and t <= max_length + divm - 1:
local_time = t - divm
decoder_input = self.prepare_dbs_decoder_input(input_dict, output_i)
output_t = self.decoder(decoder_input)
output_t["divm"] = divm
logit_t = output_t["logit"]
if logit_t.size(1) == 1:
logit_t = logit_t.squeeze(1)
elif logit_t.size(1) > 1:
logit_t = logit_t[:, -1, :]
else:
raise Exception("no logit output")
logprob_t = torch.log_softmax(logit_t, dim=1)
logprob_t = torch.log_softmax(logprob_t / temp, dim=1)
logprob_t, unaug_logprob_t = add_diversity(seq_table, logprob_t, t, divm, diversity_lambda, bdash)
logprob_t = logprob_table[divm].unsqueeze(-1) + logprob_t
if local_time == 0: # for the first step, all k seq will have the same probs
topk_logprob, topk_words = logprob_t[0].topk(
bdash, 0, True, True)
else: # unroll and find top logprob, and their unrolled indices
topk_logprob, topk_words = logprob_t.view(-1).topk(
bdash, 0, True, True)
topk_words = topk_words.cpu()
logprob_table[divm] = topk_logprob
output_i["prev_words_beam"][divm] = topk_words // self.vocab_size # [bdash,]
output_i["next_word"][divm] = topk_words % self.vocab_size # [bdash,]
if local_time > 0:
seq_table[divm] = seq_table[divm][output_i["prev_words_beam"][divm]]
seq_table[divm] = torch.cat([
seq_table[divm],
output_i["next_word"][divm].unsqueeze(-1)], -1)
is_end = seq_table[divm][:, t-divm] == self.end_idx
assert seq_table[divm].shape[-1] == t - divm + 1
if t == max_length + divm - 1:
is_end.fill_(1)
for beam_idx in range(bdash):
if is_end[beam_idx]:
final_beam = {
"seq": seq_table[divm][beam_idx].clone(),
"score": logprob_table[divm][beam_idx].item()
}
final_beam["score"] = final_beam["score"] / (t - divm + 1)
done_beams_table[divm].append(final_beam)
logprob_table[divm][is_end] -= 1000
self.dbs_process_step(output_i, output_t)
done_beams_table = [sorted(done_beams_table[divm], key=lambda x: -x["score"])[:bdash] for divm in range(group_size)]
if group_nbest:
done_beams = sum(done_beams_table, [])
else:
done_beams = [group_beam[0] for group_beam in done_beams_table]
for _, done_beam in enumerate(done_beams):
output["seq"][i, _, :len(done_beam["seq"])] = done_beam["seq"]
return output
def prepare_dbs_decoder_input(self, input_dict, output_i):
raise NotImplementedError
def dbs_process_step(self, output_i, output_t):
pass
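# Illustrative sketch (not part of the original code): how CaptionModel.forward is
# typically driven. It assumes a concrete subclass that implements seq_forward /
# prepare_decoder_input, an encoder that puts "fc_emb" (and usually attention
# features) into its output dict, and dummy feature tensors supplied by the caller.
def _example_caption_model_usage(model, feats, cap, cap_len):
    """`feats` is a dict of encoder inputs, e.g. {"attn": ..., "attn_len": ..., "fc": ...}."""
    train_input = dict(feats)
    train_input.update({
        "mode": "train",
        "cap": cap, "cap_len": cap_len,
        "ss_ratio": 1.0,  # 1.0 means full teacher forcing (seq_forward path)
    })
    infer_input = dict(feats)
    infer_input.update({
        "mode": "inference",
        "sample_method": "beam",  # or "greedy" / "dbs" / "topk"-style methods
        "beam_size": 3,
        "max_length": 20,
        "temp": 1.0,
    })
    return model(train_input), model(infer_input)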
class CaptionSequenceModel(nn.Module):
def __init__(self, model, seq_output_size):
super().__init__()
self.model = model
if model.decoder.d_model != seq_output_size:
self.output_transform = nn.Linear(model.decoder.d_model, seq_output_size)
else:
self.output_transform = lambda x: x
def forward(self, input_dict):
output = self.model(input_dict)
if input_dict["mode"] == "train":
lens = input_dict["cap_len"] - 1
# seq_outputs: [N, d_model]
elif input_dict["mode"] == "inference":
if "sample_method" in input_dict and input_dict["sample_method"] == "beam":
return output
seq = output["seq"]
lens = torch.where(seq == self.model.end_idx, torch.zeros_like(seq), torch.ones_like(seq)).sum(dim=1)
else:
raise Exception("mode should be either 'train' or 'inference'")
seq_output = mean_with_lens(output["embed"], lens)
seq_output = self.output_transform(seq_output)
output["seq_output"] = seq_output
return output
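# Illustration (hypothetical helper, assumes a trained CaptionModel subclass):
# CaptionSequenceModel runs the captioner and mean-pools the per-step decoder
# embeddings into a single fixed-size vector stored in output["seq_output"].
# Use a stepwise sample method (e.g. "greedy") so that output["embed"] is populated.
def _example_caption_sequence_model(caption_model, infer_input_dict):
    wrapper = CaptionSequenceModel(caption_model, seq_output_size=512)
    output = wrapper(infer_input_dict)      # adds output["seq_output"]: [N, 512]
    return output["seq_output"]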
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/models/base_model.py |
import math
import numpy as np
import torch
import torch.nn as nn
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths.cpu(), batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, attn_feats, attn_feat_lens):
packed, inv_ix = sort_pack_padded_sequence(attn_feats, attn_feat_lens)
if isinstance(module, torch.nn.RNNBase):
return pad_unsort_packed_sequence(module(packed)[0], inv_ix)
else:
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
def generate_length_mask(lens, max_length=None):
lens = torch.as_tensor(lens)
N = lens.size(0)
if max_length is None:
max_length = max(lens)
idxs = torch.arange(max_length).repeat(N).view(N, max_length)
idxs = idxs.to(lens.device)
mask = (idxs < lens.view(-1, 1))
return mask
def mean_with_lens(features, lens):
"""
features: [N, T, ...] (assume the second dimension represents length)
lens: [N,]
"""
lens = torch.as_tensor(lens)
if max(lens) != features.size(1):
max_length = features.size(1)
mask = generate_length_mask(lens, max_length)
else:
mask = generate_length_mask(lens)
mask = mask.to(features.device) # [N, T]
while mask.ndim < features.ndim:
mask = mask.unsqueeze(-1)
feature_mean = features * mask
feature_mean = feature_mean.sum(1)
while lens.ndim < feature_mean.ndim:
lens = lens.unsqueeze(1)
feature_mean = feature_mean / lens.to(features.device)
# feature_mean = features * mask.unsqueeze(-1)
# feature_mean = feature_mean.sum(1) / lens.unsqueeze(1).to(features.device)
return feature_mean
def max_with_lens(features, lens):
"""
features: [N, T, ...] (assume the second dimension represents length)
lens: [N,]
"""
lens = torch.as_tensor(lens)
mask = generate_length_mask(lens).to(features.device) # [N, T]
feature_max = features.clone()
feature_max[~mask] = float("-inf")
feature_max, _ = feature_max.max(1)
return feature_max
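# Small sanity-check sketch (added for illustration): mean_with_lens and
# max_with_lens pool a padded [N, T, D] batch while ignoring padded frames.
def _example_masked_pooling():
    feats = torch.randn(2, 5, 4)              # two sequences padded to length 5
    lens = torch.as_tensor([3, 5])            # true lengths of each sequence
    mean_pooled = mean_with_lens(feats, lens) # [2, 4], averages over valid frames only
    max_pooled = max_with_lens(feats, lens)   # [2, 4], max over valid frames only
    return mean_pooled, max_pooled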
def repeat_tensor(x, n):
return x.unsqueeze(0).repeat(n, *([1] * len(x.shape)))
def init(m, method="kaiming"):
if isinstance(m, (nn.Conv2d, nn.Conv1d)):
if method == "kaiming":
nn.init.kaiming_uniform_(m.weight)
elif method == "xavier":
nn.init.xavier_uniform_(m.weight)
else:
raise Exception(f"initialization method {method} not supported")
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
if method == "kaiming":
nn.init.kaiming_uniform_(m.weight)
elif method == "xavier":
nn.init.xavier_uniform_(m.weight)
else:
raise Exception(f"initialization method {method} not supported")
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Embedding):
if method == "kaiming":
nn.init.kaiming_uniform_(m.weight)
elif method == "xavier":
nn.init.xavier_uniform_(m.weight)
else:
raise Exception(f"initialization method {method} not supported")
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=100):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * \
(-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
# self.register_buffer("pe", pe)
self.register_parameter("pe", nn.Parameter(pe, requires_grad=False))
def forward(self, x):
# x: [T, N, E]
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
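# Illustration only: PositionalEncoding expects time-major input [T, N, E],
# matching the default layout of torch.nn.Transformer modules.
def _example_positional_encoding():
    pos_enc = PositionalEncoding(d_model=16, dropout=0.0, max_len=100)
    x = torch.zeros(50, 2, 16)                # [T, N, E]
    return pos_enc(x)                         # same shape, sinusoidal offsets added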
| EXA-1-master | exa/models/AudioGPT/audio_to_text/captioning/models/utils.py |
import librosa
import librosa.filters
import math
import numpy as np
import scipy.io.wavfile
def load_wav(path):
max_length = 32000 * 10
wav = librosa.core.load(path, sr=32000)[0]
    if len(wav) > max_length:
        wav = wav[0:max_length]
# pad audio to max length, 10s for AudioCaps
if len(wav) < max_length:
# audio = torch.nn.functional.pad(audio, (0, self.max_length - audio.size(1)), 'constant')
wav = np.pad(wav, (0, max_length - len(wav)), 'constant')
wav = wav[...,None]
return wav
def save_wav(wav, path):
wav *= 32767 / max(0.01, np.max(np.abs(wav)))
scipy.io.wavfile.write(path, 32000, wav.astype(np.int16)) | EXA-1-master | exa/models/AudioGPT/sound_extraction/utils/wav_io.py |
import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.signal import get_window
import librosa.util as librosa_util
from librosa.util import pad_center, tiny
# from audio_processing import window_sumsquare
def window_sumsquare(window, n_frames, hop_length=512, win_length=1024,
n_fft=1024, dtype=np.float32, norm=None):
"""
# from librosa 0.6
Compute the sum-square envelope of a window function at a given hop length.
This is used to estimate modulation effects induced by windowing
observations in short-time fourier transforms.
Parameters
----------
window : string, tuple, number, callable, or list-like
Window specification, as in `get_window`
n_frames : int > 0
The number of analysis frames
hop_length : int > 0
The number of samples to advance between frames
win_length : [optional]
The length of the window function. By default, this matches `n_fft`.
n_fft : int > 0
The length of each analysis frame.
dtype : np.dtype
The data type of the output
Returns
-------
wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
The sum-squared envelope of the window function
"""
if win_length is None:
win_length = n_fft
n = n_fft + hop_length * (n_frames - 1)
x = np.zeros(n, dtype=dtype)
# Compute the squared window at the desired length
win_sq = get_window(window, win_length, fftbins=True)
win_sq = librosa_util.normalize(win_sq, norm=norm)**2
win_sq = librosa_util.pad_center(win_sq, n_fft)
# Fill the envelope
for i in range(n_frames):
sample = i * hop_length
x[sample:min(n, sample + n_fft)] += win_sq[:max(0, min(n_fft, n - sample))]
return x
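# Hedged example (added for illustration): the sum-squared window envelope is what
# STFT.inverse below divides by to undo windowing modulation during overlap-add.
def _example_window_sumsquare():
    envelope = window_sumsquare('hann', n_frames=10, hop_length=512,
                                win_length=1024, n_fft=1024)
    # envelope has length n_fft + hop_length * (n_frames - 1) = 5632 samples
    return envelope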
class STFT(torch.nn.Module):
"""adapted from Prem Seetharaman's https://github.com/pseeth/pytorch-stft"""
def __init__(self, filter_length=1024, hop_length=512, win_length=1024,
window='hann'):
super(STFT, self).__init__()
self.filter_length = filter_length
self.hop_length = hop_length
self.win_length = win_length
self.window = window
self.forward_transform = None
scale = self.filter_length / self.hop_length
fourier_basis = np.fft.fft(np.eye(self.filter_length))
cutoff = int((self.filter_length / 2 + 1))
fourier_basis = np.vstack([np.real(fourier_basis[:cutoff, :]),
np.imag(fourier_basis[:cutoff, :])])
forward_basis = torch.FloatTensor(fourier_basis[:, None, :])
inverse_basis = torch.FloatTensor(
np.linalg.pinv(scale * fourier_basis).T[:, None, :])
if window is not None:
assert(filter_length >= win_length)
# get window and zero center pad it to filter_length
fft_window = get_window(window, win_length, fftbins=True)
fft_window = pad_center(fft_window, filter_length)
fft_window = torch.from_numpy(fft_window).float()
# window the bases
forward_basis *= fft_window
inverse_basis *= fft_window
self.register_buffer('forward_basis', forward_basis.float())
self.register_buffer('inverse_basis', inverse_basis.float())
def transform(self, input_data):
num_batches = input_data.size(0)
num_samples = input_data.size(1)
self.num_samples = num_samples
# similar to librosa, reflect-pad the input
input_data = input_data.view(num_batches, 1, num_samples)
input_data = F.pad(
input_data.unsqueeze(1),
(int(self.filter_length / 2), int(self.filter_length / 2), 0, 0),
mode='reflect')
input_data = input_data.squeeze(1)
forward_transform = F.conv1d(
input_data,
Variable(self.forward_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
cutoff = int((self.filter_length / 2) + 1)
real_part = forward_transform[:, :cutoff, :]
imag_part = forward_transform[:, cutoff:, :]
magnitude = torch.sqrt(real_part**2 + imag_part**2)
phase = torch.autograd.Variable(
torch.atan2(imag_part.data, real_part.data))
return magnitude, phase # [batch_size, F(513), T(1251)]
def inverse(self, magnitude, phase):
recombine_magnitude_phase = torch.cat(
[magnitude*torch.cos(phase), magnitude*torch.sin(phase)], dim=1)
inverse_transform = F.conv_transpose1d(
recombine_magnitude_phase,
Variable(self.inverse_basis, requires_grad=False),
stride=self.hop_length,
padding=0)
if self.window is not None:
window_sum = window_sumsquare(
self.window, magnitude.size(-1), hop_length=self.hop_length,
win_length=self.win_length, n_fft=self.filter_length,
dtype=np.float32)
# remove modulation effects
approx_nonzero_indices = torch.from_numpy(
np.where(window_sum > tiny(window_sum))[0])
window_sum = torch.autograd.Variable(
torch.from_numpy(window_sum), requires_grad=False)
window_sum = window_sum.cuda() if magnitude.is_cuda else window_sum
inverse_transform[:, :, approx_nonzero_indices] /= window_sum[approx_nonzero_indices]
# scale by hop ratio
inverse_transform *= float(self.filter_length) / self.hop_length
inverse_transform = inverse_transform[:, :, int(self.filter_length/2):]
        inverse_transform = inverse_transform[:, :, :-int(self.filter_length/2)]
return inverse_transform #[batch_size, 1, sample_num]
def forward(self, input_data):
self.magnitude, self.phase = self.transform(input_data)
reconstruction = self.inverse(self.magnitude, self.phase)
return reconstruction
if __name__ == '__main__':
a = torch.randn(4, 320000)
stft = STFT()
mag, phase = stft.transform(a)
# rec_a = stft.inverse(mag, phase)
print(mag.shape)
| EXA-1-master | exa/models/AudioGPT/sound_extraction/utils/stft.py |
import torch
import numpy as np
def add_noise_and_scale(front, noise, snr_l=0, snr_h=0, scale_lower=1.0, scale_upper=1.0):
"""
:param front: front-head audio, like vocal [samples,channel], will be normlized so any scale will be fine
:param noise: noise, [samples,channel], any scale
:param snr_l: Optional
:param snr_h: Optional
:param scale_lower: Optional
:param scale_upper: Optional
:return: scaled front and noise (noisy = front + noise), all_mel_e2e outputs are noramlized within [-1 , 1]
"""
snr = None
noise, front = normalize_energy_torch(noise), normalize_energy_torch(front) # set noise and vocal to equal range [-1,1]
# print("normalize:",torch.max(noise),torch.max(front))
if snr_l is not None and snr_h is not None:
front, noise, snr = _random_noise(front, noise, snr_l=snr_l, snr_h=snr_h) # remix them with a specific snr
noisy, noise, front = unify_energy_torch(noise + front, noise, front) # normalize noisy, noise and vocal energy into [-1,1]
# print("unify:", torch.max(noise), torch.max(front), torch.max(noisy))
scale = _random_scale(scale_lower, scale_upper) # random scale these three signal
# print("Scale",scale)
noisy, noise, front = noisy * scale, noise * scale, front * scale # apply scale
# print("after scale", torch.max(noisy), torch.max(noise), torch.max(front), snr, scale)
front, noise = _to_numpy(front), _to_numpy(noise) # [num_samples]
mixed_wav = front + noise
return front, noise, mixed_wav, snr, scale
def _random_scale(lower=0.3, upper=0.9):
return float(uniform_torch(lower, upper))
def _random_noise(clean, noise, snr_l=None, snr_h=None):
snr = uniform_torch(snr_l,snr_h)
clean_weight = 10 ** (float(snr) / 20)
return clean, noise/clean_weight, snr
def _to_numpy(wav):
return np.transpose(wav, (1, 0))[0].numpy() # [num_samples]
def normalize_energy(audio, alpha = 1):
'''
:param audio: 1d waveform, [batchsize, *],
:param alpha: the value of output range from: [-alpha,alpha]
:return: 1d waveform which value range from: [-alpha,alpha]
'''
val_max = activelev(audio)
return (audio / val_max) * alpha
def normalize_energy_torch(audio, alpha = 1):
    '''
    Torch version of normalize_energy: rescale the waveform so its maximum absolute value equals alpha.
    :param audio: 1d waveform tensor
    :param alpha: the value of output range from: [-alpha,alpha]
    :return: 1d waveform which value range from: [-alpha,alpha]
    '''
val_max = activelev_torch([audio])
return (audio / val_max) * alpha
def unify_energy(*args):
max_amp = activelev(args)
mix_scale = 1.0/max_amp
return [x * mix_scale for x in args]
def unify_energy_torch(*args):
max_amp = activelev_torch(args)
mix_scale = 1.0/max_amp
return [x * mix_scale for x in args]
def activelev(*args):
    '''
    Return the maximum absolute amplitude across the given signal(s) (numpy version).
    TODO: update to match the MATLAB implementation.
    '''
return np.max(np.abs([*args]))
def activelev_torch(*args):
    '''
    Return the maximum absolute amplitude across the given signal(s) (torch version).
    TODO: update to match the MATLAB implementation.
    '''
res = []
args = args[0]
for each in args:
res.append(torch.max(torch.abs(each)))
return max(res)
def uniform_torch(lower, upper):
if(abs(lower-upper)<1e-5):
return upper
return (upper-lower)*torch.rand(1)+lower
if __name__ == "__main__":
wav1 = torch.randn(1, 32000)
wav2 = torch.randn(1, 32000)
    target, noise, mixed_wav, snr, scale = add_noise_and_scale(wav1, wav2)
| EXA-1-master | exa/models/AudioGPT/sound_extraction/utils/create_mixtures.py |
import torch
import torch.nn as nn
class Film(nn.Module):
def __init__(self, channels, cond_embedding_dim):
super(Film, self).__init__()
self.linear = nn.Sequential(
nn.Linear(cond_embedding_dim, channels * 2),
nn.ReLU(inplace=True),
nn.Linear(channels * 2, channels),
nn.ReLU(inplace=True)
)
def forward(self, data, cond_vec):
"""
:param data: [batchsize, channels, samples] or [batchsize, channels, T, F] or [batchsize, channels, F, T]
:param cond_vec: [batchsize, cond_embedding_dim]
:return:
"""
bias = self.linear(cond_vec) # [batchsize, channels]
if len(list(data.size())) == 3:
data = data + bias[..., None]
elif len(list(data.size())) == 4:
data = data + bias[..., None, None]
else:
print("Warning: The size of input tensor,", data.size(), "is not correct. Film is not working.")
return data | EXA-1-master | exa/models/AudioGPT/sound_extraction/model/film.py |
from .modules import *
import numpy as np
class UNetRes_FiLM(nn.Module):
def __init__(self, channels, cond_embedding_dim, nsrc=1):
super(UNetRes_FiLM, self).__init__()
activation = 'relu'
momentum = 0.01
self.nsrc = nsrc
self.channels = channels
self.downsample_ratio = 2 ** 6 # This number equals 2^{#encoder_blocks}
self.encoder_block1 = EncoderBlockRes2BCond(in_channels=channels * nsrc, out_channels=32,
downsample=(2, 2), activation=activation, momentum=momentum,
cond_embedding_dim=cond_embedding_dim)
self.encoder_block2 = EncoderBlockRes2BCond(in_channels=32, out_channels=64,
downsample=(2, 2), activation=activation, momentum=momentum,
cond_embedding_dim=cond_embedding_dim)
self.encoder_block3 = EncoderBlockRes2BCond(in_channels=64, out_channels=128,
downsample=(2, 2), activation=activation, momentum=momentum,
cond_embedding_dim=cond_embedding_dim)
self.encoder_block4 = EncoderBlockRes2BCond(in_channels=128, out_channels=256,
downsample=(2, 2), activation=activation, momentum=momentum,
cond_embedding_dim=cond_embedding_dim)
self.encoder_block5 = EncoderBlockRes2BCond(in_channels=256, out_channels=384,
downsample=(2, 2), activation=activation, momentum=momentum,
cond_embedding_dim=cond_embedding_dim)
self.encoder_block6 = EncoderBlockRes2BCond(in_channels=384, out_channels=384,
downsample=(2, 2), activation=activation, momentum=momentum,
cond_embedding_dim=cond_embedding_dim)
self.conv_block7 = ConvBlockResCond(in_channels=384, out_channels=384,
kernel_size=(3, 3), activation=activation, momentum=momentum,
cond_embedding_dim=cond_embedding_dim)
self.decoder_block1 = DecoderBlockRes2BCond(in_channels=384, out_channels=384,
stride=(2, 2), activation=activation, momentum=momentum,
cond_embedding_dim=cond_embedding_dim)
self.decoder_block2 = DecoderBlockRes2BCond(in_channels=384, out_channels=384,
stride=(2, 2), activation=activation, momentum=momentum,
cond_embedding_dim=cond_embedding_dim)
self.decoder_block3 = DecoderBlockRes2BCond(in_channels=384, out_channels=256,
stride=(2, 2), activation=activation, momentum=momentum,
cond_embedding_dim=cond_embedding_dim)
self.decoder_block4 = DecoderBlockRes2BCond(in_channels=256, out_channels=128,
stride=(2, 2), activation=activation, momentum=momentum,
cond_embedding_dim=cond_embedding_dim)
self.decoder_block5 = DecoderBlockRes2BCond(in_channels=128, out_channels=64,
stride=(2, 2), activation=activation, momentum=momentum,
cond_embedding_dim=cond_embedding_dim)
self.decoder_block6 = DecoderBlockRes2BCond(in_channels=64, out_channels=32,
stride=(2, 2), activation=activation, momentum=momentum,
cond_embedding_dim=cond_embedding_dim)
self.after_conv_block1 = ConvBlockResCond(in_channels=32, out_channels=32,
kernel_size=(3, 3), activation=activation, momentum=momentum,
cond_embedding_dim=cond_embedding_dim)
self.after_conv2 = nn.Conv2d(in_channels=32, out_channels=1,
kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True)
self.init_weights()
def init_weights(self):
init_layer(self.after_conv2)
def forward(self, sp, cond_vec, dec_cond_vec):
"""
Args:
input: sp: (batch_size, channels_num, segment_samples)
Outputs:
output_dict: {
'wav': (batch_size, channels_num, segment_samples),
'sp': (batch_size, channels_num, time_steps, freq_bins)}
"""
x = sp
# Pad spectrogram to be evenly divided by downsample ratio.
origin_len = x.shape[2] # time_steps
pad_len = int(np.ceil(x.shape[2] / self.downsample_ratio)) * self.downsample_ratio - origin_len
x = F.pad(x, pad=(0, 0, 0, pad_len))
x = x[..., 0: x.shape[-1] - 2] # (bs, channels, T, F)
# UNet
(x1_pool, x1) = self.encoder_block1(x, cond_vec) # x1_pool: (bs, 32, T / 2, F / 2)
(x2_pool, x2) = self.encoder_block2(x1_pool, cond_vec) # x2_pool: (bs, 64, T / 4, F / 4)
(x3_pool, x3) = self.encoder_block3(x2_pool, cond_vec) # x3_pool: (bs, 128, T / 8, F / 8)
(x4_pool, x4) = self.encoder_block4(x3_pool, dec_cond_vec) # x4_pool: (bs, 256, T / 16, F / 16)
        (x5_pool, x5) = self.encoder_block5(x4_pool, dec_cond_vec)  # x5_pool: (bs, 384, T / 32, F / 32)
        (x6_pool, x6) = self.encoder_block6(x5_pool, dec_cond_vec)  # x6_pool: (bs, 384, T / 64, F / 64)
        x_center = self.conv_block7(x6_pool, dec_cond_vec)  # (bs, 384, T / 64, F / 64)
        x7 = self.decoder_block1(x_center, x6, dec_cond_vec)  # (bs, 384, T / 32, F / 32)
        x8 = self.decoder_block2(x7, x5, dec_cond_vec)  # (bs, 384, T / 16, F / 16)
x9 = self.decoder_block3(x8, x4, cond_vec) # (bs, 256, T / 8, F / 8)
x10 = self.decoder_block4(x9, x3, cond_vec) # (bs, 128, T / 4, F / 4)
x11 = self.decoder_block5(x10, x2, cond_vec) # (bs, 64, T / 2, F / 2)
x12 = self.decoder_block6(x11, x1, cond_vec) # (bs, 32, T, F)
x = self.after_conv_block1(x12, cond_vec) # (bs, 32, T, F)
x = self.after_conv2(x) # (bs, channels, T, F)
# Recover shape
x = F.pad(x, pad=(0, 2))
x = x[:, :, 0: origin_len, :]
return x
if __name__ == "__main__":
model = UNetRes_FiLM(channels=1, cond_embedding_dim=16)
cond_vec = torch.randn((1, 16))
dec_vec = cond_vec
print(model(torch.randn((1, 1, 1001, 513)), cond_vec, dec_vec).size())
| EXA-1-master | exa/models/AudioGPT/sound_extraction/model/resunet_film.py |
import torch
import torch.nn as nn
from transformers import BertModel, BertTokenizer
import warnings
warnings.filterwarnings('ignore')
# pretrained model name: (model class, model tokenizer, output dimension, token style)
MODELS = {
'prajjwal1/bert-mini': (BertModel, BertTokenizer),
}
class Text_Encoder(nn.Module):
def __init__(self, device):
super(Text_Encoder, self).__init__()
self.base_model = 'prajjwal1/bert-mini'
self.dropout = 0.1
self.tokenizer = MODELS[self.base_model][1].from_pretrained(self.base_model)
self.bert_layer = MODELS[self.base_model][0].from_pretrained(self.base_model,
add_pooling_layer=False,
hidden_dropout_prob=self.dropout,
attention_probs_dropout_prob=self.dropout,
output_hidden_states=True)
self.linear_layer = nn.Sequential(nn.Linear(256, 256), nn.ReLU(inplace=True))
self.device = device
def tokenize(self, caption):
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
tokenized = self.tokenizer(caption, add_special_tokens=False, padding=True, return_tensors='pt')
input_ids = tokenized['input_ids']
attns_mask = tokenized['attention_mask']
input_ids = input_ids.to(self.device)
attns_mask = attns_mask.to(self.device)
return input_ids, attns_mask
def forward(self, input_ids, attns_mask):
# input_ids, attns_mask = self.tokenize(caption)
output = self.bert_layer(input_ids=input_ids, attention_mask=attns_mask)[0]
cls_embed = output[:, 0, :]
text_embed = self.linear_layer(cls_embed)
return text_embed, output # text_embed: (batch, hidden_size) | EXA-1-master | exa/models/AudioGPT/sound_extraction/model/text_encoder.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .film import Film
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, activation, momentum):
super(ConvBlock, self).__init__()
self.activation = activation
padding = (kernel_size[0] // 2, kernel_size[1] // 2)
self.conv1 = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=(1, 1),
dilation=(1, 1),
padding=padding,
bias=False,
)
self.bn1 = nn.BatchNorm2d(out_channels, momentum=momentum)
self.conv2 = nn.Conv2d(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=(1, 1),
dilation=(1, 1),
padding=padding,
bias=False,
)
self.bn2 = nn.BatchNorm2d(out_channels, momentum=momentum)
self.init_weights()
def init_weights(self):
init_layer(self.conv1)
init_layer(self.conv2)
init_bn(self.bn1)
init_bn(self.bn2)
def forward(self, x):
x = act(self.bn1(self.conv1(x)), self.activation)
x = act(self.bn2(self.conv2(x)), self.activation)
return x
class EncoderBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, downsample, activation, momentum):
super(EncoderBlock, self).__init__()
self.conv_block = ConvBlock(
in_channels, out_channels, kernel_size, activation, momentum
)
self.downsample = downsample
def forward(self, x):
encoder = self.conv_block(x)
encoder_pool = F.avg_pool2d(encoder, kernel_size=self.downsample)
return encoder_pool, encoder
class DecoderBlock(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, upsample, activation, momentum):
super(DecoderBlock, self).__init__()
self.kernel_size = kernel_size
self.stride = upsample
self.activation = activation
self.conv1 = torch.nn.ConvTranspose2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=self.stride,
stride=self.stride,
padding=(0, 0),
bias=False,
dilation=(1, 1),
)
self.bn1 = nn.BatchNorm2d(out_channels, momentum=momentum)
self.conv_block2 = ConvBlock(
out_channels * 2, out_channels, kernel_size, activation, momentum
)
def init_weights(self):
init_layer(self.conv1)
init_bn(self.bn)
def prune(self, x):
"""Prune the shape of x after transpose convolution."""
padding = (self.kernel_size[0] // 2, self.kernel_size[1] // 2)
x = x[
:,
:,
padding[0] : padding[0] - self.stride[0],
padding[1] : padding[1] - self.stride[1]]
return x
def forward(self, input_tensor, concat_tensor):
x = act(self.bn1(self.conv1(input_tensor)), self.activation)
# from IPython import embed; embed(using=False); os._exit(0)
# x = self.prune(x)
x = torch.cat((x, concat_tensor), dim=1)
x = self.conv_block2(x)
return x
class EncoderBlockRes1B(nn.Module):
def __init__(self, in_channels, out_channels, downsample, activation, momentum):
super(EncoderBlockRes1B, self).__init__()
size = (3,3)
self.conv_block1 = ConvBlockRes(in_channels, out_channels, size, activation, momentum)
self.conv_block2 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
self.conv_block3 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
self.conv_block4 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
self.downsample = downsample
def forward(self, x):
encoder = self.conv_block1(x)
encoder = self.conv_block2(encoder)
encoder = self.conv_block3(encoder)
encoder = self.conv_block4(encoder)
encoder_pool = F.avg_pool2d(encoder, kernel_size=self.downsample)
return encoder_pool, encoder
class DecoderBlockRes1B(nn.Module):
def __init__(self, in_channels, out_channels, stride, activation, momentum):
super(DecoderBlockRes1B, self).__init__()
size = (3,3)
self.activation = activation
self.conv1 = torch.nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=size, stride=stride,
padding=(0, 0), output_padding=(0, 0), bias=False, dilation=1)
self.bn1 = nn.BatchNorm2d(in_channels)
self.conv_block2 = ConvBlockRes(out_channels * 2, out_channels, size, activation, momentum)
self.conv_block3 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
self.conv_block4 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
self.conv_block5 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
def init_weights(self):
init_layer(self.conv1)
def prune(self, x, both=False):
"""Prune the shape of x after transpose convolution.
"""
if(both): x = x[:, :, 0 : - 1, 0:-1]
else: x = x[:, :, 0: - 1, :]
return x
def forward(self, input_tensor, concat_tensor,both=False):
x = self.conv1(F.relu_(self.bn1(input_tensor)))
x = self.prune(x,both=both)
x = torch.cat((x, concat_tensor), dim=1)
x = self.conv_block2(x)
x = self.conv_block3(x)
x = self.conv_block4(x)
x = self.conv_block5(x)
return x
class EncoderBlockRes2BCond(nn.Module):
def __init__(self, in_channels, out_channels, downsample, activation, momentum, cond_embedding_dim):
super(EncoderBlockRes2BCond, self).__init__()
size = (3, 3)
self.conv_block1 = ConvBlockResCond(in_channels, out_channels, size, activation, momentum, cond_embedding_dim)
self.conv_block2 = ConvBlockResCond(out_channels, out_channels, size, activation, momentum, cond_embedding_dim)
self.downsample = downsample
def forward(self, x, cond_vec):
encoder = self.conv_block1(x, cond_vec)
encoder = self.conv_block2(encoder, cond_vec)
encoder_pool = F.avg_pool2d(encoder, kernel_size=self.downsample)
return encoder_pool, encoder
class DecoderBlockRes2BCond(nn.Module):
def __init__(self, in_channels, out_channels, stride, activation, momentum, cond_embedding_dim):
super(DecoderBlockRes2BCond, self).__init__()
size = (3, 3)
self.activation = activation
self.conv1 = torch.nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=size, stride=stride,
padding=(0, 0), output_padding=(0, 0), bias=False, dilation=1)
self.bn1 = nn.BatchNorm2d(in_channels)
self.conv_block2 = ConvBlockResCond(out_channels * 2, out_channels, size, activation, momentum, cond_embedding_dim)
self.conv_block3 = ConvBlockResCond(out_channels, out_channels, size, activation, momentum, cond_embedding_dim)
def init_weights(self):
init_layer(self.conv1)
def prune(self, x, both=False):
"""Prune the shape of x after transpose convolution.
"""
if(both): x = x[:, :, 0 : - 1, 0:-1]
else: x = x[:, :, 0: - 1, :]
return x
def forward(self, input_tensor, concat_tensor, cond_vec, both=False):
x = self.conv1(F.relu_(self.bn1(input_tensor)))
x = self.prune(x, both=both)
x = torch.cat((x, concat_tensor), dim=1)
x = self.conv_block2(x, cond_vec)
x = self.conv_block3(x, cond_vec)
return x
class EncoderBlockRes4BCond(nn.Module):
def __init__(self, in_channels, out_channels, downsample, activation, momentum, cond_embedding_dim):
        super(EncoderBlockRes4BCond, self).__init__()
size = (3,3)
self.conv_block1 = ConvBlockResCond(in_channels, out_channels, size, activation, momentum, cond_embedding_dim)
self.conv_block2 = ConvBlockResCond(out_channels, out_channels, size, activation, momentum, cond_embedding_dim)
self.conv_block3 = ConvBlockResCond(out_channels, out_channels, size, activation, momentum, cond_embedding_dim)
self.conv_block4 = ConvBlockResCond(out_channels, out_channels, size, activation, momentum, cond_embedding_dim)
self.downsample = downsample
def forward(self, x, cond_vec):
encoder = self.conv_block1(x, cond_vec)
encoder = self.conv_block2(encoder, cond_vec)
encoder = self.conv_block3(encoder, cond_vec)
encoder = self.conv_block4(encoder, cond_vec)
encoder_pool = F.avg_pool2d(encoder, kernel_size=self.downsample)
return encoder_pool, encoder
class DecoderBlockRes4BCond(nn.Module):
def __init__(self, in_channels, out_channels, stride, activation, momentum, cond_embedding_dim):
        super(DecoderBlockRes4BCond, self).__init__()
size = (3, 3)
self.activation = activation
self.conv1 = torch.nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=size, stride=stride,
padding=(0, 0), output_padding=(0, 0), bias=False, dilation=1)
self.bn1 = nn.BatchNorm2d(in_channels)
self.conv_block2 = ConvBlockResCond(out_channels * 2, out_channels, size, activation, momentum, cond_embedding_dim)
self.conv_block3 = ConvBlockResCond(out_channels, out_channels, size, activation, momentum, cond_embedding_dim)
self.conv_block4 = ConvBlockResCond(out_channels, out_channels, size, activation, momentum, cond_embedding_dim)
self.conv_block5 = ConvBlockResCond(out_channels, out_channels, size, activation, momentum, cond_embedding_dim)
def init_weights(self):
init_layer(self.conv1)
def prune(self, x, both=False):
"""Prune the shape of x after transpose convolution.
"""
if(both): x = x[:, :, 0 : - 1, 0:-1]
else: x = x[:, :, 0: - 1, :]
return x
def forward(self, input_tensor, concat_tensor, cond_vec, both=False):
x = self.conv1(F.relu_(self.bn1(input_tensor)))
x = self.prune(x,both=both)
x = torch.cat((x, concat_tensor), dim=1)
x = self.conv_block2(x, cond_vec)
x = self.conv_block3(x, cond_vec)
x = self.conv_block4(x, cond_vec)
x = self.conv_block5(x, cond_vec)
return x
class EncoderBlockRes4B(nn.Module):
def __init__(self, in_channels, out_channels, downsample, activation, momentum):
super(EncoderBlockRes4B, self).__init__()
size = (3, 3)
self.conv_block1 = ConvBlockRes(in_channels, out_channels, size, activation, momentum)
self.conv_block2 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
self.conv_block3 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
self.conv_block4 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
self.downsample = downsample
def forward(self, x):
encoder = self.conv_block1(x)
encoder = self.conv_block2(encoder)
encoder = self.conv_block3(encoder)
encoder = self.conv_block4(encoder)
encoder_pool = F.avg_pool2d(encoder, kernel_size=self.downsample)
return encoder_pool, encoder
class DecoderBlockRes4B(nn.Module):
def __init__(self, in_channels, out_channels, stride, activation, momentum):
super(DecoderBlockRes4B, self).__init__()
size = (3,3)
self.activation = activation
self.conv1 = torch.nn.ConvTranspose2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=size, stride=stride,
padding=(0, 0), output_padding=(0, 0), bias=False, dilation=1)
self.bn1 = nn.BatchNorm2d(in_channels)
self.conv_block2 = ConvBlockRes(out_channels * 2, out_channels, size, activation, momentum)
self.conv_block3 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
self.conv_block4 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
self.conv_block5 = ConvBlockRes(out_channels, out_channels, size, activation, momentum)
def init_weights(self):
init_layer(self.conv1)
def prune(self, x, both=False):
"""Prune the shape of x after transpose convolution.
"""
if(both): x = x[:, :, 0 : - 1, 0:-1]
else: x = x[:, :, 0: - 1, :]
return x
def forward(self, input_tensor, concat_tensor,both=False):
x = self.conv1(F.relu_(self.bn1(input_tensor)))
x = self.prune(x,both=both)
x = torch.cat((x, concat_tensor), dim=1)
x = self.conv_block2(x)
x = self.conv_block3(x)
x = self.conv_block4(x)
x = self.conv_block5(x)
return x
class ConvBlockResCond(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, activation, momentum, cond_embedding_dim):
r"""Residual block.
"""
super(ConvBlockResCond, self).__init__()
self.activation = activation
padding = [kernel_size[0] // 2, kernel_size[1] // 2]
self.bn1 = nn.BatchNorm2d(in_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size, stride=(1, 1),
dilation=(1, 1), padding=padding, bias=False)
self.film1 = Film(channels=out_channels, cond_embedding_dim=cond_embedding_dim)
self.conv2 = nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=kernel_size, stride=(1, 1),
dilation=(1, 1), padding=padding, bias=False)
self.film2 = Film(channels=out_channels, cond_embedding_dim=cond_embedding_dim)
if in_channels != out_channels:
self.shortcut = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0))
self.film_res = Film(channels=out_channels, cond_embedding_dim=cond_embedding_dim)
self.is_shortcut = True
else:
self.is_shortcut = False
self.init_weights()
def init_weights(self):
init_bn(self.bn1)
init_bn(self.bn2)
init_layer(self.conv1)
init_layer(self.conv2)
if self.is_shortcut:
init_layer(self.shortcut)
def forward(self, x, cond_vec):
origin = x
x = self.conv1(F.leaky_relu_(self.bn1(x), negative_slope=0.01))
x = self.film1(x, cond_vec)
x = self.conv2(F.leaky_relu_(self.bn2(x), negative_slope=0.01))
x = self.film2(x, cond_vec)
if self.is_shortcut:
residual = self.shortcut(origin)
residual = self.film_res(residual, cond_vec)
return residual + x
else:
return origin + x
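# Illustrative sketch (dummy tensors): a FiLM-conditioned residual block maps a
# feature map plus a conditioning vector (e.g. a text embedding) to a feature map
# of the same spatial size, modulating each conv output with the condition.
def _example_conv_block_res_cond():
    block = ConvBlockResCond(in_channels=1, out_channels=32, kernel_size=(3, 3),
                             activation='relu', momentum=0.01, cond_embedding_dim=256)
    x = torch.randn(2, 1, 64, 64)             # (batch, channels, T, F)
    cond_vec = torch.randn(2, 256)            # (batch, cond_embedding_dim)
    return block(x, cond_vec)                 # (2, 32, 64, 64)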
class ConvBlockRes(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, activation, momentum):
r"""Residual block.
"""
super(ConvBlockRes, self).__init__()
self.activation = activation
padding = [kernel_size[0] // 2, kernel_size[1] // 2]
self.bn1 = nn.BatchNorm2d(in_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size, stride=(1, 1),
dilation=(1, 1), padding=padding, bias=False)
self.conv2 = nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=kernel_size, stride=(1, 1),
dilation=(1, 1), padding=padding, bias=False)
if in_channels != out_channels:
self.shortcut = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels, kernel_size=(1, 1), stride=(1, 1), padding=(0, 0))
self.is_shortcut = True
else:
self.is_shortcut = False
self.init_weights()
def init_weights(self):
init_bn(self.bn1)
init_bn(self.bn2)
init_layer(self.conv1)
init_layer(self.conv2)
if self.is_shortcut:
init_layer(self.shortcut)
def forward(self, x):
origin = x
x = self.conv1(F.leaky_relu_(self.bn1(x), negative_slope=0.01))
x = self.conv2(F.leaky_relu_(self.bn2(x), negative_slope=0.01))
if self.is_shortcut:
return self.shortcut(origin) + x
else:
return origin + x
def init_layer(layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
"""Initialize a Batchnorm layer. """
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.)
def init_gru(rnn):
"""Initialize a GRU layer. """
def _concat_init(tensor, init_funcs):
(length, fan_out) = tensor.shape
fan_in = length // len(init_funcs)
for (i, init_func) in enumerate(init_funcs):
init_func(tensor[i * fan_in: (i + 1) * fan_in, :])
def _inner_uniform(tensor):
fan_in = nn.init._calculate_correct_fan(tensor, 'fan_in')
nn.init.uniform_(tensor, -math.sqrt(3 / fan_in), math.sqrt(3 / fan_in))
for i in range(rnn.num_layers):
_concat_init(
getattr(rnn, 'weight_ih_l{}'.format(i)),
[_inner_uniform, _inner_uniform, _inner_uniform]
)
torch.nn.init.constant_(getattr(rnn, 'bias_ih_l{}'.format(i)), 0)
_concat_init(
getattr(rnn, 'weight_hh_l{}'.format(i)),
[_inner_uniform, _inner_uniform, nn.init.orthogonal_]
)
torch.nn.init.constant_(getattr(rnn, 'bias_hh_l{}'.format(i)), 0)
def act(x, activation):
if activation == 'relu':
return F.relu_(x)
elif activation == 'leaky_relu':
return F.leaky_relu_(x, negative_slope=0.2)
elif activation == 'swish':
return x * torch.sigmoid(x)
else:
raise Exception('Incorrect activation!') | EXA-1-master | exa/models/AudioGPT/sound_extraction/model/modules.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from .text_encoder import Text_Encoder
from .resunet_film import UNetRes_FiLM
class LASSNet(nn.Module):
def __init__(self, device='cuda'):
super(LASSNet, self).__init__()
self.text_embedder = Text_Encoder(device)
self.UNet = UNetRes_FiLM(channels=1, cond_embedding_dim=256)
def forward(self, x, caption):
        # x: (batch, 1, time_steps, freq_bins)
input_ids, attns_mask = self.text_embedder.tokenize(caption)
cond_vec = self.text_embedder(input_ids, attns_mask)[0]
dec_cond_vec = cond_vec
mask = self.UNet(x, cond_vec, dec_cond_vec)
mask = torch.sigmoid(mask)
return mask
def get_tokenizer(self):
return self.text_embedder.tokenizer
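# Illustrative usage sketch (dummy tensors, not the project's inference script):
# LASSNet takes a magnitude-spectrogram-like input plus a text query and predicts
# a [0, 1] mask of the same shape that can be applied to the mixture spectrogram.
# Note: instantiating it downloads the prajjwal1/bert-mini weights on first use.
def _example_lassnet_usage():
    model = LASSNet(device='cpu')
    spec = torch.randn(1, 1, 1001, 513)       # (batch, 1, time_steps, freq_bins)
    mask = model(spec, ["a dog barking"])     # same shape as spec, values in [0, 1]
    masked_spec = spec * mask                 # language-queried source estimate
    return masked_spec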
| EXA-1-master | exa/models/AudioGPT/sound_extraction/model/LASSNet.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import torch as th
import torch.nn as nn
import torch.nn.functional as F
class TimeWarperFunction(th.autograd.Function):
@staticmethod
def forward(ctx, input, warpfield):
'''
:param ctx: autograd context
:param input: input signal (B x 2 x T)
:param warpfield: the corresponding warpfield (B x 2 x T)
:return: the warped signal (B x 2 x T)
'''
ctx.save_for_backward(input, warpfield)
# compute index list to lookup warped input values
idx_left = warpfield.floor().type(th.long)
idx_right = th.clamp(warpfield.ceil().type(th.long), max=input.shape[-1]-1)
# compute weight for linear interpolation
alpha = warpfield - warpfield.floor()
# linear interpolation
output = (1 - alpha) * th.gather(input, 2, idx_left) + alpha * th.gather(input, 2, idx_right)
return output
@staticmethod
def backward(ctx, grad_output):
input, warpfield = ctx.saved_tensors
# compute index list to lookup warped input values
idx_left = warpfield.floor().type(th.long)
idx_right = th.clamp(warpfield.ceil().type(th.long), max=input.shape[-1]-1)
# warpfield gradient
grad_warpfield = th.gather(input, 2, idx_right) - th.gather(input, 2, idx_left)
grad_warpfield = grad_output * grad_warpfield
# input gradient
grad_input = th.zeros(input.shape, device=input.device)
alpha = warpfield - warpfield.floor()
grad_input = grad_input.scatter_add(2, idx_left, grad_output * (1 - alpha)) + \
grad_input.scatter_add(2, idx_right, grad_output * alpha)
return grad_input, grad_warpfield
class TimeWarper(nn.Module):
def __init__(self):
super().__init__()
self.warper = TimeWarperFunction().apply
def _to_absolute_positions(self, warpfield, seq_length):
# translate warpfield from relative warp indices to absolute indices ([1...T] + warpfield)
temp_range = th.arange(seq_length, dtype=th.float)
temp_range = temp_range.cuda() if warpfield.is_cuda else temp_range
return th.clamp(warpfield + temp_range[None, None, :], min=0, max=seq_length-1)
def forward(self, input, warpfield):
'''
:param input: audio signal to be warped (B x 2 x T)
:param warpfield: the corresponding warpfield (B x 2 x T)
:return: the warped signal (B x 2 x T)
'''
warpfield = self._to_absolute_positions(warpfield, input.shape[-1])
warped = self.warper(input, warpfield)
return warped
class MonotoneTimeWarper(TimeWarper):
def forward(self, input, warpfield):
'''
:param input: audio signal to be warped (B x 2 x T)
:param warpfield: the corresponding warpfield (B x 2 x T)
:return: the warped signal (B x 2 x T), ensured to be monotonous
'''
warpfield = self._to_absolute_positions(warpfield, input.shape[-1])
# ensure monotonicity: each warp must be at least as big as previous_warp-1
warpfield = th.cummax(warpfield, dim=-1)[0]
# print('warpfield ',warpfield.shape)
# warp
warped = self.warper(input, warpfield)
return warped
class GeometricTimeWarper(TimeWarper):
def __init__(self, sampling_rate=48000):
super().__init__()
self.sampling_rate = sampling_rate
def displacements2warpfield(self, displacements, seq_length):
distance = th.sum(displacements**2, dim=2) ** 0.5
distance = F.interpolate(distance, size=seq_length)
warpfield = -distance / 343.0 * self.sampling_rate
return warpfield
def forward(self, input, displacements):
'''
:param input: audio signal to be warped (B x 2 x T)
:param displacements: sequence of 3D displacement vectors for geometric warping (B x 3 x T)
:return: the warped signal (B x 2 x T)
'''
warpfield = self.displacements2warpfield(displacements, input.shape[-1])
# print('Ge warpfield ', warpfield.shape)
# assert 1==2
warped = super().forward(input, warpfield)
return warped
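# Illustration (dummy data): GeometricTimeWarper delays a two-channel copy of a
# mono signal according to the source-receiver distance implied by per-ear 3D
# displacements, assuming 48 kHz audio and the 343 m/s speed of sound used above.
def _example_geometric_time_warper():
    warper = GeometricTimeWarper(sampling_rate=48000)
    signal = th.randn(1, 2, 4800)             # (B, 2, T): mono signal duplicated per ear
    displacements = th.zeros(1, 2, 3, 12)     # (B, ears, xyz, K) displacement vectors
    displacements[:, :, 0, :] = 1.0           # constant 1 m distance along the x-axis
    return warper(signal, displacements)      # delayed by ~140 samples (1 m at 343 m/s)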
| EXA-1-master | exa/models/AudioGPT/mono2binaural/src/warping.py |
import numpy as np
import scipy.linalg
from scipy.spatial.transform import Rotation as R
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from src.warping import GeometricTimeWarper, MonotoneTimeWarper
from src.utils import Net
class GeometricWarper(nn.Module):
def __init__(self, sampling_rate=48000):
super().__init__()
self.warper = GeometricTimeWarper(sampling_rate=sampling_rate)
def _transmitter_mouth(self, view):
# offset between tracking markers and real mouth position in the dataset
mouth_offset = np.array([0.09, 0, -0.20])
quat = view[:, 3:, :].transpose(2, 1).contiguous().detach().cpu().view(-1, 4).numpy()
# make sure zero-padded values are set to non-zero values (else scipy raises an exception)
norms = scipy.linalg.norm(quat, axis=1)
eps_val = (norms == 0).astype(np.float32)
quat = quat + eps_val[:, None]
transmitter_rot_mat = R.from_quat(quat)
transmitter_mouth = transmitter_rot_mat.apply(mouth_offset, inverse=True)
transmitter_mouth = th.Tensor(transmitter_mouth).view(view.shape[0], -1, 3).transpose(2, 1).contiguous()
if view.is_cuda:
transmitter_mouth = transmitter_mouth.cuda()
return transmitter_mouth
def _3d_displacements(self, view):
transmitter_mouth = self._transmitter_mouth(view)
# offset between tracking markers and ears in the dataset
left_ear_offset = th.Tensor([0, -0.08, -0.22]).cuda() if view.is_cuda else th.Tensor([0, -0.08, -0.22])
right_ear_offset = th.Tensor([0, 0.08, -0.22]).cuda() if view.is_cuda else th.Tensor([0, 0.08, -0.22])
# compute displacements between transmitter mouth and receiver left/right ear
displacement_left = view[:, 0:3, :] + transmitter_mouth - left_ear_offset[None, :, None]
displacement_right = view[:, 0:3, :] + transmitter_mouth - right_ear_offset[None, :, None]
displacement = th.stack([displacement_left, displacement_right], dim=1)
return displacement
def _warpfield(self, view, seq_length):
return self.warper.displacements2warpfield(self._3d_displacements(view), seq_length)
def forward(self, mono, view):
'''
:param mono: input signal as tensor of shape B x 1 x T
:param view: rx/tx position/orientation as tensor of shape B x 7 x K (K = T / 400)
:return: warped: warped left/right ear signal as tensor of shape B x 2 x T
'''
return self.warper(th.cat([mono, mono], dim=1), self._3d_displacements(view))
class Warpnet(nn.Module):
def __init__(self, layers=4, channels=64, view_dim=7):
super().__init__()
self.layers = [nn.Conv1d(view_dim if l == 0 else channels, channels, kernel_size=2) for l in range(layers)]
self.layers = nn.ModuleList(self.layers)
self.linear = nn.Conv1d(channels, 2, kernel_size=1)
self.neural_warper = MonotoneTimeWarper()
self.geometric_warper = GeometricWarper()
def neural_warpfield(self, view, seq_length):
warpfield = view
for layer in self.layers:
warpfield = F.pad(warpfield, pad=[1, 0])
warpfield = F.relu(layer(warpfield))
warpfield = self.linear(warpfield)
warpfield = F.interpolate(warpfield, size=seq_length)
return warpfield
def forward(self, mono, view):
'''
:param mono: input signal as tensor of shape B x 1 x T
:param view: rx/tx position/orientation as tensor of shape B x 7 x K (K = T / 400)
:return: warped: warped left/right ear signal as tensor of shape B x 2 x T
'''
geometric_warpfield = self.geometric_warper._warpfield(view, mono.shape[-1])
neural_warpfield = self.neural_warpfield(view, mono.shape[-1])
warpfield = geometric_warpfield + neural_warpfield
# ensure causality: clamp the warpfield to non-positive values so the
# warp can only read past samples, never future ones
warpfield = -F.relu(-warpfield)
warped = self.neural_warper(th.cat([mono, mono], dim=1), warpfield)
return warped
class BinauralNetwork(Net):
def __init__(self,
view_dim=7,
warpnet_layers=4,
warpnet_channels=64,
model_name='binaural_network',
use_cuda=True):
super().__init__(model_name, use_cuda)
self.warper = Warpnet(warpnet_layers, warpnet_channels)
if self.use_cuda:
self.cuda()
def forward(self, mono, view):
'''
:param mono: the input signal as a tensor of shape B x 1 x T
:param view: rx/tx position/orientation as a tensor of shape B x 7 x K (K = T / 400)
:return: warped: the warped left/right ear signal as a tensor of shape B x 2 x T
'''
warped = self.warper(mono, view)
return warped
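# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original AudioGPT source): a minimal
# example of driving BinauralNetwork on random data, assuming the shapes
# documented above (48 kHz mono audio, rx/tx tracking at 1/400 of the audio
# rate). The batch size, clip length and CPU-only flag are illustrative only.
if __name__ == '__main__':
    demo_net = BinauralNetwork(view_dim=7, warpnet_layers=4, warpnet_channels=64, use_cuda=False)
    demo_mono = th.randn(1, 1, 48000)             # B x 1 x T mono input
    demo_view = th.randn(1, 7, 48000 // 400)      # B x 7 x K rx/tx position/orientation track
    demo_warped = demo_net(demo_mono, demo_view)  # B x 2 x T warped left/right ear signal
    print(demo_warped.shape)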
| EXA-1-master | exa/models/AudioGPT/mono2binaural/src/models.py |
"""
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch as th
#import torchaudio as ta
class Net(th.nn.Module):
def __init__(self, model_name="network", use_cuda=True):
super().__init__()
self.use_cuda = use_cuda
self.model_name = model_name
def save(self, model_dir, suffix=''):
'''
save the network to model_dir/model_name.suffix.net
:param model_dir: directory to save the model to
:param suffix: suffix to append after model name
'''
if self.use_cuda:
self.cpu()
if suffix == "":
fname = f"{model_dir}/{self.model_name}.net"
else:
fname = f"{model_dir}/{self.model_name}.{suffix}.net"
th.save(self.state_dict(), fname)
if self.use_cuda:
self.cuda()
def load_from_file(self, model_file):
'''
load network parameters from model_file
:param model_file: file containing the model parameters
'''
if self.use_cuda:
self.cpu()
states = th.load(model_file)
self.load_state_dict(states)
if self.use_cuda:
self.cuda()
print(f"Loaded: {model_file}")
def load(self, model_dir, suffix=''):
'''
load network parameters from model_dir/model_name.suffix.net
:param model_dir: directory to load the model from
:param suffix: suffix to append after model name
'''
if suffix == "":
fname = f"{model_dir}/{self.model_name}.net"
else:
fname = f"{model_dir}/{self.model_name}.{suffix}.net"
self.load_from_file(fname)
def num_trainable_parameters(self):
'''
:return: the number of trainable parameters in the model
'''
return sum(p.numel() for p in self.parameters() if p.requires_grad)
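# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): how the save/load naming
# convention documented above plays out. The directory and suffix below are
# placeholders, not paths used anywhere in the repository.
if __name__ == '__main__':
    demo_net = Net(model_name='demo', use_cuda=False)
    print(demo_net.num_trainable_parameters())  # 0 for the bare base class
    # demo_net.save('/tmp', suffix='best')      # would write /tmp/demo.best.net
    # demo_net.load('/tmp', suffix='best')      # reads the same file back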
# class NewbobAdam(th.optim.Adam):
# def __init__(self,
# weights,
# net,
# artifacts_dir,
# initial_learning_rate=0.001,
# decay=0.5,
# max_decay=0.01
# ):
# '''
# Newbob learning rate scheduler
# :param weights: weights to optimize
# :param net: the network, must be an instance of type src.utils.Net
# :param artifacts_dir: (str) directory to save/restore models to/from
# :param initial_learning_rate: (float) initial learning rate
# :param decay: (float) value to decrease learning rate by when loss doesn't improve further
# :param max_decay: (float) maximum decay of learning rate
# '''
# super().__init__(weights, lr=initial_learning_rate)
# self.last_epoch_loss = np.inf
# self.total_decay = 1
# self.net = net
# self.decay = decay
# self.max_decay = max_decay
# self.artifacts_dir = artifacts_dir
# # store initial state as backup
# if decay < 1.0:
# net.save(artifacts_dir, suffix="newbob")
# def update_lr(self, loss):
# '''
# update the learning rate based on the current loss value and historic loss values
# :param loss: the loss after the current iteration
# '''
# if loss > self.last_epoch_loss and self.decay < 1.0 and self.total_decay > self.max_decay:
# self.total_decay = self.total_decay * self.decay
# print(f"NewbobAdam: Decay learning rate (loss degraded from {self.last_epoch_loss} to {loss})."
# f"Total decay: {self.total_decay}")
# # restore previous network state
# self.net.load(self.artifacts_dir, suffix="newbob")
# # decrease learning rate
# for param_group in self.param_groups:
# param_group['lr'] = param_group['lr'] * self.decay
# else:
# self.last_epoch_loss = loss
# # save last snapshot to restore it in case of lr decrease
# if self.decay < 1.0 and self.total_decay > self.max_decay:
# self.net.save(self.artifacts_dir, suffix="newbob")
# class FourierTransform:
# def __init__(self,
# fft_bins=2048,
# win_length_ms=40,
# frame_rate_hz=100,
# causal=False,
# preemphasis=0.0,
# sample_rate=48000,
# normalized=False):
# self.sample_rate = sample_rate
# self.frame_rate_hz = frame_rate_hz
# self.preemphasis = preemphasis
# self.fft_bins = fft_bins
# self.win_length = int(sample_rate * win_length_ms / 1000)
# self.hop_length = int(sample_rate / frame_rate_hz)
# self.causal = causal
# self.normalized = normalized
# if self.win_length > self.fft_bins:
# print('FourierTransform Warning: fft_bins should be larger than win_length')
# def _convert_format(self, data, expected_dims):
# if not type(data) == th.Tensor:
# data = th.Tensor(data)
# if len(data.shape) < expected_dims:
# data = data.unsqueeze(0)
# if not len(data.shape) == expected_dims:
# raise Exception(f"FourierTransform: data needs to be a Tensor with {expected_dims} dimensions but got shape {data.shape}")
# return data
# def _preemphasis(self, audio):
# if self.preemphasis > 0:
# return th.cat((audio[:, 0:1], audio[:, 1:] - self.preemphasis * audio[:, :-1]), dim=1)
# return audio
# def _revert_preemphasis(self, audio):
# if self.preemphasis > 0:
# for i in range(1, audio.shape[1]):
# audio[:, i] = audio[:, i] + self.preemphasis * audio[:, i-1]
# return audio
# def _magphase(self, complex_stft):
# mag, phase = ta.functional.magphase(complex_stft, 1.0)
# return mag, phase
# def stft(self, audio):
# '''
# wrapper around th.stft
# audio: wave signal as th.Tensor
# '''
# hann = th.hann_window(self.win_length)
# hann = hann.cuda() if audio.is_cuda else hann
# spec = th.stft(audio, n_fft=self.fft_bins, hop_length=self.hop_length, win_length=self.win_length,
# window=hann, center=not self.causal, normalized=self.normalized)
# return spec.contiguous()
# def complex_spectrogram(self, audio):
# '''
# audio: wave signal as th.Tensor
# return: th.Tensor of size channels x frequencies x time_steps (channels x y_axis x x_axis)
# '''
# self._convert_format(audio, expected_dims=2)
# audio = self._preemphasis(audio)
# return self.stft(audio)
# def magnitude_phase(self, audio):
# '''
# audio: wave signal as th.Tensor
# return: tuple containing two th.Tensor of size channels x frequencies x time_steps for magnitude and phase spectrum
# '''
# stft = self.complex_spectrogram(audio)
# return self._magphase(stft)
# def mag_spectrogram(self, audio):
# '''
# audio: wave signal as th.Tensor
# return: magnitude spectrum as th.Tensor of size channels x frequencies x time_steps for magnitude and phase spectrum
# '''
# return self.magnitude_phase(audio)[0]
# def power_spectrogram(self, audio):
# '''
# audio: wave signal as th.Tensor
# return: power spectrum as th.Tensor of size channels x frequencies x time_steps for magnitude and phase spectrum
# '''
# return th.pow(self.mag_spectrogram(audio), 2.0)
# def phase_spectrogram(self, audio):
# '''
# audio: wave signal as th.Tensor
# return: phase spectrum as th.Tensor of size channels x frequencies x time_steps for magnitude and phase spectrum
# '''
# return self.magnitude_phase(audio)[1]
# def mel_spectrogram(self, audio, n_mels):
# '''
# audio: wave signal as th.Tensor
# n_mels: number of bins used for mel scale warping
# return: mel spectrogram as th.Tensor of size channels x n_mels x time_steps for magnitude and phase spectrum
# '''
# spec = self.power_spectrogram(audio)
# mel_warping = ta.transforms.MelScale(n_mels, self.sample_rate)
# return mel_warping(spec)
# def complex_spec2wav(self, complex_spec, length):
# '''
# inverse stft
# complex_spec: complex spectrum as th.Tensor of size channels x frequencies x time_steps x 2 (real part/imaginary part)
# length: length of the audio to be reconstructed (in frames)
# '''
# complex_spec = self._convert_format(complex_spec, expected_dims=4)
# hann = th.hann_window(self.win_length)
# hann = hann.cuda() if complex_spec.is_cuda else hann
# wav = ta.functional.istft(complex_spec, n_fft=self.fft_bins, hop_length=self.hop_length, win_length=self.win_length, window=hann, length=length, center=not self.causal)
# wav = self._revert_preemphasis(wav)
# return wav
# def magphase2wav(self, mag_spec, phase_spec, length):
# '''
# reconstruction of wav signal from magnitude and phase spectrum
# mag_spec: magnitude spectrum as th.Tensor of size channels x frequencies x time_steps
# phase_spec: phase spectrum as th.Tensor of size channels x frequencies x time_steps
# length: length of the audio to be reconstructed (in frames)
# '''
# mag_spec = self._convert_format(mag_spec, expected_dims=3)
# phase_spec = self._convert_format(phase_spec, expected_dims=3)
# complex_spec = th.stack([mag_spec * th.cos(phase_spec), mag_spec * th.sin(phase_spec)], dim=-1)
# return self.complex_spec2wav(complex_spec, length)
| EXA-1-master | exa/models/AudioGPT/mono2binaural/src/utils.py |
| EXA-1-master | exa/models/AudioGPT/audio_detection/__init__.py |
| EXA-1-master | exa/models/AudioGPT/audio_detection/audio_infer/__init__.py |
import numpy as np
import csv
sample_rate = 32000
clip_samples = sample_rate * 10 # Audio clips are 10-second
# Load label
with open('./audio_detection/audio_infer/metadata/class_labels_indices.csv', 'r') as f:
reader = csv.reader(f, delimiter=',')
lines = list(reader)
labels = []
ids = [] # Each label has a unique id such as "/m/068hy"
for i1 in range(1, len(lines)):
id = lines[i1][1]
label = lines[i1][2]
ids.append(id)
labels.append(label)
classes_num = len(labels)
lb_to_ix = {label : i for i, label in enumerate(labels)}
ix_to_lb = {i : label for i, label in enumerate(labels)}
id_to_ix = {id : i for i, id in enumerate(ids)}
ix_to_id = {i : id for i, id in enumerate(ids)}
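# A brief, hedged illustration (not part of the original file) of how the four
# look-up tables above are typically used: they convert between label strings,
# AudioSet ids such as "/m/068hy", and integer class indices. The concrete
# label below is only an example and assumes it appears in the csv.
#
#   ix = lb_to_ix['Speech']      # label string -> class index
#   lb = ix_to_lb[ix]            # class index  -> label string
#   ix2 = id_to_ix[ids[ix]]      # AudioSet id  -> class index (ix2 == ix)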
full_samples_per_class = np.array([
937432, 16344, 7822, 10271, 2043, 14420, 733, 1511,
1258, 424, 1751, 704, 369, 590, 1063, 1375,
5026, 743, 853, 1648, 714, 1497, 1251, 2139,
1093, 133, 224, 39469, 6423, 407, 1559, 4546,
6826, 7464, 2468, 549, 4063, 334, 587, 238,
1766, 691, 114, 2153, 236, 209, 421, 740,
269, 959, 137, 4192, 485, 1515, 655, 274,
69, 157, 1128, 807, 1022, 346, 98, 680,
890, 352, 4169, 2061, 1753, 9883, 1339, 708,
37857, 18504, 12864, 2475, 2182, 757, 3624, 677,
1683, 3583, 444, 1780, 2364, 409, 4060, 3097,
3143, 502, 723, 600, 230, 852, 1498, 1865,
1879, 2429, 5498, 5430, 2139, 1761, 1051, 831,
2401, 2258, 1672, 1711, 987, 646, 794, 25061,
5792, 4256, 96, 8126, 2740, 752, 513, 554,
106, 254, 1592, 556, 331, 615, 2841, 737,
265, 1349, 358, 1731, 1115, 295, 1070, 972,
174, 937780, 112337, 42509, 49200, 11415, 6092, 13851,
2665, 1678, 13344, 2329, 1415, 2244, 1099, 5024,
9872, 10948, 4409, 2732, 1211, 1289, 4807, 5136,
1867, 16134, 14519, 3086, 19261, 6499, 4273, 2790,
8820, 1228, 1575, 4420, 3685, 2019, 664, 324,
513, 411, 436, 2997, 5162, 3806, 1389, 899,
8088, 7004, 1105, 3633, 2621, 9753, 1082, 26854,
3415, 4991, 2129, 5546, 4489, 2850, 1977, 1908,
1719, 1106, 1049, 152, 136, 802, 488, 592,
2081, 2712, 1665, 1128, 250, 544, 789, 2715,
8063, 7056, 2267, 8034, 6092, 3815, 1833, 3277,
8813, 2111, 4662, 2678, 2954, 5227, 1472, 2591,
3714, 1974, 1795, 4680, 3751, 6585, 2109, 36617,
6083, 16264, 17351, 3449, 5034, 3931, 2599, 4134,
3892, 2334, 2211, 4516, 2766, 2862, 3422, 1788,
2544, 2403, 2892, 4042, 3460, 1516, 1972, 1563,
1579, 2776, 1647, 4535, 3921, 1261, 6074, 2922,
3068, 1948, 4407, 712, 1294, 1019, 1572, 3764,
5218, 975, 1539, 6376, 1606, 6091, 1138, 1169,
7925, 3136, 1108, 2677, 2680, 1383, 3144, 2653,
1986, 1800, 1308, 1344, 122231, 12977, 2552, 2678,
7824, 768, 8587, 39503, 3474, 661, 430, 193,
1405, 1442, 3588, 6280, 10515, 785, 710, 305,
206, 4990, 5329, 3398, 1771, 3022, 6907, 1523,
8588, 12203, 666, 2113, 7916, 434, 1636, 5185,
1062, 664, 952, 3490, 2811, 2749, 2848, 15555,
363, 117, 1494, 1647, 5886, 4021, 633, 1013,
5951, 11343, 2324, 243, 372, 943, 734, 242,
3161, 122, 127, 201, 1654, 768, 134, 1467,
642, 1148, 2156, 1368, 1176, 302, 1909, 61,
223, 1812, 287, 422, 311, 228, 748, 230,
1876, 539, 1814, 737, 689, 1140, 591, 943,
353, 289, 198, 490, 7938, 1841, 850, 457,
814, 146, 551, 728, 1627, 620, 648, 1621,
2731, 535, 88, 1736, 736, 328, 293, 3170,
344, 384, 7640, 433, 215, 715, 626, 128,
3059, 1833, 2069, 3732, 1640, 1508, 836, 567,
2837, 1151, 2068, 695, 1494, 3173, 364, 88,
188, 740, 677, 273, 1533, 821, 1091, 293,
647, 318, 1202, 328, 532, 2847, 526, 721,
370, 258, 956, 1269, 1641, 339, 1322, 4485,
286, 1874, 277, 757, 1393, 1330, 380, 146,
377, 394, 318, 339, 1477, 1886, 101, 1435,
284, 1425, 686, 621, 221, 117, 87, 1340,
201, 1243, 1222, 651, 1899, 421, 712, 1016,
1279, 124, 351, 258, 7043, 368, 666, 162,
7664, 137, 70159, 26179, 6321, 32236, 33320, 771,
1169, 269, 1103, 444, 364, 2710, 121, 751,
1609, 855, 1141, 2287, 1940, 3943, 289])
| EXA-1-master | exa/models/AudioGPT/audio_detection/audio_infer/utils/config.py |
import sys
class ExceptionHook:
instance = None
def __call__(self, *args, **kwargs):
if self.instance is None:
from IPython.core import ultratb
self.instance = ultratb.FormattedTB(mode='Plain',
color_scheme='Linux', call_pdb=1)
return self.instance(*args, **kwargs)
sys.excepthook = ExceptionHook()
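# Editorial note (hedged): importing this module has a side effect -- it replaces
# sys.excepthook, so any uncaught exception is rendered with IPython's
# FormattedTB and, because call_pdb=1, drops into a pdb post-mortem session.
# A minimal sketch of the intended use, assuming the module is importable as
# `crash` (the module name here is an assumption taken from the file name):
#
#   import crash  # noqa: F401  -- side-effect import installs the debug hook
#   raise RuntimeError('boom')  # now opens an interactive post-mortem instead of a plain traceback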
| EXA-1-master | exa/models/AudioGPT/audio_detection/audio_infer/utils/crash.py |
import argparse
import csv
import os
from utilities import create_folder
def dcase2017task4(args):
"""Create a black list: a list of audio ids that will be skipped during
training.
"""
# Augments & parameters
workspace = args.workspace
# Black list from DCASE 2017 Task 4
test_weak_csv = 'metadata/black_list/groundtruth_weak_label_testing_set.csv'
evaluation_weak_csv = 'metadata/black_list/groundtruth_weak_label_evaluation_set.csv'
black_list_csv = os.path.join(workspace, 'black_list', 'dcase2017task4.csv')
create_folder(os.path.dirname(black_list_csv))
def get_id_sets(csv_path):
with open(csv_path, 'r') as fr:
reader = csv.reader(fr, delimiter='\t')
lines = list(reader)
ids_set = []
for line in lines:
"""line: ['-5QrBL6MzLg_60.000_70.000.wav', '60.000', '70.000', 'Train horn']"""
ids_set.append(line[0][0 : 11])
ids_set = list(set(ids_set))
return ids_set
test_ids_set = get_id_sets(test_weak_csv)
evaluation_ids_set = get_id_sets(evaluation_weak_csv)
full_ids_set = test_ids_set + evaluation_ids_set
# Write black list
fw = open(black_list_csv, 'w')
for id in full_ids_set:
fw.write('{}\n'.format(id))
print('Write black list to {}'.format(black_list_csv))
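# Hedged usage sketch (not taken from the original docs; the workspace path is a
# placeholder). 'dcase2017task4' is the only subcommand defined below:
#
#   python create_black_list.py dcase2017task4 --workspace=/path/to/workspace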
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
subparsers = parser.add_subparsers(dest='mode')
parser_dcase2017task4 = subparsers.add_parser('dcase2017task4')
parser_dcase2017task4.add_argument('--workspace', type=str, required=True)
args = parser.parse_args()
if args.mode == 'dcase2017task4':
dcase2017task4(args)
else:
raise Exception('Error argument!') | EXA-1-master | exa/models/AudioGPT/audio_detection/audio_infer/utils/create_black_list.py |
import numpy as np
import argparse
import csv
import os
import glob
import datetime
import time
import logging
import h5py
import librosa
from utilities import (create_folder, get_filename, create_logging,
float32_to_int16, pad_or_truncate, read_metadata)
import config
def split_unbalanced_csv_to_partial_csvs(args):
"""Split the unbalanced csv into partial csvs. Each partial csv contains up to 50000 ids.
"""
unbalanced_csv_path = args.unbalanced_csv
unbalanced_partial_csvs_dir = args.unbalanced_partial_csvs_dir
create_folder(unbalanced_partial_csvs_dir)
with open(unbalanced_csv_path, 'r') as f:
lines = f.readlines()
lines = lines[3:] # Remove head info
audios_num_per_file = 50000
files_num = int(np.ceil(len(lines) / float(audios_num_per_file)))
for r in range(files_num):
lines_per_file = lines[r * audios_num_per_file :
(r + 1) * audios_num_per_file]
out_csv_path = os.path.join(unbalanced_partial_csvs_dir,
'unbalanced_train_segments_part{:02d}.csv'.format(r))
with open(out_csv_path, 'w') as f:
f.write('empty\n')
f.write('empty\n')
f.write('empty\n')
for line in lines_per_file:
f.write(line)
print('Write out csv to {}'.format(out_csv_path))
def download_wavs(args):
"""Download videos and extract audio in wav format.
"""
# Paths
csv_path = args.csv_path
audios_dir = args.audios_dir
mini_data = args.mini_data
if mini_data:
logs_dir = '_logs/download_dataset_minidata/{}'.format(get_filename(csv_path))
else:
logs_dir = '_logs/download_dataset/{}'.format(get_filename(csv_path))
create_folder(audios_dir)
create_folder(logs_dir)
create_logging(logs_dir, filemode='w')
logging.info('Download log is saved to {}'.format(logs_dir))
# Read csv
with open(csv_path, 'r') as f:
lines = f.readlines()
lines = lines[3:] # Remove csv head info
if mini_data:
lines = lines[0 : 10] # Download partial data for debug
download_time = time.time()
# Download
for (n, line) in enumerate(lines):
items = line.split(', ')
audio_id = items[0]
start_time = float(items[1])
end_time = float(items[2])
duration = end_time - start_time
logging.info('{} {} start_time: {:.1f}, end_time: {:.1f}'.format(
n, audio_id, start_time, end_time))
# Download full video of whatever format
video_name = os.path.join(audios_dir, '_Y{}.%(ext)s'.format(audio_id))
os.system("youtube-dl --quiet -o '{}' -x https://www.youtube.com/watch?v={}"\
.format(video_name, audio_id))
video_paths = glob.glob(os.path.join(audios_dir, '_Y' + audio_id + '.*'))
# If download successful
if len(video_paths) > 0:
video_path = video_paths[0] # Choose one video
# Add 'Y' to the head because some video ids are started with '-'
# which will cause problem
audio_path = os.path.join(audios_dir, 'Y' + audio_id + '.wav')
# Extract audio in wav format
os.system("ffmpeg -loglevel panic -i {} -ac 1 -ar 32000 -ss {} -t 00:00:{} {} "\
.format(video_path,
str(datetime.timedelta(seconds=start_time)), duration,
audio_path))
# Remove downloaded video
os.system("rm {}".format(video_path))
logging.info("Download and convert to {}".format(audio_path))
logging.info('Download finished! Time spent: {:.3f} s'.format(
time.time() - download_time))
logging.info('Logs can be viewed in {}'.format(logs_dir))
def pack_waveforms_to_hdf5(args):
"""Pack the waveforms and targets of several audio clips into a single hdf5 file.
This can speed up loading and training.
"""
# Arguments & parameters
audios_dir = args.audios_dir
csv_path = args.csv_path
waveforms_hdf5_path = args.waveforms_hdf5_path
mini_data = args.mini_data
clip_samples = config.clip_samples
classes_num = config.classes_num
sample_rate = config.sample_rate
id_to_ix = config.id_to_ix
# Paths
if mini_data:
prefix = 'mini_'
waveforms_hdf5_path += '.mini'
else:
prefix = ''
create_folder(os.path.dirname(waveforms_hdf5_path))
logs_dir = '_logs/pack_waveforms_to_hdf5/{}{}'.format(prefix, get_filename(csv_path))
create_folder(logs_dir)
create_logging(logs_dir, filemode='w')
logging.info('Write logs to {}'.format(logs_dir))
# Read csv file
meta_dict = read_metadata(csv_path, classes_num, id_to_ix)
if mini_data:
mini_num = 10
for key in meta_dict.keys():
meta_dict[key] = meta_dict[key][0 : mini_num]
audios_num = len(meta_dict['audio_name'])
# Pack waveform to hdf5
total_time = time.time()
with h5py.File(waveforms_hdf5_path, 'w') as hf:
hf.create_dataset('audio_name', shape=((audios_num,)), dtype='S20')
hf.create_dataset('waveform', shape=((audios_num, clip_samples)), dtype=np.int16)
hf.create_dataset('target', shape=((audios_num, classes_num)), dtype=bool)  # np.bool was removed from NumPy; the builtin bool is equivalent
hf.attrs.create('sample_rate', data=sample_rate, dtype=np.int32)
# Pack waveform & target of several audio clips to a single hdf5 file
for n in range(audios_num):
audio_path = os.path.join(audios_dir, meta_dict['audio_name'][n])
if os.path.isfile(audio_path):
logging.info('{} {}'.format(n, audio_path))
(audio, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)
audio = pad_or_truncate(audio, clip_samples)
hf['audio_name'][n] = meta_dict['audio_name'][n].encode()
hf['waveform'][n] = float32_to_int16(audio)
hf['target'][n] = meta_dict['target'][n]
else:
logging.info('{} File does not exist! {}'.format(n, audio_path))
logging.info('Write to {}'.format(waveforms_hdf5_path))
logging.info('Pack hdf5 time: {:.3f}'.format(time.time() - total_time))
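# Hedged usage sketches (paths are placeholders, not from the original docs);
# the subcommands and flags match the argparse definitions below:
#
#   python dataset.py split_unbalanced_csv_to_partial_csvs \
#       --unbalanced_csv=/path/unbalanced_train_segments.csv \
#       --unbalanced_partial_csvs_dir=/path/partial_csvs
#
#   python dataset.py download_wavs --csv_path=/path/segments.csv --audios_dir=/path/audios
#
#   python dataset.py pack_waveforms_to_hdf5 --csv_path=/path/segments.csv \
#       --audios_dir=/path/audios --waveforms_hdf5_path=/path/waveforms.h5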
if __name__ == '__main__':
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='mode')
parser_split = subparsers.add_parser('split_unbalanced_csv_to_partial_csvs')
parser_split.add_argument('--unbalanced_csv', type=str, required=True, help='Path of unbalanced_csv file to read.')
parser_split.add_argument('--unbalanced_partial_csvs_dir', type=str, required=True, help='Directory to save out split unbalanced partial csv.')
parser_download_wavs = subparsers.add_parser('download_wavs')
parser_download_wavs.add_argument('--csv_path', type=str, required=True, help='Path of csv file containing audio info to be downloaded.')
parser_download_wavs.add_argument('--audios_dir', type=str, required=True, help='Directory to save out downloaded audio.')
parser_download_wavs.add_argument('--mini_data', action='store_true', default=False, help='Set true to only download 10 audios for debugging.')
parser_pack_wavs = subparsers.add_parser('pack_waveforms_to_hdf5')
parser_pack_wavs.add_argument('--csv_path', type=str, required=True, help='Path of csv file containing audio info to be downloaded.')
parser_pack_wavs.add_argument('--audios_dir', type=str, required=True, help='Directory to save out downloaded audio.')
parser_pack_wavs.add_argument('--waveforms_hdf5_path', type=str, required=True, help='Path to save out packed hdf5.')
parser_pack_wavs.add_argument('--mini_data', action='store_true', default=False, help='Set true to only download 10 audios for debugging.')
args = parser.parse_args()
if args.mode == 'split_unbalanced_csv_to_partial_csvs':
split_unbalanced_csv_to_partial_csvs(args)
elif args.mode == 'download_wavs':
download_wavs(args)
elif args.mode == 'pack_waveforms_to_hdf5':
pack_waveforms_to_hdf5(args)
else:
raise Exception('Incorrect arguments!') | EXA-1-master | exa/models/AudioGPT/audio_detection/audio_infer/utils/dataset.py |
import numpy as np
import argparse
import csv
import os
import glob
import datetime
import time
import logging
import h5py
import librosa
from utilities import create_folder, get_sub_filepaths
import config
def create_indexes(args):
"""Create indexes for a dataloader to read during training. When users have
a new task and their own data, they need to create similar indexes. The
indexes contain meta information on where to find the data for training.
"""
# Arguments & parameters
waveforms_hdf5_path = args.waveforms_hdf5_path
indexes_hdf5_path = args.indexes_hdf5_path
# Paths
create_folder(os.path.dirname(indexes_hdf5_path))
with h5py.File(waveforms_hdf5_path, 'r') as hr:
with h5py.File(indexes_hdf5_path, 'w') as hw:
audios_num = len(hr['audio_name'])
hw.create_dataset('audio_name', data=hr['audio_name'][:], dtype='S20')
hw.create_dataset('target', data=hr['target'][:], dtype=bool)
hw.create_dataset('hdf5_path', data=[waveforms_hdf5_path.encode()] * audios_num, dtype='S200')
hw.create_dataset('index_in_hdf5', data=np.arange(audios_num), dtype=np.int32)
print('Write to {}'.format(indexes_hdf5_path))
def combine_full_indexes(args):
"""Combine all balanced and unbalanced index hdf5s into a single hdf5. This
combined index hdf5 is used for training with the full data (~20k balanced
audio clips + ~1.9m unbalanced audio clips).
"""
# Arguments & parameters
indexes_hdf5s_dir = args.indexes_hdf5s_dir
full_indexes_hdf5_path = args.full_indexes_hdf5_path
classes_num = config.classes_num
# Paths
paths = get_sub_filepaths(indexes_hdf5s_dir)
paths = [path for path in paths if (
'train' in path and 'full_train' not in path and 'mini' not in path)]
print('Total {} hdf5 to combine.'.format(len(paths)))
with h5py.File(full_indexes_hdf5_path, 'w') as full_hf:
full_hf.create_dataset(
name='audio_name',
shape=(0,),
maxshape=(None,),
dtype='S20')
full_hf.create_dataset(
name='target',
shape=(0, classes_num),
maxshape=(None, classes_num),
dtype=bool)
full_hf.create_dataset(
name='hdf5_path',
shape=(0,),
maxshape=(None,),
dtype='S200')
full_hf.create_dataset(
name='index_in_hdf5',
shape=(0,),
maxshape=(None,),
dtype=np.int32)
for path in paths:
with h5py.File(path, 'r') as part_hf:
print(path)
n = len(full_hf['audio_name'][:])
new_n = n + len(part_hf['audio_name'][:])
full_hf['audio_name'].resize((new_n,))
full_hf['audio_name'][n : new_n] = part_hf['audio_name'][:]
full_hf['target'].resize((new_n, classes_num))
full_hf['target'][n : new_n] = part_hf['target'][:]
full_hf['hdf5_path'].resize((new_n,))
full_hf['hdf5_path'][n : new_n] = part_hf['hdf5_path'][:]
full_hf['index_in_hdf5'].resize((new_n,))
full_hf['index_in_hdf5'][n : new_n] = part_hf['index_in_hdf5'][:]
print('Write combined full hdf5 to {}'.format(full_indexes_hdf5_path))
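# Hedged usage sketches (paths are placeholders; the subcommands and flags match
# the argparse definitions below):
#
#   python create_indexes.py create_indexes \
#       --waveforms_hdf5_path=/path/waveforms.h5 --indexes_hdf5_path=/path/indexes.h5
#
#   python create_indexes.py combine_full_indexes \
#       --indexes_hdf5s_dir=/path/indexes --full_indexes_hdf5_path=/path/full_train_indexes.h5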
if __name__ == '__main__':
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='mode')
parser_create_indexes = subparsers.add_parser('create_indexes')
parser_create_indexes.add_argument('--waveforms_hdf5_path', type=str, required=True, help='Path of packed waveforms hdf5.')
parser_create_indexes.add_argument('--indexes_hdf5_path', type=str, required=True, help='Path to write out indexes hdf5.')
parser_combine_full_indexes = subparsers.add_parser('combine_full_indexes')
parser_combine_full_indexes.add_argument('--indexes_hdf5s_dir', type=str, required=True, help='Directory containing indexes hdf5s to be combined.')
parser_combine_full_indexes.add_argument('--full_indexes_hdf5_path', type=str, required=True, help='Path to write out full indexes hdf5 file.')
args = parser.parse_args()
if args.mode == 'create_indexes':
create_indexes(args)
elif args.mode == 'combine_full_indexes':
combine_full_indexes(args)
else:
raise Exception('Incorrect arguments!') | EXA-1-master | exa/models/AudioGPT/audio_detection/audio_infer/utils/create_indexes.py |
import os
import sys
import numpy as np
import argparse
import h5py
import time
import _pickle as cPickle
import _pickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def _load_metrics0(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
workspace0 = '/mnt/cephfs_new_wj/speechsv/qiuqiang.kong/workspaces/pub_audioset_tagging_cnn_transfer'
statistics_path = os.path.join(workspace0, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
bal_map = np.array([statistics['average_precision'] for statistics in statistics_dict['bal']]) # (N, classes_num)
bal_map = np.mean(bal_map, axis=-1)
test_map = np.array([statistics['average_precision'] for statistics in statistics_dict['test']]) # (N, classes_num)
test_map = np.mean(test_map, axis=-1)
legend = '{}, {}, bal={}, aug={}, bs={}'.format(data_type, model_type, balanced, augmentation, batch_size)
# return {'bal_map': bal_map, 'test_map': test_map, 'legend': legend}
return bal_map, test_map, legend
def _load_metrics0_classwise(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
workspace0 = '/mnt/cephfs_new_wj/speechsv/qiuqiang.kong/workspaces/pub_audioset_tagging_cnn_transfer'
statistics_path = os.path.join(workspace0, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
return statistics_dict['test'][300]['average_precision']
def _load_metrics0_classwise2(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
workspace0 = '/mnt/cephfs_new_wj/speechsv/qiuqiang.kong/workspaces/pub_audioset_tagging_cnn_transfer'
statistics_path = os.path.join(workspace0, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
k = 270
mAP = np.mean(statistics_dict['test'][k]['average_precision'])
mAUC = np.mean(statistics_dict['test'][k]['auc'])
dprime = d_prime(mAUC)
return mAP, mAUC, dprime
def _load_metrics_classwise(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
workspace = '/mnt/cephfs_new_wj/speechsv/kongqiuqiang/workspaces/cvssp/pub_audioset_tagging_cnn'
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
k = 300
mAP = np.mean(statistics_dict['test'][k]['average_precision'])
mAUC = np.mean(statistics_dict['test'][k]['auc'])
dprime = d_prime(mAUC)
return mAP, mAUC, dprime
def plot(args):
# Arguments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
select = args.select
classes_num = config.classes_num
max_plot_iteration = 1000000
iterations = np.arange(0, max_plot_iteration, 2000)
class_labels_indices_path = os.path.join(dataset_dir, 'metadata',
'class_labels_indices.csv')
save_out_path = 'results/{}.pdf'.format(select)
create_folder(os.path.dirname(save_out_path))
# Read labels
labels = config.labels
# Plot
fig, ax = plt.subplots(1, 1, figsize=(15, 8))
lines = []
def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
bal_map = np.array([statistics['average_precision'] for statistics in statistics_dict['bal']]) # (N, classes_num)
bal_map = np.mean(bal_map, axis=-1)
test_map = np.array([statistics['average_precision'] for statistics in statistics_dict['test']]) # (N, classes_num)
test_map = np.mean(test_map, axis=-1)
legend = '{}, {}, bal={}, aug={}, bs={}'.format(data_type, model_type, balanced, augmentation, batch_size)
# return {'bal_map': bal_map, 'test_map': test_map, 'legend': legend}
return bal_map, test_map, legend
bal_alpha = 0.3
test_alpha = 1.0
lines = []
if select == '1_cnn13':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_no_dropout', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_no_dropout', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_no_specaug', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_no_specaug', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'none', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_no_mixup', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_mixup_in_wave', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='c', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_mixup_in_wave', color='c', alpha=test_alpha)
lines.append(line)
elif select == '1_pooling':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_gwrp', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_gmpgapgwrp', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_att', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_gmpgapatt', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_resnet':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet18', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='ResNet18', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='resnet34', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet50', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='c', alpha=bal_alpha)
line, = ax.plot(test_map, label='resnet50', color='c', alpha=test_alpha)
lines.append(line)
elif select == '1_densenet':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'DenseNet121', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='densenet121', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'DenseNet201', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='densenet201', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_cnn9':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn5', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn5', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn9', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn9', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_hop':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
500, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_hop500', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
640, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_hop640', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
1000, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_hop1000', color='k', alpha=test_alpha)
lines.append(line)
elif select == '1_emb':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_emb32', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_emb128', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb512', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_emb512', color='k', alpha=test_alpha)
lines.append(line)
elif select == '1_mobilenet':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV1', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='mobilenetv1', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV2', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='mobilenetv2', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_waveform':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_LeeNet', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_LeeNet', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_LeeNet18', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_LeeNet18', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_DaiNet', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_DaiNet', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='c', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_ResNet34', color='c', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet50', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_ResNet50', color='m', alpha=test_alpha)
lines.append(line)
elif select == '1_waveform_cnn2d':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_SpAndWav', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_SpAndWav', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_WavCnn2d', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_WavCnn2d', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_decision_level':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelMax', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_DecisionLevelMax', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelAvg', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_DecisionLevelAvg', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelAtt', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_DecisionLevelAtt', color='k', alpha=test_alpha)
lines.append(line)
elif select == '1_transformer':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_Transformer1', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_Transformer1', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_Transformer3', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_Transformer3', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_Transformer6', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_Transformer6', color='k', alpha=test_alpha)
lines.append(line)
elif select == '1_aug':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,mixup', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,none,none', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,none', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup_from_0_epoch', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,mixup_from_0_epoch', color='m', alpha=test_alpha)
lines.append(line)
elif select == '1_bal_train_aug':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,mixup', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,none,none', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,none', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup_from_0_epoch', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,mixup_from_0_epoch', color='m', alpha=test_alpha)
lines.append(line)
elif select == '1_sr':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_16k', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_16k', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_8k', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_8k', color='b', alpha=test_alpha)
lines.append(line)
elif select == '1_time_domain':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_mixup_time_domain', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_time_domain', color='b', alpha=test_alpha)
lines.append(line)
elif select == '1_partial_full':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.9_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,partial_0.9', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.8_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,partial_0.8', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.7_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,partial_0.7', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.5_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,partial_0.5', color='m', alpha=test_alpha)
lines.append(line)
elif select == '1_window':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 2048,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_win2048', color='b', alpha=test_alpha)
lines.append(line)
elif select == '1_melbins':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 32, 50, 14000, 'full_train', 'Cnn14_mel32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_mel32', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 128, 50, 14000, 'full_train', 'Cnn14_mel128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_mel128', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_alternate':
max_plot_iteration = 2000000
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'alternate', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_alternate', color='b', alpha=test_alpha)
lines.append(line)
elif select == '2_all':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn9', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn9', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn5', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn5', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV1', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='MobileNetV1', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_ResNet34', color='grey', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='ResNet34', color='grey', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_WavCnn2d', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_WavCnn2d', color='m', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_SpAndWav', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_SpAndWav', color='orange', alpha=test_alpha)
lines.append(line)
elif select == '2_emb':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_emb32', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
        line, = ax.plot(test_map, label='Cnn13_emb128', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb512', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
        line, = ax.plot(test_map, label='Cnn13_emb512', color='g', alpha=test_alpha)
lines.append(line)
elif select == '2_aug':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_no_specaug', 'clip_bce', 'none', 'none', 32)
line, = ax.plot(bal_map, color='c', alpha=bal_alpha)
        line, = ax.plot(test_map, label='cnn13_no_specaug,none,none', color='c', alpha=test_alpha)
lines.append(line)
ax.set_ylim(0, 1.)
ax.set_xlim(0, len(iterations))
ax.xaxis.set_ticks(np.arange(0, len(iterations), 25))
ax.xaxis.set_ticklabels(np.arange(0, max_plot_iteration, 50000))
ax.yaxis.set_ticks(np.arange(0, 1.01, 0.05))
ax.yaxis.set_ticklabels(np.around(np.arange(0, 1.01, 0.05), decimals=2))
ax.grid(color='b', linestyle='solid', linewidth=0.3)
plt.legend(handles=lines, loc=2)
# box = ax.get_position()
# ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# ax.legend(handles=lines, bbox_to_anchor=(1.0, 1.0))
plt.savefig(save_out_path)
print('Save figure to {}'.format(save_out_path))
def plot_for_paper(args):
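    """Plot mAP against training iterations for the comparison chosen by --select
    ('2_all', '2_emb', '2_bal', '2_sr', '2_partial' or '2_melbins') and save the
    single-panel figure to results/paper_{select}.pdf."""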
# Arguments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
select = args.select
classes_num = config.classes_num
max_plot_iteration = 1000000
iterations = np.arange(0, max_plot_iteration, 2000)
class_labels_indices_path = os.path.join(dataset_dir, 'metadata',
'class_labels_indices.csv')
save_out_path = 'results/paper_{}.pdf'.format(select)
create_folder(os.path.dirname(save_out_path))
# Read labels
labels = config.labels
# Plot
fig, ax = plt.subplots(1, 1, figsize=(6, 4))
lines = []
def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
bal_map = np.array([statistics['average_precision'] for statistics in statistics_dict['bal']]) # (N, classes_num)
bal_map = np.mean(bal_map, axis=-1)
test_map = np.array([statistics['average_precision'] for statistics in statistics_dict['test']]) # (N, classes_num)
test_map = np.mean(test_map, axis=-1)
legend = '{}, {}, bal={}, aug={}, bs={}'.format(data_type, model_type, balanced, augmentation, batch_size)
# return {'bal_map': bal_map, 'test_map': test_map, 'legend': legend}
return bal_map, test_map, legend
bal_alpha = 0.3
test_alpha = 1.0
lines = []
linewidth = 1.
max_plot_iteration = 540000
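    # NOTE: the branches below also call _load_metrics0, which reads statistics
    # from a second (hard-coded) workspace. It is not defined in this function;
    # it is assumed to be available as defined in plot_for_paper2 further down.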
if select == '2_all':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn9', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
# line, = ax.plot(test_map, label='cnn9', color='r', alpha=test_alpha)
# lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn5', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
# line, = ax.plot(test_map, label='cnn5', color='g', alpha=test_alpha)
# lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV1', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='MobileNetV1', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
# line, = ax.plot(test_map, label='Cnn1d_ResNet34', color='grey', alpha=test_alpha)
# lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn13_WavCnn2d', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
# line, = ax.plot(test_map, label='Wavegram-CNN', color='g', alpha=test_alpha, linewidth=linewidth)
# lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_SpAndWav', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='Wavegram-Logmel-CNN', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
elif select == '2_emb':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,emb=2048', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,emb=32', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,emb=128', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn13_emb512', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
# line, = ax.plot(test_map, label='Cnn13_512', color='g', alpha=test_alpha)
# lines.append(line)
elif select == '2_bal':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,bal,mixup (1.9m)', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_mixup_time_domain', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='y', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,bal,mixup-wav (1.9m)', color='y', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,no-bal,no-mixup (1.9m)', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,bal,no-mixup (1.9m)', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,bal,no-mixup (20k)', color='k', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,bal,mixup (20k)', color='m', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
elif select == '2_sr':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,32kHz', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_16k', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,16kHz', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_8k', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,8kHz', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
elif select == '2_partial':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14 (100% full)', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# (bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
# 320, 64, 50, 14000, 'partial_0.9_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
# line, = ax.plot(test_map, label='cnn14,partial_0.9', color='b', alpha=test_alpha, linewidth=linewidth)
# lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.8_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14 (80% full)', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# (bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
# 320, 64, 50, 14000, 'partial_0.7_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='k', alpha=bal_alpha, linewidth=linewidth)
# line, = ax.plot(test_map, label='cnn14,partial_0.7', color='k', alpha=test_alpha, linewidth=linewidth)
# lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.5_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
        line, = ax.plot(test_map, label='CNN14 (50% full)', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
elif select == '2_melbins':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,64-melbins', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 32, 50, 14000, 'full_train', 'Cnn14_mel32', 'clip_bce', 'balanced', 'mixup', 32)
        line, = ax.plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,32-melbins', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 128, 50, 14000, 'full_train', 'Cnn14_mel128', 'clip_bce', 'balanced', 'mixup', 32)
        line, = ax.plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax.plot(test_map, label='CNN14,128-melbins', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax.set_ylim(0, 0.8)
ax.set_xlim(0, len(iterations))
ax.set_xlabel('Iterations')
ax.set_ylabel('mAP')
ax.xaxis.set_ticks(np.arange(0, len(iterations), 50))
# ax.xaxis.set_ticklabels(np.arange(0, max_plot_iteration, 50000))
ax.xaxis.set_ticklabels(['0', '100k', '200k', '300k', '400k', '500k'])
ax.yaxis.set_ticks(np.arange(0, 0.81, 0.05))
ax.yaxis.set_ticklabels(['0', '', '0.1', '', '0.2', '', '0.3', '', '0.4', '', '0.5', '', '0.6', '', '0.7', '', '0.8'])
# ax.yaxis.set_ticklabels(np.around(np.arange(0, 0.81, 0.05), decimals=2))
ax.yaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
ax.xaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
plt.legend(handles=lines, loc=2)
    plt.tight_layout(pad=0, h_pad=0, w_pad=0)
# box = ax.get_position()
# ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# ax.legend(handles=lines, bbox_to_anchor=(1.0, 1.0))
plt.savefig(save_out_path)
print('Save figure to {}'.format(save_out_path))
def plot_for_paper2(args):
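    """Plot the 2 x 3 panel figure comparing (a) architectures, (b) training data and
    augmentation, (c) embedding size, (d) amount of training data, (e) sampling rate and
    (f) number of mel bins, and save it to results/paper2.pdf."""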
# Arguments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
classes_num = config.classes_num
max_plot_iteration = 1000000
iterations = np.arange(0, max_plot_iteration, 2000)
class_labels_indices_path = os.path.join(dataset_dir, 'metadata',
'class_labels_indices.csv')
save_out_path = 'results/paper2.pdf'
create_folder(os.path.dirname(save_out_path))
# Read labels
labels = config.labels
# Plot
fig, ax = plt.subplots(2, 3, figsize=(14, 7))
lines = []
def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
bal_map = np.array([statistics['average_precision'] for statistics in statistics_dict['bal']]) # (N, classes_num)
bal_map = np.mean(bal_map, axis=-1)
test_map = np.array([statistics['average_precision'] for statistics in statistics_dict['test']]) # (N, classes_num)
test_map = np.mean(test_map, axis=-1)
legend = '{}, {}, bal={}, aug={}, bs={}'.format(data_type, model_type, balanced, augmentation, batch_size)
# return {'bal_map': bal_map, 'test_map': test_map, 'legend': legend}
return bal_map, test_map, legend
def _load_metrics0(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
workspace0 = '/mnt/cephfs_new_wj/speechsv/qiuqiang.kong/workspaces/pub_audioset_tagging_cnn_transfer'
statistics_path = os.path.join(workspace0, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
bal_map = np.array([statistics['average_precision'] for statistics in statistics_dict['bal']]) # (N, classes_num)
bal_map = np.mean(bal_map, axis=-1)
test_map = np.array([statistics['average_precision'] for statistics in statistics_dict['test']]) # (N, classes_num)
test_map = np.mean(test_map, axis=-1)
legend = '{}, {}, bal={}, aug={}, bs={}'.format(data_type, model_type, balanced, augmentation, batch_size)
# return {'bal_map': bal_map, 'test_map': test_map, 'legend': legend}
return bal_map, test_map, legend
bal_alpha = 0.3
test_alpha = 1.0
lines = []
linewidth = 1.
max_plot_iteration = 540000
if True:
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 0].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 0].plot(test_map, label='CNN14', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn9', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
# line, = ax.plot(test_map, label='cnn9', color='r', alpha=test_alpha)
# lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn5', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
# line, = ax.plot(test_map, label='cnn5', color='g', alpha=test_alpha)
# lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV1', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 0].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 0].plot(test_map, label='MobileNetV1', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
# line, = ax.plot(test_map, label='Cnn1d_ResNet34', color='grey', alpha=test_alpha)
# lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax[0, 0].plot(bal_map, color='k', alpha=bal_alpha, linewidth=linewidth)
# line, = ax[0, 0].plot(test_map, label='ResNet38', color='k', alpha=test_alpha, linewidth=linewidth)
# lines.append(line)
# (bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
# 320, 64, 50, 14000, 'full_train', 'Cnn13_WavCnn2d', 'clip_bce', 'balanced', 'mixup', 32)
# line, = ax.plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
# line, = ax.plot(test_map, label='Wavegram-CNN', color='g', alpha=test_alpha, linewidth=linewidth)
# lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_SpAndWav', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 0].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 0].plot(test_map, label='Wavegram-Logmel-CNN', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[0, 0].legend(handles=lines, loc=2)
ax[0, 0].set_title('(a) Comparison of architectures')
if True:
lines = []
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 1].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,bal,mixup (1.9m)', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)
line, = ax[0, 1].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,no-bal,no-mixup (1.9m)', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_mixup_time_domain', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 1].plot(bal_map, color='y', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,bal,mixup-wav (1.9m)', color='y', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
line, = ax[0, 1].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,bal,no-mixup (1.9m)', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
line, = ax[0, 1].plot(bal_map, color='k', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,bal,no-mixup (20k)', color='k', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 1].plot(bal_map, color='m', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,bal,mixup (20k)', color='m', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[0, 1].legend(handles=lines, loc=2, fontsize=8)
ax[0, 1].set_title('(b) Comparison of training data and augmentation')
if True:
lines = []
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 2].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 2].plot(test_map, label='CNN14,emb=2048', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 2].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 2].plot(test_map, label='CNN14,emb=32', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[0, 2].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 2].plot(test_map, label='CNN14,emb=128', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[0, 2].legend(handles=lines, loc=2)
ax[0, 2].set_title('(c) Comparison of embedding size')
if True:
lines = []
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 0].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 0].plot(test_map, label='CNN14 (100% full)', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.8_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 0].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 0].plot(test_map, label='CNN14 (80% full)', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.5_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 0].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
        line, = ax[1, 0].plot(test_map, label='CNN14 (50% full)', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[1, 0].legend(handles=lines, loc=2)
ax[1, 0].set_title('(d) Comparison of amount of training data')
if True:
lines = []
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 1].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 1].plot(test_map, label='CNN14,32kHz', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_16k', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 1].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 1].plot(test_map, label='CNN14,16kHz', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_8k', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 1].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 1].plot(test_map, label='CNN14,8kHz', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[1, 1].legend(handles=lines, loc=2)
ax[1, 1].set_title('(e) Comparison of sampling rate')
if True:
lines = []
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 2].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 2].plot(test_map, label='CNN14,64-melbins', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 32, 50, 14000, 'full_train', 'Cnn14_mel32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 2].plot(bal_map, color='b', alpha=bal_alpha)
line, = ax[1, 2].plot(test_map, label='CNN14,32-melbins', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 128, 50, 14000, 'full_train', 'Cnn14_mel128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax[1, 2].plot(bal_map, color='g', alpha=bal_alpha)
line, = ax[1, 2].plot(test_map, label='CNN14,128-melbins', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[1, 2].legend(handles=lines, loc=2)
ax[1, 2].set_title('(f) Comparison of mel bins number')
for i in range(2):
for j in range(3):
ax[i, j].set_ylim(0, 0.8)
ax[i, j].set_xlim(0, len(iterations))
ax[i, j].set_xlabel('Iterations')
ax[i, j].set_ylabel('mAP')
ax[i, j].xaxis.set_ticks(np.arange(0, len(iterations), 50))
# ax.xaxis.set_ticklabels(np.arange(0, max_plot_iteration, 50000))
ax[i, j].xaxis.set_ticklabels(['0', '100k', '200k', '300k', '400k', '500k'])
ax[i, j].yaxis.set_ticks(np.arange(0, 0.81, 0.05))
ax[i, j].yaxis.set_ticklabels(['0', '', '0.1', '', '0.2', '', '0.3', '', '0.4', '', '0.5', '', '0.6', '', '0.7', '', '0.8'])
# ax.yaxis.set_ticklabels(np.around(np.arange(0, 0.81, 0.05), decimals=2))
ax[i, j].yaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
ax[i, j].xaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
    plt.tight_layout(pad=0, h_pad=1, w_pad=0)
# box = ax.get_position()
# ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# ax.legend(handles=lines, bbox_to_anchor=(1.0, 1.0))
plt.savefig(save_out_path)
print('Save figure to {}'.format(save_out_path))
def table_values(args):
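    """Print mAP, mAUC and d-prime of the system chosen by --select at a fixed
    iteration (values used to fill the result tables)."""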
# Arguments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
select = args.select
def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size, iteration):
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
idx = iteration // 2000
mAP = np.mean(statistics_dict['test'][idx]['average_precision'])
mAUC = np.mean(statistics_dict['test'][idx]['auc'])
dprime = d_prime(mAUC)
print('mAP: {:.3f}'.format(mAP))
print('mAUC: {:.3f}'.format(mAUC))
print('dprime: {:.3f}'.format(dprime))
if select == 'cnn13':
iteration = 600000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn5':
iteration = 440000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn5', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn9':
iteration = 440000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn9', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_decisionlevelmax':
iteration = 400000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelMax', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_decisionlevelavg':
iteration = 600000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelAvg', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_decisionlevelatt':
iteration = 600000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelAtt', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_emb32':
iteration = 560000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb32', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_emb128':
iteration = 560000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb128', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_emb512':
iteration = 440000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb512', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_hop500':
iteration = 440000
_load_metrics('main', 32000, 1024,
500, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_hop640':
iteration = 440000
_load_metrics('main', 32000, 1024,
640, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'cnn13_hop1000':
iteration = 540000
_load_metrics('main', 32000, 1024,
1000, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'mobilenetv1':
iteration = 560000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV1', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'mobilenetv2':
iteration = 560000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV2', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'resnet18':
iteration = 600000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet18', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'resnet34':
iteration = 600000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet34', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'resnet50':
iteration = 600000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet50', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'dainet':
iteration = 600000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_DaiNet', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'leenet':
iteration = 540000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_LeeNet', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'leenet18':
iteration = 440000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_LeeNet18', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'resnet34_1d':
iteration = 500000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet34', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'resnet50_1d':
iteration = 500000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet50', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'waveform_cnn2d':
iteration = 660000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_WavCnn2d', 'clip_bce', 'balanced', 'mixup', 32, iteration)
elif select == 'waveform_spandwav':
iteration = 700000
_load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_SpAndWav', 'clip_bce', 'balanced', 'mixup', 32, iteration)
def crop_label(label):
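    """Crop a class label to at most 16 characters, keeping whole words
    (used to shorten legend entries)."""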
max_len = 16
if len(label) <= max_len:
return label
else:
words = label.split(' ')
cropped_label = ''
for w in words:
if len(cropped_label + ' ' + w) > max_len:
break
else:
cropped_label += ' {}'.format(w)
return cropped_label
def add_comma(integer):
    """E.g., 1234567 -> '1,234,567'."""
    return '{:,}'.format(int(integer))
def plot_class_iteration(args):
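    """Plot class-wise AP against training iterations for the 10 most frequent,
    10 mid-frequency and 10 least frequent classes."""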
# Arguments & parameters
workspace = args.workspace
select = args.select
save_out_path = 'results_map/class_iteration_map.pdf'
create_folder(os.path.dirname(save_out_path))
def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size, iteration):
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
return statistics_dict
iteration = 600000
statistics_dict = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32, iteration)
mAP_mat = np.array([e['average_precision'] for e in statistics_dict['test']])
    mAP_mat = mAP_mat[0 : 300, :]   # 300 * 2000 = 600k iterations
sorted_indexes = np.argsort(config.full_samples_per_class)[::-1]
fig, axs = plt.subplots(1, 3, figsize=(20, 5))
ranges = [np.arange(0, 10), np.arange(250, 260), np.arange(517, 527)]
axs[0].set_ylabel('AP')
for col in range(0, 3):
axs[col].set_ylim(0, 1.)
axs[col].set_xlim(0, 301)
axs[col].set_xlabel('Iterations')
axs[col].set_ylabel('AP')
axs[col].xaxis.set_ticks(np.arange(0, 301, 100))
axs[col].xaxis.set_ticklabels(['0', '200k', '400k', '600k'])
lines = []
for _ix in ranges[col]:
_label = crop_label(config.labels[sorted_indexes[_ix]]) + \
' ({})'.format(add_comma(config.full_samples_per_class[sorted_indexes[_ix]]))
line, = axs[col].plot(mAP_mat[:, sorted_indexes[_ix]], label=_label)
lines.append(line)
box = axs[col].get_position()
axs[col].set_position([box.x0, box.y0, box.width * 1., box.height])
axs[col].legend(handles=lines, bbox_to_anchor=(1., 1.))
axs[col].yaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
plt.tight_layout(pad=4, w_pad=1, h_pad=1)
plt.savefig(save_out_path)
print(save_out_path)
def _load_old_metrics(workspace, filename, iteration, data_type):
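    """Load class-wise AP and AUC statistics written by the older
    audioset_classification codebase for one iteration."""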
assert data_type in ['train', 'test']
stat_name = "stat_{}_iters.p".format(iteration)
# Load stats
stat_path = os.path.join(workspace, "stats", filename, data_type, stat_name)
    try:
        stats = cPickle.load(open(stat_path, 'rb'))
    except UnicodeDecodeError:
        # Pickles written under Python 2 need latin1 decoding under Python 3
        stats = cPickle.load(open(stat_path, 'rb'), encoding='latin1')
precisions = [stat['precisions'] for stat in stats]
recalls = [stat['recalls'] for stat in stats]
maps = np.array([stat['AP'] for stat in stats])
aucs = np.array([stat['auc'] for stat in stats])
return {'average_precision': maps, 'AUC': aucs}
def _sort(ys):
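    """Sort values in descending order and return the sorted values, the sorting
    indices and the corresponding class labels."""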
sorted_idxes = np.argsort(ys)
sorted_idxes = sorted_idxes[::-1]
sorted_ys = ys[sorted_idxes]
sorted_lbs = [config.labels[e] for e in sorted_idxes]
return sorted_ys, sorted_idxes, sorted_lbs
def load_data(hdf5_path):
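    """Load packed features (x), targets (y) and video ids from an hdf5 file."""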
with h5py.File(hdf5_path, 'r') as hf:
x = hf['x'][:]
y = hf['y'][:]
video_id_list = list(hf['video_id_list'][:])
return x, y, video_id_list
def get_avg_stats(workspace, bgn_iter, fin_iter, interval_iter, filename, data_type):
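    """Average the probabilities saved every interval_iter iterations between
    bgn_iter and fin_iter, then compute class-wise AP and AUC on the
    balanced-train or evaluation packed features."""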
assert data_type in ['train', 'test']
bal_train_hdf5 = "/vol/vssp/msos/audioset/packed_features/bal_train.h5"
eval_hdf5 = "/vol/vssp/msos/audioset/packed_features/eval.h5"
unbal_train_hdf5 = "/vol/vssp/msos/audioset/packed_features/unbal_train.h5"
t1 = time.time()
if data_type == 'test':
(te_x, te_y, te_id_list) = load_data(eval_hdf5)
elif data_type == 'train':
(te_x, te_y, te_id_list) = load_data(bal_train_hdf5)
y = te_y
prob_dir = os.path.join(workspace, "probs", filename, data_type)
names = os.listdir(prob_dir)
probs = []
iters = range(bgn_iter, fin_iter, interval_iter)
    for it in iters:  # 'it' avoids shadowing the built-in iter()
        pickle_path = os.path.join(prob_dir, "prob_%d_iters.p" % it)
        try:
            prob = cPickle.load(open(pickle_path, 'rb'))
        except UnicodeDecodeError:
            # Pickles written under Python 2 need latin1 decoding under Python 3
            prob = cPickle.load(open(pickle_path, 'rb'), encoding='latin1')
probs.append(prob)
avg_prob = np.mean(np.array(probs), axis=0)
n_out = y.shape[1]
stats = []
for k in range(n_out): # around 7 seconds
(precisions, recalls, thresholds) = metrics.precision_recall_curve(y[:, k], avg_prob[:, k])
avg_precision = metrics.average_precision_score(y[:, k], avg_prob[:, k], average=None)
(fpr, tpr, thresholds) = metrics.roc_curve(y[:, k], avg_prob[:, k])
auc = metrics.roc_auc_score(y[:, k], avg_prob[:, k], average=None)
# eer = pp_data.eer(avg_prob[:, k], y[:, k])
skip = 1000
        stat = {'precisions': precisions[0::skip], 'recalls': recalls[0::skip], 'AP': avg_precision,
            'fpr': fpr[0::skip], 'fnr': 1. - tpr[0::skip], 'auc': auc}  # 'stat' avoids shadowing the built-in dict
        stats.append(stat)
mAPs = np.array([e['AP'] for e in stats])
aucs = np.array([e['auc'] for e in stats])
print("Get avg time: {}".format(time.time() - t1))
return {'average_precision': mAPs, 'auc': aucs}
def _samples_num_per_class():
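    """Count the number of audio clips per class in the balanced training,
    unbalanced training and evaluation subsets."""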
bal_train_hdf5 = "/vol/vssp/msos/audioset/packed_features/bal_train.h5"
eval_hdf5 = "/vol/vssp/msos/audioset/packed_features/eval.h5"
unbal_train_hdf5 = "/vol/vssp/msos/audioset/packed_features/unbal_train.h5"
(x, y, id_list) = load_data(eval_hdf5)
eval_num = np.sum(y, axis=0)
(x, y, id_list) = load_data(bal_train_hdf5)
bal_num = np.sum(y, axis=0)
(x, y, id_list) = load_data(unbal_train_hdf5)
unbal_num = np.sum(y, axis=0)
return bal_num, unbal_num, eval_num
def get_label_quality():
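    """Read per-class label quality (ratio of verified true labels to rated labels)
    from the AudioSet quality-assessment csv; classes without ratings get None."""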
rate_csv = '/vol/vssp/msos/qk/workspaces/pub_audioset_tagging_cnn_transfer/metadata/qa_true_counts.csv'
with open(rate_csv, 'r') as f:
reader = csv.reader(f, delimiter=',')
lis = list(reader)
rates = []
for n in range(1, len(lis)):
li = lis[n]
if float(li[1]) == 0:
rate = None
else:
rate = float(li[2]) / float(li[1])
rates.append(rate)
return rates
def summary_stats(args):
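    """Collect label quality, per-class sample counts and class-wise metrics of the
    averaged-instance baseline and two CNN systems into stats_for_paper.pkl,
    which is consumed by plot_long_fig and spearman."""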
# Arguments & parameters
workspace = args.workspace
out_stat_path = os.path.join(workspace, 'results', 'stats_for_paper.pkl')
create_folder(os.path.dirname(out_stat_path))
# Old workspace
old_workspace = '/vol/vssp/msos/qk/workspaces/audioset_classification'
# bal_train_metrics = _load_old_metrics(old_workspace, 'tmp127', 20000, 'train')
# eval_metrics = _load_old_metrics(old_workspace, 'tmp127', 20000, 'test')
bal_train_metrics = get_avg_stats(old_workspace, bgn_iter=10000, fin_iter=50001, interval_iter=5000, filename='tmp127_re', data_type='train')
eval_metrics = get_avg_stats(old_workspace, bgn_iter=10000, fin_iter=50001, interval_iter=5000, filename='tmp127_re', data_type='test')
maps0te = eval_metrics['average_precision']
(maps0te, sorted_idxes, sorted_lbs) = _sort(maps0te)
bal_num, unbal_num, eval_num = _samples_num_per_class()
output_dict = {
'labels': config.labels,
'label_quality': get_label_quality(),
'sorted_indexes_for_plot': sorted_idxes,
'official_balanced_trainig_samples': bal_num,
'official_unbalanced_training_samples': unbal_num,
'official_eval_samples': eval_num,
'downloaded_full_training_samples': config.full_samples_per_class,
'averaging_instance_system_avg_9_probs_from_10000_to_50000_iterations':
{'bal_train': bal_train_metrics, 'eval': eval_metrics}
}
def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size, iteration):
_workspace = '/vol/vssp/msos/qk/bytedance/workspaces_important/pub_audioset_tagging_cnn_transfer'
statistics_path = os.path.join(_workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
_idx = iteration // 2000
_dict = {'bal_train': {'average_precision': statistics_dict['bal'][_idx]['average_precision'],
'auc': statistics_dict['bal'][_idx]['auc']},
'eval': {'average_precision': statistics_dict['test'][_idx]['average_precision'],
'auc': statistics_dict['test'][_idx]['auc']}}
return _dict
iteration = 600000
output_dict['cnn13_system_iteration60k'] = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32, iteration)
iteration = 560000
output_dict['mobilenetv1_system_iteration56k'] = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV1', 'clip_bce', 'balanced', 'mixup', 32, iteration)
cPickle.dump(output_dict, open(out_stat_path, 'wb'))
print('Write stats for paper to {}'.format(out_stat_path))
def prepare_plot_long_4_rows(sorted_lbs):
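    """Create a 4-row figure whose x-axis spans all sound classes (in chunks of 132);
    the left axes show clip counts on a log scale and the twin right axes show
    average precision. Returns the four left and four right axes."""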
N = len(sorted_lbs)
    f, (ax1a, ax2a, ax3a, ax4a) = plt.subplots(4, 1, sharey=False, facecolor='w', figsize=(10, 12))
fontsize = 5
K = 132
ax1a.set_xlim(0, K)
ax2a.set_xlim(K, 2 * K)
ax3a.set_xlim(2 * K, 3 * K)
ax4a.set_xlim(3 * K, N)
truncated_sorted_lbs = []
for lb in sorted_lbs:
lb = lb[0 : 25]
words = lb.split(' ')
if len(words[-1]) < 3:
lb = ' '.join(words[0:-1])
truncated_sorted_lbs.append(lb)
ax1a.grid(which='major', axis='x', linestyle='-', alpha=0.3)
ax2a.grid(which='major', axis='x', linestyle='-', alpha=0.3)
ax3a.grid(which='major', axis='x', linestyle='-', alpha=0.3)
ax4a.grid(which='major', axis='x', linestyle='-', alpha=0.3)
ax1a.set_yscale('log')
ax2a.set_yscale('log')
ax3a.set_yscale('log')
ax4a.set_yscale('log')
ax1b = ax1a.twinx()
ax2b = ax2a.twinx()
ax3b = ax3a.twinx()
ax4b = ax4a.twinx()
ax1b.set_ylim(0., 1.)
ax2b.set_ylim(0., 1.)
ax3b.set_ylim(0., 1.)
ax4b.set_ylim(0., 1.)
ax1b.set_ylabel('Average precision')
ax2b.set_ylabel('Average precision')
ax3b.set_ylabel('Average precision')
ax4b.set_ylabel('Average precision')
ax1b.yaxis.grid(color='grey', linestyle='--', alpha=0.5)
ax2b.yaxis.grid(color='grey', linestyle='--', alpha=0.5)
ax3b.yaxis.grid(color='grey', linestyle='--', alpha=0.5)
ax4b.yaxis.grid(color='grey', linestyle='--', alpha=0.5)
ax1a.xaxis.set_ticks(np.arange(K))
ax1a.xaxis.set_ticklabels(truncated_sorted_lbs[0:K], rotation=90, fontsize=fontsize)
ax1a.xaxis.tick_bottom()
ax1a.set_ylabel("Number of audio clips")
ax2a.xaxis.set_ticks(np.arange(K, 2*K))
ax2a.xaxis.set_ticklabels(truncated_sorted_lbs[K:2*K], rotation=90, fontsize=fontsize)
ax2a.xaxis.tick_bottom()
# ax2a.tick_params(left='off', which='both')
ax2a.set_ylabel("Number of audio clips")
ax3a.xaxis.set_ticks(np.arange(2*K, 3*K))
ax3a.xaxis.set_ticklabels(truncated_sorted_lbs[2*K:3*K], rotation=90, fontsize=fontsize)
ax3a.xaxis.tick_bottom()
ax3a.set_ylabel("Number of audio clips")
ax4a.xaxis.set_ticks(np.arange(3*K, N))
ax4a.xaxis.set_ticklabels(truncated_sorted_lbs[3*K:], rotation=90, fontsize=fontsize)
ax4a.xaxis.tick_bottom()
# ax4a.tick_params(left='off', which='both')
ax4a.set_ylabel("Number of audio clips")
ax1a.spines['right'].set_visible(False)
ax1b.spines['right'].set_visible(False)
ax2a.spines['left'].set_visible(False)
ax2b.spines['left'].set_visible(False)
ax2a.spines['right'].set_visible(False)
ax2b.spines['right'].set_visible(False)
ax3a.spines['left'].set_visible(False)
ax3b.spines['left'].set_visible(False)
ax3a.spines['right'].set_visible(False)
ax3b.spines['right'].set_visible(False)
ax4a.spines['left'].set_visible(False)
ax4b.spines['left'].set_visible(False)
    plt.subplots_adjust(hspace=0.8)
return ax1a, ax2a, ax3a, ax4a, ax1b, ax2b, ax3b, ax4b
def _scatter_4_rows(x, ax, ax2, ax3, ax4, s, c, marker='.', alpha=1.):
N = len(x)
ax.scatter(np.arange(N), x, s=s, c=c, marker=marker, alpha=alpha)
ax2.scatter(np.arange(N), x, s=s, c=c, marker=marker, alpha=alpha)
ax3.scatter(np.arange(N), x, s=s, c=c, marker=marker, alpha=alpha)
ax4.scatter(np.arange(N), x, s=s, c=c, marker=marker, alpha=alpha)
def _plot_4_rows(x, ax, ax2, ax3, ax4, c, linewidth=1.0, alpha=1.0, label=""):
N = len(x)
ax.plot(x, c=c, linewidth=linewidth, alpha=alpha)
ax2.plot(x, c=c, linewidth=linewidth, alpha=alpha)
ax3.plot(x, c=c, linewidth=linewidth, alpha=alpha)
line, = ax4.plot(x, c=c, linewidth=linewidth, alpha=alpha, label=label)
return line
def plot_long_fig(args):
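    """Plot class-wise AP of several systems together with per-class training-clip
    counts and label quality over all AudioSet classes, and save results/long_fig.pdf.
    The helper _load_metrics0_classwise used below is assumed to be defined
    elsewhere in this module."""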
# Arguments & parameters
workspace = args.workspace
# Paths
stat_path = os.path.join(workspace, 'results', 'stats_for_paper.pkl')
save_out_path = 'results/long_fig.pdf'
create_folder(os.path.dirname(save_out_path))
# Stats
stats = cPickle.load(open(stat_path, 'rb'))
N = len(config.labels)
sorted_indexes = stats['sorted_indexes_for_plot']
sorted_labels = np.array(config.labels)[sorted_indexes]
audio_clips_per_class = stats['official_balanced_trainig_samples'] + stats['official_unbalanced_training_samples']
audio_clips_per_class = audio_clips_per_class[sorted_indexes]
(ax1a, ax2a, ax3a, ax4a, ax1b, ax2b, ax3b, ax4b) = prepare_plot_long_4_rows(sorted_labels)
# plot the same data on both axes
ax1a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
ax2a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
ax3a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
ax4a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
maps_avg_instances = stats['averaging_instance_system_avg_9_probs_from_10000_to_50000_iterations']['eval']['average_precision']
maps_avg_instances = maps_avg_instances[sorted_indexes]
maps_cnn13 = stats['cnn13_system_iteration60k']['eval']['average_precision']
maps_cnn13 = maps_cnn13[sorted_indexes]
maps_mobilenetv1 = stats['mobilenetv1_system_iteration56k']['eval']['average_precision']
maps_mobilenetv1 = maps_mobilenetv1[sorted_indexes]
maps_logmel_wavegram_cnn = _load_metrics0_classwise('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_SpAndWav', 'clip_bce', 'balanced', 'mixup', 32)
maps_logmel_wavegram_cnn = maps_logmel_wavegram_cnn[sorted_indexes]
_scatter_4_rows(maps_avg_instances, ax1b, ax2b, ax3b, ax4b, s=5, c='k')
_scatter_4_rows(maps_cnn13, ax1b, ax2b, ax3b, ax4b, s=5, c='r')
_scatter_4_rows(maps_mobilenetv1, ax1b, ax2b, ax3b, ax4b, s=5, c='b')
_scatter_4_rows(maps_logmel_wavegram_cnn, ax1b, ax2b, ax3b, ax4b, s=5, c='g')
linewidth = 0.7
line0te = _plot_4_rows(maps_avg_instances, ax1b, ax2b, ax3b, ax4b, c='k', linewidth=linewidth, label='AP with averaging instances (baseline)')
line1te = _plot_4_rows(maps_cnn13, ax1b, ax2b, ax3b, ax4b, c='r', linewidth=linewidth, label='AP with CNN14')
line2te = _plot_4_rows(maps_mobilenetv1, ax1b, ax2b, ax3b, ax4b, c='b', linewidth=linewidth, label='AP with MobileNetV1')
line3te = _plot_4_rows(maps_logmel_wavegram_cnn, ax1b, ax2b, ax3b, ax4b, c='g', linewidth=linewidth, label='AP with Wavegram-Logmel-CNN')
label_quality = stats['label_quality']
sorted_rate = np.array(label_quality)[sorted_indexes]
for k in range(len(sorted_rate)):
if sorted_rate[k] and sorted_rate[k] == 1:
sorted_rate[k] = 0.99
ax1b.scatter(np.arange(N)[sorted_rate != None], sorted_rate[sorted_rate != None], s=12, c='r', linewidth=0.8, marker='+')
ax2b.scatter(np.arange(N)[sorted_rate != None], sorted_rate[sorted_rate != None], s=12, c='r', linewidth=0.8, marker='+')
ax3b.scatter(np.arange(N)[sorted_rate != None], sorted_rate[sorted_rate != None], s=12, c='r', linewidth=0.8, marker='+')
line_label_quality = ax4b.scatter(np.arange(N)[sorted_rate != None], sorted_rate[sorted_rate != None], s=12, c='r', linewidth=0.8, marker='+', label='Label quality')
ax1b.scatter(np.arange(N)[sorted_rate == None], 0.5 * np.ones(len(np.arange(N)[sorted_rate == None])), s=12, c='r', linewidth=0.8, marker='_')
ax2b.scatter(np.arange(N)[sorted_rate == None], 0.5 * np.ones(len(np.arange(N)[sorted_rate == None])), s=12, c='r', linewidth=0.8, marker='_')
ax3b.scatter(np.arange(N)[sorted_rate == None], 0.5 * np.ones(len(np.arange(N)[sorted_rate == None])), s=12, c='r', linewidth=0.8, marker='_')
ax4b.scatter(np.arange(N)[sorted_rate == None], 0.5 * np.ones(len(np.arange(N)[sorted_rate == None])), s=12, c='r', linewidth=0.8, marker='_')
plt.legend(handles=[line0te, line1te, line2te, line3te, line_label_quality], fontsize=6, loc=1)
plt.savefig(save_out_path)
print('Save fig to {}'.format(save_out_path))
def plot_flops(args):
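    """Scatter mAP against multiply-add operations (in millions) for the evaluated
    architectures, connecting models that belong to the same family."""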
# Arguments & parameters
workspace = args.workspace
# Paths
save_out_path = 'results_map/flops.pdf'
create_folder(os.path.dirname(save_out_path))
    # Create a single 5 x 5 inch axes (a separate plt.figure call would be discarded by plt.subplots)
    fig, ax = plt.subplots(1, 1, figsize=(5, 5))
model_types = np.array(['Cnn6', 'Cnn10', 'Cnn14', 'ResNet22', 'ResNet38', 'ResNet54',
'MobileNetV1', 'MobileNetV2', 'DaiNet', 'LeeNet', 'LeeNet18',
'Res1dNet30', 'Res1dNet44', 'Wavegram-CNN', 'Wavegram-\nLogmel-CNN'])
flops = np.array([21.986, 21.986, 42.220, 30.081, 48.962, 54.563, 3.614, 2.810,
30.395, 4.741, 26.369, 32.688, 61.833, 44.234, 53.510])
mAPs = np.array([0.343, 0.380, 0.431, 0.430, 0.434, 0.429, 0.389, 0.383, 0.295,
0.266, 0.336, 0.365, 0.355, 0.389, 0.439])
    sorted_indexes = np.argsort(flops)  # indices of models ordered by multiply-adds (currently unused)
ax.scatter(flops, mAPs)
shift = [[1, 0.002], [1, -0.006], [-1, -0.014], [-2, 0.006], [-7, 0.006],
[1, -0.01], [0.5, 0.004], [-1, -0.014], [1, -0.007], [0.8, -0.008],
[1, -0.007], [1, 0.002], [-6, -0.015], [1, -0.008], [0.8, 0]]
for i, model_type in enumerate(model_types):
ax.annotate(model_type, (flops[i] + shift[i][0], mAPs[i] + shift[i][1]))
ax.plot(flops[[0, 1, 2]], mAPs[[0, 1, 2]])
ax.plot(flops[[3, 4, 5]], mAPs[[3, 4, 5]])
ax.plot(flops[[6, 7]], mAPs[[6, 7]])
ax.plot(flops[[9, 10]], mAPs[[9, 10]])
ax.plot(flops[[11, 12]], mAPs[[11, 12]])
ax.plot(flops[[13, 14]], mAPs[[13, 14]])
ax.set_xlim(0, 70)
ax.set_ylim(0.2, 0.5)
ax.set_xlabel('Multi-adds (million)')
ax.set_ylabel('mAP')
    plt.tight_layout(pad=0, h_pad=0, w_pad=0)
plt.savefig(save_out_path)
print('Write out figure to {}'.format(save_out_path))
def spearman(args):
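    """Print the Spearman rank correlation between class-wise AP and (a) the number
    of training samples and (b) the label quality."""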
# Arguments & parameters
workspace = args.workspace
# Paths
stat_path = os.path.join(workspace, 'results', 'stats_for_paper.pkl')
# Stats
stats = cPickle.load(open(stat_path, 'rb'))
label_quality = np.array([qu if qu else 0.5 for qu in stats['label_quality']])
training_samples = np.array(stats['official_balanced_trainig_samples']) + \
np.array(stats['official_unbalanced_training_samples'])
mAP = stats['averaging_instance_system_avg_9_probs_from_10000_to_50000_iterations']['eval']['average_precision']
    import scipy.stats
samples_spearman = scipy.stats.spearmanr(training_samples, mAP)[0]
quality_spearman = scipy.stats.spearmanr(label_quality, mAP)[0]
print('Training samples spearman: {:.3f}'.format(samples_spearman))
print('Quality spearman: {:.3f}'.format(quality_spearman))
def print_results(args):
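    """Print class-wise averaged metrics of the ablation systems listed below.
    The helpers _load_metrics_classwise and _load_metrics0_classwise2 are assumed
    to be defined elsewhere in this module."""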
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14_mixup_time_domain', 'clip_bce', 'balanced', 'mixup', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
#
(mAP, mAUC, dprime) = _load_metrics0_classwise2('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn13_emb32', 'clip_bce', 'balanced', 'mixup', 32)
(mAP, mAUC, dprime) = _load_metrics0_classwise2('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn13_emb128', 'clip_bce', 'balanced', 'mixup', 32)
# partial
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'partial_0.8_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'partial_0.5_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
# Sample rate
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14_16k', 'clip_bce', 'balanced', 'mixup', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14_8k', 'clip_bce', 'balanced', 'mixup', 32)
# Mel bins
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 128, 50, 14000, 'full_train', 'Cnn14_mel128', 'clip_bce', 'balanced', 'mixup', 32)
(mAP, mAUC, dprime) = _load_metrics_classwise('main', 32000, 1024, 320, 32, 50, 14000, 'full_train', 'Cnn14_mel32', 'clip_bce', 'balanced', 'mixup', 32)
    # Deliberate crash (undefined module / name) so that execution stops
    # once the metrics above have been printed.
    import crash
    asdf
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
subparsers = parser.add_subparsers(dest='mode')
parser_plot = subparsers.add_parser('plot')
parser_plot.add_argument('--dataset_dir', type=str, required=True)
parser_plot.add_argument('--workspace', type=str, required=True)
parser_plot.add_argument('--select', type=str, required=True)
parser_plot = subparsers.add_parser('plot_for_paper')
parser_plot.add_argument('--dataset_dir', type=str, required=True)
parser_plot.add_argument('--workspace', type=str, required=True)
    parser_plot.add_argument('--select', type=str, required=True)
    parser_plot = subparsers.add_parser('plot_for_paper2')
    parser_plot.add_argument('--dataset_dir', type=str, required=True)
    parser_plot.add_argument('--workspace', type=str, required=True)
    parser_table = subparsers.add_parser('table_values')
    parser_table.add_argument('--dataset_dir', type=str, required=True)
    parser_table.add_argument('--workspace', type=str, required=True)
    parser_table.add_argument('--select', type=str, required=True)
parser_values = subparsers.add_parser('plot_class_iteration')
parser_values.add_argument('--workspace', type=str, required=True)
parser_values.add_argument('--select', type=str, required=True)
parser_summary_stats = subparsers.add_parser('summary_stats')
parser_summary_stats.add_argument('--workspace', type=str, required=True)
parser_plot_long = subparsers.add_parser('plot_long_fig')
parser_plot_long.add_argument('--workspace', type=str, required=True)
parser_plot_flops = subparsers.add_parser('plot_flops')
parser_plot_flops.add_argument('--workspace', type=str, required=True)
parser_spearman = subparsers.add_parser('spearman')
parser_spearman.add_argument('--workspace', type=str, required=True)
parser_print = subparsers.add_parser('print')
parser_print.add_argument('--workspace', type=str, required=True)
args = parser.parse_args()
if args.mode == 'plot':
plot(args)
elif args.mode == 'plot_for_paper':
plot_for_paper(args)
elif args.mode == 'plot_for_paper2':
plot_for_paper2(args)
elif args.mode == 'table_values':
table_values(args)
elif args.mode == 'plot_class_iteration':
plot_class_iteration(args)
elif args.mode == 'summary_stats':
summary_stats(args)
elif args.mode == 'plot_long_fig':
plot_long_fig(args)
elif args.mode == 'plot_flops':
plot_flops(args)
elif args.mode == 'spearman':
spearman(args)
elif args.mode == 'print':
print_results(args)
else:
raise Exception('Error argument!') | EXA-1-master | exa/models/AudioGPT/audio_detection/audio_infer/utils/plot_statistics.py |
import os
import sys
import numpy as np
import argparse
import h5py
import time
import pickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def load_statistics(statistics_path):
statistics_dict = pickle.load(open(statistics_path, 'rb'))
bal_map = np.array([statistics['average_precision'] for statistics in statistics_dict['bal']]) # (N, classes_num)
bal_map = np.mean(bal_map, axis=-1)
test_map = np.array([statistics['average_precision'] for statistics in statistics_dict['test']]) # (N, classes_num)
test_map = np.mean(test_map, axis=-1)
return bal_map, test_map
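def _example_plot_map_curves():
    # Illustrative sketch only: plot the balanced-subset and evaluation mAP curves
    # of one run, using the Cnn14 baseline pickle referenced later in this script;
    # the output path is a placeholder.
    (bal_map, test_map) = load_statistics(
        'paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_'
        'fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl')
    plt.plot(bal_map, label='balanced subset mAP')
    plt.plot(test_map, label='evaluation mAP')
    plt.legend()
    plt.savefig('results/example_map_curves.pdf')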
def crop_label(label):
max_len = 16
if len(label) <= max_len:
return label
else:
words = label.split(' ')
cropped_label = ''
for w in words:
if len(cropped_label + ' ' + w) > max_len:
break
else:
cropped_label += ' {}'.format(w)
return cropped_label
def add_comma(integer):
"""E.g., 1234567 -> 1,234,567
"""
    integer = int(integer)
    return '{:,}'.format(integer)
def plot_classwise_iteration_map(args):
# Paths
save_out_path = 'results/classwise_iteration_map.pdf'
create_folder(os.path.dirname(save_out_path))
# Load statistics
statistics_dict = pickle.load(open('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_WavegramLogmelCnn_balanced_mixup_bs32.pkl', 'rb'))
mAP_mat = np.array([e['average_precision'] for e in statistics_dict['test']])
mAP_mat = mAP_mat[0 : 300, :] # 300 * 2000 = 600k iterations
sorted_indexes = np.argsort(config.full_samples_per_class)[::-1]
fig, axs = plt.subplots(1, 3, figsize=(20, 5))
ranges = [np.arange(0, 10), np.arange(250, 260), np.arange(517, 527)]
axs[0].set_ylabel('AP')
for col in range(0, 3):
axs[col].set_ylim(0, 1.)
axs[col].set_xlim(0, 301)
axs[col].set_xlabel('Iterations')
axs[col].set_ylabel('AP')
axs[col].xaxis.set_ticks(np.arange(0, 301, 100))
axs[col].xaxis.set_ticklabels(['0', '200k', '400k', '600k'])
lines = []
for _ix in ranges[col]:
_label = crop_label(config.labels[sorted_indexes[_ix]]) + \
' ({})'.format(add_comma(config.full_samples_per_class[sorted_indexes[_ix]]))
line, = axs[col].plot(mAP_mat[:, sorted_indexes[_ix]], label=_label)
lines.append(line)
box = axs[col].get_position()
axs[col].set_position([box.x0, box.y0, box.width * 1., box.height])
axs[col].legend(handles=lines, bbox_to_anchor=(1., 1.))
axs[col].yaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
plt.tight_layout(pad=4, w_pad=1, h_pad=1)
plt.savefig(save_out_path)
print(save_out_path)
def plot_six_figures(args):
# Arguments & parameters
classes_num = config.classes_num
labels = config.labels
max_plot_iteration = 540000
iterations = np.arange(0, max_plot_iteration, 2000)
# Paths
class_labels_indices_path = os.path.join('metadata', 'class_labels_indices.csv')
save_out_path = 'results/six_figures.pdf'
create_folder(os.path.dirname(save_out_path))
# Plot
fig, ax = plt.subplots(2, 3, figsize=(14, 7))
bal_alpha = 0.3
test_alpha = 1.0
linewidth = 1.
# (a) Comparison of architectures
if True:
lines = []
# Wavegram-Logmel-CNN
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_WavegramLogmelCnn_balanced_mixup_bs32.pkl')
line, = ax[0, 0].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 0].plot(test_map, label='Wavegram-Logmel-CNN', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# Cnn14
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl')
line, = ax[0, 0].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 0].plot(test_map, label='CNN14', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# MobileNetV1
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_MobileNetV1_balanced_mixup_bs32.pkl')
line, = ax[0, 0].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 0].plot(test_map, label='MobileNetV1', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[0, 0].legend(handles=lines, loc=2)
ax[0, 0].set_title('(a) Comparison of architectures')
    # (b) Comparison of training data and augmentation
if True:
lines = []
# Full data + balanced sampler + mixup
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl')
line, = ax[0, 1].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,bal,mixup (1.9m)', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# Full data + balanced sampler + mixup in time domain
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_timedomain_bs32.pkl')
line, = ax[0, 1].plot(bal_map, color='y', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,bal,mixup-wav (1.9m)', color='y', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# Full data + balanced sampler + no mixup
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_nomixup_bs32.pkl')
line, = ax[0, 1].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,bal,no-mixup (1.9m)', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# Full data + uniform sampler + no mixup
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_nobalanced_nomixup_bs32.pkl')
line, = ax[0, 1].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,no-bal,no-mixup (1.9m)', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# Balanced data + balanced sampler + mixup
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_balanced_train_Cnn14_balanced_mixup_bs32.pkl')
line, = ax[0, 1].plot(bal_map, color='m', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,bal,mixup (20k)', color='m', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# Balanced data + balanced sampler + no mixup
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_balanced_train_Cnn14_balanced_nomixup_bs32.pkl')
line, = ax[0, 1].plot(bal_map, color='k', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 1].plot(test_map, label='CNN14,bal,no-mixup (20k)', color='k', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[0, 1].legend(handles=lines, loc=2, fontsize=8)
ax[0, 1].set_title('(b) Comparison of training data and augmentation')
# (c) Comparison of embedding size
if True:
lines = []
# Embedding size 2048
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl')
line, = ax[0, 2].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 2].plot(test_map, label='CNN14,emb=2048', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# Embedding size 128
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_emb128_balanced_mixup_bs32.pkl')
line, = ax[0, 2].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 2].plot(test_map, label='CNN14,emb=128', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# Embedding size 32
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_emb32_balanced_mixup_bs32.pkl')
line, = ax[0, 2].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax[0, 2].plot(test_map, label='CNN14,emb=32', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[0, 2].legend(handles=lines, loc=2)
ax[0, 2].set_title('(c) Comparison of embedding size')
# (d) Comparison of amount of training data
if True:
lines = []
# 100% of full training data
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl')
line, = ax[1, 0].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 0].plot(test_map, label='CNN14 (100% full)', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# 80% of full training data
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_0.8full_train_Cnn14_balanced_mixup_bs32.pkl')
line, = ax[1, 0].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 0].plot(test_map, label='CNN14 (80% full)', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# 50% of full training data
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_0.5full_train_Cnn14_balanced_mixup_bs32.pkl')
line, = ax[1, 0].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
        line, = ax[1, 0].plot(test_map, label='CNN14 (50% full)', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[1, 0].legend(handles=lines, loc=2)
ax[1, 0].set_title('(d) Comparison of amount of training data')
# (e) Comparison of sampling rate
if True:
lines = []
# Cnn14 + 32 kHz
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl')
line, = ax[1, 1].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 1].plot(test_map, label='CNN14,32kHz', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# Cnn14 + 16 kHz
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_16k_balanced_mixup_bs32.pkl')
line, = ax[1, 1].plot(bal_map, color='b', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 1].plot(test_map, label='CNN14,16kHz', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# Cnn14 + 8 kHz
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_8k_balanced_mixup_bs32.pkl')
line, = ax[1, 1].plot(bal_map, color='g', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 1].plot(test_map, label='CNN14,8kHz', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[1, 1].legend(handles=lines, loc=2)
ax[1, 1].set_title('(e) Comparison of sampling rate')
# (f) Comparison of mel bins number
if True:
lines = []
# Cnn14 + 128 mel bins
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel128_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl')
line, = ax[1, 2].plot(bal_map, color='g', alpha=bal_alpha)
line, = ax[1, 2].plot(test_map, label='CNN14,128-melbins', color='g', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# Cnn14 + 64 mel bins
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl')
line, = ax[1, 2].plot(bal_map, color='r', alpha=bal_alpha, linewidth=linewidth)
line, = ax[1, 2].plot(test_map, label='CNN14,64-melbins', color='r', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
# Cnn14 + 32 mel bins
(bal_map, test_map) = load_statistics('paper_statistics/statistics_sr32000_window1024_hop320_mel32_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl')
line, = ax[1, 2].plot(bal_map, color='b', alpha=bal_alpha)
line, = ax[1, 2].plot(test_map, label='CNN14,32-melbins', color='b', alpha=test_alpha, linewidth=linewidth)
lines.append(line)
ax[1, 2].legend(handles=lines, loc=2)
ax[1, 2].set_title('(f) Comparison of mel bins number')
for i in range(2):
for j in range(3):
ax[i, j].set_ylim(0, 0.8)
ax[i, j].set_xlim(0, len(iterations))
ax[i, j].set_xlabel('Iterations')
ax[i, j].set_ylabel('mAP')
ax[i, j].xaxis.set_ticks(np.arange(0, len(iterations), 50))
ax[i, j].xaxis.set_ticklabels(['0', '100k', '200k', '300k', '400k', '500k'])
ax[i, j].yaxis.set_ticks(np.arange(0, 0.81, 0.05))
ax[i, j].yaxis.set_ticklabels(['0', '', '0.1', '', '0.2', '', '0.3',
'', '0.4', '', '0.5', '', '0.6', '', '0.7', '', '0.8'])
ax[i, j].yaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
ax[i, j].xaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
    plt.tight_layout(pad=0, h_pad=1, w_pad=0)
plt.savefig(save_out_path)
print('Save figure to {}'.format(save_out_path))
def plot_complexity_map(args):
# Paths
save_out_path = 'results/complexity_mAP.pdf'
create_folder(os.path.dirname(save_out_path))
plt.figure(figsize=(5, 5))
fig, ax = plt.subplots(1, 1)
model_types = np.array(['Cnn6', 'Cnn10', 'Cnn14', 'ResNet22', 'ResNet38', 'ResNet54',
'MobileNetV1', 'MobileNetV2', 'DaiNet', 'LeeNet', 'LeeNet18',
'Res1dNet30', 'Res1dNet44', 'Wavegram-CNN', 'Wavegram-\nLogmel-CNN'])
flops = np.array([21.986, 28.166, 42.220, 30.081, 48.962, 54.563, 3.614, 2.810,
30.395, 4.741, 26.369, 32.688, 61.833, 44.234, 53.510])
mAPs = np.array([0.343, 0.380, 0.431, 0.430, 0.434, 0.429, 0.389, 0.383, 0.295,
0.266, 0.336, 0.365, 0.355, 0.389, 0.439])
    sorted_indexes = np.argsort(flops)  # models ordered by multiply-adds (not used below)
ax.scatter(flops, mAPs)
shift = [[-5.5, -0.004], [1, -0.004], [-1, -0.014], [-2, 0.006], [-7, 0.006],
[1, -0.01], [0.5, 0.004], [-1, -0.014], [1, -0.007], [0.8, -0.008],
[1, -0.007], [1, 0.002], [-6, -0.015], [1, -0.008], [0.8, 0]]
for i, model_type in enumerate(model_types):
ax.annotate(model_type, (flops[i] + shift[i][0], mAPs[i] + shift[i][1]))
ax.plot(flops[[0, 1, 2]], mAPs[[0, 1, 2]])
ax.plot(flops[[3, 4, 5]], mAPs[[3, 4, 5]])
ax.plot(flops[[6, 7]], mAPs[[6, 7]])
ax.plot(flops[[9, 10]], mAPs[[9, 10]])
ax.plot(flops[[11, 12]], mAPs[[11, 12]])
ax.plot(flops[[13, 14]], mAPs[[13, 14]])
ax.set_xlim(0, 70)
ax.set_ylim(0.2, 0.5)
    ax.set_xlabel('Multi-adds (million)', fontsize=15)
ax.set_ylabel('mAP', fontsize=15)
ax.tick_params(axis='x', labelsize=12)
ax.tick_params(axis='y', labelsize=12)
    plt.tight_layout(pad=0, h_pad=0, w_pad=0)
plt.savefig(save_out_path)
print('Write out figure to {}'.format(save_out_path))
def plot_long_fig(args):
# Paths
stats = pickle.load(open('paper_statistics/stats_for_long_fig.pkl', 'rb'))
save_out_path = 'results/long_fig.pdf'
create_folder(os.path.dirname(save_out_path))
# Load meta
N = len(config.labels)
sorted_indexes = stats['sorted_indexes_for_plot']
sorted_labels = np.array(config.labels)[sorted_indexes]
audio_clips_per_class = stats['official_balanced_training_samples'] + stats['official_unbalanced_training_samples']
audio_clips_per_class = audio_clips_per_class[sorted_indexes]
# Prepare axes for plot
(ax1a, ax2a, ax3a, ax4a, ax1b, ax2b, ax3b, ax4b) = prepare_plot_long_4_rows(sorted_labels)
# plot the number of training samples
ax1a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
ax2a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
ax3a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
ax4a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
# Load mAP of different systems
"""Average instance system of [1] with an mAP of 0.317.
[1] Kong, Qiuqiang, Changsong Yu, Yong Xu, Turab Iqbal, Wenwu Wang, and
Mark D. Plumbley. "Weakly labelled audioset tagging with attention neural
networks." IEEE/ACM Transactions on Audio, Speech, and Language Processing
27, no. 11 (2019): 1791-1802."""
maps_avg_instances = stats['averaging_instance_system_avg_9_probs_from_10000_to_50000_iterations']['eval']['average_precision']
maps_avg_instances = maps_avg_instances[sorted_indexes]
# PANNs Cnn14
maps_panns_cnn14 = stats['panns_cnn14']['eval']['average_precision']
maps_panns_cnn14 = maps_panns_cnn14[sorted_indexes]
# PANNs MobileNetV1
maps_panns_mobilenetv1 = stats['panns_mobilenetv1']['eval']['average_precision']
maps_panns_mobilenetv1 = maps_panns_mobilenetv1[sorted_indexes]
# PANNs Wavegram-Logmel-Cnn14
maps_panns_wavegram_logmel_cnn14 = stats['panns_wavegram_logmel_cnn14']['eval']['average_precision']
maps_panns_wavegram_logmel_cnn14 = maps_panns_wavegram_logmel_cnn14[sorted_indexes]
# Plot mAPs
_scatter_4_rows(maps_panns_wavegram_logmel_cnn14, ax1b, ax2b, ax3b, ax4b, s=5, c='g')
_scatter_4_rows(maps_panns_cnn14, ax1b, ax2b, ax3b, ax4b, s=5, c='r')
_scatter_4_rows(maps_panns_mobilenetv1, ax1b, ax2b, ax3b, ax4b, s=5, c='b')
_scatter_4_rows(maps_avg_instances, ax1b, ax2b, ax3b, ax4b, s=5, c='k')
linewidth = 0.7
line0te = _plot_4_rows(maps_panns_wavegram_logmel_cnn14, ax1b, ax2b, ax3b, ax4b,
c='g', linewidth=linewidth, label='AP with Wavegram-Logmel-CNN')
line1te = _plot_4_rows(maps_panns_cnn14, ax1b, ax2b, ax3b, ax4b, c='r',
linewidth=linewidth, label='AP with CNN14')
line2te = _plot_4_rows(maps_panns_mobilenetv1, ax1b, ax2b, ax3b, ax4b, c='b',
linewidth=linewidth, label='AP with MobileNetV1')
line3te = _plot_4_rows(maps_avg_instances, ax1b, ax2b, ax3b, ax4b, c='k',
linewidth=linewidth, label='AP with averaging instances (baseline)')
# Plot label quality
label_quality = stats['label_quality']
sorted_label_quality = np.array(label_quality)[sorted_indexes]
for k in range(len(sorted_label_quality)):
if sorted_label_quality[k] and sorted_label_quality[k] == 1:
sorted_label_quality[k] = 0.99
ax1b.scatter(np.arange(N)[sorted_label_quality != None],
sorted_label_quality[sorted_label_quality != None], s=12, c='r', linewidth=0.8, marker='+')
ax2b.scatter(np.arange(N)[sorted_label_quality != None],
sorted_label_quality[sorted_label_quality != None], s=12, c='r', linewidth=0.8, marker='+')
ax3b.scatter(np.arange(N)[sorted_label_quality != None],
sorted_label_quality[sorted_label_quality != None], s=12, c='r', linewidth=0.8, marker='+')
line_label_quality = ax4b.scatter(np.arange(N)[sorted_label_quality != None],
sorted_label_quality[sorted_label_quality != None], s=12, c='r', linewidth=0.8, marker='+', label='Label quality')
ax1b.scatter(np.arange(N)[sorted_label_quality == None],
0.5 * np.ones(len(np.arange(N)[sorted_label_quality == None])), s=12, c='r', linewidth=0.8, marker='_')
ax2b.scatter(np.arange(N)[sorted_label_quality == None],
0.5 * np.ones(len(np.arange(N)[sorted_label_quality == None])), s=12, c='r', linewidth=0.8, marker='_')
ax3b.scatter(np.arange(N)[sorted_label_quality == None],
0.5 * np.ones(len(np.arange(N)[sorted_label_quality == None])), s=12, c='r', linewidth=0.8, marker='_')
ax4b.scatter(np.arange(N)[sorted_label_quality == None],
0.5 * np.ones(len(np.arange(N)[sorted_label_quality == None])), s=12, c='r', linewidth=0.8, marker='_')
plt.legend(handles=[line0te, line1te, line2te, line3te, line_label_quality], fontsize=6, loc=1)
    plt.tight_layout(pad=0, h_pad=0, w_pad=0)
plt.savefig(save_out_path)
print('Save fig to {}'.format(save_out_path))
def prepare_plot_long_4_rows(sorted_lbs):
N = len(sorted_lbs)
f,(ax1a, ax2a, ax3a, ax4a) = plt.subplots(4, 1, sharey=False, facecolor='w', figsize=(10, 10.5))
fontsize = 5
K = 132
ax1a.set_xlim(0, K)
ax2a.set_xlim(K, 2 * K)
ax3a.set_xlim(2 * K, 3 * K)
ax4a.set_xlim(3 * K, N)
truncated_sorted_lbs = []
for lb in sorted_lbs:
lb = lb[0 : 25]
words = lb.split(' ')
if len(words[-1]) < 3:
lb = ' '.join(words[0:-1])
truncated_sorted_lbs.append(lb)
ax1a.grid(which='major', axis='x', linestyle='-', alpha=0.3)
ax2a.grid(which='major', axis='x', linestyle='-', alpha=0.3)
ax3a.grid(which='major', axis='x', linestyle='-', alpha=0.3)
ax4a.grid(which='major', axis='x', linestyle='-', alpha=0.3)
ax1a.set_yscale('log')
ax2a.set_yscale('log')
ax3a.set_yscale('log')
ax4a.set_yscale('log')
ax1b = ax1a.twinx()
ax2b = ax2a.twinx()
ax3b = ax3a.twinx()
ax4b = ax4a.twinx()
ax1b.set_ylim(0., 1.)
ax2b.set_ylim(0., 1.)
ax3b.set_ylim(0., 1.)
ax4b.set_ylim(0., 1.)
ax1b.set_ylabel('Average precision')
ax2b.set_ylabel('Average precision')
ax3b.set_ylabel('Average precision')
ax4b.set_ylabel('Average precision')
ax1b.yaxis.grid(color='grey', linestyle='--', alpha=0.5)
ax2b.yaxis.grid(color='grey', linestyle='--', alpha=0.5)
ax3b.yaxis.grid(color='grey', linestyle='--', alpha=0.5)
ax4b.yaxis.grid(color='grey', linestyle='--', alpha=0.5)
ax1a.xaxis.set_ticks(np.arange(K))
ax1a.xaxis.set_ticklabels(truncated_sorted_lbs[0:K], rotation=90, fontsize=fontsize)
ax1a.xaxis.tick_bottom()
ax1a.set_ylabel("Number of audio clips")
ax2a.xaxis.set_ticks(np.arange(K, 2*K))
ax2a.xaxis.set_ticklabels(truncated_sorted_lbs[K:2*K], rotation=90, fontsize=fontsize)
ax2a.xaxis.tick_bottom()
ax2a.set_ylabel("Number of audio clips")
ax3a.xaxis.set_ticks(np.arange(2*K, 3*K))
ax3a.xaxis.set_ticklabels(truncated_sorted_lbs[2*K:3*K], rotation=90, fontsize=fontsize)
ax3a.xaxis.tick_bottom()
ax3a.set_ylabel("Number of audio clips")
ax4a.xaxis.set_ticks(np.arange(3*K, N))
ax4a.xaxis.set_ticklabels(truncated_sorted_lbs[3*K:], rotation=90, fontsize=fontsize)
ax4a.xaxis.tick_bottom()
ax4a.set_ylabel("Number of audio clips")
ax1a.spines['right'].set_visible(False)
ax1b.spines['right'].set_visible(False)
ax2a.spines['left'].set_visible(False)
ax2b.spines['left'].set_visible(False)
ax2a.spines['right'].set_visible(False)
ax2b.spines['right'].set_visible(False)
ax3a.spines['left'].set_visible(False)
ax3b.spines['left'].set_visible(False)
ax3a.spines['right'].set_visible(False)
ax3b.spines['right'].set_visible(False)
ax4a.spines['left'].set_visible(False)
ax4b.spines['left'].set_visible(False)
plt.subplots_adjust(hspace = 0.8)
return ax1a, ax2a, ax3a, ax4a, ax1b, ax2b, ax3b, ax4b
def _scatter_4_rows(x, ax, ax2, ax3, ax4, s, c, marker='.', alpha=1.):
N = len(x)
ax.scatter(np.arange(N), x, s=s, c=c, marker=marker, alpha=alpha)
ax2.scatter(np.arange(N), x, s=s, c=c, marker=marker, alpha=alpha)
ax3.scatter(np.arange(N), x, s=s, c=c, marker=marker, alpha=alpha)
ax4.scatter(np.arange(N), x, s=s, c=c, marker=marker, alpha=alpha)
def _plot_4_rows(x, ax, ax2, ax3, ax4, c, linewidth=1.0, alpha=1.0, label=""):
N = len(x)
ax.plot(x, c=c, linewidth=linewidth, alpha=alpha)
ax2.plot(x, c=c, linewidth=linewidth, alpha=alpha)
ax3.plot(x, c=c, linewidth=linewidth, alpha=alpha)
line, = ax4.plot(x, c=c, linewidth=linewidth, alpha=alpha, label=label)
return line
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
subparsers = parser.add_subparsers(dest='mode')
parser_classwise_iteration_map = subparsers.add_parser('plot_classwise_iteration_map')
parser_six_figures = subparsers.add_parser('plot_six_figures')
parser_complexity_map = subparsers.add_parser('plot_complexity_map')
parser_long_fig = subparsers.add_parser('plot_long_fig')
args = parser.parse_args()
if args.mode == 'plot_classwise_iteration_map':
plot_classwise_iteration_map(args)
elif args.mode == 'plot_six_figures':
plot_six_figures(args)
elif args.mode == 'plot_complexity_map':
plot_complexity_map(args)
elif args.mode == 'plot_long_fig':
plot_long_fig(args)
else:
raise Exception('Incorrect argument!') | EXA-1-master | exa/models/AudioGPT/audio_detection/audio_infer/utils/plot_for_paper.py |
import os
import logging
import h5py
import soundfile
import librosa
import numpy as np
import pandas as pd
from scipy import stats
import datetime
import pickle
def create_folder(fd):
if not os.path.exists(fd):
os.makedirs(fd)
def get_filename(path):
path = os.path.realpath(path)
na_ext = path.split('/')[-1]
na = os.path.splitext(na_ext)[0]
return na
def get_sub_filepaths(folder):
paths = []
for root, dirs, files in os.walk(folder):
for name in files:
path = os.path.join(root, name)
paths.append(path)
return paths
def create_logging(log_dir, filemode):
create_folder(log_dir)
i1 = 0
while os.path.isfile(os.path.join(log_dir, '{:04d}.log'.format(i1))):
i1 += 1
log_path = os.path.join(log_dir, '{:04d}.log'.format(i1))
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename=log_path,
filemode=filemode)
# Print to console
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
return logging
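# Typical call (the directory is illustrative): create_logging('workspaces/logs', 'w')
# writes sequentially numbered 0000.log, 0001.log, ... files and mirrors
# INFO-level messages to the console.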
def read_metadata(csv_path, classes_num, id_to_ix):
"""Read metadata of AudioSet from a csv file.
Args:
csv_path: str
Returns:
meta_dict: {'audio_name': (audios_num,), 'target': (audios_num, classes_num)}
"""
with open(csv_path, 'r') as fr:
lines = fr.readlines()
lines = lines[3:] # Remove heads
audios_num = len(lines)
    targets = np.zeros((audios_num, classes_num), dtype=bool)
audio_names = []
for n, line in enumerate(lines):
items = line.split(', ')
"""items: ['--4gqARaEJE', '0.000', '10.000', '"/m/068hy,/m/07q6cd_,/m/0bt9lr,/m/0jbk"\n']"""
        audio_name = 'Y{}.wav'.format(items[0])  # Downloaded audio files are prefixed with an extra 'Y'
label_ids = items[3].split('"')[1].split(',')
audio_names.append(audio_name)
# Target
for id in label_ids:
ix = id_to_ix[id]
targets[n, ix] = 1
meta_dict = {'audio_name': np.array(audio_names), 'target': targets}
return meta_dict
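# Usage sketch (the csv path and id_to_ix mapping come from the downloaded
# AudioSet metadata and the config module in the full pipeline):
#   meta = read_metadata('metadata/eval_segments.csv', classes_num, id_to_ix)
#   meta['audio_name']  # (audios_num,) array of 'Y....wav' names
#   meta['target']      # (audios_num, classes_num) multi-hot matrix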
def float32_to_int16(x):
assert np.max(np.abs(x)) <= 1.2
x = np.clip(x, -1, 1)
return (x * 32767.).astype(np.int16)
def int16_to_float32(x):
return (x / 32767.).astype(np.float32)
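# The two converters form a near-lossless round trip for waveforms in [-1, 1]:
# the quantisation error is at most 1/32767, so
#   np.allclose(int16_to_float32(float32_to_int16(x)), x, atol=1e-4)
# holds for any float32 signal already within [-1, 1].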
def pad_or_truncate(x, audio_length):
"""Pad all audio to specific length."""
if len(x) <= audio_length:
return np.concatenate((x, np.zeros(audio_length - len(x))), axis=0)
else:
return x[0 : audio_length]
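# Example: with 10-second clips at 32 kHz (audio_length = 320000), shorter
# recordings are zero-padded on the right and longer ones truncated, so the
# returned array always has exactly audio_length samples.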
def d_prime(auc):
d_prime = stats.norm().ppf(auc) * np.sqrt(2.0)
return d_prime
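def _example_d_prime():
    # Worked example: an AUC of 0.9 corresponds to
    # d' = norm.ppf(0.9) * sqrt(2) ~= 1.2816 * 1.4142 ~= 1.81.
    return d_prime(0.9)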
class Mixup(object):
def __init__(self, mixup_alpha, random_seed=1234):
"""Mixup coefficient generator.
"""
self.mixup_alpha = mixup_alpha
self.random_state = np.random.RandomState(random_seed)
def get_lambda(self, batch_size):
"""Get mixup random coefficients.
Args:
batch_size: int
Returns:
mixup_lambdas: (batch_size,)
"""
mixup_lambdas = []
for n in range(0, batch_size, 2):
lam = self.random_state.beta(self.mixup_alpha, self.mixup_alpha, 1)[0]
mixup_lambdas.append(lam)
mixup_lambdas.append(1. - lam)
return np.array(mixup_lambdas)
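def _example_mixup_lambdas():
    # Minimal sketch: coefficients come in (lam, 1 - lam) pairs, one pair per two
    # consecutive batch entries, so a batch size of 4 yields 4 values summing to 2.
    mixup_augmenter = Mixup(mixup_alpha=1.)
    return mixup_augmenter.get_lambda(batch_size=4)  # shape (4,)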
class StatisticsContainer(object):
def __init__(self, statistics_path):
"""Contain statistics of different training iterations.
"""
self.statistics_path = statistics_path
self.backup_statistics_path = '{}_{}.pkl'.format(
os.path.splitext(self.statistics_path)[0],
datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
self.statistics_dict = {'bal': [], 'test': []}
def append(self, iteration, statistics, data_type):
statistics['iteration'] = iteration
self.statistics_dict[data_type].append(statistics)
def dump(self):
pickle.dump(self.statistics_dict, open(self.statistics_path, 'wb'))
pickle.dump(self.statistics_dict, open(self.backup_statistics_path, 'wb'))
logging.info(' Dump statistics to {}'.format(self.statistics_path))
logging.info(' Dump statistics to {}'.format(self.backup_statistics_path))
def load_state_dict(self, resume_iteration):
self.statistics_dict = pickle.load(open(self.statistics_path, 'rb'))
resume_statistics_dict = {'bal': [], 'test': []}
for key in self.statistics_dict.keys():
for statistics in self.statistics_dict[key]:
if statistics['iteration'] <= resume_iteration:
resume_statistics_dict[key].append(statistics)
self.statistics_dict = resume_statistics_dict | EXA-1-master | exa/models/AudioGPT/audio_detection/audio_infer/utils/utilities.py |
import numpy as np
import h5py
import csv
import time
import logging
from utilities import int16_to_float32
def read_black_list(black_list_csv):
"""Read audio names from black list.
"""
with open(black_list_csv, 'r') as fr:
reader = csv.reader(fr)
lines = list(reader)
black_list_names = ['Y{}.wav'.format(line[0]) for line in lines]
return black_list_names
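# Usage sketch (the csv path is a placeholder): each row's first column is an
# AudioSet clip id, so the returned names look like 'Y--4gqARaEJE.wav' and are
# skipped by the samplers below.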
class AudioSetDataset(object):
def __init__(self, sample_rate=32000):
"""This class takes the meta of an audio clip as input, and return
the waveform and target of the audio clip. This class is used by DataLoader.
"""
self.sample_rate = sample_rate
def __getitem__(self, meta):
"""Load waveform and target of an audio clip.
Args:
meta: {
'hdf5_path': str,
'index_in_hdf5': int}
Returns:
data_dict: {
'audio_name': str,
'waveform': (clip_samples,),
'target': (classes_num,)}
"""
hdf5_path = meta['hdf5_path']
index_in_hdf5 = meta['index_in_hdf5']
with h5py.File(hdf5_path, 'r') as hf:
audio_name = hf['audio_name'][index_in_hdf5].decode()
waveform = int16_to_float32(hf['waveform'][index_in_hdf5])
waveform = self.resample(waveform)
target = hf['target'][index_in_hdf5].astype(np.float32)
data_dict = {
'audio_name': audio_name, 'waveform': waveform, 'target': target}
return data_dict
def resample(self, waveform):
"""Resample.
Args:
waveform: (clip_samples,)
Returns:
(resampled_clip_samples,)
"""
if self.sample_rate == 32000:
return waveform
elif self.sample_rate == 16000:
return waveform[0 :: 2]
elif self.sample_rate == 8000:
return waveform[0 :: 4]
else:
raise Exception('Incorrect sample rate!')
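# Sketch of a single lookup (the hdf5 path is a placeholder): the samplers below
# hand __getitem__ a meta dict pointing into a packed hdf5 file, e.g.
#   dataset = AudioSetDataset(sample_rate=16000)
#   data = dataset[{'hdf5_path': 'hdf5s/waveforms/eval.h5', 'index_in_hdf5': 0}]
#   data['waveform']  # waveform decimated to 16 kHz; data['target'] is multi-hot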
class Base(object):
def __init__(self, indexes_hdf5_path, batch_size, black_list_csv, random_seed):
"""Base class of train sampler.
Args:
indexes_hdf5_path: string
batch_size: int
black_list_csv: string
random_seed: int
"""
self.batch_size = batch_size
self.random_state = np.random.RandomState(random_seed)
# Black list
if black_list_csv:
self.black_list_names = read_black_list(black_list_csv)
else:
self.black_list_names = []
logging.info('Black list samples: {}'.format(len(self.black_list_names)))
# Load target
load_time = time.time()
with h5py.File(indexes_hdf5_path, 'r') as hf:
self.audio_names = [audio_name.decode() for audio_name in hf['audio_name'][:]]
self.hdf5_paths = [hdf5_path.decode() for hdf5_path in hf['hdf5_path'][:]]
self.indexes_in_hdf5 = hf['index_in_hdf5'][:]
self.targets = hf['target'][:].astype(np.float32)
(self.audios_num, self.classes_num) = self.targets.shape
logging.info('Training number: {}'.format(self.audios_num))
logging.info('Load target time: {:.3f} s'.format(time.time() - load_time))
class TrainSampler(Base):
def __init__(self, indexes_hdf5_path, batch_size, black_list_csv=None,
random_seed=1234):
"""Balanced sampler. Generate batch meta for training.
Args:
indexes_hdf5_path: string
batch_size: int
black_list_csv: string
random_seed: int
"""
super(TrainSampler, self).__init__(indexes_hdf5_path, batch_size,
black_list_csv, random_seed)
self.indexes = np.arange(self.audios_num)
# Shuffle indexes
self.random_state.shuffle(self.indexes)
self.pointer = 0
def __iter__(self):
"""Generate batch meta for training.
Returns:
batch_meta: e.g.: [
{'hdf5_path': string, 'index_in_hdf5': int},
...]
"""
batch_size = self.batch_size
while True:
batch_meta = []
i = 0
while i < batch_size:
index = self.indexes[self.pointer]
self.pointer += 1
# Shuffle indexes and reset pointer
if self.pointer >= self.audios_num:
self.pointer = 0
self.random_state.shuffle(self.indexes)
# If audio in black list then continue
if self.audio_names[index] in self.black_list_names:
continue
else:
batch_meta.append({
'hdf5_path': self.hdf5_paths[index],
'index_in_hdf5': self.indexes_in_hdf5[index]})
i += 1
yield batch_meta
def state_dict(self):
state = {
'indexes': self.indexes,
'pointer': self.pointer}
return state
def load_state_dict(self, state):
self.indexes = state['indexes']
self.pointer = state['pointer']
class BalancedTrainSampler(Base):
def __init__(self, indexes_hdf5_path, batch_size, black_list_csv=None,
random_seed=1234):
"""Balanced sampler. Generate batch meta for training. Data are equally
sampled from different sound classes.
Args:
indexes_hdf5_path: string
batch_size: int
black_list_csv: string
random_seed: int
"""
super(BalancedTrainSampler, self).__init__(indexes_hdf5_path,
batch_size, black_list_csv, random_seed)
self.samples_num_per_class = np.sum(self.targets, axis=0)
logging.info('samples_num_per_class: {}'.format(
self.samples_num_per_class.astype(np.int32)))
# Training indexes of all sound classes. E.g.:
# [[0, 11, 12, ...], [3, 4, 15, 16, ...], [7, 8, ...], ...]
self.indexes_per_class = []
for k in range(self.classes_num):
self.indexes_per_class.append(
np.where(self.targets[:, k] == 1)[0])
# Shuffle indexes
for k in range(self.classes_num):
self.random_state.shuffle(self.indexes_per_class[k])
self.queue = []
self.pointers_of_classes = [0] * self.classes_num
def expand_queue(self, queue):
classes_set = np.arange(self.classes_num).tolist()
self.random_state.shuffle(classes_set)
queue += classes_set
return queue
def __iter__(self):
"""Generate batch meta for training.
Returns:
batch_meta: e.g.: [
{'hdf5_path': string, 'index_in_hdf5': int},
...]
"""
batch_size = self.batch_size
while True:
batch_meta = []
i = 0
while i < batch_size:
if len(self.queue) == 0:
self.queue = self.expand_queue(self.queue)
class_id = self.queue.pop(0)
pointer = self.pointers_of_classes[class_id]
self.pointers_of_classes[class_id] += 1
index = self.indexes_per_class[class_id][pointer]
# When finish one epoch of a sound class, then shuffle its indexes and reset pointer
if self.pointers_of_classes[class_id] >= self.samples_num_per_class[class_id]:
self.pointers_of_classes[class_id] = 0
self.random_state.shuffle(self.indexes_per_class[class_id])
# If audio in black list then continue
if self.audio_names[index] in self.black_list_names:
continue
else:
batch_meta.append({
'hdf5_path': self.hdf5_paths[index],
'index_in_hdf5': self.indexes_in_hdf5[index]})
i += 1
yield batch_meta
def state_dict(self):
state = {
'indexes_per_class': self.indexes_per_class,
'queue': self.queue,
'pointers_of_classes': self.pointers_of_classes}
return state
def load_state_dict(self, state):
self.indexes_per_class = state['indexes_per_class']
self.queue = state['queue']
self.pointers_of_classes = state['pointers_of_classes']
class AlternateTrainSampler(Base):
def __init__(self, indexes_hdf5_path, batch_size, black_list_csv=None,
random_seed=1234):
"""AlternateSampler is a combination of Sampler and Balanced Sampler.
AlternateSampler alternately sample data from Sampler and Blanced Sampler.
Args:
indexes_hdf5_path: string
batch_size: int
black_list_csv: string
random_seed: int
"""
self.sampler1 = TrainSampler(indexes_hdf5_path, batch_size,
black_list_csv, random_seed)
self.sampler2 = BalancedTrainSampler(indexes_hdf5_path, batch_size,
black_list_csv, random_seed)
self.batch_size = batch_size
self.count = 0
def __iter__(self):
"""Generate batch meta for training.
Returns:
batch_meta: e.g.: [
{'hdf5_path': string, 'index_in_hdf5': int},
...]
"""
batch_size = self.batch_size
while True:
self.count += 1
if self.count % 2 == 0:
batch_meta = []
i = 0
while i < batch_size:
index = self.sampler1.indexes[self.sampler1.pointer]
self.sampler1.pointer += 1
# Shuffle indexes and reset pointer
if self.sampler1.pointer >= self.sampler1.audios_num:
self.sampler1.pointer = 0
self.sampler1.random_state.shuffle(self.sampler1.indexes)
# If audio in black list then continue
if self.sampler1.audio_names[index] in self.sampler1.black_list_names:
continue
else:
batch_meta.append({
'hdf5_path': self.sampler1.hdf5_paths[index],
'index_in_hdf5': self.sampler1.indexes_in_hdf5[index]})
i += 1
elif self.count % 2 == 1:
batch_meta = []
i = 0
while i < batch_size:
if len(self.sampler2.queue) == 0:
self.sampler2.queue = self.sampler2.expand_queue(self.sampler2.queue)
class_id = self.sampler2.queue.pop(0)
pointer = self.sampler2.pointers_of_classes[class_id]
self.sampler2.pointers_of_classes[class_id] += 1
index = self.sampler2.indexes_per_class[class_id][pointer]
# When finish one epoch of a sound class, then shuffle its indexes and reset pointer
if self.sampler2.pointers_of_classes[class_id] >= self.sampler2.samples_num_per_class[class_id]:
self.sampler2.pointers_of_classes[class_id] = 0
self.sampler2.random_state.shuffle(self.sampler2.indexes_per_class[class_id])
# If audio in black list then continue
if self.sampler2.audio_names[index] in self.sampler2.black_list_names:
continue
else:
batch_meta.append({
'hdf5_path': self.sampler2.hdf5_paths[index],
'index_in_hdf5': self.sampler2.indexes_in_hdf5[index]})
i += 1
yield batch_meta
def state_dict(self):
state = {
'sampler1': self.sampler1.state_dict(),
'sampler2': self.sampler2.state_dict()}
return state
def load_state_dict(self, state):
self.sampler1.load_state_dict(state['sampler1'])
self.sampler2.load_state_dict(state['sampler2'])
class EvaluateSampler(object):
def __init__(self, indexes_hdf5_path, batch_size):
"""Evaluate sampler. Generate batch meta for evaluation.
Args:
indexes_hdf5_path: string
batch_size: int
"""
self.batch_size = batch_size
with h5py.File(indexes_hdf5_path, 'r') as hf:
self.audio_names = [audio_name.decode() for audio_name in hf['audio_name'][:]]
self.hdf5_paths = [hdf5_path.decode() for hdf5_path in hf['hdf5_path'][:]]
self.indexes_in_hdf5 = hf['index_in_hdf5'][:]
self.targets = hf['target'][:].astype(np.float32)
self.audios_num = len(self.audio_names)
def __iter__(self):
"""Generate batch meta for training.
Returns:
batch_meta: e.g.: [
{'hdf5_path': string,
'index_in_hdf5': int}
...]
"""
batch_size = self.batch_size
pointer = 0
while pointer < self.audios_num:
batch_indexes = np.arange(pointer,
min(pointer + batch_size, self.audios_num))
batch_meta = []
for index in batch_indexes:
batch_meta.append({
'audio_name': self.audio_names[index],
'hdf5_path': self.hdf5_paths[index],
'index_in_hdf5': self.indexes_in_hdf5[index],
'target': self.targets[index]})
pointer += batch_size
yield batch_meta
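# Wiring sketch (mirrors how these pieces are expected to be combined by the
# training/evaluation scripts; num_workers is illustrative): the sampler yields
# lists of meta dicts, the dataset resolves each dict into waveform/target, and
# collate_fn below stacks them into numpy batches, e.g.
#   loader = torch.utils.data.DataLoader(dataset=AudioSetDataset(),
#       batch_sampler=EvaluateSampler(indexes_hdf5_path, batch_size=32),
#       collate_fn=collate_fn, num_workers=8, pin_memory=True)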
def collate_fn(list_data_dict):
"""Collate data.
Args:
list_data_dict, e.g., [{'audio_name': str, 'waveform': (clip_samples,), ...},
{'audio_name': str, 'waveform': (clip_samples,), ...},
...]
Returns:
np_data_dict, dict, e.g.,
{'audio_name': (batch_size,), 'waveform': (batch_size, clip_samples), ...}
"""
np_data_dict = {}
for key in list_data_dict[0].keys():
np_data_dict[key] = np.array([data_dict[key] for data_dict in list_data_dict])
return np_data_dict | EXA-1-master | exa/models/AudioGPT/audio_detection/audio_infer/utils/data_generator.py |
import numpy as np
import time
import torch
import torch.nn as nn
def move_data_to_device(x, device):
if 'float' in str(x.dtype):
x = torch.Tensor(x)
elif 'int' in str(x.dtype):
x = torch.LongTensor(x)
else:
return x
return x.to(device)
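# Example: numeric numpy batches are wrapped into torch tensors and moved to the
# model's device, while non-numeric entries (e.g. the audio_name strings) are
# returned unchanged:
#   batch_waveform = move_data_to_device(batch_data_dict['waveform'], device)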
def do_mixup(x, mixup_lambda):
"""Mixup x of even indexes (0, 2, 4, ...) with x of odd indexes
(1, 3, 5, ...).
Args:
x: (batch_size * 2, ...)
mixup_lambda: (batch_size * 2,)
Returns:
out: (batch_size, ...)
"""
out = (x[0 :: 2].transpose(0, -1) * mixup_lambda[0 :: 2] + \
x[1 :: 2].transpose(0, -1) * mixup_lambda[1 :: 2]).transpose(0, -1)
return out
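def _example_do_mixup():
    # Minimal sketch: four inputs are mixed pairwise into two outputs, i.e.
    # out[0] = 0.7 * x[0] + 0.3 * x[1] and out[1] = 0.4 * x[2] + 0.6 * x[3].
    x = torch.arange(4.).reshape(4, 1)
    mixup_lambda = torch.tensor([0.7, 0.3, 0.4, 0.6])
    return do_mixup(x, mixup_lambda)  # tensor([[0.3000], [2.6000]])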
def append_to_dict(dict, key, value):
if key in dict.keys():
dict[key].append(value)
else:
dict[key] = [value]
def forward(model, generator, return_input=False,
return_target=False):
"""Forward data to a model.
Args:
model: object
generator: object
return_input: bool
return_target: bool
Returns:
audio_name: (audios_num,)
clipwise_output: (audios_num, classes_num)
      (if present) segmentwise_output: (audios_num, segments_num, classes_num)
      (if present) framewise_output: (audios_num, frames_num, classes_num)
(optional) return_input: (audios_num, segment_samples)
(optional) return_target: (audios_num, classes_num)
"""
output_dict = {}
device = next(model.parameters()).device
time1 = time.time()
# Forward data to a model in mini-batches
for n, batch_data_dict in enumerate(generator):
print(n)
batch_waveform = move_data_to_device(batch_data_dict['waveform'], device)
with torch.no_grad():
model.eval()
batch_output = model(batch_waveform)
append_to_dict(output_dict, 'audio_name', batch_data_dict['audio_name'])
append_to_dict(output_dict, 'clipwise_output',
batch_output['clipwise_output'].data.cpu().numpy())
if 'segmentwise_output' in batch_output.keys():
append_to_dict(output_dict, 'segmentwise_output',
batch_output['segmentwise_output'].data.cpu().numpy())
if 'framewise_output' in batch_output.keys():
append_to_dict(output_dict, 'framewise_output',
batch_output['framewise_output'].data.cpu().numpy())
if return_input:
append_to_dict(output_dict, 'waveform', batch_data_dict['waveform'])
if return_target:
if 'target' in batch_data_dict.keys():
append_to_dict(output_dict, 'target', batch_data_dict['target'])
if n % 10 == 0:
print(' --- Inference time: {:.3f} s / 10 iterations ---'.format(
time.time() - time1))
time1 = time.time()
for key in output_dict.keys():
output_dict[key] = np.concatenate(output_dict[key], axis=0)
return output_dict
def interpolate(x, ratio):
"""Interpolate data in time domain. This is used to compensate the
resolution reduction in downsampling of a CNN.
Args:
x: (batch_size, time_steps, classes_num)
ratio: int, ratio to interpolate
Returns:
upsampled: (batch_size, time_steps * ratio, classes_num)
"""
(batch_size, time_steps, classes_num) = x.shape
upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1)
upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num)
return upsampled
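def _example_interpolate():
    # Sketch: 31 coarse framewise predictions upsampled by the CNN's time
    # downsampling ratio of 32 give 31 * 32 = 992 frames; pad_framewise_output
    # below then pads back to the original frame count.
    x = torch.rand(2, 31, 527)
    return interpolate(x, 32).shape  # torch.Size([2, 992, 527])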
def pad_framewise_output(framewise_output, frames_num):
"""Pad framewise_output to the same length as input frames. The pad value
is the same as the value of the last frame.
Args:
framewise_output: (batch_size, frames_num, classes_num)
frames_num: int, number of frames to pad
Outputs:
output: (batch_size, frames_num, classes_num)
"""
pad = framewise_output[:, -1 :, :].repeat(1, frames_num - framewise_output.shape[1], 1)
"""tensor for padding"""
output = torch.cat((framewise_output, pad), dim=1)
"""(batch_size, frames_num, classes_num)"""
return output
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def count_flops(model, audio_length):
"""Count flops. Code modified from others' implementation.
"""
multiply_adds = True
list_conv2d=[]
def conv2d_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups) * (2 if multiply_adds else 1)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_height * output_width
list_conv2d.append(flops)
list_conv1d=[]
def conv1d_hook(self, input, output):
batch_size, input_channels, input_length = input[0].size()
output_channels, output_length = output[0].size()
kernel_ops = self.kernel_size[0] * (self.in_channels / self.groups) * (2 if multiply_adds else 1)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_length
list_conv1d.append(flops)
list_linear=[]
def linear_hook(self, input, output):
batch_size = input[0].size(0) if input[0].dim() == 2 else 1
weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
bias_ops = self.bias.nelement()
flops = batch_size * (weight_ops + bias_ops)
list_linear.append(flops)
list_bn=[]
def bn_hook(self, input, output):
list_bn.append(input[0].nelement() * 2)
list_relu=[]
def relu_hook(self, input, output):
list_relu.append(input[0].nelement() * 2)
list_pooling2d=[]
def pooling2d_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size * self.kernel_size
bias_ops = 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_height * output_width
list_pooling2d.append(flops)
list_pooling1d=[]
def pooling1d_hook(self, input, output):
batch_size, input_channels, input_length = input[0].size()
output_channels, output_length = output[0].size()
kernel_ops = self.kernel_size[0]
bias_ops = 0
params = output_channels * (kernel_ops + bias_ops)
flops = batch_size * params * output_length
        list_pooling1d.append(flops)
def foo(net):
childrens = list(net.children())
if not childrens:
if isinstance(net, nn.Conv2d):
net.register_forward_hook(conv2d_hook)
elif isinstance(net, nn.Conv1d):
net.register_forward_hook(conv1d_hook)
elif isinstance(net, nn.Linear):
net.register_forward_hook(linear_hook)
elif isinstance(net, nn.BatchNorm2d) or isinstance(net, nn.BatchNorm1d):
net.register_forward_hook(bn_hook)
elif isinstance(net, nn.ReLU):
net.register_forward_hook(relu_hook)
elif isinstance(net, nn.AvgPool2d) or isinstance(net, nn.MaxPool2d):
net.register_forward_hook(pooling2d_hook)
elif isinstance(net, nn.AvgPool1d) or isinstance(net, nn.MaxPool1d):
net.register_forward_hook(pooling1d_hook)
else:
print('Warning: flop of module {} is not counted!'.format(net))
return
for c in childrens:
foo(c)
# Register hook
foo(model)
    device = next(model.parameters()).device
input = torch.rand(1, audio_length).to(device)
out = model(input)
total_flops = sum(list_conv2d) + sum(list_conv1d) + sum(list_linear) + \
sum(list_bn) + sum(list_relu) + sum(list_pooling2d) + sum(list_pooling1d)
return total_flops | EXA-1-master | exa/models/AudioGPT/audio_detection/audio_infer/pytorch/pytorch_utils.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
from audio_infer.pytorch.pytorch_utils import do_mixup, interpolate, pad_framewise_output
import os
import sys
import math
import numpy as np
from torch.nn.parameter import Parameter
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
import warnings
from functools import partial
#from mmdet.models.builder import BACKBONES
from mmdet.utils import get_root_logger
from mmcv.runner import load_checkpoint
os.environ['TORCH_HOME'] = '../pretrained_models'
from copy import deepcopy
from timm.models.helpers import load_pretrained
from torch.cuda.amp import autocast
from collections import OrderedDict
import io
import re
from mmcv.runner import _load_checkpoint, load_state_dict
import mmcv.runner
import copy
import random
from einops import rearrange
from einops.layers.torch import Rearrange, Reduce
from torch import nn, einsum
def load_checkpoint(model,
filename,
map_location=None,
strict=False,
logger=None,
revise_keys=[(r'^module\.', '')]):
"""Load checkpoint from a file or URI.
Args:
model (Module): Module to load checkpoint.
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str): Same as :func:`torch.load`.
strict (bool): Whether to allow different params for the model and
checkpoint.
logger (:mod:`logging.Logger` or None): The logger for error message.
revise_keys (list): A list of customized keywords to modify the
state_dict in checkpoint. Each item is a (pattern, replacement)
pair of the regular expression operations. Default: strip
the prefix 'module.' by [(r'^module\\.', '')].
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
checkpoint = _load_checkpoint(filename, map_location, logger)
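    # Collapse the RGB patch-embedding kernel of the pretrained PVTv2 weights to a
    # single input channel (the log-mel spectrogram) by summing over the colour dim.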
new_proj = torch.nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(4, 4), padding=(2, 2))
new_proj.weight = torch.nn.Parameter(torch.sum(checkpoint['patch_embed1.proj.weight'], dim=1).unsqueeze(1))
checkpoint['patch_embed1.proj.weight'] = new_proj.weight
# OrderedDict is a subclass of dict
if not isinstance(checkpoint, dict):
raise RuntimeError(
f'No state_dict found in checkpoint file {filename}')
# get state_dict from checkpoint
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
# strip prefix of state_dict
metadata = getattr(state_dict, '_metadata', OrderedDict())
for p, r in revise_keys:
state_dict = OrderedDict(
{re.sub(p, r, k): v
for k, v in state_dict.items()})
state_dict = OrderedDict({k.replace('backbone.',''):v for k,v in state_dict.items()})
# Keep metadata in state_dict
state_dict._metadata = metadata
# load state_dict
load_state_dict(model, state_dict, strict, logger)
return checkpoint
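# Typical call (the exact call site lives in the PVTv2 backbone's weight
# initialisation; map_location and strict are illustrative choices):
#   load_checkpoint(backbone,
#       'https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b2.pth',
#       map_location='cpu', strict=False)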
def init_layer(layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
"""Initialize a Batchnorm layer. """
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.)
class TimeShift(nn.Module):
def __init__(self, mean, std):
super().__init__()
self.mean = mean
self.std = std
def forward(self, x):
if self.training:
shift = torch.empty(1).normal_(self.mean, self.std).int().item()
x = torch.roll(x, shift, dims=2)
return x
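# Sketch: TimeShift(0, 10) is applied only in training mode and rolls the
# spectrogram along the time axis (dim 2 of a (batch, 1, time, mel) tensor) by a
# random offset drawn from roughly N(0, 10) frames.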
class LinearSoftPool(nn.Module):
"""LinearSoftPool
Linear softmax, takes logits and returns a probability, near to the actual maximum value.
Taken from the paper:
A Comparison of Five Multiple Instance Learning Pooling Functions for Sound Event Detection with Weak Labeling
https://arxiv.org/abs/1810.09050
"""
def __init__(self, pooldim=1):
super().__init__()
self.pooldim = pooldim
def forward(self, logits, time_decision):
return (time_decision**2).sum(self.pooldim) / time_decision.sum(
self.pooldim)
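# LinearSoftPool computes p = sum_t y_t**2 / sum_t y_t over the pooling
# dimension, weighting each frame by its own probability; e.g. framewise
# probabilities [0.9, 0.1] pool to (0.81 + 0.01) / (0.9 + 0.1) = 0.82, closer to
# the maximum than the plain mean of 0.5.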
class PVT(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
super(PVT, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
self.time_shift = TimeShift(0, 10)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.pvt_transformer = PyramidVisionTransformerV2(tdim=1001,
fdim=64,
patch_size=7,
stride=4,
in_chans=1,
num_classes=classes_num,
embed_dims=[64, 128, 320, 512],
depths=[3, 4, 6, 3],
num_heads=[1, 2, 5, 8],
mlp_ratios=[8, 8, 4, 4],
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
drop_path_rate=0.1,
sr_ratios=[8, 4, 2, 1],
norm_layer=partial(nn.LayerNorm, eps=1e-6),
num_stages=4,
#pretrained='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b2.pth'
)
#self.temp_pool = LinearSoftPool()
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.fc_audioset = nn.Linear(512, classes_num, bias=True)
self.init_weights()
def init_weights(self):
init_bn(self.bn0)
init_layer(self.fc_audioset)
def forward(self, input, mixup_lambda=None):
"""Input: (batch_size, times_steps, freq_bins)"""
interpolate_ratio = 32
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
frames_num = x.shape[2]
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
x = self.time_shift(x)
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
#print(x.shape) #torch.Size([10, 1, 1001, 64])
x = self.pvt_transformer(x)
#print(x.shape) #torch.Size([10, 800, 128])
x = torch.mean(x, dim=3)
x = x.transpose(1, 2).contiguous()
framewise_output = torch.sigmoid(self.fc_audioset(x))
#clipwise_output = torch.mean(framewise_output, dim=1)
#clipwise_output = self.temp_pool(x, framewise_output).clamp(1e-7, 1.).squeeze(1)
x = framewise_output.transpose(1, 2).contiguous()
x = self.avgpool(x)
clipwise_output = torch.flatten(x, 1)
#print(framewise_output.shape) #torch.Size([10, 100, 17])
framewise_output = interpolate(framewise_output, interpolate_ratio)
#framewise_output = framewise_output[:,:1000,:]
#framewise_output = pad_framewise_output(framewise_output, frames_num)
output_dict = {'framewise_output': framewise_output,
'clipwise_output': clipwise_output}
return output_dict
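def _example_pvt_forward():
    # Construction sketch only: hyper-parameters follow the 32 kHz settings used
    # throughout this repo and classes_num = 527 is the AudioSet label set; the
    # forward pass assumes 10-second clips.
    model = PVT(sample_rate=32000, window_size=1024, hop_size=320, mel_bins=64,
                fmin=50, fmax=14000, classes_num=527)
    out = model(torch.randn(2, 32000 * 10))
    return out['clipwise_output'].shape, out['framewise_output'].shape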
class PVT2(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
super(PVT2, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
self.time_shift = TimeShift(0, 10)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.pvt_transformer = PyramidVisionTransformerV2(tdim=1001,
fdim=64,
patch_size=7,
stride=4,
in_chans=1,
num_classes=classes_num,
embed_dims=[64, 128, 320, 512],
depths=[3, 4, 6, 3],
num_heads=[1, 2, 5, 8],
mlp_ratios=[8, 8, 4, 4],
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
drop_path_rate=0.1,
sr_ratios=[8, 4, 2, 1],
norm_layer=partial(nn.LayerNorm, eps=1e-6),
num_stages=4,
pretrained='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b2.pth'
)
#self.temp_pool = LinearSoftPool()
self.fc_audioset = nn.Linear(512, classes_num, bias=True)
self.init_weights()
def init_weights(self):
init_bn(self.bn0)
init_layer(self.fc_audioset)
def forward(self, input, mixup_lambda=None):
"""Input: (batch_size, times_steps, freq_bins)"""
interpolate_ratio = 32
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
frames_num = x.shape[2]
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
#x = self.time_shift(x)
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
#print(x.shape) #torch.Size([10, 1, 1001, 64])
x = self.pvt_transformer(x)
#print(x.shape) #torch.Size([10, 800, 128])
x = torch.mean(x, dim=3)
x = x.transpose(1, 2).contiguous()
framewise_output = torch.sigmoid(self.fc_audioset(x))
clipwise_output = torch.mean(framewise_output, dim=1)
#clipwise_output = self.temp_pool(x, framewise_output).clamp(1e-7, 1.).squeeze(1)
#print(framewise_output.shape) #torch.Size([10, 100, 17])
framewise_output = interpolate(framewise_output, interpolate_ratio)
#framewise_output = framewise_output[:,:1000,:]
#framewise_output = pad_framewise_output(framewise_output, frames_num)
output_dict = {'framewise_output': framewise_output,
'clipwise_output': clipwise_output}
return output_dict
class PVT_2layer(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
super(PVT_2layer, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
self.time_shift = TimeShift(0, 10)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.pvt_transformer = PyramidVisionTransformerV2(tdim=1001,
fdim=64,
patch_size=7,
stride=4,
in_chans=1,
num_classes=classes_num,
embed_dims=[64, 128],
depths=[3, 4],
num_heads=[1, 2],
mlp_ratios=[8, 8],
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
drop_path_rate=0.1,
sr_ratios=[8, 4],
norm_layer=partial(nn.LayerNorm, eps=1e-6),
num_stages=2,
pretrained='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b2.pth'
)
#self.temp_pool = LinearSoftPool()
self.avgpool = nn.AdaptiveAvgPool1d(1)
self.fc_audioset = nn.Linear(128, classes_num, bias=True)
self.init_weights()
def init_weights(self):
init_bn(self.bn0)
init_layer(self.fc_audioset)
def forward(self, input, mixup_lambda=None):
"""Input: (batch_size, times_steps, freq_bins)"""
interpolate_ratio = 8
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
frames_num = x.shape[2]
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
x = self.time_shift(x)
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
#print(x.shape) #torch.Size([10, 1, 1001, 64])
x = self.pvt_transformer(x)
#print(x.shape) #torch.Size([10, 800, 128])
x = torch.mean(x, dim=3)
x = x.transpose(1, 2).contiguous()
framewise_output = torch.sigmoid(self.fc_audioset(x))
#clipwise_output = torch.mean(framewise_output, dim=1)
#clipwise_output = self.temp_pool(x, framewise_output).clamp(1e-7, 1.).squeeze(1)
x = framewise_output.transpose(1, 2).contiguous()
x = self.avgpool(x)
clipwise_output = torch.flatten(x, 1)
#print(framewise_output.shape) #torch.Size([10, 100, 17])
framewise_output = interpolate(framewise_output, interpolate_ratio)
#framewise_output = framewise_output[:,:1000,:]
#framewise_output = pad_framewise_output(framewise_output, frames_num)
output_dict = {'framewise_output': framewise_output,
'clipwise_output': clipwise_output}
return output_dict
class PVT_lr(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
super(PVT_lr, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
self.time_shift = TimeShift(0, 10)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.pvt_transformer = PyramidVisionTransformerV2(tdim=1001,
fdim=64,
patch_size=7,
stride=4,
in_chans=1,
num_classes=classes_num,
embed_dims=[64, 128, 320, 512],
depths=[3, 4, 6, 3],
num_heads=[1, 2, 5, 8],
mlp_ratios=[8, 8, 4, 4],
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
drop_path_rate=0.1,
sr_ratios=[8, 4, 2, 1],
norm_layer=partial(nn.LayerNorm, eps=1e-6),
num_stages=4,
pretrained='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b2.pth'
)
self.temp_pool = LinearSoftPool()
self.fc_audioset = nn.Linear(512, classes_num, bias=True)
self.init_weights()
def init_weights(self):
init_bn(self.bn0)
init_layer(self.fc_audioset)
def forward(self, input, mixup_lambda=None):
"""Input: (batch_size, times_steps, freq_bins)"""
interpolate_ratio = 32
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
frames_num = x.shape[2]
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
x = self.time_shift(x)
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
#print(x.shape) #torch.Size([10, 1, 1001, 64])
x = self.pvt_transformer(x)
#print(x.shape) #torch.Size([10, 800, 128])
x = torch.mean(x, dim=3)
x = x.transpose(1, 2).contiguous()
framewise_output = torch.sigmoid(self.fc_audioset(x))
clipwise_output = self.temp_pool(x, framewise_output).clamp(1e-7, 1.).squeeze(1)
#print(framewise_output.shape) #torch.Size([10, 100, 17])
framewise_output = interpolate(framewise_output, interpolate_ratio)
#framewise_output = framewise_output[:,:1000,:]
#framewise_output = pad_framewise_output(framewise_output, frames_num)
output_dict = {'framewise_output': framewise_output,
'clipwise_output': clipwise_output}
return output_dict
class PVT_nopretrain(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
super(PVT_nopretrain, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
self.time_shift = TimeShift(0, 10)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.pvt_transformer = PyramidVisionTransformerV2(tdim=1001,
fdim=64,
patch_size=7,
stride=4,
in_chans=1,
num_classes=classes_num,
embed_dims=[64, 128, 320, 512],
depths=[3, 4, 6, 3],
num_heads=[1, 2, 5, 8],
mlp_ratios=[8, 8, 4, 4],
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
drop_path_rate=0.1,
sr_ratios=[8, 4, 2, 1],
norm_layer=partial(nn.LayerNorm, eps=1e-6),
num_stages=4,
#pretrained='https://github.com/whai362/PVT/releases/download/v2/pvt_v2_b2.pth'
)
self.temp_pool = LinearSoftPool()
self.fc_audioset = nn.Linear(512, classes_num, bias=True)
self.init_weights()
def init_weights(self):
init_bn(self.bn0)
init_layer(self.fc_audioset)
def forward(self, input, mixup_lambda=None):
"""Input: (batch_size, times_steps, freq_bins)"""
interpolate_ratio = 32
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
frames_num = x.shape[2]
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
x = self.time_shift(x)
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
#print(x.shape) #torch.Size([10, 1, 1001, 64])
x = self.pvt_transformer(x)
#print(x.shape) #torch.Size([10, 800, 128])
x = torch.mean(x, dim=3)
x = x.transpose(1, 2).contiguous()
framewise_output = torch.sigmoid(self.fc_audioset(x))
clipwise_output = self.temp_pool(x, framewise_output).clamp(1e-7, 1.).squeeze(1)
#print(framewise_output.shape) #torch.Size([10, 100, 17])
framewise_output = interpolate(framewise_output, interpolate_ratio)
framewise_output = framewise_output[:,:1000,:]
#framewise_output = pad_framewise_output(framewise_output, frames_num)
output_dict = {'framewise_output': framewise_output,
'clipwise_output': clipwise_output}
return output_dict
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0., linear=False):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.dwconv = DWConv(hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
self.linear = linear
if self.linear:
self.relu = nn.ReLU()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
x = self.fc1(x)
if self.linear:
x = self.relu(x)
x = self.dwconv(x, H, W)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1, linear=False):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = qk_scale or head_dim ** -0.5
self.q = nn.Linear(dim, dim, bias=qkv_bias)
self.kv = nn.Linear(dim, dim * 2, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
self.linear = linear
self.sr_ratio = sr_ratio
if not linear:
if sr_ratio > 1:
self.sr = nn.Conv2d(dim, dim, kernel_size=sr_ratio, stride=sr_ratio)
self.norm = nn.LayerNorm(dim)
else:
self.pool = nn.AdaptiveAvgPool2d(7)
self.sr = nn.Conv2d(dim, dim, kernel_size=1, stride=1)
self.norm = nn.LayerNorm(dim)
self.act = nn.GELU()
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
B, N, C = x.shape
q = self.q(x).reshape(B, N, self.num_heads, C // self.num_heads).permute(0, 2, 1, 3)
if not self.linear:
if self.sr_ratio > 1:
x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
x_ = self.sr(x_).reshape(B, C, -1).permute(0, 2, 1)
x_ = self.norm(x_)
kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
else:
kv = self.kv(x).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
else:
x_ = x.permute(0, 2, 1).reshape(B, C, H, W)
x_ = self.sr(self.pool(x_)).reshape(B, C, -1).permute(0, 2, 1)
x_ = self.norm(x_)
x_ = self.act(x_)
kv = self.kv(x_).reshape(B, -1, 2, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
k, v = kv[0], kv[1]
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
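# Illustrative sketch only (hypothetical helper name): when sr_ratio > 1 and
# linear=False, the Attention module above computes keys/values on a spatially
# reduced copy of the tokens, shrinking attention cost from O(N^2) towards
# O(N * N / sr_ratio^2).
def _sra_kv_length_demo(H=250, W=16, sr_ratio=8):
    """Number of query tokens vs. key/value tokens after spatial reduction."""
    num_queries = H * W
    num_kv = (H // sr_ratio) * (W // sr_ratio)
    return num_queries, num_kv  # e.g. (4000, 62) for the stage-1 audio settings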
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(
pool_size, stride=1, padding=pool_size//2, count_include_pad=False)
def forward(self, x):
return self.pool(x) - x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., act_layer=nn.GELU, norm_layer=nn.LayerNorm, sr_ratio=1, linear=False):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio, linear=linear)
#self.norm3 = norm_layer(dim)
#self.token_mixer = Pooling(pool_size=3)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop, linear=linear)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x, H, W):
x = x + self.drop_path(self.attn(self.norm1(x), H, W))
x = x + self.drop_path(self.mlp(self.norm2(x), H, W))
return x
class OverlapPatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, tdim, fdim, patch_size=7, stride=4, in_chans=3, embed_dim=768):
super().__init__()
img_size = (tdim, fdim)
patch_size = to_2tuple(patch_size)
self.img_size = img_size
self.patch_size = patch_size
self.H, self.W = img_size[0] // stride, img_size[1] // stride
self.num_patches = self.H * self.W
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=stride,
padding=(patch_size[0] // 3, patch_size[1] // 3))
self.norm = nn.LayerNorm(embed_dim)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
x = self.proj(x)
_, _, H, W = x.shape
x = x.flatten(2).transpose(1, 2)
x = self.norm(x)
return x, H, W
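# Illustrative sketch only (hypothetical helper name): expected token-grid size
# produced by OverlapPatchEmbed for the audio inputs used above, assuming a
# (batch, 1, 1001, 64) log-mel input, patch_size=7, stride=4 and the padding of
# patch_size // 3 = 2 computed in __init__.
def _patch_grid_demo(tdim=1001, fdim=64, kernel=7, stride=4, pad=2):
    H = (tdim + 2 * pad - kernel) // stride + 1  # time frames after the conv
    W = (fdim + 2 * pad - kernel) // stride + 1  # mel bins after the conv
    return H, W  # -> (250, 16) for the defaults above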
class PyramidVisionTransformerV2(nn.Module):
def __init__(self, tdim=1001, fdim=64, patch_size=16, stride=4, in_chans=3, num_classes=1000, embed_dims=[64, 128, 256, 512],
num_heads=[1, 2, 4, 8], mlp_ratios=[4, 4, 4, 4], qkv_bias=False, qk_scale=None, drop_rate=0.,
attn_drop_rate=0., drop_path_rate=0.1, norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=[3, 4, 6, 3],
sr_ratios=[8, 4, 2, 1], num_stages=2, linear=False, pretrained=None):
super().__init__()
# self.num_classes = num_classes
self.depths = depths
self.num_stages = num_stages
self.linear = linear
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule
cur = 0
for i in range(num_stages):
patch_embed = OverlapPatchEmbed(tdim=tdim if i == 0 else tdim // (2 ** (i + 1)),
fdim=fdim if i == 0 else fdim // (2 ** (i + 1)),
patch_size=7 if i == 0 else 3,
stride=stride if i == 0 else 2,
in_chans=in_chans if i == 0 else embed_dims[i - 1],
embed_dim=embed_dims[i])
block = nn.ModuleList([Block(
dim=embed_dims[i], num_heads=num_heads[i], mlp_ratio=mlp_ratios[i], qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[cur + j], norm_layer=norm_layer,
sr_ratio=sr_ratios[i], linear=linear)
for j in range(depths[i])])
norm = norm_layer(embed_dims[i])
cur += depths[i]
setattr(self, f"patch_embed{i + 1}", patch_embed)
setattr(self, f"block{i + 1}", block)
setattr(self, f"norm{i + 1}", norm)
#self.n = nn.Linear(125, 250, bias=True)
# classification head
# self.head = nn.Linear(embed_dims[3], num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
self.init_weights(pretrained)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
fan_out = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
fan_out //= m.groups
m.weight.data.normal_(0, math.sqrt(2.0 / fan_out))
if m.bias is not None:
m.bias.data.zero_()
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, map_location='cpu', strict=False, logger=logger)
def freeze_patch_emb(self):
self.patch_embed1.requires_grad = False
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed1', 'pos_embed2', 'pos_embed3', 'pos_embed4', 'cls_token'} # has pos_embed may be better
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
B = x.shape[0]
for i in range(self.num_stages):
patch_embed = getattr(self, f"patch_embed{i + 1}")
block = getattr(self, f"block{i + 1}")
norm = getattr(self, f"norm{i + 1}")
x, H, W = patch_embed(x)
#print(x.shape)
for blk in block:
x = blk(x, H, W)
#print(x.shape)
x = norm(x)
#if i != self.num_stages - 1:
x = x.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()
#print(x.shape)
return x
def forward(self, x):
x = self.forward_features(x)
# x = self.head(x)
return x
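# Minimal usage sketch (hypothetical function, not called anywhere): run a
# 2-stage backbone on a dummy log-mel batch. Assumes the helper imports at the
# top of this file (trunc_normal_, DropPath, to_2tuple, torch, nn, partial) are
# available and skips pretrained weights.
def _pvt_backbone_demo():
    backbone = PyramidVisionTransformerV2(
        tdim=1001, fdim=64, patch_size=7, stride=4, in_chans=1,
        embed_dims=[64, 128], depths=[3, 4], num_heads=[1, 2],
        mlp_ratios=[8, 8], qkv_bias=True, sr_ratios=[8, 4],
        norm_layer=partial(nn.LayerNorm, eps=1e-6), num_stages=2, pretrained=None)
    feats = backbone(torch.randn(2, 1, 1001, 64))  # (batch, 1, time, mel)
    return feats.shape  # expected to be roughly (2, 128, 125, 8)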
class DWConv(nn.Module):
def __init__(self, dim=768):
super(DWConv, self).__init__()
self.dwconv = nn.Conv2d(dim, dim, 3, 1, 1, bias=True, groups=dim)
def forward(self, x, H, W):
B, N, C = x.shape
x = x.transpose(1, 2).view(B, C, H, W)
x = self.dwconv(x)
x = x.flatten(2).transpose(1, 2)
return x
def _conv_filter(state_dict, patch_size=16):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k:
v = v.reshape((v.shape[0], 3, patch_size, patch_size))
out_dict[k] = v
return out_dict
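# Illustrative sketch only (hypothetical helper name): _conv_filter turns a
# flattened linear patch-projection weight back into a Conv2d-shaped weight.
def _conv_filter_demo(embed_dim=768, patch_size=16):
    state_dict = {'patch_embed.proj.weight': torch.zeros(embed_dim, 3 * patch_size * patch_size)}
    converted = _conv_filter(state_dict, patch_size)
    return converted['patch_embed.proj.weight'].shape  # -> (embed_dim, 3, 16, 16)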
| EXA-1-master | exa/models/AudioGPT/audio_detection/audio_infer/pytorch/models.py |
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import librosa
import matplotlib.pyplot as plt
import torch
from utilities import create_folder, get_filename
from models import *
from pytorch_utils import move_data_to_device
import config
def audio_tagging(args):
"""Inference audio tagging result of an audio clip.
"""
# Arguments & parameters
sample_rate = args.sample_rate
window_size = args.window_size
hop_size = args.hop_size
mel_bins = args.mel_bins
fmin = args.fmin
fmax = args.fmax
model_type = args.model_type
checkpoint_path = args.checkpoint_path
audio_path = args.audio_path
device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')
classes_num = config.classes_num
labels = config.labels
# Model
Model = eval(model_type)
model = Model(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num)
checkpoint = torch.load(checkpoint_path, map_location=device)
model.load_state_dict(checkpoint['model'])
# Parallel
if 'cuda' in str(device):
model.to(device)
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
else:
print('Using CPU.')
# Load audio
(waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)
waveform = waveform[None, :] # (1, audio_length)
waveform = move_data_to_device(waveform, device)
# Forward
with torch.no_grad():
model.eval()
batch_output_dict = model(waveform, None)
clipwise_output = batch_output_dict['clipwise_output'].data.cpu().numpy()[0]
"""(classes_num,)"""
sorted_indexes = np.argsort(clipwise_output)[::-1]
# Print audio tagging top probabilities
for k in range(10):
print('{}: {:.3f}'.format(np.array(labels)[sorted_indexes[k]],
clipwise_output[sorted_indexes[k]]))
# Print embedding
if 'embedding' in batch_output_dict.keys():
embedding = batch_output_dict['embedding'].data.cpu().numpy()[0]
print('embedding: {}'.format(embedding.shape))
return clipwise_output, labels
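# Example invocation (hypothetical checkpoint/audio paths; the flags match the
# argparse definitions at the bottom of this file):
#   python inference.py audio_tagging --model_type=PVT \
#       --checkpoint_path=./checkpoints/pvt.pth --audio_path=./examples/dog.wav --cuda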
def sound_event_detection(args):
"""Inference sound event detection result of an audio clip.
"""
# Arguments & parameters
sample_rate = args.sample_rate
window_size = args.window_size
hop_size = args.hop_size
mel_bins = args.mel_bins
fmin = args.fmin
fmax = args.fmax
model_type = args.model_type
checkpoint_path = args.checkpoint_path
audio_path = args.audio_path
device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')
classes_num = config.classes_num
labels = config.labels
frames_per_second = sample_rate // hop_size
# Paths
fig_path = os.path.join('results', '{}.png'.format(get_filename(audio_path)))
create_folder(os.path.dirname(fig_path))
# Model
Model = eval(model_type)
model = Model(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num)
checkpoint = torch.load(checkpoint_path, map_location=device)
model.load_state_dict(checkpoint['model'])
# Parallel
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
if 'cuda' in str(device):
model.to(device)
# Load audio
(waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)
waveform = waveform[None, :] # (1, audio_length)
waveform = move_data_to_device(waveform, device)
# Forward
with torch.no_grad():
model.eval()
batch_output_dict = model(waveform, None)
framewise_output = batch_output_dict['framewise_output'].data.cpu().numpy()[0]
"""(time_steps, classes_num)"""
print('Sound event detection result (time_steps x classes_num): {}'.format(
framewise_output.shape))
sorted_indexes = np.argsort(np.max(framewise_output, axis=0))[::-1]
top_k = 10 # Show top results
top_result_mat = framewise_output[:, sorted_indexes[0 : top_k]]
"""(time_steps, top_k)"""
# Plot result
stft = librosa.core.stft(y=waveform[0].data.cpu().numpy(), n_fft=window_size,
hop_length=hop_size, window='hann', center=True)
frames_num = stft.shape[-1]
fig, axs = plt.subplots(2, 1, sharex=True, figsize=(10, 4))
axs[0].matshow(np.log(np.abs(stft)), origin='lower', aspect='auto', cmap='jet')
axs[0].set_ylabel('Frequency bins')
axs[0].set_title('Log spectrogram')
axs[1].matshow(top_result_mat.T, origin='upper', aspect='auto', cmap='jet', vmin=0, vmax=1)
axs[1].xaxis.set_ticks(np.arange(0, frames_num, frames_per_second))
axs[1].xaxis.set_ticklabels(np.arange(0, frames_num / frames_per_second))
axs[1].yaxis.set_ticks(np.arange(0, top_k))
axs[1].yaxis.set_ticklabels(np.array(labels)[sorted_indexes[0 : top_k]])
axs[1].yaxis.grid(color='k', linestyle='solid', linewidth=0.3, alpha=0.3)
axs[1].set_xlabel('Seconds')
axs[1].xaxis.set_ticks_position('bottom')
plt.tight_layout()
plt.savefig(fig_path)
print('Save sound event detection visualization to {}'.format(fig_path))
return framewise_output, labels
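# Example invocation (hypothetical paths); the visualization is saved to
# results/<audio_filename>.png:
#   python inference.py sound_event_detection --model_type=PVT_lr \
#       --checkpoint_path=./checkpoints/pvt_sed.pth --audio_path=./examples/street.wav --cuda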
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example of parser. ')
subparsers = parser.add_subparsers(dest='mode')
parser_at = subparsers.add_parser('audio_tagging')
parser_at.add_argument('--sample_rate', type=int, default=32000)
parser_at.add_argument('--window_size', type=int, default=1024)
parser_at.add_argument('--hop_size', type=int, default=320)
parser_at.add_argument('--mel_bins', type=int, default=64)
parser_at.add_argument('--fmin', type=int, default=50)
parser_at.add_argument('--fmax', type=int, default=14000)
parser_at.add_argument('--model_type', type=str, required=True)
parser_at.add_argument('--checkpoint_path', type=str, required=True)
parser_at.add_argument('--audio_path', type=str, required=True)
parser_at.add_argument('--cuda', action='store_true', default=False)
parser_sed = subparsers.add_parser('sound_event_detection')
parser_sed.add_argument('--sample_rate', type=int, default=32000)
parser_sed.add_argument('--window_size', type=int, default=1024)
parser_sed.add_argument('--hop_size', type=int, default=320)
parser_sed.add_argument('--mel_bins', type=int, default=64)
parser_sed.add_argument('--fmin', type=int, default=50)
parser_sed.add_argument('--fmax', type=int, default=14000)
parser_sed.add_argument('--model_type', type=str, required=True)
parser_sed.add_argument('--checkpoint_path', type=str, required=True)
parser_sed.add_argument('--audio_path', type=str, required=True)
parser_sed.add_argument('--cuda', action='store_true', default=False)
args = parser.parse_args()
if args.mode == 'audio_tagging':
audio_tagging(args)
elif args.mode == 'sound_event_detection':
sound_event_detection(args)
else:
raise Exception('Error argument!') | EXA-1-master | exa/models/AudioGPT/audio_detection/audio_infer/pytorch/inference.py |
from sklearn import metrics
from pytorch_utils import forward
class Evaluator(object):
def __init__(self, model):
"""Evaluator.
Args:
model: object
"""
self.model = model
def evaluate(self, data_loader):
"""Forward evaluation data and calculate statistics.
Args:
data_loader: object
Returns:
statistics: dict,
{'average_precision': (classes_num,), 'auc': (classes_num,)}
"""
# Forward
output_dict = forward(
model=self.model,
generator=data_loader,
return_target=True)
clipwise_output = output_dict['clipwise_output'] # (audios_num, classes_num)
target = output_dict['target'] # (audios_num, classes_num)
average_precision = metrics.average_precision_score(
target, clipwise_output, average=None)
auc = metrics.roc_auc_score(target, clipwise_output, average=None)
statistics = {'average_precision': average_precision, 'auc': auc}
return statistics | EXA-1-master | exa/models/AudioGPT/audio_detection/audio_infer/pytorch/evaluate.py |
import torch
import torch.nn.functional as F
def clip_bce(output_dict, target_dict):
"""Binary crossentropy loss.
"""
return F.binary_cross_entropy(
output_dict['clipwise_output'], target_dict['target'])
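# Minimal sketch (hypothetical helper name): clip_bce expects clip-level sigmoid
# probabilities and multi-hot targets, both shaped (batch_size, classes_num).
def _clip_bce_demo(batch_size=4, classes_num=527):
    output_dict = {'clipwise_output': torch.sigmoid(torch.randn(batch_size, classes_num))}
    target_dict = {'target': torch.randint(0, 2, (batch_size, classes_num)).float()}
    return clip_bce(output_dict, target_dict)  # scalar BCE loss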
def get_loss_func(loss_type):
if loss_type == 'clip_bce':
return clip_bce | EXA-1-master | exa/models/AudioGPT/audio_detection/audio_infer/pytorch/losses.py |
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import h5py
import math
import time
import logging
import matplotlib.pyplot as plt
import torch
torch.backends.cudnn.benchmark=True
torch.manual_seed(0)
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from utilities import get_filename
from models import *
import config
class Transfer_Cnn14(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num, freeze_base):
"""Classifier for a new task using pretrained Cnn14 as a sub module.
"""
super(Transfer_Cnn14, self).__init__()
audioset_classes_num = 527
self.base = Cnn14(sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, audioset_classes_num)
# Transfer to another task layer
self.fc_transfer = nn.Linear(2048, classes_num, bias=True)
if freeze_base:
# Freeze AudioSet pretrained layers
for param in self.base.parameters():
param.requires_grad = False
self.init_weights()
def init_weights(self):
init_layer(self.fc_transfer)
def load_from_pretrain(self, pretrained_checkpoint_path):
checkpoint = torch.load(pretrained_checkpoint_path)
self.base.load_state_dict(checkpoint['model'])
def forward(self, input, mixup_lambda=None):
"""Input: (batch_size, data_length)
"""
output_dict = self.base(input, mixup_lambda)
embedding = output_dict['embedding']
clipwise_output = torch.log_softmax(self.fc_transfer(embedding), dim=-1)
output_dict['clipwise_output'] = clipwise_output
return output_dict
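# Minimal usage sketch (hypothetical checkpoint path): with freeze_base=True the
# AudioSet-pretrained Cnn14 stays frozen and only fc_transfer is trained.
#   model = Transfer_Cnn14(sample_rate=32000, window_size=1024, hop_size=320,
#                          mel_bins=64, fmin=50, fmax=14000,
#                          classes_num=10, freeze_base=True)
#   model.load_from_pretrain('Cnn14_mAP=0.431.pth')  # hypothetical path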
def train(args):
# Arguments & parameters
sample_rate = args.sample_rate
window_size = args.window_size
hop_size = args.hop_size
mel_bins = args.mel_bins
fmin = args.fmin
fmax = args.fmax
model_type = args.model_type
pretrained_checkpoint_path = args.pretrained_checkpoint_path
freeze_base = args.freeze_base
device = 'cuda' if (args.cuda and torch.cuda.is_available()) else 'cpu'
classes_num = config.classes_num
pretrain = True if pretrained_checkpoint_path else False
# Model
Model = eval(model_type)
model = Model(sample_rate, window_size, hop_size, mel_bins, fmin, fmax,
classes_num, freeze_base)
# Load pretrained model
if pretrain:
logging.info('Load pretrained model from {}'.format(pretrained_checkpoint_path))
model.load_from_pretrain(pretrained_checkpoint_path)
# Parallel
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
if 'cuda' in device:
model.to(device)
print('Load pretrained model successfully!')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example of parser. ')
subparsers = parser.add_subparsers(dest='mode')
# Train
parser_train = subparsers.add_parser('train')
parser_train.add_argument('--sample_rate', type=int, required=True)
parser_train.add_argument('--window_size', type=int, required=True)
parser_train.add_argument('--hop_size', type=int, required=True)
parser_train.add_argument('--mel_bins', type=int, required=True)
parser_train.add_argument('--fmin', type=int, required=True)
parser_train.add_argument('--fmax', type=int, required=True)
parser_train.add_argument('--model_type', type=str, required=True)
parser_train.add_argument('--pretrained_checkpoint_path', type=str)
parser_train.add_argument('--freeze_base', action='store_true', default=False)
parser_train.add_argument('--cuda', action='store_true', default=False)
# Parse arguments
args = parser.parse_args()
args.filename = get_filename(__file__)
if args.mode == 'train':
train(args)
else:
raise Exception('Error argument!') | EXA-1-master | exa/models/AudioGPT/audio_detection/audio_infer/pytorch/finetune_template.py |
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import time
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from utilities import (create_folder, get_filename, create_logging, Mixup,
StatisticsContainer)
from models import (PVT, PVT2, PVT_lr, PVT_nopretrain, PVT_2layer, Cnn14, Cnn14_no_specaug, Cnn14_no_dropout,
Cnn6, Cnn10, ResNet22, ResNet38, ResNet54, Cnn14_emb512, Cnn14_emb128,
Cnn14_emb32, MobileNetV1, MobileNetV2, LeeNet11, LeeNet24, DaiNet19,
Res1dNet31, Res1dNet51, Wavegram_Cnn14, Wavegram_Logmel_Cnn14,
Wavegram_Logmel128_Cnn14, Cnn14_16k, Cnn14_8k, Cnn14_mel32, Cnn14_mel128,
Cnn14_mixup_time_domain, Cnn14_DecisionLevelMax, Cnn14_DecisionLevelAtt, Cnn6_Transformer, GLAM, GLAM2, GLAM3, Cnn4, EAT)
#from models_test import (PVT_test)
#from models1 import (PVT1)
#from models_vig import (VIG, VIG2)
#from models_vvt import (VVT)
#from models2 import (MPVIT, MPVIT2)
#from models_reshape import (PVT_reshape, PVT_tscam)
#from models_swin import (Swin, Swin_nopretrain)
#from models_swin2 import (Swin2)
#from models_van import (Van, Van_tiny)
#from models_focal import (Focal)
#from models_cross import (Cross)
#from models_cov import (Cov)
#from models_cnn import (Cnn_light)
#from models_twins import (Twins)
#from models_cmt import (Cmt, Cmt1)
#from models_shunted import (Shunted)
#from models_quadtree import (Quadtree, Quadtree2, Quadtree_nopretrain)
#from models_davit import (Davit_tscam, Davit, Davit_nopretrain)
from pytorch_utils import (move_data_to_device, count_parameters, count_flops,
do_mixup)
from data_generator import (AudioSetDataset, TrainSampler, BalancedTrainSampler,
AlternateTrainSampler, EvaluateSampler, collate_fn)
from evaluate import Evaluator
import config
from losses import get_loss_func
def train(args):
"""Train AudioSet tagging model.
Args:
dataset_dir: str
workspace: str
data_type: 'balanced_train' | 'full_train'
window_size: int
hop_size: int
mel_bins: int
model_type: str
loss_type: 'clip_bce'
balanced: 'none' | 'balanced' | 'alternate'
augmentation: 'none' | 'mixup'
batch_size: int
learning_rate: float
resume_iteration: int
early_stop: int
accumulation_steps: int
cuda: bool
"""
# Arguments & parameters
workspace = args.workspace
data_type = args.data_type
sample_rate = args.sample_rate
window_size = args.window_size
hop_size = args.hop_size
mel_bins = args.mel_bins
fmin = args.fmin
fmax = args.fmax
model_type = args.model_type
loss_type = args.loss_type
balanced = args.balanced
augmentation = args.augmentation
batch_size = args.batch_size
learning_rate = args.learning_rate
resume_iteration = args.resume_iteration
early_stop = args.early_stop
device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')
filename = args.filename
num_workers = 8
clip_samples = config.clip_samples
classes_num = config.classes_num
loss_func = get_loss_func(loss_type)
# Paths
black_list_csv = None
train_indexes_hdf5_path = os.path.join(workspace, 'hdf5s', 'indexes',
'{}.h5'.format(data_type))
eval_bal_indexes_hdf5_path = os.path.join(workspace,
'hdf5s', 'indexes', 'balanced_train.h5')
eval_test_indexes_hdf5_path = os.path.join(workspace, 'hdf5s', 'indexes',
'eval.h5')
checkpoints_dir = os.path.join(workspace, 'checkpoints', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size))
create_folder(checkpoints_dir)
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
create_folder(os.path.dirname(statistics_path))
logs_dir = os.path.join(workspace, 'logs', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size))
create_logging(logs_dir, filemode='w')
logging.info(args)
if 'cuda' in str(device):
logging.info('Using GPU.')
device = 'cuda'
else:
logging.info('Using CPU. Set --cuda flag to use GPU.')
device = 'cpu'
# Model
Model = eval(model_type)
model = Model(sample_rate=sample_rate, window_size=window_size,
hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
classes_num=classes_num)
total = sum(p.numel() for p in model.parameters())
print("Total params: %.2fM" % (total/1e6))
logging.info("Total params: %.2fM" % (total/1e6))
#params_num = count_parameters(model)
# flops_num = count_flops(model, clip_samples)
#logging.info('Parameters num: {}'.format(params_num))
# logging.info('Flops num: {:.3f} G'.format(flops_num / 1e9))
# Dataset will be used by DataLoader later. Dataset takes a meta as input
# and returns a waveform and a target.
dataset = AudioSetDataset(sample_rate=sample_rate)
# Train sampler
if balanced == 'none':
Sampler = TrainSampler
elif balanced == 'balanced':
Sampler = BalancedTrainSampler
elif balanced == 'alternate':
Sampler = AlternateTrainSampler
train_sampler = Sampler(
indexes_hdf5_path=train_indexes_hdf5_path,
batch_size=batch_size * 2 if 'mixup' in augmentation else batch_size,
black_list_csv=black_list_csv)
# Evaluate sampler
eval_bal_sampler = EvaluateSampler(
indexes_hdf5_path=eval_bal_indexes_hdf5_path, batch_size=batch_size)
eval_test_sampler = EvaluateSampler(
indexes_hdf5_path=eval_test_indexes_hdf5_path, batch_size=batch_size)
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=train_sampler, collate_fn=collate_fn,
num_workers=num_workers, pin_memory=True)
eval_bal_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=eval_bal_sampler, collate_fn=collate_fn,
num_workers=num_workers, pin_memory=True)
eval_test_loader = torch.utils.data.DataLoader(dataset=dataset,
batch_sampler=eval_test_sampler, collate_fn=collate_fn,
num_workers=num_workers, pin_memory=True)
mix=0.5
if 'mixup' in augmentation:
mixup_augmenter = Mixup(mixup_alpha=mix)
print(mix)
logging.info(mix)
# Evaluator
evaluator = Evaluator(model=model)
# Statistics
statistics_container = StatisticsContainer(statistics_path)
# Optimizer
optimizer = optim.AdamW(model.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.05, amsgrad=True)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=4, min_lr=1e-06, verbose=True)
train_bgn_time = time.time()
# Resume training
if resume_iteration > 0:
resume_checkpoint_path = os.path.join(workspace, 'checkpoints', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'{}_iterations.pth'.format(resume_iteration))
logging.info('Loading checkpoint {}'.format(resume_checkpoint_path))
checkpoint = torch.load(resume_checkpoint_path)
model.load_state_dict(checkpoint['model'])
train_sampler.load_state_dict(checkpoint['sampler'])
statistics_container.load_state_dict(resume_iteration)
iteration = checkpoint['iteration']
else:
iteration = 0
# Parallel
print('GPU number: {}'.format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
if 'cuda' in str(device):
model.to(device)
if resume_iteration:
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
print(optimizer.state_dict()['param_groups'][0]['lr'])
time1 = time.time()
for batch_data_dict in train_loader:
"""batch_data_dict: {
'audio_name': (batch_size [*2 if mixup],),
'waveform': (batch_size [*2 if mixup], clip_samples),
'target': (batch_size [*2 if mixup], classes_num),
(if exists) 'mixup_lambda': (batch_size * 2,)}
"""
# Evaluate
if (iteration % 2000 == 0 and iteration >= resume_iteration) or (iteration == 0):
train_fin_time = time.time()
bal_statistics = evaluator.evaluate(eval_bal_loader)
test_statistics = evaluator.evaluate(eval_test_loader)
logging.info('Validate bal mAP: {:.3f}'.format(
np.mean(bal_statistics['average_precision'])))
logging.info('Validate test mAP: {:.3f}'.format(
np.mean(test_statistics['average_precision'])))
statistics_container.append(iteration, bal_statistics, data_type='bal')
statistics_container.append(iteration, test_statistics, data_type='test')
statistics_container.dump()
train_time = train_fin_time - train_bgn_time
validate_time = time.time() - train_fin_time
logging.info(
'iteration: {}, train time: {:.3f} s, validate time: {:.3f} s'
''.format(iteration, train_time, validate_time))
logging.info('------------------------------------')
train_bgn_time = time.time()
# Save model
if iteration % 2000 == 0:
checkpoint = {
'iteration': iteration,
'model': model.module.state_dict(),
'sampler': train_sampler.state_dict(),
'optimizer': optimizer.state_dict(),
'scheduler': scheduler.state_dict()}
checkpoint_path = os.path.join(
checkpoints_dir, '{}_iterations.pth'.format(iteration))
torch.save(checkpoint, checkpoint_path)
logging.info('Model saved to {}'.format(checkpoint_path))
# Mixup lambda
if 'mixup' in augmentation:
batch_data_dict['mixup_lambda'] = mixup_augmenter.get_lambda(
batch_size=len(batch_data_dict['waveform']))
# Move data to device
for key in batch_data_dict.keys():
batch_data_dict[key] = move_data_to_device(batch_data_dict[key], device)
# Forward
model.train()
if 'mixup' in augmentation:
batch_output_dict = model(batch_data_dict['waveform'],
batch_data_dict['mixup_lambda'])
"""{'clipwise_output': (batch_size, classes_num), ...}"""
batch_target_dict = {'target': do_mixup(batch_data_dict['target'],
batch_data_dict['mixup_lambda'])}
"""{'target': (batch_size, classes_num)}"""
else:
batch_output_dict = model(batch_data_dict['waveform'], None)
"""{'clipwise_output': (batch_size, classes_num), ...}"""
batch_target_dict = {'target': batch_data_dict['target']}
"""{'target': (batch_size, classes_num)}"""
# Loss
loss = loss_func(batch_output_dict, batch_target_dict)
# Backward
loss.backward()
optimizer.step()
optimizer.zero_grad()
if iteration % 10 == 0:
print(iteration, loss)
#print('--- Iteration: {}, train time: {:.3f} s / 10 iterations ---'\
# .format(iteration, time.time() - time1))
#time1 = time.time()
if iteration % 2000 == 0:
scheduler.step(np.mean(test_statistics['average_precision']))
print(optimizer.state_dict()['param_groups'][0]['lr'])
logging.info(optimizer.state_dict()['param_groups'][0]['lr'])
# Stop learning
if iteration == early_stop:
break
iteration += 1
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example of parser. ')
subparsers = parser.add_subparsers(dest='mode')
parser_train = subparsers.add_parser('train')
parser_train.add_argument('--workspace', type=str, required=True)
parser_train.add_argument('--data_type', type=str, default='full_train', choices=['balanced_train', 'full_train'])
parser_train.add_argument('--sample_rate', type=int, default=32000)
parser_train.add_argument('--window_size', type=int, default=1024)
parser_train.add_argument('--hop_size', type=int, default=320)
parser_train.add_argument('--mel_bins', type=int, default=64)
parser_train.add_argument('--fmin', type=int, default=50)
parser_train.add_argument('--fmax', type=int, default=14000)
parser_train.add_argument('--model_type', type=str, required=True)
parser_train.add_argument('--loss_type', type=str, default='clip_bce', choices=['clip_bce'])
parser_train.add_argument('--balanced', type=str, default='balanced', choices=['none', 'balanced', 'alternate'])
parser_train.add_argument('--augmentation', type=str, default='mixup', choices=['none', 'mixup'])
parser_train.add_argument('--batch_size', type=int, default=32)
parser_train.add_argument('--learning_rate', type=float, default=1e-3)
parser_train.add_argument('--resume_iteration', type=int, default=0)
parser_train.add_argument('--early_stop', type=int, default=1000000)
parser_train.add_argument('--cuda', action='store_true', default=False)
args = parser.parse_args()
args.filename = get_filename(__file__)
if args.mode == 'train':
train(args)
else:
raise Exception('Error argument!') | EXA-1-master | exa/models/AudioGPT/audio_detection/audio_infer/pytorch/main.py |
# !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/3/9 16:33
# @Author : dongchao yang
# @File : train.py
from itertools import zip_longest
import numpy as np
from scipy import ndimage
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
from torchlibrosa.augmentation import SpecAugmentation
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
import math
from sklearn.cluster import KMeans
import os
from functools import partial
# import timm
# from timm.models.layers import DropPath, to_2tuple, trunc_normal_
import warnings
# from timm.models.registry import register_model
# from timm.models.vision_transformer import _cfg
# from mmdet.utils import get_root_logger
# from mmcv.runner import load_checkpoint
# from mmcv.runner import _load_checkpoint, load_state_dict
# import mmcv.runner
import copy
from collections import OrderedDict
import io
import re
DEBUG=0
event_labels = ['Alarm', 'Alarm_clock', 'Animal', 'Applause', 'Arrow', 'Artillery_fire',
'Babbling', 'Baby_laughter', 'Bark', 'Basketball_bounce', 'Battle_cry',
'Bell', 'Bird', 'Bleat', 'Bouncing', 'Breathing', 'Buzz', 'Camera',
'Cap_gun', 'Car', 'Car_alarm', 'Cat', 'Caw', 'Cheering', 'Child_singing',
'Choir', 'Chop', 'Chopping_(food)', 'Clapping', 'Clickety-clack', 'Clicking',
'Clip-clop', 'Cluck', 'Coin_(dropping)', 'Computer_keyboard', 'Conversation',
'Coo', 'Cough', 'Cowbell', 'Creak', 'Cricket', 'Croak', 'Crow', 'Crowd', 'DTMF',
'Dog', 'Door', 'Drill', 'Drip', 'Engine', 'Engine_starting', 'Explosion', 'Fart',
'Female_singing', 'Filing_(rasp)', 'Finger_snapping', 'Fire', 'Fire_alarm', 'Firecracker',
'Fireworks', 'Frog', 'Gasp', 'Gears', 'Giggle', 'Glass', 'Glass_shatter', 'Gobble', 'Groan',
'Growling', 'Hammer', 'Hands', 'Hiccup', 'Honk', 'Hoot', 'Howl', 'Human_sounds', 'Human_voice',
'Insect', 'Laughter', 'Liquid', 'Machine_gun', 'Male_singing', 'Mechanisms', 'Meow', 'Moo',
'Motorcycle', 'Mouse', 'Music', 'Oink', 'Owl', 'Pant', 'Pant_(dog)', 'Patter', 'Pig', 'Plop',
'Pour', 'Power_tool', 'Purr', 'Quack', 'Radio', 'Rain_on_surface', 'Rapping', 'Rattle',
'Reversing_beeps', 'Ringtone', 'Roar', 'Run', 'Rustle', 'Scissors', 'Scrape', 'Scratch',
'Screaming', 'Sewing_machine', 'Shout', 'Shuffle', 'Shuffling_cards', 'Singing',
'Single-lens_reflex_camera', 'Siren', 'Skateboard', 'Sniff', 'Snoring', 'Speech',
'Speech_synthesizer', 'Spray', 'Squeak', 'Squeal', 'Steam', 'Stir', 'Surface_contact',
'Tap', 'Tap_dance', 'Telephone_bell_ringing', 'Television', 'Tick', 'Tick-tock', 'Tools',
'Train', 'Train_horn', 'Train_wheels_squealing', 'Truck', 'Turkey', 'Typewriter', 'Typing',
'Vehicle', 'Video_game_sound', 'Water', 'Whimper_(dog)', 'Whip', 'Whispering', 'Whistle',
'Whistling', 'Whoop', 'Wind', 'Writing', 'Yip', 'and_pans', 'bird_song', 'bleep', 'clink',
'cock-a-doodle-doo', 'crinkling', 'dove', 'dribble', 'eructation', 'faucet', 'flapping_wings',
'footsteps', 'gunfire', 'heartbeat', 'infant_cry', 'kid_speaking', 'man_speaking', 'mastication',
'mice', 'river', 'rooster', 'silverware', 'skidding', 'smack', 'sobbing', 'speedboat', 'splatter',
'surf', 'thud', 'thwack', 'toot', 'truck_horn', 'tweet', 'vroom', 'waterfowl', 'woman_speaking']
def load_checkpoint(model,
filename,
map_location=None,
strict=False,
logger=None,
revise_keys=[(r'^module\.', '')]):
"""Load checkpoint from a file or URI.
Args:
model (Module): Module to load checkpoint.
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str): Same as :func:`torch.load`.
strict (bool): Whether to allow different params for the model and
checkpoint.
logger (:mod:`logging.Logger` or None): The logger for error message.
revise_keys (list): A list of customized keywords to modify the
state_dict in checkpoint. Each item is a (pattern, replacement)
pair of the regular expression operations. Default: strip
the prefix 'module.' by [(r'^module\\.', '')].
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
checkpoint = _load_checkpoint(filename, map_location, logger)
'''
new_proj = torch.nn.Conv2d(1, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
new_proj.weight = torch.nn.Parameter(torch.sum(checkpoint['patch_embed1.proj.weight'], dim=1).unsqueeze(1))
checkpoint['patch_embed1.proj.weight'] = new_proj.weight
new_proj.weight = torch.nn.Parameter(torch.sum(checkpoint['patch_embed1.proj.weight'], dim=2).unsqueeze(2).repeat(1,1,3,1))
checkpoint['patch_embed1.proj.weight'] = new_proj.weight
new_proj.weight = torch.nn.Parameter(torch.sum(checkpoint['patch_embed1.proj.weight'], dim=3).unsqueeze(3).repeat(1,1,1,3))
checkpoint['patch_embed1.proj.weight'] = new_proj.weight
'''
new_proj = torch.nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(4, 4), padding=(2, 2))
new_proj.weight = torch.nn.Parameter(torch.sum(checkpoint['patch_embed1.proj.weight'], dim=1).unsqueeze(1))
checkpoint['patch_embed1.proj.weight'] = new_proj.weight
# OrderedDict is a subclass of dict
if not isinstance(checkpoint, dict):
raise RuntimeError(
f'No state_dict found in checkpoint file {filename}')
# get state_dict from checkpoint
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
# strip prefix of state_dict
metadata = getattr(state_dict, '_metadata', OrderedDict())
for p, r in revise_keys:
state_dict = OrderedDict(
{re.sub(p, r, k): v
for k, v in state_dict.items()})
state_dict = OrderedDict({k.replace('backbone.',''):v for k,v in state_dict.items()})
# Keep metadata in state_dict
state_dict._metadata = metadata
# load state_dict
load_state_dict(model, state_dict, strict, logger)
return checkpoint
def init_weights(m):
if isinstance(m, (nn.Conv2d, nn.Conv1d)):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def init_layer(layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
"""Initialize a Batchnorm layer. """
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.)
class MaxPool(nn.Module):
def __init__(self, pooldim=1):
super().__init__()
self.pooldim = pooldim
def forward(self, logits, decision):
return torch.max(decision, dim=self.pooldim)[0]
class LinearSoftPool(nn.Module):
"""LinearSoftPool
Linear softmax, takes logits and returns a probability, near to the actual maximum value.
Taken from the paper:
A Comparison of Five Multiple Instance Learning Pooling Functions for Sound Event Detection with Weak Labeling
https://arxiv.org/abs/1810.09050
"""
def __init__(self, pooldim=1):
super().__init__()
self.pooldim = pooldim
def forward(self, logits, time_decision):
return (time_decision**2).sum(self.pooldim) / (time_decision.sum(
self.pooldim)+1e-7)
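# Worked sketch (hypothetical helper name): linear-soft pooling weights each
# frame by its own probability, so it sits between mean- and max-pooling.
def _linear_softpool_demo():
    probs = torch.tensor([[[0.9], [0.1]]])  # (batch=1, time=2, classes=1)
    pool = LinearSoftPool(pooldim=1)
    return pool(None, probs)  # (0.81 + 0.01) / (0.9 + 0.1) -> approximately 0.82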
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.conv2 = nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.init_weight()
def init_weight(self):
init_layer(self.conv1)
init_layer(self.conv2)
init_bn(self.bn1)
init_bn(self.bn2)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception('Incorrect argument!')
return x
class ConvBlock_GLU(nn.Module):
def __init__(self, in_channels, out_channels,kernel_size=(3,3)):
super(ConvBlock_GLU, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size, stride=(1, 1),
padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.sigmoid = nn.Sigmoid()
self.init_weight()
def init_weight(self):
init_layer(self.conv1)
init_bn(self.bn1)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = self.bn1(self.conv1(x))
cnn1 = self.sigmoid(x[:, :x.shape[1]//2, :, :])
cnn2 = x[:,x.shape[1]//2:,:,:]
x = cnn1*cnn2
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
elif pool_type == 'None':
pass
elif pool_type == 'LP':
pass
#nn.LPPool2d(4, pool_size)
else:
raise Exception('Incorrect argument!')
return x
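# Illustrative sketch only (hypothetical helper name): the GLU gating above
# halves the channel dimension, using a sigmoid of the first half of the
# channels to gate the second half.
def _glu_gate_demo():
    x = torch.randn(2, 8, 10, 4)           # (batch, channels, time, freq)
    gate = torch.sigmoid(x[:, :4])         # first half of the channels -> gate
    return (gate * x[:, 4:]).shape         # -> torch.Size([2, 4, 10, 4])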
class Mul_scale_GLU(nn.Module):
def __init__(self):
super(Mul_scale_GLU,self).__init__()
self.conv_block1_1 = ConvBlock_GLU(in_channels=1, out_channels=64,kernel_size=(1,1)) # 1*1
self.conv_block1_2 = ConvBlock_GLU(in_channels=1, out_channels=64,kernel_size=(3,3)) # 3*3
self.conv_block1_3 = ConvBlock_GLU(in_channels=1, out_channels=64,kernel_size=(5,5)) # 5*5
self.conv_block2 = ConvBlock_GLU(in_channels=96, out_channels=128*2)
# self.conv_block3 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock_GLU(in_channels=128, out_channels=128*2)
self.conv_block4 = ConvBlock_GLU(in_channels=128, out_channels=256*2)
self.conv_block5 = ConvBlock_GLU(in_channels=256, out_channels=256*2)
self.conv_block6 = ConvBlock_GLU(in_channels=256, out_channels=512*2)
self.conv_block7 = ConvBlock_GLU(in_channels=512, out_channels=512*2)
self.padding = nn.ReplicationPad2d((0,1,0,1))
def forward(self, input, fi=None):
"""
Input: (batch_size, data_length)"""
x1 = self.conv_block1_1(input, pool_size=(2, 2), pool_type='avg')
x1 = x1[:,:,:500,:32]
#print('x1 ',x1.shape)
x2 = self.conv_block1_2(input,pool_size=(2,2),pool_type='avg')
#print('x2 ',x2.shape)
x3 = self.conv_block1_3(input,pool_size=(2,2),pool_type='avg')
x3 = self.padding(x3)
#print('x3 ',x3.shape)
# assert 1==2
x = torch.cat([x1,x2],dim=1)
x = torch.cat([x,x3],dim=1)
#print('x ',x.shape)
x = self.conv_block2(x, pool_size=(2, 2), pool_type='None')
x = self.conv_block3(x,pool_size=(2,2),pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training) #
#print('x2,3 ',x.shape)
x = self.conv_block4(x, pool_size=(2, 4), pool_type='None')
x = self.conv_block5(x,pool_size=(2,4),pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
#print('x4,5 ',x.shape)
x = self.conv_block6(x, pool_size=(1, 4), pool_type='None')
x = self.conv_block7(x, pool_size=(1, 4), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
# print('x6,7 ',x.shape)
# assert 1==2
return x
class Cnn14(nn.Module):
def __init__(self, sample_rate=32000, window_size=1024, hop_size=320, mel_bins=64, fmin=50,
fmax=14000, classes_num=527):
super(Cnn14, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
self.fc1 = nn.Linear(2048, 128, bias=True)
self.fc_audioset = nn.Linear(128, classes_num, bias=True)
self.init_weight()
def init_weight(self):
init_layer(self.fc1)
init_layer(self.fc_audioset)
def forward(self, input_, mixup_lambda=None):
"""
Input: (batch_size, data_length)"""
input_ = input_.unsqueeze(1)
x = self.conv_block1(input_, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=(1, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block5(x, pool_size=(1, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block6(x, pool_size=(1, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
# print(x.shape)
# x = torch.mean(x, dim=3)
x = x.transpose(1, 2).contiguous().flatten(-2)
x = self.fc1(x)
# print(x.shape)
# assert 1==2
# (x1,_) = torch.max(x, dim=2)
# x2 = torch.mean(x, dim=2)
# x = x1 + x2
# x = F.dropout(x, p=0.5, training=self.training)
# x = F.relu_(self.fc1(x))
# embedding = F.dropout(x, p=0.5, training=self.training)
return x
class Cnn10_fi(nn.Module):
def __init__(self):
super(Cnn10_fi, self).__init__()
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
# self.fc1 = nn.Linear(512, 512, bias=True)
# self.fc_audioset = nn.Linear(512, classes_num, bias=True)
# self.init_weight()
def forward(self, input, fi=None):
"""
Input: (batch_size, data_length)"""
x = self.conv_block1(input, pool_size=(2, 2), pool_type='avg')
if fi != None:
gamma = fi[:,0].unsqueeze(1).unsqueeze(2).unsqueeze(3).expand_as(x)
beta = fi[:,1].unsqueeze(1).unsqueeze(2).unsqueeze(3).expand_as(x)
x = (gamma)*x + beta
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
if fi != None:
gamma = fi[:,0].unsqueeze(1).unsqueeze(2).unsqueeze(3).expand_as(x)
beta = fi[:,1].unsqueeze(1).unsqueeze(2).unsqueeze(3).expand_as(x)
x = (gamma)*x + beta
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=(2, 4), pool_type='avg')
if fi != None:
gamma = fi[:,0].unsqueeze(1).unsqueeze(2).unsqueeze(3).expand_as(x)
beta = fi[:,1].unsqueeze(1).unsqueeze(2).unsqueeze(3).expand_as(x)
x = (gamma)*x + beta
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=(1, 4), pool_type='avg')
        if fi is not None:
gamma = fi[:,0].unsqueeze(1).unsqueeze(2).unsqueeze(3).expand_as(x)
beta = fi[:,1].unsqueeze(1).unsqueeze(2).unsqueeze(3).expand_as(x)
x = (gamma)*x + beta
x = F.dropout(x, p=0.2, training=self.training)
return x
class Cnn10_mul_scale(nn.Module):
def __init__(self,scale=8):
super(Cnn10_mul_scale, self).__init__()
self.conv_block1_1 = ConvBlock_GLU(in_channels=1, out_channels=64,kernel_size=(1,1))
self.conv_block1_2 = ConvBlock_GLU(in_channels=1, out_channels=64,kernel_size=(3,3))
self.conv_block1_3 = ConvBlock_GLU(in_channels=1, out_channels=64,kernel_size=(5,5))
self.conv_block2 = ConvBlock(in_channels=96, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.scale = scale
self.padding = nn.ReplicationPad2d((0,1,0,1))
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
"""
Input: (batch_size, data_length)"""
if self.scale == 8:
pool_size1 = (2,2)
pool_size2 = (2,2)
pool_size3 = (2,4)
pool_size4 = (1,4)
elif self.scale == 4:
pool_size1 = (2,2)
pool_size2 = (2,2)
pool_size3 = (1,4)
pool_size4 = (1,4)
elif self.scale == 2:
pool_size1 = (2,2)
pool_size2 = (1,2)
pool_size3 = (1,4)
pool_size4 = (1,4)
else:
pool_size1 = (1,2)
pool_size2 = (1,2)
pool_size3 = (1,4)
pool_size4 = (1,4)
# print('input ',input.shape)
x1 = self.conv_block1_1(input, pool_size=pool_size1, pool_type='avg')
x1 = x1[:,:,:500,:32]
#print('x1 ',x1.shape)
x2 = self.conv_block1_2(input, pool_size=pool_size1, pool_type='avg')
#print('x2 ',x2.shape)
x3 = self.conv_block1_3(input, pool_size=pool_size1, pool_type='avg')
x3 = self.padding(x3)
#print('x3 ',x3.shape)
# assert 1==2
m_i = min(x3.shape[2],min(x1.shape[2],x2.shape[2]))
#print('m_i ', m_i)
x = torch.cat([x1[:,:,:m_i,:],x2[:,:, :m_i,:],x3[:,:, :m_i,:]],dim=1)
# x = torch.cat([x,x3],dim=1)
# x = self.conv_block1(input, pool_size=pool_size1, pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=pool_size2, pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=pool_size3, pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=pool_size4, pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
return x
class Cnn10(nn.Module):
def __init__(self,scale=8):
super(Cnn10, self).__init__()
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.scale = scale
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
"""
Input: (batch_size, data_length)"""
if self.scale == 8:
pool_size1 = (2,2)
pool_size2 = (2,2)
pool_size3 = (2,4)
pool_size4 = (1,4)
elif self.scale == 4:
pool_size1 = (2,2)
pool_size2 = (2,2)
pool_size3 = (1,4)
pool_size4 = (1,4)
elif self.scale == 2:
pool_size1 = (2,2)
pool_size2 = (1,2)
pool_size3 = (1,4)
pool_size4 = (1,4)
else:
pool_size1 = (1,2)
pool_size2 = (1,2)
pool_size3 = (1,4)
pool_size4 = (1,4)
x = self.conv_block1(input, pool_size=pool_size1, pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=pool_size2, pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=pool_size3, pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=pool_size4, pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
return x
class MeanPool(nn.Module):
def __init__(self, pooldim=1):
super().__init__()
self.pooldim = pooldim
def forward(self, logits, decision):
return torch.mean(decision, dim=self.pooldim)
class ResPool(nn.Module):
def __init__(self, pooldim=1):
super().__init__()
self.pooldim = pooldim
self.linPool = LinearSoftPool(pooldim=1)
class AutoExpPool(nn.Module):
def __init__(self, outputdim=10, pooldim=1):
super().__init__()
self.outputdim = outputdim
        self.alpha = nn.Parameter(torch.full((outputdim, ), 1.0))  # must be a float tensor to be a learnable parameter
self.pooldim = pooldim
def forward(self, logits, decision):
scaled = self.alpha * decision # \alpha * P(Y|x) in the paper
return (logits * torch.exp(scaled)).sum(
self.pooldim) / torch.exp(scaled).sum(self.pooldim)
class SoftPool(nn.Module):
def __init__(self, T=1, pooldim=1):
super().__init__()
self.pooldim = pooldim
self.T = T
def forward(self, logits, decision):
w = torch.softmax(decision / self.T, dim=self.pooldim)
return torch.sum(decision * w, dim=self.pooldim)
class AutoPool(nn.Module):
"""docstring for AutoPool"""
def __init__(self, outputdim=10, pooldim=1):
super().__init__()
self.outputdim = outputdim
self.alpha = nn.Parameter(torch.ones(outputdim))
self.dim = pooldim
def forward(self, logits, decision):
scaled = self.alpha * decision # \alpha * P(Y|x) in the paper
weight = torch.softmax(scaled, dim=self.dim)
return torch.sum(decision * weight, dim=self.dim) # B x C
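# Illustrative usage sketch (added for clarity, not part of the original model code):
# AutoPool learns a per-class scaling `alpha` and uses a softmax over time to turn
# frame-level decisions into a clip-level decision. Shapes below are assumed for demonstration only.
def _demo_autopool():
    pool = AutoPool(outputdim=10, pooldim=1)
    logits = torch.rand(4, 125, 10)     # (batch, time, classes); unused by AutoPool itself
    decision = torch.rand(4, 125, 10)   # frame-level probabilities
    clip_level = pool(logits, decision)
    return clip_level.shape             # torch.Size([4, 10])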
class ExtAttentionPool(nn.Module):
def __init__(self, inputdim, outputdim=10, pooldim=1, **kwargs):
super().__init__()
self.inputdim = inputdim
self.outputdim = outputdim
self.pooldim = pooldim
self.attention = nn.Linear(inputdim, outputdim)
nn.init.zeros_(self.attention.weight)
nn.init.zeros_(self.attention.bias)
self.activ = nn.Softmax(dim=self.pooldim)
def forward(self, logits, decision):
# Logits of shape (B, T, D), decision of shape (B, T, C)
w_x = self.activ(self.attention(logits) / self.outputdim)
h = (logits.permute(0, 2, 1).contiguous().unsqueeze(-2) *
w_x.unsqueeze(-1)).flatten(-2).contiguous()
return torch.sum(h, self.pooldim)
class AttentionPool(nn.Module):
"""docstring for AttentionPool"""
def __init__(self, inputdim, outputdim=10, pooldim=1, **kwargs):
super().__init__()
self.inputdim = inputdim
self.outputdim = outputdim
self.pooldim = pooldim
self.transform = nn.Linear(inputdim, outputdim)
self.activ = nn.Softmax(dim=self.pooldim)
self.eps = 1e-7
def forward(self, logits, decision):
# Input is (B, T, D)
# B, T , D
w = self.activ(torch.clamp(self.transform(logits), -15, 15))
detect = (decision * w).sum(
self.pooldim) / (w.sum(self.pooldim) + self.eps)
# B, T, D
return detect
class Block2D(nn.Module):
def __init__(self, cin, cout, kernel_size=3, padding=1):
super().__init__()
self.block = nn.Sequential(
nn.BatchNorm2d(cin),
nn.Conv2d(cin,
cout,
kernel_size=kernel_size,
padding=padding,
bias=False),
nn.LeakyReLU(inplace=True, negative_slope=0.1))
def forward(self, x):
return self.block(x)
class AudioCNN(nn.Module):
def __init__(self, classes_num):
super(AudioCNN, self).__init__()
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.fc1 = nn.Linear(512,128,bias=True)
self.fc = nn.Linear(128, classes_num, bias=True)
self.init_weights()
def init_weights(self):
init_layer(self.fc)
def forward(self, input):
'''
Input: (batch_size, times_steps, freq_bins)'''
# [128, 801, 168] --> [128,1,801,168]
x = input[:, None, :, :]
'''(batch_size, 1, times_steps, freq_bins)'''
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg') # 128,64,400,84
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg') # 128,128,200,42
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg') # 128,256,100,21
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg') # 128,512,50,10
'''(batch_size, feature_maps, time_steps, freq_bins)'''
x = torch.mean(x, dim=3) # (batch_size, feature_maps, time_stpes) # 128,512,50
(x, _) = torch.max(x, dim=2) # (batch_size, feature_maps) 128,512
x = self.fc1(x) # 128,128
output = self.fc(x) # 128,10
return x,output
def extract(self,input):
'''Input: (batch_size, times_steps, freq_bins)'''
x = input[:, None, :, :]
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
'''(batch_size, feature_maps, time_steps, freq_bins)'''
x = torch.mean(x, dim=3) # (batch_size, feature_maps, time_stpes)
(x, _) = torch.max(x, dim=2) # (batch_size, feature_maps)
x = self.fc1(x) # 128,128
return x
def parse_poolingfunction(poolingfunction_name='mean', **kwargs):
"""parse_poolingfunction
    A helper function to parse any temporal pooling function.
    Pooling is done on dimension 1.
:param poolingfunction_name:
:param **kwargs:
"""
poolingfunction_name = poolingfunction_name.lower()
if poolingfunction_name == 'mean':
return MeanPool(pooldim=1)
elif poolingfunction_name == 'max':
return MaxPool(pooldim=1)
elif poolingfunction_name == 'linear':
return LinearSoftPool(pooldim=1)
elif poolingfunction_name == 'expalpha':
return AutoExpPool(outputdim=kwargs['outputdim'], pooldim=1)
elif poolingfunction_name == 'soft':
return SoftPool(pooldim=1)
elif poolingfunction_name == 'auto':
return AutoPool(outputdim=kwargs['outputdim'])
elif poolingfunction_name == 'attention':
return AttentionPool(inputdim=kwargs['inputdim'],
outputdim=kwargs['outputdim'])
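# Illustrative usage sketch (added for clarity, not part of the original code):
# 'attention' pooling needs both the feature and output dimensions, whereas
# 'mean'/'max'/'linear'/'soft' need no extra kwargs. Shapes are assumed for demonstration.
def _demo_parse_poolingfunction():
    pooling = parse_poolingfunction('attention', inputdim=256, outputdim=10)
    logits = torch.rand(2, 125, 256)        # (batch, time, feature_dim)
    decision = torch.rand(2, 125, 10)       # (batch, time, classes)
    return pooling(logits, decision).shape  # torch.Size([2, 10])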
class conv1d(nn.Module):
def __init__(self, nin, nout, kernel_size=3, stride=1, padding='VALID', dilation=1):
super(conv1d, self).__init__()
if padding == 'VALID':
dconv_pad = 0
elif padding == 'SAME':
dconv_pad = dilation * ((kernel_size - 1) // 2)
else:
raise ValueError("Padding Mode Error!")
self.conv = nn.Conv1d(nin, nout, kernel_size=kernel_size, stride=stride, padding=dconv_pad)
self.act = nn.ReLU()
self.init_layer(self.conv)
def init_layer(self, layer, nonlinearity='relu'):
"""Initialize a Linear or Convolutional layer. """
nn.init.kaiming_normal_(layer.weight, nonlinearity=nonlinearity)
nn.init.constant_(layer.bias, 0.1)
def forward(self, x):
out = self.act(self.conv(x))
return out
class Atten_1(nn.Module):
def __init__(self, input_dim, context=2, dropout_rate=0.2):
super(Atten_1, self).__init__()
self._matrix_k = nn.Linear(input_dim, input_dim // 4)
self._matrix_q = nn.Linear(input_dim, input_dim // 4)
self.relu = nn.ReLU()
self.context = context
self._dropout_layer = nn.Dropout(dropout_rate)
self.init_layer(self._matrix_k)
self.init_layer(self._matrix_q)
def init_layer(self, layer, nonlinearity='leaky_relu'):
"""Initialize a Linear or Convolutional layer. """
nn.init.kaiming_uniform_(layer.weight, nonlinearity=nonlinearity)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def forward(self, input_x):
k_x = input_x
k_x = self.relu(self._matrix_k(k_x))
k_x = self._dropout_layer(k_x)
# print('k_x ',k_x.shape)
q_x = input_x[:, self.context, :]
# print('q_x ',q_x.shape)
q_x = q_x[:, None, :]
# print('q_x1 ',q_x.shape)
q_x = self.relu(self._matrix_q(q_x))
q_x = self._dropout_layer(q_x)
# print('q_x2 ',q_x.shape)
x_ = torch.matmul(k_x, q_x.transpose(-2, -1) / math.sqrt(k_x.size(-1)))
# print('x_ ',x_.shape)
x_ = x_.squeeze(2)
alpha = F.softmax(x_, dim=-1)
att_ = alpha
# print('alpha ',alpha)
alpha = alpha.unsqueeze(2).repeat(1,1,input_x.shape[2])
# print('alpha ',alpha)
# alpha = alpha.view(alpha.size(0), alpha.size(1), alpha.size(2), 1)
out = alpha * input_x
# print('out ', out.shape)
# out = out.mean(2)
out = out.mean(1)
# print('out ',out.shape)
# assert 1==2
#y = alpha * input_x
#return y, att_
out = input_x[:, self.context, :] + out
return out
class Fusion(nn.Module):
def __init__(self, inputdim, inputdim2, n_fac):
super().__init__()
self.fuse_layer1 = conv1d(inputdim, inputdim2*n_fac,1)
self.fuse_layer2 = conv1d(inputdim2, inputdim2*n_fac,1)
        self.avg_pool = nn.AvgPool1d(n_fac, stride=n_fac)  # average pooling along the last (feature) dimension
    def forward(self, embedding, mix_embed):
        embedding = embedding.permute(0, 2, 1)
        fuse1_out = self.fuse_layer1(embedding)  # [2, 501, 2560], 1D conv that expands the speaker embedding dimension
        fuse1_out = fuse1_out.permute(0, 2, 1)
        mix_embed = mix_embed.permute(0, 2, 1)
        fuse2_out = self.fuse_layer2(mix_embed)  # [2, 501, 2560], 1D conv that expands the mixture embedding dimension
        fuse2_out = fuse2_out.permute(0, 2, 1)
        as_embs = torch.mul(fuse1_out, fuse2_out)  # element-wise product [2, 501, 2560]
        # (10, 501, 512)
        as_embs = self.avg_pool(as_embs)  # [2, 501, 512], i.e. 2560 // 5
return as_embs
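# Illustrative usage sketch (added for clarity, not part of the original code):
# Fusion projects both streams to inputdim2 * n_fac channels with 1x1 convolutions,
# multiplies them, and average-pools the channel dimension back down to inputdim2.
def _demo_fusion():
    fusion = Fusion(inputdim=128, inputdim2=512, n_fac=2)
    spk_embedding = torch.rand(2, 125, 128)   # (batch, time, embedding_dim)
    mix_embedding = torch.rand(2, 125, 512)   # (batch, time, feature_dim)
    fused = fusion(spk_embedding, mix_embedding)
    return fused.shape                        # torch.Size([2, 125, 512])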
class CDur_fusion(nn.Module):
def __init__(self, inputdim, outputdim, **kwargs):
super().__init__()
self.features = nn.Sequential(
Block2D(1, 32),
nn.LPPool2d(4, (2, 4)),
Block2D(32, 128),
Block2D(128, 128),
nn.LPPool2d(4, (2, 4)),
Block2D(128, 128),
Block2D(128, 128),
nn.LPPool2d(4, (1, 4)),
nn.Dropout(0.3),
)
with torch.no_grad():
rnn_input_dim = self.features(torch.randn(1, 1, 500,inputdim)).shape
rnn_input_dim = rnn_input_dim[1] * rnn_input_dim[-1]
self.gru = nn.GRU(128, 128, bidirectional=True, batch_first=True)
        self.fusion = Fusion(128, 128, 2)  # Fusion(inputdim, inputdim2, n_fac); both the embedding and the CNN features are assumed 128-d here
self.fc = nn.Linear(256,256)
self.outputlayer = nn.Linear(256, outputdim)
self.features.apply(init_weights)
self.outputlayer.apply(init_weights)
def forward(self, x, embedding): #
batch, time, dim = x.shape
x = x.unsqueeze(1) # (b,1,t,d)
x = self.features(x) #
        x = x.transpose(1, 2).contiguous().flatten(-2) # make x contiguous, then flatten the channel and frequency dims # (b,125,128)
embedding = embedding.unsqueeze(1)
embedding = embedding.repeat(1, x.shape[1], 1)
x = self.fusion(embedding,x)
#x = torch.cat((x, embedding), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
self.gru.flatten_parameters()
x, _ = self.gru(x) # x torch.Size([16, 125, 256])
x = self.fc(x)
decision_time = torch.softmax(self.outputlayer(x),dim=2) # x torch.Size([16, 125, 2])
decision_up = torch.nn.functional.interpolate(
decision_time.transpose(1, 2), # [16, 2, 125]
time, # 501
mode='linear',
            align_corners=False).transpose(1, 2) # interpolate from 125 frames back up to 501 --> (16,501,2)
return decision_time[:,:,0],decision_up
class CDur(nn.Module):
def __init__(self, inputdim, outputdim,time_resolution, **kwargs):
super().__init__()
self.features = nn.Sequential(
Block2D(1, 32),
nn.LPPool2d(4, (2, 4)),
Block2D(32, 128),
Block2D(128, 128),
nn.LPPool2d(4, (2, 4)),
Block2D(128, 128),
Block2D(128, 128),
nn.LPPool2d(4, (2, 4)),
nn.Dropout(0.3),
)
with torch.no_grad():
rnn_input_dim = self.features(torch.randn(1, 1, 500,inputdim)).shape
rnn_input_dim = rnn_input_dim[1] * rnn_input_dim[-1]
self.gru = nn.GRU(256, 256, bidirectional=True, batch_first=True)
self.fc = nn.Linear(512,256)
self.outputlayer = nn.Linear(256, outputdim)
self.features.apply(init_weights)
self.outputlayer.apply(init_weights)
def forward(self, x, embedding,one_hot=None): #
batch, time, dim = x.shape
x = x.unsqueeze(1) # (b,1,t,d)
x = self.features(x) #
        x = x.transpose(1, 2).contiguous().flatten(-2) # make x contiguous, then flatten the channel and frequency dims # (b,125,128)
embedding = embedding.unsqueeze(1)
embedding = embedding.repeat(1, x.shape[1], 1)
x = torch.cat((x, embedding), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
self.gru.flatten_parameters()
x, _ = self.gru(x) # x torch.Size([16, 125, 256])
x = self.fc(x)
decision_time = torch.softmax(self.outputlayer(x),dim=2) # x torch.Size([16, 125, 2])
decision_up = torch.nn.functional.interpolate(
decision_time.transpose(1, 2), # [16, 2, 125]
time, # 501
mode='linear',
            align_corners=False).transpose(1, 2) # interpolate from 125 frames back up to 501 --> (16,501,2)
return decision_time[:,:,0],decision_up
class CDur_big(nn.Module):
def __init__(self, inputdim, outputdim, **kwargs):
super().__init__()
self.features = nn.Sequential(
Block2D(1, 64),
Block2D(64, 64),
nn.LPPool2d(4, (2, 2)),
Block2D(64, 128),
Block2D(128, 128),
nn.LPPool2d(4, (2, 2)),
Block2D(128, 256),
Block2D(256, 256),
nn.LPPool2d(4, (2, 4)),
Block2D(256, 512),
Block2D(512, 512),
nn.LPPool2d(4, (1, 4)),
nn.Dropout(0.3),)
with torch.no_grad():
rnn_input_dim = self.features(torch.randn(1, 1, 500,inputdim)).shape
rnn_input_dim = rnn_input_dim[1] * rnn_input_dim[-1]
self.gru = nn.GRU(640, 512, bidirectional=True, batch_first=True)
self.fc = nn.Linear(1024,256)
self.outputlayer = nn.Linear(256, outputdim)
self.features.apply(init_weights)
self.outputlayer.apply(init_weights)
def forward(self, x, embedding): #
batch, time, dim = x.shape
x = x.unsqueeze(1) # (b,1,t,d)
x = self.features(x) #
        x = x.transpose(1, 2).contiguous().flatten(-2) # make x contiguous, then flatten the channel and frequency dims # (b,125,512)
embedding = embedding.unsqueeze(1)
embedding = embedding.repeat(1, x.shape[1], 1)
x = torch.cat((x, embedding), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
self.gru.flatten_parameters()
x, _ = self.gru(x) # x torch.Size([16, 125, 256])
x = self.fc(x)
decision_time = torch.softmax(self.outputlayer(x),dim=2) # x torch.Size([16, 125, 2])
decision_up = torch.nn.functional.interpolate(
decision_time.transpose(1, 2), # [16, 2, 125]
time, # 501
mode='linear',
            align_corners=False).transpose(1, 2) # interpolate from 125 frames back up to 501 --> (16,501,2)
return decision_time[:,:,0],decision_up
class CDur_GLU(nn.Module):
def __init__(self, inputdim, outputdim, **kwargs):
super().__init__()
self.features = Mul_scale_GLU()
# with torch.no_grad():
# rnn_input_dim = self.features(torch.randn(1, 1, 500,inputdim)).shape
# rnn_input_dim = rnn_input_dim[1] * rnn_input_dim[-1]
self.gru = nn.GRU(640, 512,1, bidirectional=True, batch_first=True) # previous is 640
# self.gru = LSTMModel(640, 512,1)
self.fc = nn.Linear(1024,256)
self.outputlayer = nn.Linear(256, outputdim)
# self.features.apply(init_weights)
self.outputlayer.apply(init_weights)
def forward(self, x, embedding,one_hot=None): #
batch, time, dim = x.shape
x = x.unsqueeze(1) # (b,1,t,d)
x = self.features(x) #
        x = x.transpose(1, 2).contiguous().flatten(-2) # make x contiguous, then flatten the channel and frequency dims # (b,125,512)
# print('x ',x.shape)
# assert 1==2
embedding = embedding.unsqueeze(1)
embedding = embedding.repeat(1, x.shape[1], 1)
x = torch.cat((x, embedding), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
self.gru.flatten_parameters()
x, _ = self.gru(x) # x torch.Size([16, 125, 256])
# x = self.gru(x) # x torch.Size([16, 125, 256])
x = self.fc(x)
decision_time = torch.softmax(self.outputlayer(x),dim=2) # x torch.Size([16, 125, 2])
decision_up = torch.nn.functional.interpolate(
decision_time.transpose(1, 2), # [16, 2, 125]
time, # 501
mode='linear',
            align_corners=False).transpose(1, 2) # interpolate from 125 frames back up to 501 --> (16,501,2)
return decision_time[:,:,0],decision_up
class CDur_CNN14(nn.Module):
def __init__(self, inputdim, outputdim,time_resolution,**kwargs):
super().__init__()
if time_resolution==125:
self.features = Cnn10(8)
elif time_resolution == 250:
#print('time_resolution ',time_resolution)
self.features = Cnn10(4)
elif time_resolution == 500:
self.features = Cnn10(2)
else:
self.features = Cnn10(0)
with torch.no_grad():
rnn_input_dim = self.features(torch.randn(1, 1, 500,inputdim)).shape
rnn_input_dim = rnn_input_dim[1] * rnn_input_dim[-1]
# self.features = Cnn10()
self.gru = nn.GRU(640, 512, bidirectional=True, batch_first=True)
# self.gru = LSTMModel(640, 512,1)
self.fc = nn.Linear(1024,256)
self.outputlayer = nn.Linear(256, outputdim)
# self.features.apply(init_weights)
self.outputlayer.apply(init_weights)
def forward(self, x, embedding,one_hot=None):
batch, time, dim = x.shape
x = x.unsqueeze(1) # (b,1,t,d)
x = self.features(x) #
        x = x.transpose(1, 2).contiguous().flatten(-2) # make x contiguous, then flatten the channel and frequency dims # (b,125,512)
# print('x ',x.shape)
# assert 1==2
embedding = embedding.unsqueeze(1)
embedding = embedding.repeat(1, x.shape[1], 1)
x = torch.cat((x, embedding), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
self.gru.flatten_parameters()
x, _ = self.gru(x) # x torch.Size([16, 125, 256])
# x = self.gru(x) # x torch.Size([16, 125, 256])
x = self.fc(x)
decision_time = torch.softmax(self.outputlayer(x),dim=2) # x torch.Size([16, 125, 2])
decision_up = torch.nn.functional.interpolate(
decision_time.transpose(1, 2), # [16, 2, 125]
time, # 501
mode='linear',
            align_corners=False).transpose(1, 2) # interpolate from 125 frames back up to 501 --> (16,501,2)
return decision_time[:,:,0],decision_up
class CDur_CNN_mul_scale(nn.Module):
def __init__(self, inputdim, outputdim,time_resolution,**kwargs):
super().__init__()
if time_resolution==125:
self.features = Cnn10_mul_scale(8)
elif time_resolution == 250:
#print('time_resolution ',time_resolution)
self.features = Cnn10_mul_scale(4)
elif time_resolution == 500:
self.features = Cnn10_mul_scale(2)
else:
self.features = Cnn10_mul_scale(0)
# with torch.no_grad():
# rnn_input_dim = self.features(torch.randn(1, 1, 500,inputdim)).shape
# rnn_input_dim = rnn_input_dim[1] * rnn_input_dim[-1]
# self.features = Cnn10()
self.gru = nn.GRU(640, 512, bidirectional=True, batch_first=True)
# self.gru = LSTMModel(640, 512,1)
self.fc = nn.Linear(1024,256)
self.outputlayer = nn.Linear(256, outputdim)
# self.features.apply(init_weights)
self.outputlayer.apply(init_weights)
def forward(self, x, embedding,one_hot=None):
# print('x ',x.shape)
# assert 1==2
batch, time, dim = x.shape
x = x.unsqueeze(1) # (b,1,t,d)
x = self.features(x) #
        x = x.transpose(1, 2).contiguous().flatten(-2) # make x contiguous, then flatten the channel and frequency dims # (b,125,512)
# print('x ',x.shape)
# assert 1==2
embedding = embedding.unsqueeze(1)
embedding = embedding.repeat(1, x.shape[1], 1)
x = torch.cat((x, embedding), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
self.gru.flatten_parameters()
x, _ = self.gru(x) # x torch.Size([16, 125, 256])
# x = self.gru(x) # x torch.Size([16, 125, 256])
x = self.fc(x)
decision_time = torch.softmax(self.outputlayer(x),dim=2) # x torch.Size([16, 125, 2])
decision_up = torch.nn.functional.interpolate(
decision_time.transpose(1, 2), # [16, 2, 125]
time, # 501
mode='linear',
            align_corners=False).transpose(1, 2) # interpolate from 125 frames back up to 501 --> (16,501,2)
return decision_time[:,:,0],decision_up
class CDur_CNN_mul_scale_fusion(nn.Module):
def __init__(self, inputdim, outputdim, time_resolution,**kwargs):
super().__init__()
if time_resolution==125:
self.features = Cnn10_mul_scale(8)
elif time_resolution == 250:
#print('time_resolution ',time_resolution)
self.features = Cnn10_mul_scale(4)
elif time_resolution == 500:
self.features = Cnn10_mul_scale(2)
else:
self.features = Cnn10_mul_scale(0)
# with torch.no_grad():
# rnn_input_dim = self.features(torch.randn(1, 1, 500,inputdim)).shape
# rnn_input_dim = rnn_input_dim[1] * rnn_input_dim[-1]
# self.features = Cnn10()
self.gru = nn.GRU(512, 512, bidirectional=True, batch_first=True)
# self.gru = LSTMModel(640, 512,1)
self.fc = nn.Linear(1024,256)
self.fusion = Fusion(128,512,2)
self.outputlayer = nn.Linear(256, outputdim)
# self.features.apply(init_weights)
self.outputlayer.apply(init_weights)
def forward(self, x, embedding,one_hot=None):
# print('x ',x.shape)
# assert 1==2
batch, time, dim = x.shape
x = x.unsqueeze(1) # (b,1,t,d)
x = self.features(x) #
        x = x.transpose(1, 2).contiguous().flatten(-2) # make x contiguous, then flatten the channel and frequency dims # (b,125,512)
# print('x ',x.shape)
# assert 1==2
embedding = embedding.unsqueeze(1)
embedding = embedding.repeat(1, x.shape[1], 1)
x = self.fusion(embedding, x)
#x = torch.cat((x, embedding), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
self.gru.flatten_parameters()
x, _ = self.gru(x) # x torch.Size([16, 125, 256])
# x = self.gru(x) # x torch.Size([16, 125, 256])
x = self.fc(x)
decision_time = torch.softmax(self.outputlayer(x),dim=2) # x torch.Size([16, 125, 2])
decision_up = torch.nn.functional.interpolate(
decision_time.transpose(1, 2), # [16, 2, 125]
time, # 501
mode='linear',
            align_corners=False).transpose(1, 2) # interpolate from 125 frames back up to 501 --> (16,501,2)
return decision_time[:,:,0],decision_up
class RaDur_fusion(nn.Module):
def __init__(self, model_config, inputdim, outputdim, time_resolution, **kwargs):
super().__init__()
self.encoder = Cnn14()
self.detection = CDur_CNN_mul_scale_fusion(inputdim, outputdim, time_resolution)
self.softmax = nn.Softmax(dim=2)
#self.temperature = 5
# if model_config['pre_train']:
# self.encoder.load_state_dict(torch.load(model_config['encoder_path'])['model'])
# self.detection.load_state_dict(torch.load(model_config['CDur_path']))
self.q = nn.Linear(128,128)
self.k = nn.Linear(128,128)
self.q_ee = nn.Linear(128, 128)
self.k_ee = nn.Linear(128, 128)
self.temperature = 11.3 # sqrt(128)
self.att_pool = model_config['att_pool']
self.enhancement = model_config['enhancement']
self.tao = model_config['tao']
self.top = model_config['top']
self.bn = nn.BatchNorm1d(128)
self.EE_fusion = Fusion(128, 128, 4)
def get_w(self,q,k):
q = self.q(q)
k = self.k(k)
q = q.unsqueeze(1)
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn/self.temperature
attn = self.softmax(attn)
return attn
def get_w_ee(self,q,k):
q = self.q_ee(q)
k = self.k_ee(k)
q = q.unsqueeze(1)
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn/self.temperature
attn = self.softmax(attn)
return attn
def attention_pooling(self, embeddings, mean_embedding):
att_pool_w = self.get_w(mean_embedding,embeddings)
embedding = torch.bmm(att_pool_w, embeddings).squeeze(1)
# print(embedding.shape)
# print(att_pool_w.shape)
# print(att_pool_w[0])
# assert 1==2
return embedding
def select_topk_embeddings(self, scores, embeddings, k):
        sorted_scores, idx_DESC = scores.sort(descending=True, dim=1)  # sort frames by score, descending
        top_k = sorted_scores[:, :k]
        # print('top_k ', top_k)
        # top_k = top_k.mean(1)
        idx_topk = idx_DESC[:, :k]  # keep the indices of the top-k frames
# print('index ', idx_topk)
idx_topk = idx_topk.unsqueeze(2).expand([-1, -1, embeddings.shape[2]])
selected_embeddings = torch.gather(embeddings, 1, idx_topk)
return selected_embeddings,top_k
def sum_with_attention(self, embedding, top_k, selected_embeddings):
# print('embedding ',embedding)
# print('selected_embeddings ',selected_embeddings.shape)
att_1 = self.get_w_ee(embedding, selected_embeddings)
att_1 = att_1.squeeze(1)
#print('att_1 ',att_1.shape)
larger = top_k > self.tao
# print('larger ',larger)
top_k = top_k*larger
# print('top_k ',top_k.shape)
# print('top_k ',top_k)
att_1 = att_1*top_k
#print('att_1 ',att_1.shape)
# assert 1==2
att_2 = att_1.unsqueeze(2).repeat(1,1,128)
Es = selected_embeddings*att_2
return Es
def orcal_EE(self, x, embedding, label):
batch, time, dim = x.shape
mixture_embedding = self.encoder(x) # 8, 125, 128
mixture_embedding = mixture_embedding.transpose(1,2)
mixture_embedding = self.bn(mixture_embedding)
mixture_embedding = mixture_embedding.transpose(1,2)
x = x.unsqueeze(1) # (b,1,t,d)
x = self.detection.features(x) #
        x = x.transpose(1, 2).contiguous().flatten(-2) # make x contiguous, then flatten the channel and frequency dims # (b,125,128)
embedding_pre = embedding.unsqueeze(1)
embedding_pre = embedding_pre.repeat(1, x.shape[1], 1)
f = self.detection.fusion(embedding_pre, x) # the first stage results
#f = torch.cat((x, embedding_pre), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
self.detection.gru.flatten_parameters()
f, _ = self.detection.gru(f) # x torch.Size([16, 125, 256])
f = self.detection.fc(f)
decision_time = torch.softmax(self.detection.outputlayer(f),dim=2) # x torch.Size([16, 125, 2])
selected_embeddings, top_k = self.select_topk_embeddings(decision_time[:,:,0], mixture_embedding, self.top)
selected_embeddings = self.sum_with_attention(embedding, top_k, selected_embeddings) # add the weight
mix_embedding = selected_embeddings.mean(1).unsqueeze(1) #
mix_embedding = mix_embedding.repeat(1, x.shape[1], 1)
embedding = embedding.unsqueeze(1)
embedding = embedding.repeat(1, x.shape[1], 1)
        mix_embedding = self.EE_fusion(mix_embedding, embedding)  # fuse the two embeddings with the learned Fusion module
        # mix_embedding2 = selected_embeddings2.mean(1)
        # mix_embedding = embedding + mix_embedding  # alternative: simple addition instead of learned fusion
# new detection results
# embedding_now = mix_embedding.unsqueeze(1)
# embedding_now = embedding_now.repeat(1, x.shape[1], 1)
f_now = self.detection.fusion(mix_embedding, x)
#f_now = torch.cat((x, embedding_now), dim=2) #
f_now, _ = self.detection.gru(f_now) # x torch.Size([16, 125, 256])
f_now = self.detection.fc(f_now)
decision_time_now = torch.softmax(self.detection.outputlayer(f_now), dim=2) # x torch.Size([16, 125, 2])
top_k = top_k.mean(1) # get avg score,higher score will have more weight
larger = top_k > self.tao
top_k = top_k * larger
top_k = top_k/2.0
# print('top_k ',top_k)
# assert 1==2
# print('tok_k[ ',top_k.shape)
# print('decision_time ',decision_time.shape)
# print('decision_time_now ',decision_time_now.shape)
neg_w = top_k.unsqueeze(1).unsqueeze(2)
neg_w = neg_w.repeat(1, decision_time_now.shape[1], decision_time_now.shape[2])
# print('neg_w ',neg_w.shape)
#print('neg_w ',neg_w[:,0:10,0])
pos_w = 1-neg_w
#print('pos_w ',pos_w[:,0:10,0])
decision_time_final = decision_time*pos_w + neg_w*decision_time_now
#print('decision_time_final ',decision_time_final[0,0:10,0])
# print(decision_time_final[0,:,:])
#assert 1==2
return decision_time_final
def forward(self, x, ref, label=None):
batch, time, dim = x.shape
logit = torch.zeros(1).cuda()
embeddings = self.encoder(ref)
mean_embedding = embeddings.mean(1)
if self.att_pool == True:
mean_embedding = self.bn(mean_embedding)
embeddings = embeddings.transpose(1,2)
embeddings = self.bn(embeddings)
embeddings = embeddings.transpose(1,2)
embedding = self.attention_pooling(embeddings, mean_embedding)
else:
embedding = mean_embedding
if self.enhancement == True:
decision_time = self.orcal_EE(x, embedding, label)
decision_up = torch.nn.functional.interpolate(
decision_time.transpose(1, 2), # [16, 2, 125]
time, # 501
mode='linear',
            align_corners=False).transpose(1, 2) # interpolate from 125 frames back up to 501 --> (16,501,2)
return decision_time[:,:,0], decision_up, logit
x = x.unsqueeze(1) # (b,1,t,d)
x = self.detection.features(x) #
        x = x.transpose(1, 2).contiguous().flatten(-2) # make x contiguous, then flatten the channel and frequency dims # (b,125,128)
embedding = embedding.unsqueeze(1)
embedding = embedding.repeat(1, x.shape[1], 1)
# x = torch.cat((x, embedding), dim=2) # [B, T, 128 + emb_dim]
x = self.detection.fusion(embedding, x)
# embedding = embedding.unsqueeze(1)
# embedding = embedding.repeat(1, x.shape[1], 1)
# x = torch.cat((x, embedding), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
self.detection.gru.flatten_parameters()
x, _ = self.detection.gru(x) # x torch.Size([16, 125, 256])
x = self.detection.fc(x)
decision_time = torch.softmax(self.detection.outputlayer(x),dim=2) # x torch.Size([16, 125, 2])
decision_up = torch.nn.functional.interpolate(
decision_time.transpose(1, 2),
time, # 501
mode='linear',
            align_corners=False).transpose(1, 2) # interpolate from 125 frames back up to 501 --> (16,501,2)
return decision_time[:,:,0], decision_up, logit
| EXA-1-master | exa/models/AudioGPT/audio_detection/target_sound_detection/src/models.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/3/9 16:33
# @Author : dongchao yang
# @File : train.py
import collections
import collections.abc
import sys
from loguru import logger
from pprint import pformat
import numpy as np
import pandas as pd
import scipy
import scipy.ndimage  # median_filter below uses scipy.ndimage, which is not pulled in by `import scipy` alone
import six
import sklearn.preprocessing as pre
import torch
import tqdm
import yaml
from scipy.interpolate import interp1d
def parse_config_or_kwargs(config_file, **kwargs):
"""parse_config_or_kwargs
:param config_file: Config file that has parameters, yaml format
:param **kwargs: Other alternative parameters or overwrites for config
"""
with open(config_file) as con_read:
yaml_config = yaml.load(con_read, Loader=yaml.FullLoader)
arguments = dict(yaml_config, **kwargs)
return arguments
def find_contiguous_regions(activity_array):  # if the vectorised boolean operations below are hard to follow, an equivalent O(n) loop over the array works too
"""Find contiguous regions from bool valued numpy.array.
Copy of https://dcase-repo.github.io/dcase_util/_modules/dcase_util/data/decisions.html#DecisionEncoder
Reason is:
    1. This does not necessarily belong to a class
    2. Importing DecisionEncoder pulls in sndfile through other imports, which causes problems on clusters
"""
change_indices = np.logical_xor(activity_array[1:], activity_array[:-1]).nonzero()[0]
change_indices += 1
if activity_array[0]:
# If the first element of activity_array is True add 0 at the beginning
change_indices = np.r_[0, change_indices]
if activity_array[-1]:
# If the last element of activity_array is True, add the length of the array
change_indices = np.r_[change_indices, activity_array.size]
# print(change_indices.reshape((-1, 2)))
# Reshape the result into two columns
return change_indices.reshape((-1, 2))
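# Illustrative example (added for clarity, not part of the original code):
# each returned row is an [onset, offset) index pair of a contiguous active region.
def _demo_find_contiguous_regions():
    activity = np.array([0, 1, 1, 1, 0, 0, 1, 1, 0], dtype=bool)
    return find_contiguous_regions(activity)   # array([[1, 4], [6, 8]])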
def split_train_cv(
data_frame: pd.DataFrame,
frac: float = 0.9,
y=None, # Only for stratified, computes necessary split
**kwargs):
"""split_train_cv
:param data_frame:
:type data_frame: pd.DataFrame
:param frac:
:type frac: float
"""
if kwargs.get('mode',
None) == 'urbansed': # Filenames are DATA_-1 DATA_-2 etc
data_frame.loc[:, 'id'] = data_frame.groupby(
data_frame['filename'].str.split('_').apply(
lambda x: '_'.join(x[:-1]))).ngroup()
sampler = np.random.permutation(data_frame['id'].nunique())
num_train = int(frac * len(sampler))
train_indexes = sampler[:num_train]
cv_indexes = sampler[num_train:]
train_data = data_frame[data_frame['id'].isin(train_indexes)]
cv_data = data_frame[data_frame['id'].isin(cv_indexes)]
del train_data['id']
del cv_data['id']
    elif kwargs.get('mode', None) == 'stratified':
        # Use stratified sampling
from skmultilearn.model_selection import iterative_train_test_split
index_train, _, index_cv, _ = iterative_train_test_split(
data_frame.index.values.reshape(-1, 1), y, test_size=1. - frac)
train_data = data_frame[data_frame.index.isin(index_train.squeeze())]
cv_data = data_frame[data_frame.index.isin(index_cv.squeeze())] # cv --> cross validation
else:
# Simply split train_test
train_data = data_frame.sample(frac=frac, random_state=10)
cv_data = data_frame[~data_frame.index.isin(train_data.index)]
return train_data, cv_data
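# Illustrative usage sketch (added for clarity, not part of the original code):
# with no `mode` kwarg the split is a simple random train/validation split.
def _demo_split_train_cv():
    df = pd.DataFrame({'filename': ['clip_{}.wav'.format(i) for i in range(10)],
                       'event_labels': ['Speech'] * 10})
    train_df, cv_df = split_train_cv(df, frac=0.9)
    return len(train_df), len(cv_df)   # (9, 1)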
def pprint_dict(in_dict, outputfun=sys.stdout.write, formatter='yaml'): # print yaml file
"""pprint_dict
:param outputfun: function to use, defaults to sys.stdout
:param in_dict: dict to print
"""
if formatter == 'yaml':
format_fun = yaml.dump
elif formatter == 'pretty':
format_fun = pformat
for line in format_fun(in_dict).split('\n'):
outputfun(line)
def getfile_outlogger(outputfile):
log_format = "[<green>{time:YYYY-MM-DD HH:mm:ss}</green>] {message}"
logger.configure(handlers=[{"sink": sys.stderr, "format": log_format}])
if outputfile:
logger.add(outputfile, enqueue=True, format=log_format)
return logger
# according label, get encoder
def train_labelencoder(labels: pd.Series, sparse=True):
"""encode_labels
Encodes labels
:param labels: pd.Series representing the raw labels e.g., Speech, Water
:param encoder (optional): Encoder already fitted
returns encoded labels (many hot) and the encoder
"""
assert isinstance(labels, pd.Series), "Labels need to be series"
if isinstance(labels[0], six.string_types):
        # In case of using non-processed strings, e.g., Vacuum, Speech
label_array = labels.str.split(',').values.tolist() # split label according to ','
elif isinstance(labels[0], np.ndarray):
# Encoder does not like to see numpy array
label_array = [lab.tolist() for lab in labels]
    elif isinstance(labels[0], collections.abc.Iterable):
label_array = labels
encoder = pre.MultiLabelBinarizer(sparse_output=sparse)
encoder.fit(label_array)
return encoder
def encode_labels(labels: pd.Series, encoder=None, sparse=True):
"""encode_labels
Encodes labels
:param labels: pd.Series representing the raw labels e.g., Speech, Water
:param encoder (optional): Encoder already fitted
returns encoded labels (many hot) and the encoder
"""
assert isinstance(labels, pd.Series), "Labels need to be series"
instance = labels.iloc[0]
if isinstance(instance, six.string_types):
        # In case of using non-processed strings, e.g., Vacuum, Speech
label_array = labels.str.split(',').values.tolist()
elif isinstance(instance, np.ndarray):
# Encoder does not like to see numpy array
label_array = [lab.tolist() for lab in labels]
    elif isinstance(instance, collections.abc.Iterable):
label_array = labels
# get label_array, it is a list ,contain a lot of label, this label are string type
if not encoder:
encoder = pre.MultiLabelBinarizer(sparse_output=sparse) # if we encoder is None, we should init a encoder firstly.
encoder.fit(label_array)
labels_encoded = encoder.transform(label_array) # transform string to digit
return labels_encoded, encoder
# return pd.arrays.SparseArray(
# [row.toarray().ravel() for row in labels_encoded]), encoder
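# Illustrative usage sketch (added for clarity, not part of the original code):
# comma-separated label strings are turned into a many-hot matrix.
def _demo_encode_labels():
    labels = pd.Series(['Speech', 'Speech,Dog', 'Dog'])
    encoded, encoder = encode_labels(labels, sparse=False)
    # encoded has shape (3, 2); encoder.classes_ is ['Dog', 'Speech']
    return encoded, encoder.classes_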
def decode_with_timestamps(events,labels: np.array):
"""decode_with_timestamps
Decodes the predicted label array (2d) into a list of
[(Labelname, onset, offset), ...]
:param encoder: Encoder during training
:type encoder: pre.MultiLabelBinarizer
:param labels: n-dim array
:type labels: np.array
"""
# print('events ',events)
# print('labels ',labels.shape)
#assert 1==2
if labels.ndim == 2:
#print('...')
return [_decode_with_timestamps(events[i],labels[i]) for i in range(labels.shape[0])]
else:
return _decode_with_timestamps(events,labels)
def median_filter(x, window_size, threshold=0.5):
"""median_filter
:param x: input prediction array of shape (B, T, C) or (B, T).
Input is a sequence of probabilities 0 <= x <= 1
:param window_size: An integer to use
:param threshold: Binary thresholding threshold
"""
x = binarize(x, threshold=threshold) # transfer to 0 or 1
if x.ndim == 3:
size = (1, window_size, 1)
elif x.ndim == 2 and x.shape[0] == 1:
# Assume input is class-specific median filtering
# E.g, Batch x Time [1, 501]
size = (1, window_size)
elif x.ndim == 2 and x.shape[0] > 1:
# Assume input is standard median pooling, class-independent
# E.g., Time x Class [501, 10]
size = (window_size, 1)
return scipy.ndimage.median_filter(x, size=size)
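# Illustrative example (added for clarity, not part of the original code):
# the prediction is binarised first, then isolated one-frame detections are
# smoothed away by the median filter along the time axis.
def _demo_median_filter():
    pred = np.array([[0.1, 0.8, 0.2, 0.9, 0.85, 0.1]])   # (batch=1, time)
    return median_filter(pred, window_size=3, threshold=0.5)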
def _decode_with_timestamps(events,labels):
result_labels = []
# print('.......')
# print('labels ',labels.shape)
# print(labels)
change_indices = find_contiguous_regions(labels)
# print(change_indices)
# assert 1==2
for row in change_indices:
result_labels.append((events,row[0], row[1]))
return result_labels
def inverse_transform_labels(encoder, pred):
if pred.ndim == 3:
return [encoder.inverse_transform(x) for x in pred]
else:
return encoder.inverse_transform(pred)
def binarize(pred, threshold=0.5):
# Batch_wise
if pred.ndim == 3:
return np.array(
[pre.binarize(sub, threshold=threshold) for sub in pred])
else:
return pre.binarize(pred, threshold=threshold)
def double_threshold(x, high_thres, low_thres, n_connect=1):
"""double_threshold
Helper function to calculate double threshold for n-dim arrays
:param x: input array
:param high_thres: high threshold value
:param low_thres: Low threshold value
:param n_connect: Distance of <= n clusters will be merged
"""
assert x.ndim <= 3, "Whoops something went wrong with the input ({}), check if its <= 3 dims".format(
x.shape)
if x.ndim == 3:
apply_dim = 1
elif x.ndim < 3:
apply_dim = 0
# x is assumed to be 3d: (batch, time, dim)
# Assumed to be 2d : (time, dim)
# Assumed to be 1d : (time)
# time axis is therefore at 1 for 3d and 0 for 2d (
return np.apply_along_axis(lambda x: _double_threshold(
x, high_thres, low_thres, n_connect=n_connect),
axis=apply_dim,
arr=x)
def _double_threshold(x, high_thres, low_thres, n_connect=1, return_arr=True): # in nature, double_threshold considers boundary question
"""_double_threshold
Computes a double threshold over the input array
:param x: input array, needs to be 1d
:param high_thres: High threshold over the array
:param low_thres: Low threshold over the array
:param n_connect: Postprocessing, maximal distance between clusters to connect
    :param return_arr: By default (return_arr=True) this function returns an array of the same size as x filled with ones and zeros; set return_arr=False to get the filtered index pairs instead.
"""
assert x.ndim == 1, "Input needs to be 1d"
high_locations = np.where(x > high_thres)[0] # return the index, where value is greater than high_thres
    locations = x > low_thres # boolean mask: True where the value exceeds the low threshold
encoded_pairs = find_contiguous_regions(locations)
# print('encoded_pairs ',encoded_pairs)
filtered_list = list(
filter(
lambda pair:
((pair[0] <= high_locations) & (high_locations <= pair[1])).any(),
            encoded_pairs))  # keep only the pairs that include at least one high-threshold location
    # print('filtered_list ', filtered_list)
    filtered_list = connect_(filtered_list, n_connect)  # merge pairs whose distance is at most n_connect
if return_arr:
zero_one_arr = np.zeros_like(x, dtype=int)
for sl in filtered_list:
zero_one_arr[sl[0]:sl[1]] = 1
return zero_one_arr
return filtered_list
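# Illustrative example (added for clarity, not part of the original code):
# a region is kept only if it contains at least one value above the high threshold;
# the low threshold then defines the region's extent.
def _demo_double_threshold():
    scores = np.array([0.1, 0.3, 0.8, 0.4, 0.1, 0.3, 0.35, 0.1])
    return _double_threshold(scores, high_thres=0.7, low_thres=0.2)
    # -> array([0, 1, 1, 1, 0, 0, 0, 0])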
def connect_clusters(x, n=1):
if x.ndim == 1:
return connect_clusters_(x, n)
if x.ndim >= 2:
return np.apply_along_axis(lambda a: connect_clusters_(a, n=n), -2, x)
def connect_clusters_(x, n=1):
"""connect_clusters_
Connects clustered predictions (0,1) in x with range n
:param x: Input array. zero-one format
:param n: Number of frames to skip until connection can be made
"""
assert x.ndim == 1, "input needs to be 1d"
reg = find_contiguous_regions(x)
start_end = connect_(reg, n=n)
zero_one_arr = np.zeros_like(x, dtype=int)
for sl in start_end:
zero_one_arr[sl[0]:sl[1]] = 1
return zero_one_arr
def connect_(pairs, n=1):
"""connect_
Connects two adjacent clusters if their distance is <= n
:param pairs: Clusters of iterateables e.g., [(1,5),(7,10)]
:param n: distance between two clusters
"""
if len(pairs) == 0:
return []
start_, end_ = pairs[0]
new_pairs = []
for i, (next_item, cur_item) in enumerate(zip(pairs[1:], pairs[0:])):
end_ = next_item[1]
if next_item[0] - cur_item[1] <= n:
pass
else:
new_pairs.append((start_, cur_item[1]))
start_ = next_item[0]
new_pairs.append((start_, end_))
return new_pairs
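# Illustrative example (added for clarity, not part of the original code):
# clusters that are at most n frames apart are merged into a single region.
def _demo_connect():
    return connect_([(1, 5), (7, 10), (20, 25)], n=2)   # [(1, 10), (20, 25)]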
def predictions_to_time(df, ratio):
df.onset = df.onset * ratio
df.offset = df.offset * ratio
return df
def upgrade_resolution(arr, scale):
    # print('arr ', arr.shape)
x = np.arange(0, arr.shape[0])
f = interp1d(x, arr, kind='linear', axis=0, fill_value='extrapolate')
scale_x = np.arange(0, arr.shape[0], 1 / scale)
up_scale = f(scale_x)
return up_scale
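# Illustrative example (added for clarity, not part of the original code):
# frame-level scores are linearly interpolated to `scale` times the original resolution.
def _demo_upgrade_resolution():
    frame_scores = np.array([0.0, 0.5, 1.0])
    return upgrade_resolution(frame_scores, scale=2)   # shape (6,)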
# a = [0.1,0.2,0.3,0.8,0.4,0.1,0.3,0.9,0.4]
# a = np.array(a)
# b = a>0.2
# _double_threshold(a,0.7,0.2) | EXA-1-master | exa/models/AudioGPT/audio_detection/target_sound_detection/src/utils.py |
from setuptools import setup, find_packages
setup(
name = 'recurrent-memory-transformer-pytorch',
packages = find_packages(exclude=[]),
version = '0.2.2',
license='MIT',
description = 'Recurrent Memory Transformer - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/recurrent-memory-transformer-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'recurrence',
'memory',
'long-context'
],
install_requires=[
'einops>=0.6.1',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| EXA-1-master | exa/models/recurrent-memory-transformer-pytorch/setup.py |
import gzip
import random
import tqdm
import numpy as np
import torch
from torch.optim import Adam
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from recurrent_memory_transformer_pytorch import RecurrentMemoryTransformer, RecurrentMemoryTransformerWrapper
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
PRIME_LENGTH = 128
GENERATE_EVERY = 250
GENERATE_LENGTH = 2048
SEQ_LEN = 2048
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
# instantiate palm
model = RecurrentMemoryTransformer(
num_tokens = 256,
dim = 512,
depth = 6,
dim_head = 64,
heads = 8,
seq_len = 512,
use_flash_attn = True,
num_memory_tokens = 128,
use_xl_memories = True,
xl_mem_len = 256
)
model = RecurrentMemoryTransformerWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open("./data/enwik8.gz") as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
np_train, np_valid = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(np_train), torch.from_numpy(np_valid)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start : rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = Adam(model.parameters(), lr = LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10.0, desc="training"):
model.train()
total_loss = 0.
for _ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(
next(train_loader),
memory_replay_backprop = True,
mrbp_loss_weight = 1. / GRADIENT_ACCUMULATE_EVERY
)
total_loss += loss
print(f"training loss: {total_loss.item()}")
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss, _ = model(next(val_loader), return_loss = True)
print(f"validation loss: {loss.item()}")
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:PRIME_LENGTH]
prime = decode_tokens(inp)
print(f"%s \n\n %s", (prime, "*" * 100))
sample = model.generate(inp[None, :], length = GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
print(output_str, "\n")
| EXA-1-master | exa/models/recurrent-memory-transformer-pytorch/train.py |
from recurrent_memory_transformer_pytorch.recurrent_memory_transformer import RecurrentMemoryTransformer, RecurrentMemoryTransformerWrapper
| EXA-1-master | exa/models/recurrent-memory-transformer-pytorch/recurrent_memory_transformer_pytorch/__init__.py |
from collections import namedtuple
from functools import wraps
from packaging import version
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
Config = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
dropout = 0.,
causal = False,
use_flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.register_buffer("mask", None, persistent=False)
self.use_flash = use_flash
assert not (use_flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = Config(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not use_flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = Config(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = Config(False, True, True)
def get_mask(self, n, device):
if exists(self.mask) and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.register_buffer("mask", mask, persistent=False)
return mask
def flash_attn(self, q, k, v, mask = None):
_, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
if exists(mask) and mask.ndim != 4:
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = self.causal
)
return out
def forward(self, q, k, v, mask = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if self.use_flash:
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale
# key padding mask
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# causal mask
if self.causal:
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum("b h i j, b h j d -> b h i d", attn, v)
return out
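# Illustrative usage sketch (added for clarity, not part of the original file):
# shapes follow the einstein-notation comment in `forward`.
def _demo_attend():
    attend = Attend(dropout=0.1, causal=True, use_flash=False)
    q = torch.randn(2, 8, 16, 64)               # (batch, heads, seq, dim_head)
    k = torch.randn(2, 8, 16, 64)
    v = torch.randn(2, 8, 16, 64)
    mask = torch.ones(2, 16, dtype=torch.bool)  # key padding mask (batch, seq)
    return attend(q, k, v, mask=mask).shape     # torch.Size([2, 8, 16, 64])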
| EXA-1-master | exa/models/recurrent-memory-transformer-pytorch/recurrent_memory_transformer_pytorch/attend.py |
import math
from functools import partial
from itertools import zip_longest
from contextlib import nullcontext
from typing import Optional, List
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat, pack, unpack
from recurrent_memory_transformer_pytorch.attend import Attend
# constants
Linear = partial(nn.Linear, bias = False)
# helpers
def exists(val):
return val is not None
def identity(t, *args, **kwargs):
return t
def default(*vals):
for val in vals:
if exists(val):
return val
return None
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
def divisible_by(numer, denom):
return (numer % denom) == 0
# sampling helpers
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.9):
k = math.ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
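# Illustrative sketch (added for clarity, not part of the original file):
# top_k keeps roughly the top (1 - thres) fraction of the vocabulary and
# gumbel_sample then draws one token per batch element.
def _demo_sampling():
    logits = torch.randn(2, 256)                 # (batch, vocab)
    filtered = top_k(logits, thres=0.9)          # everything outside the top ~10% becomes -inf
    token = gumbel_sample(filtered, temperature=1.)
    return token.shape                           # torch.Size([2])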
#========================================================= multiway architecture
def multiwayWrapper(args, module, dim=1):
    # wrap `module` in a MultiWayNetwork when args.multiway is set
    # (MultiWayNetwork is assumed to be defined or imported elsewhere, e.g. a torchscale-style multiway module)
    if args.multiway:
        return MultiWayNetwork(module, dim=dim)
    return module
def set_split_position(position):
def apply_fn(module):
if hasattr(module, "split_position"):
module.split_position = position
return apply_fn
#========================================================= multiway architecture
def token_shift_fn(t, ps):
read_mem, t, write_mem = unpack(t, ps, 'b * d')
t, t_shift = t.chunk(2, dim = -1)
t_shift = F.pad(t_shift, (0, 0, 1, -1), value = 0.)
t = torch.cat((t, t_shift), dim = -1)
return torch.cat((read_mem, t, write_mem), dim = -2)
def frac_gradient(t, frac = 1.):
if frac == 1.:
return t
return t * frac + t.detach() * (1. - frac)
# rotary embedding
class RotaryEmbedding(nn.Module):
def __init__(self, dim, theta = 32768):
super().__init__()
inv_freq = 1. / (theta ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, positions):
freqs = torch.einsum('i , j -> i j', positions, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
return freqs
def rotate_half(x):
x1, x2 = x.chunk(2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t):
return (t * pos.cos()) + (rotate_half(t) * pos.sin())
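# Illustrative sketch (added for clarity, not part of the original file):
# rotary frequencies are built from (float) positions and applied per attention head.
def _demo_rotary():
    rotary = RotaryEmbedding(dim=64)
    pos = torch.arange(16).float()               # positions 0..15
    freqs = rotary(pos)                          # (16, 64)
    q = torch.randn(2, 8, 16, 64)                # (batch, heads, seq, dim_head)
    return apply_rotary_pos_emb(freqs, q).shape  # torch.Size([2, 8, 16, 64])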
# norms
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.gamma
# feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return x * F.gelu(gate)
def FeedForward(dim, mult = 4):
dim_inner = int(dim * mult * 2 / 3)
return nn.Sequential(
RMSNorm(dim),
Linear(dim, dim_inner * 2, bias = False),
GEGLU(),
RMSNorm(dim_inner),
Linear(dim_inner, dim, bias = False)
)
# attention
class Attention(nn.Module):
def __init__(
self,
*,
dim,
causal = False,
dim_head = 64,
heads = 8,
dropout = 0.,
use_flash_attn = False,
use_custom_causal_attn_mask = False
):
super().__init__()
dim_inner = dim_head * heads
self.heads = heads
self.attend = Attend(
causal = causal and not use_custom_causal_attn_mask,
dropout = dropout,
use_flash = use_flash_attn
)
self.norm = RMSNorm(dim)
self.to_q = Linear(dim, dim_inner)
self.to_kv = Linear(dim, dim_inner * 2)
self.to_out = Linear(dim_inner, dim)
def forward(
self,
x,
rotary_emb = None,
mask = None,
xl_memories = None
):
h = self.heads
x = self.norm(x)
q = self.to_q(x)
k, v = self.to_kv(x).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
next_xl_memories = torch.stack((k, v))
        if exists(xl_memories):
            kx, vx = xl_memories
            k = torch.cat((kx, k), dim = -2)
            v = torch.cat((vx, v), dim = -2)
            if exists(mask):
                mask = F.pad(mask, (xl_memories.shape[-2], 0), value = True)
if exists(rotary_emb):
q = apply_rotary_pos_emb(rotary_emb, q)
k = apply_rotary_pos_emb(rotary_emb, k)
out = self.attend(q, k, v, mask = mask)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out), next_xl_memories
# transformer
class RecurrentMemoryTransformer(nn.Module):
def __init__(
self,
dim,
*,
num_tokens,
depth,
num_memory_tokens,
seq_len,
causal = True,
dim_head = 64,
heads = 8,
ff_mult = 4,
use_flash_attn = False,
ignore_index = -1,
abs_pos_emb = True,
rotary_pos_emb = False,
token_shift = True,
use_xl_memories = False,
xl_mem_len = None,
enhanced_xl_recurrence = False, # add simple method for enhancing receptive field of xl memories, from ernie-doc paper
emb_gradient_frac = 0.1, # trick from cogview paper that leads to a bit more stability
        memory_not_causal = True,  # flash attention behaves a bit more optimally if causal mask is not explicitly passed in - but if the memories perform better without a causal mask, it is necessary to have this turned on
        args = None,               # optional external config (e.g. for the multiway helpers above); not used in the code shown here
    ):
super().__init__()
self.causal = causal
self.seq_len = seq_len
self.emb_gradient_frac = emb_gradient_frac
assert num_memory_tokens > 0
self.token_emb = nn.Embedding(num_tokens, dim)
# positions
assert any([abs_pos_emb, rotary_pos_emb, token_shift])
self.pos_emb = nn.Embedding(seq_len, dim) if abs_pos_emb else None
self.rotary_pos_emb = RotaryEmbedding(dim_head) if rotary_pos_emb else None
self.maybe_token_shift = token_shift_fn if token_shift else identity
# memory related
self.num_memory_tokens = num_memory_tokens
self.read_memory_emb = nn.Parameter(torch.zeros(dim))
nn.init.normal_(self.read_memory_emb, std = 0.02)
self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
nn.init.normal_(self.memory_tokens, std = 0.02)
# xl memories
xl_mem_len = default(xl_mem_len, seq_len)
assert xl_mem_len <= seq_len
self.xl_mem_len = xl_mem_len
self.use_xl_memories = use_xl_memories
assert not (rotary_pos_emb and use_xl_memories), 'rotary not compatible with xl memories yet'
self.enhanced_xl_recurrence = enhanced_xl_recurrence
# layers
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(
dim = dim,
dim_head = dim_head,
causal = causal,
heads = heads,
use_flash_attn = use_flash_attn,
use_custom_causal_attn_mask = memory_not_causal
),
FeedForward(dim = dim, mult = ff_mult)
]))
self.to_logits = nn.Sequential(
RMSNorm(dim),
nn.Linear(dim, num_tokens)
)
self.ignore_index = ignore_index
# whether to use custom attention mask if causal and memory should not be causal
self.use_custom_causal_attn_mask = causal and memory_not_causal
def init_memory(self, batch):
return repeat(self.memory_tokens, 'm d -> b m d', b = batch)
def forward(
self,
x,
read_memories = None,
*,
mask = None,
labels = None,
xl_memories: Optional[List[torch.Tensor]] = None
):
b, n, device, mem_length, return_loss = *x.shape, x.device, self.num_memory_tokens, exists(labels)
assert n <= self.seq_len
pos = torch.arange(n, device = device)
x = self.token_emb(x)
# maybe absolute positional embedding
if exists(self.pos_emb):
x = x + self.pos_emb(pos)
# trick from cogview paper
x = frac_gradient(x, self.emb_gradient_frac)
# prepare read and write memories, as in paper
write_memories = self.init_memory(b)
if exists(read_memories):
read_mem_length = mem_length
read_memories = read_memories + self.read_memory_emb
else:
read_mem_length = 0
read_memories = x[:, 0:0]
# concat to main sequence using einop's pack
x, ps = pack([read_memories, x, write_memories], 'b * d')
# take care of mask
if exists(mask):
mask = F.pad(mask, (read_mem_length, mem_length), value = True)
# custom causal mask, if needed
if self.use_custom_causal_attn_mask:
causal_mask = torch.ones((n, n), device = device, dtype = torch.bool).tril()
causal_mask = F.pad(causal_mask, (0, mem_length, read_mem_length, 0), value = False)
causal_mask = F.pad(causal_mask, (read_mem_length, 0, 0, mem_length), value = True)
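            # resulting mask layout over [read mems | tokens | write mems] (comment added for clarity):
            #   - read memory rows attend only to the read memory columns
            #   - token rows attend to read memories and causally to earlier tokens, but not to write memories
            #   - write memory rows attend to the full sequence, letting them summarize the segment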
assert not exists(mask)
mask = rearrange(causal_mask, 'i j -> 1 1 i j')
# rotary embedding - offset main positions by 10000, and keep all memories at position 0
rotary_emb = None
if exists(self.rotary_pos_emb):
pos = pos + 10000
pos = F.pad(pos, (read_mem_length, mem_length), value = 0)
rotary_emb = self.rotary_pos_emb(pos)
shift_fn = partial(self.maybe_token_shift, ps = ps)
# prepare xl memories
        xl_memories = default(xl_memories, [])
        if self.enhanced_xl_recurrence and len(xl_memories) > 1: # simply shift all the xl memories down by one, so lower layer gets access to representations from layer above
            xl_memories = [*xl_memories[1:], xl_memories[0]]
        # build the iterator only after the optional shift above, otherwise the shift would have no effect
        xl_memories_iter = iter(xl_memories)
        new_xl_memories = []
# attention and feedforward
for attn, ff in self.layers:
attn_out, xl_memories = attn(shift_fn(x), mask = mask, xl_memories = next(xl_memories_iter, None), rotary_emb = rotary_emb)
new_xl_memories.append(xl_memories)
x = x + attn_out
x = ff(shift_fn(x)) + x
# whether to return xl memories
next_xl_memories = None
if self.use_xl_memories:
next_xl_memories = list(map(lambda t: torch.detach(t[..., -self.xl_mem_len:, :]), new_xl_memories))
# split out memories using unpack
read_memories, x, write_memories = unpack(x, ps, 'b * d')
# to logits
logits = self.to_logits(x)
if not return_loss:
return logits, write_memories, next_xl_memories
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
labels,
ignore_index = self.ignore_index
)
return loss, write_memories, next_xl_memories
# wrapper to manage many segments
class RecurrentMemoryTransformerWrapper(nn.Module):
def __init__(
self,
transformer: RecurrentMemoryTransformer
):
super().__init__()
self.transformer = transformer
self.seq_len = transformer.seq_len
@torch.no_grad()
@eval_decorator
def generate(
self,
prime,
*,
length,
memories = None,
xl_memories: Optional[List[torch.Tensor]] = None,
temperature = 1.,
filter_thres = 0.9,
):
assert self.transformer.causal, 'only autoregressive transformers can generate'
start_len, seq_len = prime.shape[-1], self.seq_len
assert length >= start_len
*past_segments, curr_segment = prime.split(seq_len, dim = -1)
# catch memories up to the current segment
for past_segment in past_segments:
_, memories, xl_memories = self.transformer(past_segment, memories, xl_memories = xl_memories)
# sample for the remaining length
for ind in range(length - start_len):
logits, next_memories, next_xl_memories = self.transformer(curr_segment, memories, xl_memories = xl_memories)
logits = logits[:, -1]
filtered_logits = top_k(logits, thres = filter_thres)
sampled = gumbel_sample(filtered_logits, temperature = temperature)
sampled = rearrange(sampled, 'b -> b 1')
curr_segment = torch.cat((curr_segment, sampled), dim = -1)
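            # once the running segment has grown past seq_len by the freshly sampled token,
            # commit the memories produced on that full segment and start a new segment
            # seeded with the last sampled token (comment added for clarity)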
if divisible_by(curr_segment.shape[-1] - 1, seq_len):
memories = next_memories
xl_memories = next_xl_memories
past_segment, curr_segment = curr_segment[..., :seq_len], curr_segment[..., -1:]
past_segments.append(past_segment)
# add current segment to all segments
past_segments.append(curr_segment)
# reconcat all segments
output = torch.cat(past_segments, dim = -1)
output = output[:, start_len:]
return output
def forward(
self,
x,
memories = None,
*,
mask = None,
xl_memories: Optional[List[torch.Tensor]] = None,
return_loss = False,
labels = None,
memory_replay_backprop = False, # whether to have the class do the backwards pass memory efficiently
mrbp_loss_weight = 1. # if using memory replay backprop with gradient accumulation, scale loss by this factor ex. (1. / <num grad accum steps>)
):
seq_len = self.seq_len
if (return_loss or memory_replay_backprop) and not exists(labels):
x, labels = x[:, :-1], x[:, 1:]
# segment input
segments = x.split(seq_len, dim = -1)
total_length = x.shape[-1]
num_segments = len(segments)
segment_length_frac = tuple(map(lambda t: t.shape[-1] / total_length, segments))
# default values
label_segments = mask_segments = (None,)
# take care of labels
if exists(labels):
label_segments = labels.split(seq_len, dim = -1)
# take care of the mask
if exists(mask):
mask_segments = mask.split(seq_len, dim = -1)
# keep replay buffer
replay_buffer = [memories]
# replay buffer for xl memories
xl_segments = [xl_memories]
# decide context of forward depending on whether doing memory-replay-backprop
forward_context = nullcontext if not memory_replay_backprop else torch.no_grad
# forward and get all outputs (can be either loss or logits)
logits = []
losses = []
for segment, mask_segment, label_segment, loss_weight in zip_longest(segments, mask_segments, label_segments, segment_length_frac):
with forward_context():
output, memories, xl_memories = self.transformer(segment, memories, mask = mask_segment, labels = label_segment)
replay_buffer.append(memories)
xl_segments.append(xl_memories)
if return_loss:
losses.append(output * loss_weight)
else:
logits.append(output)
# whether to do memory replay backpropagation
# https://arxiv.org/abs/2010.06891
# algorithm 1
if memory_replay_backprop:
memories_grad = torch.zeros_like(replay_buffer[-1])
reversed_inputs = zip_longest(*map(reversed, [
range(num_segments),
segments,
replay_buffer[:-1],
xl_segments[:-1],
mask_segments,
label_segments,
segment_length_frac,
]))
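            # segments are replayed last-to-first: each pass backprops the segment loss plus the
            # gradient flowing in from the following segment's memories (injected through
            # next_segment_memories.backward(memories_grad)), then caches the gradient w.r.t. this
            # segment's input memories for the preceding segment (comment added for clarity)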
total_loss = 0.
for i, segment, segment_memories, segment_xl_memories, mask_segment, label_segment, loss_weight in reversed_inputs:
is_first = i == 0
if exists(segment_memories):
segment_memories.requires_grad_()
loss, next_segment_memories, _ = self.transformer(segment, segment_memories, mask = mask_segment, xl_memories = segment_xl_memories, labels = label_segment)
weighted_loss = loss * loss_weight * mrbp_loss_weight
weighted_loss.backward(retain_graph = True)
next_segment_memories.backward(memories_grad)
total_loss += weighted_loss
if is_first:
continue
memories_grad.copy_(segment_memories.grad.data)
return total_loss
# return logits if needed
if not return_loss:
logits = torch.cat(logits, dim = -2)
return logits, memories
# otherwise return losses
return sum(losses), memories
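# ---------------------------------------------------------------------------
# Usage sketch (editorial addition, not part of the original module): a minimal
# end-to-end pass through RecurrentMemoryTransformer and its wrapper. All
# hyperparameters and tensor shapes below are illustrative assumptions, not
# values recommended by the library or the paper.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import torch
    model = RecurrentMemoryTransformer(
        dim = 512,
        num_tokens = 256,
        depth = 2,
        num_memory_tokens = 8,
        seq_len = 128
    )
    wrapper = RecurrentMemoryTransformerWrapper(model)
    # three segments worth of tokens; the wrapper splits them along the sequence
    # dimension and threads the write memories of one segment into the next
    tokens = torch.randint(0, 256, (1, 128 * 3))
    loss, _memories = wrapper(tokens, return_loss = True)
    loss.backward()
    # autoregressive sampling, primed with the first segment
    sampled = wrapper.generate(tokens[:, :128], length = 256)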
| EXA-1-master | exa/models/recurrent-memory-transformer-pytorch/recurrent_memory_transformer_pytorch/recurrent_memory_transformer.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import codecs
import os
import platform
import re
from glob import glob
import setuptools
from setuptools import Extension
from setuptools.command.build_ext import build_ext
def clean_html(raw_html):
cleanr = re.compile("<.*?>")
cleantext = re.sub(cleanr, "", raw_html).strip()
return cleantext
# Single sourcing code from here:
# https://packaging.python.org/guides/single-sourcing-package-version/
def find_version(*file_paths):
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
with codecs.open(os.path.join(here, *parts), "r") as fp:
return fp.read()
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
def fetch_long_description():
with open("README.md", encoding="utf8") as f:
readme = f.read()
# https://stackoverflow.com/a/12982689
readme = clean_html(readme)
return readme
def fetch_requirements():
requirements_file = "requirements.txt"
if platform.system() == "Windows":
DEPENDENCY_LINKS.append("https://download.pytorch.org/whl/torch_stable.html")
with open(requirements_file) as f:
reqs = f.read()
reqs = reqs.strip().split("\n")
reqs = remove_specific_requirements(reqs)
return reqs
def remove_specific_requirements(reqs):
rtd = "READTHEDOCS" in os.environ
excluded = {"fasttext": rtd}
updated_reqs = []
for req in reqs:
without_version = req.split("==")[0]
if not excluded.get(without_version, False):
updated_reqs.append(req)
return updated_reqs
def fetch_files_from_folder(folder):
options = glob(f"{folder}/**", recursive=True)
data_files = []
# All files inside the folder need to be added to package_data
# which would include yaml configs as well as project READMEs
for option in options:
if os.path.isdir(option):
files = []
for f in glob(os.path.join(option, "*")):
if os.path.isfile(f):
files.append(f)
data_files += files
return data_files
def fetch_package_data():
current_dir = os.getcwd()
mmf_folder = os.path.dirname(os.path.abspath(__file__))
# The files for package data need to be relative to mmf package dir
os.chdir(os.path.join(mmf_folder, "mmf"))
data_files = fetch_files_from_folder("projects")
data_files += fetch_files_from_folder("tools")
data_files += fetch_files_from_folder("configs")
data_files += glob(os.path.join("utils", "phoc", "cphoc.*"))
os.chdir(current_dir)
return data_files
DISTNAME = "mmf"
DESCRIPTION = "mmf: a modular framework for vision and language multimodal \
research."
LONG_DESCRIPTION = fetch_long_description()
LONG_DESCRIPTION_CONTENT_TYPE = "text/markdown"
AUTHOR = "Facebook AI Research"
AUTHOR_EMAIL = "[email protected]"
DEPENDENCY_LINKS = []
REQUIREMENTS = fetch_requirements()
# Need to exclude folders in tests as well so as they don't create an extra package
# If something from tools is regularly used consider converting it into a cli command
EXCLUDES = ("data", "docs", "tests", "tests.*", "tools", "tools.*")
CMD_CLASS = {"build_ext": build_ext}
EXT_MODULES = [
Extension(
"mmf.utils.phoc.cphoc", sources=["mmf/utils/phoc/src/cphoc.c"], language="c"
)
]
if "READTHEDOCS" in os.environ:
# Don't build extensions when generating docs
EXT_MODULES = []
CMD_CLASS.pop("build_ext", None)
# use CPU build of PyTorch
DEPENDENCY_LINKS.append(
"https://download.pytorch.org/whl/cpu/torch-1.5.0%2B"
+ "cpu-cp36-cp36m-linux_x86_64.whl"
)
if __name__ == "__main__":
setuptools.setup(
name=DISTNAME,
install_requires=REQUIREMENTS,
include_package_data=True,
package_data={"mmf": fetch_package_data()},
packages=setuptools.find_packages(exclude=EXCLUDES),
python_requires=">=3.6",
ext_modules=EXT_MODULES,
cmdclass=CMD_CLASS,
version=find_version("mmf", "version.py"),
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
dependency_links=DEPENDENCY_LINKS,
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: BSD License",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Operating System :: OS Independent",
],
entry_points={
"console_scripts": [
"mmf_run = mmf_cli.run:run",
"mmf_predict = mmf_cli.predict:predict",
"mmf_convert_hm = mmf_cli.hm_convert:main",
"mmf_interactive = mmf_cli.interactive:interactive",
]
},
)
| EXA-1-master | exa/models/mmf-main/setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
| EXA-1-master | exa/models/mmf-main/tools/__init__.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
import lib as sweep
from lib import hyperparam
def get_grid(args):
max_update = 22000
return [
hyperparam("run_type", "train_val"),
hyperparam("config", "projects/visual_bert/configs/vqa2/defaults.yaml"),
# hyperparam("--fp16", save_dir_key=lambda val: "fp16"),
hyperparam("training.num_workers", 5),
hyperparam("dataset", "vqa2"),
hyperparam("model", "visual_bert", save_dir_key=lambda val: val),
# For nlvr2, we are able to fit batch of size 16 on single GPU with 16GB
# memory. Same number is 32 for VQA2, so scale accordingly
hyperparam(
"training.batch_size", [512, 256], save_dir_key=lambda val: f"bs{val}"
),
hyperparam("training.seed", 1, save_dir_key=lambda val: f"s{val}"),
hyperparam("scheduler.type", ["warmup_cosine"]),
hyperparam("scheduler.params.num_warmup_steps", 2000),
hyperparam("scheduler.params.num_training_steps", max_update),
hyperparam("optimizer.type", "adam_w", save_dir_key=lambda val: val),
hyperparam(
"optimizer.params.lr", [5e-5, 1e-5], save_dir_key=lambda val: f"lr{val}"
),
hyperparam("optimizer.params.eps", 1e-8),
hyperparam(
"training.max_updates", max_update, save_dir_key=lambda val: f"mu{val}"
),
hyperparam("training.log_format", "json"),
hyperparam("training.pin_memory", True),
hyperparam("training.log_interval", 1000),
hyperparam("training.checkpoint_interval", 1000),
hyperparam("training.evaluation_interval", 4000),
hyperparam("training.find_unused_parameters", True),
hyperparam(
"model_config.visual_bert.freeze_base",
[False],
save_dir_key=lambda val: f"fb{val}",
),
]
def postprocess_hyperparams(args, config):
"""Postprocess a given hyperparameter configuration."""
pass
if __name__ == "__main__":
sweep.main(get_grid, postprocess_hyperparams)
| EXA-1-master | exa/models/mmf-main/tools/sweeps/sweep_visual_bert.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Copied from fairseq. Mostly written by @myleott. Adapted accordingly for mmf
import datetime
import itertools
import os
import random
import shlex
import shutil
import subprocess
from collections import OrderedDict
from glob import glob
from mmf.utils.general import get_mmf_root
def main(get_grid, postprocess_hyperparams, args):
if args.local:
args.num_nodes = 1
# compute all possible hyperparameter configurations
grid = get_grid(args)
grid_product = list(itertools.product(*[hp.values for hp in grid]))
# randomly shuffle configurations
random.seed(args.seed)
random.shuffle(grid_product)
for i, hp_values in enumerate(grid_product):
config = OrderedDict()
for hp, value in zip(grid, hp_values):
config[hp.name] = hp
config[hp.name].current_value = value
# postprocess hyperparams
postprocess_hyperparams(args, config)
# launch training
job_id = launch_train(args, config)
if job_id is not None:
print(f"Launched {job_id}")
if args.sequential and not args.local and job_id is not None:
args.dep = job_id
if i == args.num_trials - 1:
break
def copy_all_python_files(source, snapshot_main_dir, code_snapshot_hash):
"""
Copies following files from source to destination:
a) all *.py files at direct source location.
b) all mmf/*.py recursively.
"""
os.makedirs(snapshot_main_dir, exist_ok=True)
destination = os.path.join(snapshot_main_dir, code_snapshot_hash)
assert not os.path.exists(
destination
), f"Code snapshot: {code_snapshot_hash} alredy exists"
os.makedirs(destination)
all_pys = (
glob(os.path.join(source, "mmf/**/*.py"), recursive=True)
+ glob(os.path.join(source, "tools/**/*.py"), recursive=True)
+ glob(os.path.join(source, "*.py"))
)
for filepath in all_pys:
directory, filename = os.path.split(filepath)
if directory:
os.makedirs(os.path.join(destination, directory), exist_ok=True)
shutil.copy2(
os.path.join(source, filepath), os.path.join(destination, filepath)
)
return destination
def launch_train(args, config):
def dry_run(msg):
if args.dry_run:
print(f"| dry-run: {msg}")
return args.dry_run
destination = ""
if args.snapshot_code:
# Currently hash is just the current time in ISO format.
code_snapshot_hash = datetime.datetime.now().isoformat()
destination = copy_all_python_files(
".", "slurm_snapshot_code", code_snapshot_hash
)
# compute save_dir
save_dir_key = ".".join(
filter(
lambda save_dir_key: save_dir_key is not None,
[hp.get_save_dir_key() for hp in config.values()],
)
)
save_dir_key = save_dir_key.replace(",", "_")
num_total_gpus = args.num_nodes * args.num_gpus
save_dir = os.path.join(
args.checkpoints_dir, f"{args.prefix}.{save_dir_key}.ngpu{num_total_gpus}"
)
tensorboard_logdir = os.path.join(
args.tensorboard_logdir, f"{args.prefix}.{save_dir_key}.ngpu{num_total_gpus}"
)
    # create save directory if it doesn't exist
if not os.path.exists(save_dir):
if not dry_run(f"create directory: {save_dir}"):
os.makedirs(save_dir)
# copy baseline model
checkpoint_last = os.path.join(save_dir, "current.ckpt")
if (
args.baseline_model
and not os.path.exists(checkpoint_last)
and not dry_run(f"initialize with baseline model: {args.baseline_model}")
):
if not os.path.exists(args.baseline_model):
raise FileNotFoundError(
f"Cannot find baseline model: {args.baseline_model}"
)
shutil.copyfile(args.baseline_model, checkpoint_last)
# check for whether the run failed
if has_finished(save_dir):
if args.resume_finished:
dry_run(f"restart previously finished run: {save_dir}")
else:
print(f"skip finished run (override with --resume-finished): {save_dir}")
return
elif has_failed(save_dir):
if args.resume_failed:
dry_run(f"resume failed run: {save_dir}")
else:
print(f"skip failed run (override with --resume-failed): {save_dir}")
return
elif has_started(save_dir):
print(f"skip in progress run: {save_dir}")
return
# generate train command
train_cmd = [
"python",
"-u",
os.path.join(get_mmf_root(), "..", "mmf_cli", "run.py"),
]
train_cmd.extend(["distributed.world_size", str(args.num_nodes * args.num_gpus)])
if args.num_nodes > 1:
train_cmd.extend(["distributed.port", str(get_random_port())])
if args.config is not None:
train_cmd.extend(["config", args.config])
train_cmd.extend(["checkpoint.resume", "True"])
train_cmd.extend(["env.save_dir", save_dir])
if args.tensorboard:
train_cmd.extend(["training.tensorboard", "1"])
train_cmd.extend(["env.tensorboard_logdir", tensorboard_logdir])
for hp in config.values():
train_cmd.extend(map(str, hp.get_cli_args()))
if args.extra_args is not None and len(args.extra_args) > 0:
# convert commands with equal sign to the other format without the equal sign
# e.g. ["training.batch_size=128"] to ["training.batch_size", "128"]
extra_args = [c for arg in args.extra_args for c in arg.split("=")]
train_cmd.extend(extra_args)
if args.dry_run:
print(train_cmd)
train_cmd_str = " ".join(train_cmd)
dry_run(f"train command: {train_cmd_str}")
# start training
env = os.environ.copy()
env["OMP_NUM_THREADS"] = "2"
if args.local:
assert (
args.num_nodes == 1
), "distributed training cannot be combined with --local"
if not dry_run("start training locally"):
if "CUDA_VISIBLE_DEVICES" not in env:
env["CUDA_VISIBLE_DEVICES"] = ",".join(map(str, range(args.num_gpus)))
env["NCCL_DEBUG"] = "INFO"
train_proc = subprocess.Popen(train_cmd, env=env)
train_proc.wait()
else:
train_log = os.path.join(save_dir, "train.log")
train_stderr = os.path.join(save_dir, "train.stderr.%j") # %j = slurm job id
# set environment
if args.num_nodes > 1:
env["NCCL_SOCKET_IFNAME"] = "^docker0,lo"
env["NCCL_DEBUG"] = "INFO"
else:
env["NCCL_SOCKET_IFNAME"] = ""
srun_cmd = [
"srun",
"--job-name",
f"{args.prefix}.{save_dir_key}",
"--output",
train_log,
"--error",
train_stderr,
"--open-mode",
"append",
"--unbuffered",
]
if args.salloc:
srun_cmd += [
"--nodes",
str(args.num_nodes),
"--ntasks",
str(args.num_nodes),
]
srun_cmd += train_cmd
srun_cmd_str = " ".join(map(shlex.quote, srun_cmd)) + " &"
# build command
if not args.salloc:
excluded_hosts = os.environ.get("EXCLUDED_HOSTS", None)
included_hosts = os.environ.get("INCLUDED_HOSTS", None)
gres = f"gpu:{args.gpu_type}:{args.num_gpus}"
sbatch_cmd = [
"sbatch",
"--job-name",
f"{args.prefix}.{save_dir_key}",
"--gres",
gres,
"--nodes",
str(args.num_nodes),
"--ntasks-per-node",
"1",
"--cpus-per-task",
str(int(8 * args.num_gpus)),
"--output",
train_log,
"--error",
train_stderr,
"--open-mode",
"append",
# "--no-requeue",
"--signal",
"B:USR1@180",
]
if args.constraint:
sbatch_cmd += ["-C", args.constraint]
if args.partition:
sbatch_cmd += ["--partition", args.partition]
if args.reservation:
sbatch_cmd += ["--reservation", args.reservation]
if args.exclusive:
sbatch_cmd += ["--exclusive"]
if args.comment:
comment = args.comment
if args.snapshot_code:
comment += f", Code Location: {destination}"
sbatch_cmd += ["--comment", comment]
if args.snapshot_code:
sbatch_cmd += ["--comment", f"Code Location: {destination}"]
if args.dep is not None:
sbatch_cmd.extend(["-d", str(args.dep)])
if args.time is not None:
sbatch_cmd.extend(["--time", args.time])
if args.mem is not None:
sbatch_cmd += ["--mem", args.mem]
else:
sbatch_cmd += ["--mem-per-cpu", "7G"]
sbatch_cmd += ["-x", excluded_hosts] if excluded_hosts is not None else []
sbatch_cmd += ["-w", included_hosts] if included_hosts is not None else []
wrapped_cmd = (
requeue_support()
+ "\n"
+ srun_cmd_str
+ " \n wait $! \n sleep 610 & \n wait $!"
)
sbatch_cmd += ["--wrap", wrapped_cmd]
sbatch_cmd_str = " ".join(map(shlex.quote, sbatch_cmd))
else:
sbatch_cmd = srun_cmd
sbatch_cmd_str = srun_cmd_str
if args.dry_run:
dry_run("start remote training")
dry_run(f"- log stdout to: {train_log}")
dry_run(f"- log stderr to: {train_stderr}")
dry_run(f"- run command: {sbatch_cmd_str}")
sbatch_cmd += ["--test-only"]
with subprocess.Popen(
sbatch_cmd, stdout=subprocess.PIPE, env=env
) as train_proc:
stdout = train_proc.stdout.read().decode("utf-8")
print(stdout)
else:
with open(train_log, "a") as train_log_h:
# log most recent git commit
git_commit = subprocess.check_output(
"git log | head -n 1", shell=True, encoding="utf-8"
)
print(git_commit.rstrip(), file=train_log_h)
if args.baseline_model:
print(f"baseline model: {args.baseline_model}", file=train_log_h)
with open(train_log, "a") as train_log_h:
print(f"running command: {sbatch_cmd_str}\n")
print(f"running command: {sbatch_cmd_str}\n", file=train_log_h)
with subprocess.Popen(
sbatch_cmd, stdout=subprocess.PIPE, env=env
) as train_proc:
stdout = train_proc.stdout.read().decode("utf-8")
print(stdout, file=train_log_h)
try:
job_id = int(stdout.rstrip().split()[-1])
return job_id
except IndexError:
return None
def has_finished(save_dir):
train_log = os.path.join(save_dir, "train.log")
if not os.path.exists(train_log):
return False
with open(train_log) as h:
lines = h.readlines()
if len(lines) == 0:
return False
if "Finished run" in lines[-1]:
return True
return False
def has_failed(save_dir):
if not os.path.exists(save_dir):
return False
# find max job id
job_ids = []
for fn in os.listdir(save_dir):
if fn.startswith("train.stderr."):
job_ids.append(int(fn.split(".")[-1]))
if len(job_ids) == 0:
return False
max_job_id = max(job_ids)
def _has_failed(stderr_fn):
with open(stderr_fn) as h:
for line in h:
if len(line.strip()) > 0:
# assume that any output in stderr indicates an error
return True
return False
return _has_failed(os.path.join(save_dir, f"train.stderr.{max_job_id}"))
def has_started(save_dir):
train_log = os.path.join(save_dir, "train.log")
if not os.path.exists(train_log):
return False
return True
def get_random_port():
old_state = random.getstate()
random.seed()
port = random.randint(10000, 20000)
random.setstate(old_state)
return port
def requeue_support():
return """
trap_handler () {
echo "Caught signal: " $1
# SIGTERM must be bypassed
if [ "$1" = "TERM" ]; then
echo "bypass sigterm"
else
# Submit a new job to the queue
echo "Requeuing " $SLURM_JOB_ID
scontrol requeue $SLURM_JOB_ID
fi
}
# Install signal handler
trap 'trap_handler USR1' USR1
trap 'trap_handler TERM' TERM
"""
| EXA-1-master | exa/models/mmf-main/tools/sweeps/lib/slurm.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# Copied from fairseq. Mostly written by @myleott. Adapted accordingly for mmf
import argparse
import datetime
import json
import os
import socket
# if argv is None, we will read from sys.argv (invoke params)
def get_args(argv=None):
parser = argparse.ArgumentParser("Script for launching hyperparameter sweeps")
parser.add_argument(
"-p",
"--prefix",
required=True,
help="save checkpoints and logs in <checkpoints-dir>/<prefix>.<save_dir_key>",
)
parser.add_argument(
"-t",
"--num_trials",
required=True,
type=int,
help="number of random hyperparam configurations to try (-1 for grid search)",
)
parser.add_argument(
"-g", "--num_gpus", type=int, required=True, help="number of GPUs per node"
)
parser.add_argument(
"-n",
"--num_nodes",
type=int,
default=1,
help="number of nodes for distributed training",
)
parser.add_argument(
"--model_type",
type=str,
default="aicommerce__multimodal_model",
help="registered model type",
)
parser.add_argument(
"--oncall", type=str, default="ai_commerce", help="oncall team "
)
parser.add_argument(
"--capabilities",
type=str,
default="GPU_V100_HOST",
help="hardware capabilities",
)
parser.add_argument("--seed", type=int, default=1234)
parser.add_argument(
"--config", type=str, default=None, help="configuration for model"
)
parser.add_argument(
"--extra_args",
type=str,
nargs="*",
help="extra arguments to be passed into MMF command (e.g. config arguments)",
)
parser.add_argument(
"--baseline_model", help="path to baseline model from which to resume training"
)
parser.add_argument(
"--force_checkpoints_dir", help="force using a given checkpoint dir"
)
parser.add_argument(
"--resume_failed",
action="store_true",
help="resume any runs that failed (assumes --num_trials and --seed"
+ " are the same)",
)
parser.add_argument(
"--resume_finished",
action="store_true",
help="force any runs that finished to begin again (uncommon)",
)
parser.add_argument(
"--dry_run",
action="store_true",
help="output only a list of actions to perform without performing them",
)
parser.add_argument("--local", action="store_true", help="run job locally")
parser.add_argument("--debug", action="store_true", help="debug")
hostname = socket.gethostname()
if "fair" in hostname:
default_backend = "slurm"
parser.add_argument(
"--checkpoints_dir",
default=os.path.join(
"/checkpoint", os.environ["USER"], str(datetime.date.today())
),
help="save checkpoints and logs in "
+ "<checkpoints-dir>/<prefix>.<save_dir_key>",
)
else:
default_backend = "fblearner"
parser.add_argument(
"--checkpoints_dir",
default=os.path.join(
"/mnt/vol/gfsai-east/ai-group/users",
os.environ["USER"],
"checkpoints",
str(datetime.date.today()),
),
help="save checkpoints and logs in "
+ "<checkpoints-dir>/<prefix>.<save_dir_key>",
)
parser.add_argument(
"--scheduler_type",
type=str,
default="flow",
help="scheduler type to run the job. Eg. flow, mast_prod, etc. See https:"
+ "//www.internalfb.com/code/fbsource/fbcode/pytorch/lightning/meta/envir"
+ "onment/config_util.py for the list of scheduler type to MAST Cluster "
+ "config mappings",
)
parser.add_argument(
"--model_store_config",
type=json.loads,
help="a dict of required configs if using modelstore checkpointing."
+ "Config class: https://www.internalfb.com/code/fbsource/fbcode/pytorch/l"
+ "ightning/meta/checkpointer/conf/__init__.py?lines=11\nEg.\n"
+ """'{"model_entity_id":0,"model_type":"xray_video_av","""
+ """"model_series":"xray_video","""
+ """"oncall":"cv_video_understanding_support","""
+ """"tag_names":["is_cu_model"]}'""",
)
parser.add_argument(
"--workflow",
default="faim.mmf_run.train_workflow@faim",
help="fblearner workflow name",
)
parser.add_argument(
"--buck-target", default=None, help="fblearner buck-target if required"
)
parser.add_argument(
"--backend", choices=["slurm", "fblearner"], default=default_backend
)
# FBLearner params
parser.add_argument(
"--entitlement", help="entitlement to use", default="bigbasin_atn_fair"
)
parser.add_argument(
"--run-as-secure-group",
help="secure group to use",
default="fair_research_and_engineering",
)
# Slurm params
parser.add_argument(
"--salloc", action="store_true", help="run agaist current allocation"
)
parser.add_argument("--partition", help="partition to run on", default="learnfair")
parser.add_argument("--reservation", help="reservation to run on")
parser.add_argument(
"--exclusive", action="store_true", help="if set, get exclusive host"
)
parser.add_argument(
"--dep",
metavar="JOBID",
type=int,
help="add JOBID as a dependency (i.e., wait for it to finish)",
)
parser.add_argument(
"--sequential", action="store_true", help="schedule jobs to run sequentially"
)
parser.add_argument(
"--time", default="4320", help="expected job duration in minutes"
)
parser.add_argument("--mem", "--mem", help="memory to request")
parser.add_argument("--gpu-type", default="volta")
parser.add_argument(
"--constraint",
metavar="CONSTRAINT",
help="gpu constraint, if any. e.g. 'volta'",
)
parser.add_argument("--comment", help="comment string")
parser.add_argument(
"--snapshot_code",
action="store_true",
default=False,
help="Flag for creating a snapshot of training code while creating slurm job,"
" path is './slurm_snapshot_code/<TIME_ISO_FORMAT/>:', "
"can find time from comment of slurm job.",
)
parser.add_argument(
"--tensorboard_logdir",
default=os.path.join(
"/checkpoint",
os.environ["USER"],
"tensorboard_logs",
str(datetime.date.today()),
),
help="save tensorboard logs in <tensorboard-logdir>/<prefix>.<save_dir_key>",
)
parser.add_argument(
"--tensorboard",
default=0,
type=int,
help="enable tensorboard logging by passing --tensorboard 1",
)
parser.add_argument("--task", type=str, help="specify task name")
parser.add_argument(
"--table_filter",
type=str,
help="specify table filter, e.g. ds=02-02-2022",
)
parser.add_argument("--buck-version", type=str, default="v2")
# Will read sys.argv if argv is None
args = parser.parse_args(argv)
return args
class hyperparam:
"""Base class for defining hyperparameters."""
def __init__(self, name, values=None, binary_flag=False, save_dir_key=None):
"""
Arguments:
- name : the name of the hyperparameter (e.g., `--dropout`)
- values : the set of values to sweep over (e.g., `[0.0, 0.1, 0.2]`)
- binary_flag : whether the hyperparameter uses a boolean flag
(e.g., `--no-save`)
- save_dir_key : function that takes the hyperparameter value and returns
the "key" to be appended to the output directory name
"""
self.name = name
if values is None: # syntactic sugar for binary flags
self.values = [True]
self.binary_flag = True
else:
self.values = values if isinstance(values, list) else [values]
self.binary_flag = binary_flag
self.save_dir_key = save_dir_key
self.current_value = None
if len(self.values) > 1 and self.save_dir_key is None:
raise ValueError(
f"{name} has more than one value but is missing a save_dir_key!"
)
def get_cli_args(self):
if self.binary_flag:
return [self.name] if self.current_value else []
else:
return [self.name, self.current_value]
def get_save_dir_key(self):
if self.save_dir_key is None:
return None
if self.binary_flag:
return self.save_dir_key(1) if self.current_value else None
return self.save_dir_key(self.current_value)
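# Usage sketch for the hyperparam helper above (editorial addition; the values
# are illustrative and not taken from any real sweep config):
#
#   lr = hyperparam(
#       "optimizer.params.lr", [5e-5, 1e-5], save_dir_key=lambda val: f"lr{val}"
#   )
#   lr.current_value = 5e-5
#   lr.get_cli_args()      # -> ["optimizer.params.lr", 5e-05]
#   lr.get_save_dir_key()  # -> "lr5e-05"
#
#   fp16 = hyperparam("--fp16", save_dir_key=lambda val: "fp16")  # binary flag
#   fp16.current_value = True
#   fp16.get_cli_args()    # -> ["--fp16"]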
def main(get_grid, postprocess_hyperparams):
args = get_args()
if args.backend == "slurm":
from .slurm import main as backend_main
elif args.backend == "fblearner":
from .fblearner import main as backend_main
backend_main(get_grid, postprocess_hyperparams, args)
| EXA-1-master | exa/models/mmf-main/tools/sweeps/lib/__init__.py |