python_code (stringlengths 0–992k) | repo_name (stringlengths 8–46) | file_path (stringlengths 5–162) |
---|---|---|
import subprocess
import matplotlib
import os
matplotlib.use('Agg')
import librosa
import librosa.filters
import numpy as np
from scipy import signal
from scipy.io import wavfile
def save_wav(wav, path, sr, norm=False):
if norm:
wav = wav / np.abs(wav).max()
wav *= 32767
# proposed by @dsmiller
wavfile.write(path, sr, wav.astype(np.int16))
def get_hop_size(hparams):
hop_size = hparams['hop_size']
if hop_size is None:
assert hparams['frame_shift_ms'] is not None
hop_size = int(hparams['frame_shift_ms'] / 1000 * hparams['audio_sample_rate'])
return hop_size
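# Illustrative sketch (added for clarity, not part of the original file): hop_size falls
# back to frame_shift_ms when it is unset; the hparams values below are made up.
def _example_get_hop_size():
    example_hparams = {'hop_size': None, 'frame_shift_ms': 12.5, 'audio_sample_rate': 22050}
    # 12.5 ms at 22050 Hz -> int(0.0125 * 22050) = 275 samples per hop
    return get_hop_size(example_hparams)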
###########################################################################################
def _stft(y, hparams):
return librosa.stft(y=y, n_fft=hparams['fft_size'], hop_length=get_hop_size(hparams),
win_length=hparams['win_size'], pad_mode='constant')
def _istft(y, hparams):
return librosa.istft(y, hop_length=get_hop_size(hparams), win_length=hparams['win_size'])
def librosa_pad_lr(x, fsize, fshift, pad_sides=1):
'''compute right padding (final frame) or both sides padding (first and final frames)
'''
assert pad_sides in (1, 2)
# return int(fsize // 2)
pad = (x.shape[0] // fshift + 1) * fshift - x.shape[0]
if pad_sides == 1:
return 0, pad
else:
return pad // 2, pad // 2 + pad % 2
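# Illustrative sketch (added for clarity, not part of the original file): the returned
# padding makes the padded length an exact multiple of the frame shift.
def _example_librosa_pad_lr():
    x = np.zeros(1000)
    l_pad, r_pad = librosa_pad_lr(x, fsize=1024, fshift=256, pad_sides=1)
    # (1000 // 256 + 1) * 256 - 1000 = 24 -> (0, 24)
    assert (x.shape[0] + l_pad + r_pad) % 256 == 0
    return l_pad, r_pad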
# Conversions
def amp_to_db(x):
return 20 * np.log10(np.maximum(1e-5, x))
def normalize(S, hparams):
return (S - hparams['min_level_db']) / -hparams['min_level_db']
def denormalize(D, hparams):
return (D * -hparams['min_level_db']) + hparams['min_level_db']
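# Illustrative sketch (added for clarity, not part of the original file): amplitude ->
# dB -> [0, 1] round trip, assuming the common setting min_level_db = -100.
def _example_normalize_roundtrip():
    example_hparams = {'min_level_db': -100}
    amp = np.array([1e-5, 1e-2, 1.0])
    db = amp_to_db(amp)                 # [-100., -40., 0.]
    s = normalize(db, example_hparams)  # [0.0, 0.6, 1.0]
    assert np.allclose(denormalize(s, example_hparams), db)
    return s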
def rnnoise(filename, out_fn=None, verbose=False, out_sample_rate=22050):
assert os.path.exists('./rnnoise/examples/rnnoise_demo'), \
'Please build rnnoise first: the rnnoise_demo binary was not found at ./rnnoise/examples/rnnoise_demo'
if out_fn is None:
out_fn = f"{filename[:-4]}.denoised.wav"
out_48k_fn = f"{out_fn}.48000.wav"
tmp0_fn = f"{out_fn}.0.wav"
tmp1_fn = f"{out_fn}.1.wav"
tmp2_fn = f"{out_fn}.2.raw"
tmp3_fn = f"{out_fn}.3.raw"
if verbose:
print("Pre-processing audio...") # wav to pcm raw
subprocess.check_call(
f'sox "{filename}" -G -r48000 "{tmp0_fn}"', shell=True, stdin=subprocess.PIPE) # resample to 48 kHz
subprocess.check_call(
f'sox -v 0.95 "{tmp0_fn}" "{tmp1_fn}"', shell=True, stdin=subprocess.PIPE) # scale down to avoid clipping
subprocess.check_call(
f'ffmpeg -y -i "{tmp1_fn}" -loglevel quiet -f s16le -ac 1 -ar 48000 "{tmp2_fn}"',
shell=True, stdin=subprocess.PIPE) # convert to raw 16-bit mono PCM
if verbose:
print("Applying rnnoise algorithm to audio...") # rnnoise
subprocess.check_call(
f'./rnnoise/examples/rnnoise_demo "{tmp2_fn}" "{tmp3_fn}"', shell=True)
if verbose:
print("Post-processing audio...") # pcm raw to wav
if filename == out_fn:
subprocess.check_call(f'rm -f "{out_fn}"', shell=True)
subprocess.check_call(
f'sox -t raw -r 48000 -b 16 -e signed-integer -c 1 "{tmp3_fn}" "{out_48k_fn}"', shell=True)
subprocess.check_call(f'sox "{out_48k_fn}" -G -r{out_sample_rate} "{out_fn}"', shell=True)
subprocess.check_call(f'rm -f "{tmp0_fn}" "{tmp1_fn}" "{tmp2_fn}" "{tmp3_fn}" "{out_48k_fn}"', shell=True)
if verbose:
print("Audio-filtering completed!") | EXA-1-master | exa/models/AudioGPT/NeuralSeq/utils/audio.py |
from numpy import array, zeros, full, argmin, inf, ndim
from scipy.spatial.distance import cdist
from math import isinf
def dtw(x, y, dist, warp=1, w=inf, s=1.0):
"""
Computes Dynamic Time Warping (DTW) of two sequences.
:param array x: N1*M array
:param array y: N2*M array
:param func dist: distance used as cost measure
:param int warp: how many shifts are computed.
:param int w: window size limiting the maximal distance between indices of matched entries |i,j|.
:param float s: weight applied on off-diagonal moves of the path. As s gets larger, the warping path is increasingly biased towards the diagonal
Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the warp path.
"""
assert len(x)
assert len(y)
assert isinf(w) or (w >= abs(len(x) - len(y)))
assert s > 0
r, c = len(x), len(y)
if not isinf(w):
D0 = full((r + 1, c + 1), inf)
for i in range(1, r + 1):
D0[i, max(1, i - w):min(c + 1, i + w + 1)] = 0
D0[0, 0] = 0
else:
D0 = zeros((r + 1, c + 1))
D0[0, 1:] = inf
D0[1:, 0] = inf
D1 = D0[1:, 1:] # view
for i in range(r):
for j in range(c):
if (isinf(w) or (max(0, i - w) <= j <= min(c, i + w))):
D1[i, j] = dist(x[i], y[j])
C = D1.copy()
jrange = range(c)
for i in range(r):
if not isinf(w):
jrange = range(max(0, i - w), min(c, i + w + 1))
for j in jrange:
min_list = [D0[i, j]]
for k in range(1, warp + 1):
i_k = min(i + k, r)
j_k = min(j + k, c)
min_list += [D0[i_k, j] * s, D0[i, j_k] * s]
D1[i, j] += min(min_list)
if len(x) == 1:
path = zeros(len(y)), range(len(y))
elif len(y) == 1:
path = range(len(x)), zeros(len(x))
else:
path = _traceback(D0)
return D1[-1, -1], C, D1, path
def accelerated_dtw(x, y, dist, warp=1):
"""
Computes Dynamic Time Warping (DTW) of two sequences in a faster way.
Instead of iterating through each element and calculating each distance,
this uses the cdist function from scipy (https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.cdist.html)
:param array x: N1*M array
:param array y: N2*M array
:param string or func dist: distance parameter for cdist. When string is given, cdist uses optimized functions for the distance metrics.
If a string is passed, the distance function can be 'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation', 'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'wminkowski', 'yule'.
:param int warp: how many shifts are computed.
Returns the minimum distance, the cost matrix, the accumulated cost matrix, and the warp path.
"""
assert len(x)
assert len(y)
if ndim(x) == 1:
x = x.reshape(-1, 1)
if ndim(y) == 1:
y = y.reshape(-1, 1)
r, c = len(x), len(y)
D0 = zeros((r + 1, c + 1))
D0[0, 1:] = inf
D0[1:, 0] = inf
D1 = D0[1:, 1:]
D0[1:, 1:] = cdist(x, y, dist)
C = D1.copy()
for i in range(r):
for j in range(c):
min_list = [D0[i, j]]
for k in range(1, warp + 1):
min_list += [D0[min(i + k, r), j],
D0[i, min(j + k, c)]]
D1[i, j] += min(min_list)
if len(x) == 1:
path = zeros(len(y)), range(len(y))
elif len(y) == 1:
path = range(len(x)), zeros(len(x))
else:
path = _traceback(D0)
return D1[-1, -1], C, D1, path
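# Illustrative sketch (added for clarity, not part of the original file): accelerated_dtw
# accepts any scipy cdist metric name, e.g. 'euclidean'.
def _example_accelerated_dtw():
    x = array([[0.0], [1.0], [2.0], [3.0]])
    y = array([[0.0], [2.0], [3.0]])
    min_dist, cost, acc, path = accelerated_dtw(x, y, dist='euclidean')
    return min_dist, path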
def _traceback(D):
i, j = array(D.shape) - 2
p, q = [i], [j]
while (i > 0) or (j > 0):
tb = argmin((D[i, j], D[i, j + 1], D[i + 1, j]))
if tb == 0:
i -= 1
j -= 1
elif tb == 1:
i -= 1
else: # (tb == 2):
j -= 1
p.insert(0, i)
q.insert(0, j)
return array(p), array(q)
if __name__ == '__main__':
w = inf
s = 1.0
if 1: # 1-D numeric
from sklearn.metrics.pairwise import manhattan_distances
import numpy as np
x = [0, 0, 1, 1, 2, 4, 2, 1, 2, 0]
x = np.array(x).reshape([-1,1,1])
y = [1, 1, 1, 2, 2, 2, 2, 3, 2, 0]
y = np.array(y).reshape([-1,1,1])
dist_fun = manhattan_distances
w = 1
# s = 1.2
elif 0: # 2-D numeric
from sklearn.metrics.pairwise import euclidean_distances
x = [[0, 0], [0, 1], [1, 1], [1, 2], [2, 2], [4, 3], [2, 3], [1, 1], [2, 2], [0, 1]]
y = [[1, 0], [1, 1], [1, 1], [2, 1], [4, 3], [4, 3], [2, 3], [3, 1], [1, 2], [1, 0]]
dist_fun = euclidean_distances
else: # 1-D list of strings
from nltk.metrics.distance import edit_distance
# x = ['we', 'shelled', 'clams', 'for', 'the', 'chowder']
# y = ['class', 'too']
x = ['i', 'soon', 'found', 'myself', 'muttering', 'to', 'the', 'walls']
y = ['see', 'drown', 'himself']
# x = 'we talked about the situation'.split()
# y = 'we talked about the situation'.split()
dist_fun = edit_distance
dist, cost, acc, path = dtw(x, y, dist_fun, w=w, s=s)
# Visualize
from matplotlib import pyplot as plt
plt.imshow(cost.T, origin='lower', cmap=plt.cm.Reds, interpolation='nearest')
plt.plot(path[0], path[1], '-o') # relation
plt.xticks(range(len(x)), x)
plt.yticks(range(len(y)), y)
plt.xlabel('x')
plt.ylabel('y')
plt.axis('tight')
if isinf(w):
plt.title('Minimum distance: {}, slope weight: {}'.format(dist, s))
else:
plt.title('Minimum distance: {}, window width: {}, slope weight: {}'.format(dist, w, s))
plt.show()
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/utils/dtw.py |
import os
import traceback
from multiprocessing import Queue, Process
from tqdm import tqdm
def chunked_worker(worker_id, map_func, args, results_queue=None, init_ctx_func=None):
ctx = init_ctx_func(worker_id) if init_ctx_func is not None else None
for job_idx, arg in args:
try:
if ctx is not None:
res = map_func(*arg, ctx=ctx)
else:
res = map_func(*arg)
results_queue.put((job_idx, res))
except Exception:
traceback.print_exc()
results_queue.put((job_idx, None))
def chunked_multiprocess_run(map_func, args, num_workers=None, ordered=True, init_ctx_func=None, q_max_size=1000):
args = zip(range(len(args)), args)
args = list(args)
n_jobs = len(args)
if num_workers is None:
num_workers = int(os.getenv('N_PROC', os.cpu_count()))
results_queues = []
if ordered:
for i in range(num_workers):
results_queues.append(Queue(maxsize=q_max_size // num_workers))
else:
results_queue = Queue(maxsize=q_max_size)
for i in range(num_workers):
results_queues.append(results_queue)
workers = []
for i in range(num_workers):
args_worker = args[i::num_workers]
p = Process(target=chunked_worker, args=(
i, map_func, args_worker, results_queues[i], init_ctx_func), daemon=True)
workers.append(p)
p.start()
for n_finished in range(n_jobs):
results_queue = results_queues[n_finished % num_workers]
job_idx, res = results_queue.get()
assert job_idx == n_finished or not ordered, (job_idx, n_finished)
yield res
for w in workers:
w.join()
w.close()
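# Illustrative sketch (added for clarity, not part of the original file): jobs are passed
# as argument tuples and results are yielded lazily, so the generator must be consumed.
def _square(v):
    return v * v

def _example_chunked_multiprocess_run():
    jobs = [(i,) for i in range(8)]
    return list(chunked_multiprocess_run(_square, jobs, num_workers=2))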
def multiprocess_run_tqdm(map_func, args, num_workers=None, ordered=True, init_ctx_func=None,
multithread=False, desc=None):
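# NOTE (added for clarity): `multiprocess_run` is referenced below but is not included in
# this snippet; it is assumed to be defined elsewhere in the original module.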
for i, res in tqdm(enumerate(
multiprocess_run(map_func, args, num_workers, ordered, init_ctx_func, multithread)),
total=len(args), desc=desc):
yield i, res | EXA-1-master | exa/models/AudioGPT/NeuralSeq/utils/multiprocess_utils.py |
import os
from data_gen.tts.base_preprocess import BasePreprocessor
import glob
class LibrittsPreAlign(BasePreprocessor):
def meta_data(self):
wav_fns = sorted(glob.glob(f'{self.raw_data_dir}/*/*/*.wav'))
for wav_fn in wav_fns:
item_name = os.path.basename(wav_fn)[:-4]
txt_fn = f'{wav_fn[:-4]}.normalized.txt'
with open(txt_fn, 'r') as f:
txt = f.readlines()
spk = item_name.split("_")[0]
# Example:
#
# 'item_name': '103_1241_000000_000001'
# 'wav_fn': 'LibriTTS/train-clean-100/103/1241/103_1241_000000_000001.wav'
# 'txt': 'matthew Cuthbert is surprised'
# 'spk_name': '103'
yield {'item_name': item_name, 'wav_fn': wav_fn, 'txt': txt[0], 'spk_name': spk}
if __name__ == "__main__":
LibrittsPreAlign().process()
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/configs/tts/libritts/pre_align.py |
import os
from data_gen.tts.base_preprocess import BasePreprocessor
import glob
import re
class EmoPreAlign(BasePreprocessor):
def meta_data(self):
spks = ['0012', '0011', '0013', '0014', '0015', '0016', '0017', '0018', '0019', '0020']
pattern = re.compile('[\t\n ]+')
for spk in spks:
for line in open(f"{self.raw_data_dir}/{spk}/{spk}.txt", 'r'): # 打开文件
line = re.sub(pattern, ' ', line)
if line == ' ': continue
split_ = line.split(' ')
txt = ' '.join(split_[1: -2])
item_name = split_[0]
emotion = split_[-2]
wav_fn = f'{self.raw_data_dir}/{spk}/{emotion}/{item_name}.wav'
yield item_name, wav_fn, txt, spk, emotion
if __name__ == "__main__":
EmoPreAlign().process()
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/configs/tts/emotion/pre_align.py |
import torch
from inference.svs.base_svs_infer import BaseSVSInfer
from utils import load_ckpt
from utils.hparams import hparams
from modules.diff.shallow_diffusion_tts import GaussianDiffusion
from tasks.svs.diffsinger_task import DIFF_DECODERS
class DiffSingerCascadeInfer(BaseSVSInfer):
def build_model(self):
model = GaussianDiffusion(
phone_encoder=self.ph_encoder,
out_dims=hparams['audio_num_mel_bins'], denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams),
timesteps=hparams['timesteps'],
K_step=hparams['K_step'],
loss_type=hparams['diff_loss_type'],
spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],
)
model.eval()
load_ckpt(model, hparams['work_dir'], 'model')
return model
def forward_model(self, inp):
sample = self.input_to_batch(inp)
txt_tokens = sample['txt_tokens'] # [B, T_t]
spk_id = sample.get('spk_ids')
with torch.no_grad():
output = self.model(txt_tokens, spk_id=spk_id, ref_mels=None, infer=True,
pitch_midi=sample['pitch_midi'], midi_dur=sample['midi_dur'],
is_slur=sample['is_slur'])
mel_out = output['mel_out'] # [B, T,80]
f0_pred = output['f0_denorm']
wav_out = self.run_vocoder(mel_out, f0=f0_pred)
wav_out = wav_out.cpu().numpy()
return wav_out[0]
if __name__ == '__main__':
inp = {
'text': '小酒窝长睫毛AP是你最美的记号',
'notes': 'C#4/Db4 | F#4/Gb4 | G#4/Ab4 | A#4/Bb4 F#4/Gb4 | F#4/Gb4 C#4/Db4 | C#4/Db4 | rest | C#4/Db4 | A#4/Bb4 | G#4/Ab4 | A#4/Bb4 | G#4/Ab4 | F4 | C#4/Db4',
'notes_duration': '0.407140 | 0.376190 | 0.242180 | 0.509550 0.183420 | 0.315400 0.235020 | 0.361660 | 0.223070 | 0.377270 | 0.340550 | 0.299620 | 0.344510 | 0.283770 | 0.323390 | 0.360340',
'input_type': 'word'
} # user input: Chinese characters
c = {
'text': '小酒窝长睫毛AP是你最美的记号',
'ph_seq': 'x iao j iu w o ch ang ang j ie ie m ao AP sh i n i z ui m ei d e j i h ao',
'note_seq': 'C#4/Db4 C#4/Db4 F#4/Gb4 F#4/Gb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 F#4/Gb4 F#4/Gb4 F#4/Gb4 C#4/Db4 C#4/Db4 C#4/Db4 rest C#4/Db4 C#4/Db4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 F4 F4 C#4/Db4 C#4/Db4',
'note_dur_seq': '0.407140 0.407140 0.376190 0.376190 0.242180 0.242180 0.509550 0.509550 0.183420 0.315400 0.315400 0.235020 0.361660 0.361660 0.223070 0.377270 0.377270 0.340550 0.340550 0.299620 0.299620 0.344510 0.344510 0.283770 0.283770 0.323390 0.323390 0.360340 0.360340',
'is_slur_seq': '0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0',
'input_type': 'phoneme'
} # input like Opencpop dataset.
DiffSingerCascadeInfer.example_run(inp)
# # CUDA_VISIBLE_DEVICES=1 python inference/svs/ds_cascade.py --config egs/egs_bases/svs/midi/cascade/opencs/ds60_rel.yaml --exp_name 0303_opencpop_ds58_midi | EXA-1-master | exa/models/AudioGPT/NeuralSeq/inference/svs/ds_cascade.py |
import os
import torch
import numpy as np
from modules.hifigan.hifigan import HifiGanGenerator
from vocoders.hifigan import HifiGAN
from inference.svs.opencpop.map import cpop_pinyin2ph_func
from utils import load_ckpt
from utils.hparams import set_hparams, hparams
from utils.text_encoder import TokenTextEncoder
from pypinyin import pinyin, lazy_pinyin, Style
import librosa
import glob
import re
class BaseSVSInfer:
def __init__(self, hparams, device=None):
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.hparams = hparams
self.device = device
phone_list = ["AP", "SP", "a", "ai", "an", "ang", "ao", "b", "c", "ch", "d", "e", "ei", "en", "eng", "er", "f", "g",
"h", "i", "ia", "ian", "iang", "iao", "ie", "in", "ing", "iong", "iu", "j", "k", "l", "m", "n", "o",
"ong", "ou", "p", "q", "r", "s", "sh", "t", "u", "ua", "uai", "uan", "uang", "ui", "un", "uo", "v",
"van", "ve", "vn", "w", "x", "y", "z", "zh"]
self.ph_encoder = TokenTextEncoder(None, vocab_list=phone_list, replace_oov=',')
self.pinyin2phs = cpop_pinyin2ph_func()
self.spk_map = {'opencpop': 0}
self.model = self.build_model()
self.model.eval()
self.model.to(self.device)
self.vocoder = self.build_vocoder()
self.vocoder.eval()
self.vocoder.to(self.device)
def build_model(self):
raise NotImplementedError
def forward_model(self, inp):
raise NotImplementedError
def build_vocoder(self):
base_dir = hparams['vocoder_ckpt']
config_path = f'{base_dir}/config.yaml'
ckpt = sorted(glob.glob(f'{base_dir}/model_ckpt_steps_*.ckpt'), key=
lambda x: int(re.findall(f'{base_dir}/model_ckpt_steps_(\d+).ckpt', x)[0]))[-1]
print('| load HifiGAN: ', ckpt)
ckpt_dict = torch.load(ckpt, map_location="cpu")
config = set_hparams(config_path, global_hparams=False)
state = ckpt_dict["state_dict"]["model_gen"]
vocoder = HifiGanGenerator(config)
vocoder.load_state_dict(state, strict=True)
vocoder.remove_weight_norm()
vocoder = vocoder.eval().to(self.device)
return vocoder
def run_vocoder(self, c, **kwargs):
c = c.transpose(2, 1) # [B, 80, T]
f0 = kwargs.get('f0') # [B, T]
if f0 is not None and hparams.get('use_nsf'):
# f0 = torch.FloatTensor(f0).to(self.device)
y = self.vocoder(c, f0).view(-1)
else:
y = self.vocoder(c).view(-1)
# [T]
return y[None]
def preprocess_word_level_input(self, inp):
# pypinyin cannot disambiguate polyphonic characters
text_raw = inp['text'].replace('最长', '最常').replace('长睫毛', '常睫毛') \
.replace('那么长', '那么常').replace('多长', '多常') \
.replace('很长', '很常') # We hope someone could provide a better g2p module for us by opening pull requests.
# lyric
pinyins = lazy_pinyin(text_raw, strict=False)
ph_per_word_lst = [self.pinyin2phs[pinyin.strip()] for pinyin in pinyins if pinyin.strip() in self.pinyin2phs]
# Note
note_per_word_lst = [x.strip() for x in inp['notes'].split('|') if x.strip() != '']
mididur_per_word_lst = [x.strip() for x in inp['notes_duration'].split('|') if x.strip() != '']
if len(note_per_word_lst) == len(ph_per_word_lst) == len(mididur_per_word_lst):
print('Pass word-notes check.')
else:
print('The number of words doesn\'t match the number of notes\' windows. ',
'You should split the note(s) for each word by the | mark.')
print(ph_per_word_lst, note_per_word_lst, mididur_per_word_lst)
print(len(ph_per_word_lst), len(note_per_word_lst), len(mididur_per_word_lst))
return None
note_lst = []
ph_lst = []
midi_dur_lst = []
is_slur = []
for idx, ph_per_word in enumerate(ph_per_word_lst):
# for phs in one word:
# single ph like ['ai'] or multiple phs like ['n', 'i']
ph_in_this_word = ph_per_word.split()
# for notes in one word:
# single note like ['D4'] or multiple notes like ['D4', 'E4'] which means a 'slur' here.
note_in_this_word = note_per_word_lst[idx].split()
midi_dur_in_this_word = mididur_per_word_lst[idx].split()
# process for the model input
# Step 1.
# Deal with note of 'not slur' case or the first note of 'slur' case
# j ie
# F#4/Gb4 F#4/Gb4
# 0 0
for ph in ph_in_this_word:
ph_lst.append(ph)
note_lst.append(note_in_this_word[0])
midi_dur_lst.append(midi_dur_in_this_word[0])
is_slur.append(0)
# step 2.
# Deal with the 2nd, 3rd... notes of 'slur' case
# j ie ie
# F#4/Gb4 F#4/Gb4 C#4/Db4
# 0 0 1
if len(note_in_this_word) > 1: # is_slur = True, we should repeat the YUNMU to match the 2nd, 3rd... notes.
for idx in range(1, len(note_in_this_word)):
ph_lst.append(ph_in_this_word[-1])
note_lst.append(note_in_this_word[idx])
midi_dur_lst.append(midi_dur_in_this_word[idx])
is_slur.append(1)
ph_seq = ' '.join(ph_lst)
if len(ph_lst) == len(note_lst) == len(midi_dur_lst):
print(len(ph_lst), len(note_lst), len(midi_dur_lst))
print('Pass word-notes check.')
else:
print('The number of words doesn\'t match the number of notes\' windows. ',
'You should split the note(s) for each word by the | mark.')
return None
return ph_seq, note_lst, midi_dur_lst, is_slur
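# Illustrative note (added for clarity, not in the original file): for the syllable 'jie'
# sung over the slurred notes 'F#4/Gb4 C#4/Db4', the two steps above produce
#   ph_lst       ['j', 'ie', 'ie']
#   note_lst     ['F#4/Gb4', 'F#4/Gb4', 'C#4/Db4']
#   midi_dur_lst [dur_0, dur_0, dur_1]
#   is_slur      [0, 0, 1]
# i.e. the final (YUNMU) phoneme is repeated once for every extra slurred note.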
def preprocess_phoneme_level_input(self, inp):
ph_seq = inp['ph_seq']
note_lst = inp['note_seq'].split()
midi_dur_lst = inp['note_dur_seq'].split()
is_slur = [float(x) for x in inp['is_slur_seq'].split()]
print(len(note_lst), len(ph_seq.split()), len(midi_dur_lst))
if len(note_lst) == len(ph_seq.split()) == len(midi_dur_lst):
print('Pass word-notes check.')
else:
print('The number of words doesn\'t match the number of notes\' windows. ',
'You should split the note(s) for each word by the | mark.')
return None
return ph_seq, note_lst, midi_dur_lst, is_slur
def preprocess_input(self, inp, input_type='word'):
"""
:param inp: {'text': str, 'item_name': (str, optional), 'spk_name': (str, optional)}
:return:
"""
item_name = inp.get('item_name', '<ITEM_NAME>')
spk_name = inp.get('spk_name', 'opencpop')
# single spk
spk_id = self.spk_map[spk_name]
# get ph seq, note lst, midi dur lst, is slur lst.
if input_type == 'word':
ret = self.preprocess_word_level_input(inp)
elif input_type == 'phoneme': # like transcriptions.txt in Opencpop dataset.
ret = self.preprocess_phoneme_level_input(inp)
else:
print('Invalid input type.')
return None
if ret:
ph_seq, note_lst, midi_dur_lst, is_slur = ret
else:
print('==========> Preprocess_word_level or phone_level input wrong.')
return None
# convert note lst to midi id; convert note dur lst to midi duration
try:
midis = [librosa.note_to_midi(x.split("/")[0]) if x != 'rest' else 0
for x in note_lst]
midi_dur_lst = [float(x) for x in midi_dur_lst]
except Exception as e:
print(e)
print('Invalid Input Type.')
return None
ph_token = self.ph_encoder.encode(ph_seq)
item = {'item_name': item_name, 'text': inp['text'], 'ph': ph_seq, 'spk_id': spk_id,
'ph_token': ph_token, 'pitch_midi': np.asarray(midis), 'midi_dur': np.asarray(midi_dur_lst),
'is_slur': np.asarray(is_slur), }
item['ph_len'] = len(item['ph_token'])
return item
def input_to_batch(self, item):
item_names = [item['item_name']]
text = [item['text']]
ph = [item['ph']]
txt_tokens = torch.LongTensor(item['ph_token'])[None, :].to(self.device)
txt_lengths = torch.LongTensor([txt_tokens.shape[1]]).to(self.device)
spk_ids = torch.LongTensor(item['spk_id'])[None, :].to(self.device)
pitch_midi = torch.LongTensor(item['pitch_midi'])[None, :hparams['max_frames']].to(self.device)
midi_dur = torch.FloatTensor(item['midi_dur'])[None, :hparams['max_frames']].to(self.device)
is_slur = torch.LongTensor(item['is_slur'])[None, :hparams['max_frames']].to(self.device)
batch = {
'item_name': item_names,
'text': text,
'ph': ph,
'txt_tokens': txt_tokens,
'txt_lengths': txt_lengths,
'spk_ids': spk_ids,
'pitch_midi': pitch_midi,
'midi_dur': midi_dur,
'is_slur': is_slur
}
return batch
def postprocess_output(self, output):
return output
def infer_once(self, inp):
inp = self.preprocess_input(inp, input_type=inp['input_type'] if inp.get('input_type') else 'word')
output = self.forward_model(inp)
output = self.postprocess_output(output)
return output
@classmethod
def example_run(cls, inp):
from utils.audio import save_wav
set_hparams(print_hparams=False)
infer_ins = cls(hparams)
out = infer_ins.infer_once(inp)
os.makedirs('infer_out', exist_ok=True)
save_wav(out, f'infer_out/example_out.wav', hparams['audio_sample_rate'])
# if __name__ == '__main__':
# debug
# a = BaseSVSInfer(hparams)
# a.preprocess_input({'text': '你 说 你 不 SP 懂 为 何 在 这 时 牵 手 AP',
# 'notes': 'D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | D#4/Eb4 | rest | D#4/Eb4 | D4 | D4 | D4 | D#4/Eb4 | F4 | D#4/Eb4 | D4 | rest',
# 'notes_duration': '0.113740 | 0.329060 | 0.287950 | 0.133480 | 0.150900 | 0.484730 | 0.242010 | 0.180820 | 0.343570 | 0.152050 | 0.266720 | 0.280310 | 0.633300 | 0.444590'
# })
# b = {
# 'text': '小酒窝长睫毛AP是你最美的记号',
# 'notes': 'C#4/Db4 | F#4/Gb4 | G#4/Ab4 | A#4/Bb4 F#4/Gb4 | F#4/Gb4 C#4/Db4 | C#4/Db4 | rest | C#4/Db4 | A#4/Bb4 | G#4/Ab4 | A#4/Bb4 | G#4/Ab4 | F4 | C#4/Db4',
# 'notes_duration': '0.407140 | 0.376190 | 0.242180 | 0.509550 0.183420 | 0.315400 0.235020 | 0.361660 | 0.223070 | 0.377270 | 0.340550 | 0.299620 | 0.344510 | 0.283770 | 0.323390 | 0.360340'
# }
# c = {
# 'text': '小酒窝长睫毛AP是你最美的记号',
# 'ph_seq': 'x iao j iu w o ch ang ang j ie ie m ao AP sh i n i z ui m ei d e j i h ao',
# 'note_seq': 'C#4/Db4 C#4/Db4 F#4/Gb4 F#4/Gb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 F#4/Gb4 F#4/Gb4 F#4/Gb4 C#4/Db4 C#4/Db4 C#4/Db4 rest C#4/Db4 C#4/Db4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 F4 F4 C#4/Db4 C#4/Db4',
# 'note_dur_seq': '0.407140 0.407140 0.376190 0.376190 0.242180 0.242180 0.509550 0.509550 0.183420 0.315400 0.315400 0.235020 0.361660 0.361660 0.223070 0.377270 0.377270 0.340550 0.340550 0.299620 0.299620 0.344510 0.344510 0.283770 0.283770 0.323390 0.323390 0.360340 0.360340',
# 'is_slur_seq': '0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0'
# } # input like Opencpop dataset.
# a.preprocess_input(b)
# a.preprocess_input(c, input_type='phoneme') | EXA-1-master | exa/models/AudioGPT/NeuralSeq/inference/svs/base_svs_infer.py |
import torch
# from inference.tts.fs import FastSpeechInfer
# from modules.tts.fs2_orig import FastSpeech2Orig
from inference.svs.base_svs_infer import BaseSVSInfer
from utils import load_ckpt
from utils.hparams import hparams
from modules.diff.shallow_diffusion_tts import GaussianDiffusion
from tasks.svs.diffsinger_task import DIFF_DECODERS
from modules.fastspeech.pe import PitchExtractor
import utils
class DiffSingerE2EInfer(BaseSVSInfer):
def build_model(self):
model = GaussianDiffusion(
phone_encoder=self.ph_encoder,
out_dims=hparams['audio_num_mel_bins'], denoise_fn=DIFF_DECODERS[hparams['diff_decoder_type']](hparams),
timesteps=hparams['timesteps'],
K_step=hparams['K_step'],
loss_type=hparams['diff_loss_type'],
spec_min=hparams['spec_min'], spec_max=hparams['spec_max'],
)
model.eval()
load_ckpt(model, hparams['work_dir'], 'model')
if hparams.get('pe_enable') is not None and hparams['pe_enable']:
self.pe = PitchExtractor().to(self.device)
utils.load_ckpt(self.pe, hparams['pe_ckpt'], 'model', strict=True)
self.pe.eval()
return model
def forward_model(self, inp):
sample = self.input_to_batch(inp)
txt_tokens = sample['txt_tokens'] # [B, T_t]
spk_id = sample.get('spk_ids')
with torch.no_grad():
output = self.model(txt_tokens, spk_id=spk_id, ref_mels=None, infer=True,
pitch_midi=sample['pitch_midi'], midi_dur=sample['midi_dur'],
is_slur=sample['is_slur'])
mel_out = output['mel_out'] # [B, T,80]
if hparams.get('pe_enable') is not None and hparams['pe_enable']:
f0_pred = self.pe(mel_out)['f0_denorm_pred'] # pe predict from Pred mel
else:
f0_pred = output['f0_denorm']
wav_out = self.run_vocoder(mel_out, f0=f0_pred)
wav_out = wav_out.cpu().numpy()
return wav_out[0]
if __name__ == '__main__':
inp = {
'text': '小酒窝长睫毛AP是你最美的记号',
'notes': 'C#4/Db4 | F#4/Gb4 | G#4/Ab4 | A#4/Bb4 F#4/Gb4 | F#4/Gb4 C#4/Db4 | C#4/Db4 | rest | C#4/Db4 | A#4/Bb4 | G#4/Ab4 | A#4/Bb4 | G#4/Ab4 | F4 | C#4/Db4',
'notes_duration': '0.407140 | 0.376190 | 0.242180 | 0.509550 0.183420 | 0.315400 0.235020 | 0.361660 | 0.223070 | 0.377270 | 0.340550 | 0.299620 | 0.344510 | 0.283770 | 0.323390 | 0.360340',
'input_type': 'word'
} # user input: Chinese characters
inp = {
'text': '小酒窝长睫毛AP是你最美的记号',
'ph_seq': 'x iao j iu w o ch ang ang j ie ie m ao AP sh i n i z ui m ei d e j i h ao',
'note_seq': 'C#4/Db4 C#4/Db4 F#4/Gb4 F#4/Gb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 F#4/Gb4 F#4/Gb4 F#4/Gb4 C#4/Db4 C#4/Db4 C#4/Db4 rest C#4/Db4 C#4/Db4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 A#4/Bb4 A#4/Bb4 G#4/Ab4 G#4/Ab4 F4 F4 C#4/Db4 C#4/Db4',
'note_dur_seq': '0.407140 0.407140 0.376190 0.376190 0.242180 0.242180 0.509550 0.509550 0.183420 0.315400 0.315400 0.235020 0.361660 0.361660 0.223070 0.377270 0.377270 0.340550 0.340550 0.299620 0.299620 0.344510 0.344510 0.283770 0.283770 0.323390 0.323390 0.360340 0.360340',
'is_slur_seq': '0 0 0 0 0 0 0 0 1 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0',
'input_type': 'phoneme'
} # input like Opencpop dataset.
DiffSingerE2EInfer.example_run(inp)
# CUDA_VISIBLE_DEVICES=3 python inference/svs/ds_e2e.py --config egs/egs_bases/svs/midi/e2e/opencpop/ds100_adj_rel.yaml --exp_name 0228_opencpop_ds100_rel | EXA-1-master | exa/models/AudioGPT/NeuralSeq/inference/svs/ds_e2e.py |
def cpop_pinyin2ph_func():
# The README of the Opencpop dataset defines a "pinyin to phoneme" mapping table
pinyin2phs = {'AP': 'AP', 'SP': 'SP'}
with open('NeuralSeq/inference/svs/opencpop/cpop_pinyin2ph.txt') as rf:
for line in rf.readlines():
elements = [x.strip() for x in line.split('|') if x.strip() != '']
pinyin2phs[elements[0]] = elements[1]
return pinyin2phs | EXA-1-master | exa/models/AudioGPT/NeuralSeq/inference/svs/opencpop/map.py |
import torch
from inference.tts.base_tts_infer import BaseTTSInfer
from utils.ckpt_utils import load_ckpt
from modules.portaspeech.portaspeech import PortaSpeech
class TTSInference(BaseTTSInfer):
def __init__(self, hparams, device=None):
super().__init__(hparams, device)
print("Initializing TTS model to %s" % device)
self.spk_map = self.preprocessor.load_spk_map(self.data_dir)
print("TTS loaded!")
def build_model(self):
model = PortaSpeech(self.ph_encoder, self.word_encoder)
load_ckpt(model, self.hparams['work_dir'], 'model')
with torch.no_grad():
model.store_inverse_all()
return model
def forward_model(self, inp):
sample = self.input_to_batch(inp)
with torch.no_grad():
output = self.model(
sample['txt_tokens'],
sample['word_tokens'],
ph2word=sample['ph2word'],
word_len=sample['word_lengths'].max(),
infer=True,
forward_post_glow=True,
spk_id=sample.get('spk_ids')
)
mel_out = output['mel_out']
wav_out = self.run_vocoder(mel_out)
wav_out = wav_out.cpu().numpy()
return wav_out[0]
def preprocess_input(self, inp):
"""
:param inp: {'text': str, 'item_name': (str, optional), 'spk_name': (str, optional)}
:return:
"""
preprocessor, preprocess_args = self.preprocessor, self.preprocess_args
text_raw = inp['text']
item_name = inp.get('item_name', '<ITEM_NAME>')
spk_name = inp.get('spk_name', '<SINGLE_SPK>')
ph, txt, word, ph2word, ph_gb_word = preprocessor.txt_to_ph(
preprocessor.txt_processor, text_raw, preprocess_args)
word_token = self.word_encoder.encode(word)
ph_token = self.ph_encoder.encode(ph)
spk_id = self.spk_map[spk_name]
item = {'item_name': item_name, 'text': txt, 'ph': ph, 'spk_id': spk_id,
'ph_token': ph_token, 'word_token': word_token, 'ph2word': ph2word,
'ph_words':ph_gb_word, 'words': word}
item['ph_len'] = len(item['ph_token'])
return item
def input_to_batch(self, item):
item_names = [item['item_name']]
text = [item['text']]
ph = [item['ph']]
txt_tokens = torch.LongTensor(item['ph_token'])[None, :].to(self.device)
txt_lengths = torch.LongTensor([txt_tokens.shape[1]]).to(self.device)
word_tokens = torch.LongTensor(item['word_token'])[None, :].to(self.device)
word_lengths = torch.LongTensor([word_tokens.shape[1]]).to(self.device)
ph2word = torch.LongTensor(item['ph2word'])[None, :].to(self.device)
spk_ids = torch.LongTensor(item['spk_id'])[None, :].to(self.device)
batch = {
'item_name': item_names,
'text': text,
'ph': ph,
'txt_tokens': txt_tokens,
'txt_lengths': txt_lengths,
'word_tokens': word_tokens,
'word_lengths': word_lengths,
'ph2word': ph2word,
'spk_ids': spk_ids,
}
return batch
def postprocess_output(self, output):
return output
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/inference/tts/PortaSpeech.py |
import torch
import os
import importlib
from inference.tts.base_tts_infer import BaseTTSInfer
from utils.ckpt_utils import load_ckpt, get_last_checkpoint
from modules.GenerSpeech.model.generspeech import GenerSpeech
from data_gen.tts.emotion import inference as EmotionEncoder
from data_gen.tts.emotion.inference import embed_utterance as Embed_utterance
from data_gen.tts.emotion.inference import preprocess_wav
from data_gen.tts.data_gen_utils import is_sil_phoneme
from resemblyzer import VoiceEncoder
from utils import audio
class GenerSpeechInfer(BaseTTSInfer):
def build_model(self):
model = GenerSpeech(self.ph_encoder)
model.eval()
load_ckpt(model, self.hparams['work_dir'], 'model')
return model
def preprocess_input(self, inp):
"""
:param inp: {'text': str, 'item_name': (str, optional), 'spk_name': (str, optional)}
:return:
"""
# processed text
preprocessor, preprocess_args = self.preprocessor, self.preprocess_args
text_raw = inp['text']
item_name = inp.get('item_name', '<ITEM_NAME>')
ph, txt, word, ph2word, ph_gb_word = preprocessor.txt_to_ph(preprocessor.txt_processor, text_raw, preprocess_args)
ph_token = self.ph_encoder.encode(ph)
# processed ref audio
ref_audio = inp['ref_audio']
processed_ref_audio = 'example/temp.wav'
voice_encoder = VoiceEncoder().cuda()
encoder = [self.ph_encoder, self.word_encoder]
EmotionEncoder.load_model(self.hparams['emotion_encoder_path'])
binarizer_cls = self.hparams.get("binarizer_cls", 'data_gen.tts.base_binarizer.BaseBinarizer')
pkg = ".".join(binarizer_cls.split(".")[:-1])
cls_name = binarizer_cls.split(".")[-1]
binarizer_cls = getattr(importlib.import_module(pkg), cls_name)
ref_audio_raw, ref_text_raw = self.asr(ref_audio) # prepare text
ph_ref, txt_ref, word_ref, ph2word_ref, ph_gb_word_ref = preprocessor.txt_to_ph(preprocessor.txt_processor, ref_text_raw, preprocess_args)
ph_gb_word_nosil = ["_".join([p for p in w.split("_") if not is_sil_phoneme(p)]) for w in ph_gb_word_ref.split(" ") if not is_sil_phoneme(w)]
phs_for_align = ['SIL'] + ph_gb_word_nosil + ['SIL']
phs_for_align = " ".join(phs_for_align)
# prepare files for alignment
os.system('rm -r example/; mkdir example/')
audio.save_wav(ref_audio_raw, processed_ref_audio, self.hparams['audio_sample_rate'])
with open(f'example/temp.lab', 'w') as f_txt:
f_txt.write(phs_for_align)
os.system(f'mfa align example/ {self.hparams["binary_data_dir"]}/mfa_dict.txt {self.hparams["binary_data_dir"]}/mfa_model.zip example/textgrid/ --clean')
item2tgfn = 'example/textgrid/temp.TextGrid' # prepare textgrid alignment
item = binarizer_cls.process_item(item_name, ph_ref, txt_ref, item2tgfn, processed_ref_audio, 0, 0, encoder, self.hparams['binarization_args'])
item['emo_embed'] = Embed_utterance(preprocess_wav(item['wav_fn']))
item['spk_embed'] = voice_encoder.embed_utterance(item['wav'])
item.update({
'ref_ph': item['ph'],
'ph': ph,
'ph_token': ph_token,
'text': txt
})
return item
def input_to_batch(self, item):
item_names = [item['item_name']]
text = [item['text']]
ph = [item['ph']]
txt_tokens = torch.LongTensor(item['ph_token'])[None, :].to(self.device)
txt_lengths = torch.LongTensor([txt_tokens.shape[1]]).to(self.device)
mels = torch.FloatTensor(item['mel'])[None, :].to(self.device)
f0 = torch.FloatTensor(item['f0'])[None, :].to(self.device)
# uv = torch.FloatTensor(item['uv']).to(self.device)
mel2ph = torch.LongTensor(item['mel2ph'])[None, :].to(self.device)
spk_embed = torch.FloatTensor(item['spk_embed'])[None, :].to(self.device)
emo_embed = torch.FloatTensor(item['emo_embed'])[None, :].to(self.device)
ph2word = torch.LongTensor(item['ph2word'])[None, :].to(self.device)
mel2word = torch.LongTensor(item['mel2word'])[None, :].to(self.device)
word_tokens = torch.LongTensor(item['word_tokens'])[None, :].to(self.device)
batch = {
'item_name': item_names,
'text': text,
'ph': ph,
'mels': mels,
'f0': f0,
'txt_tokens': txt_tokens,
'txt_lengths': txt_lengths,
'spk_embed': spk_embed,
'emo_embed': emo_embed,
'mel2ph': mel2ph,
'ph2word': ph2word,
'mel2word': mel2word,
'word_tokens': word_tokens,
}
return batch
def forward_model(self, inp):
sample = self.input_to_batch(inp)
txt_tokens = sample['txt_tokens'] # [B, T_t]
with torch.no_grad():
output = self.model(txt_tokens, ref_mel2ph=sample['mel2ph'], ref_mel2word=sample['mel2word'], ref_mels=sample['mels'],
spk_embed=sample['spk_embed'], emo_embed=sample['emo_embed'], global_steps=300000, infer=True)
mel_out = output['mel_out']
wav_out = self.run_vocoder(mel_out)
wav_out = wav_out.squeeze().cpu().numpy()
return wav_out
if __name__ == '__main__':
inp = {
'text': 'here we go',
'ref_audio': 'assets/0011_001570.wav'
}
GenerSpeechInfer.example_run(inp)
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/inference/tts/GenerSpeech.py |
from tasks.tts.dataset_utils import FastSpeechWordDataset
from tasks.tts.tts_utils import load_data_preprocessor
from vocoders.hifigan import HifiGanGenerator
import os
import librosa
import soundfile as sf
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
from string import punctuation
import torch
from utils.ckpt_utils import load_ckpt
from utils.hparams import set_hparams
from utils.hparams import hparams as hp
class BaseTTSInfer:
def __init__(self, hparams, device=None):
if device is None:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
self.hparams = hparams
self.device = device
self.data_dir = hparams['binary_data_dir']
self.preprocessor, self.preprocess_args = load_data_preprocessor()
self.ph_encoder, self.word_encoder = self.preprocessor.load_dict(self.data_dir)
self.ds_cls = FastSpeechWordDataset
self.model = self.build_model()
self.model.eval()
self.model.to(self.device)
self.vocoder = self.build_vocoder()
self.vocoder.eval()
self.vocoder.to(self.device)
self.asr_processor, self.asr_model = self.build_asr()
def build_model(self):
raise NotImplementedError
def forward_model(self, inp):
raise NotImplementedError
def build_asr(self):
# load pretrained model
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h") # facebook/wav2vec2-base-960h wav2vec2-large-960h-lv60-self
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h").to(self.device)
return processor, model
def build_vocoder(self):
base_dir = self.hparams['vocoder_ckpt']
config_path = f'{base_dir}/config.yaml'
config = set_hparams(config_path, global_hparams=False)
vocoder = HifiGanGenerator(config)
load_ckpt(vocoder, base_dir, 'model_gen')
return vocoder
def run_vocoder(self, c):
c = c.transpose(2, 1)
y = self.vocoder(c)[:, 0]
return y
def preprocess_input(self, inp):
raise NotImplementedError
def input_to_batch(self, item):
raise NotImplementedError
def postprocess_output(self, output):
return output
def infer_once(self, inp):
inp = self.preprocess_input(inp)
output = self.forward_model(inp)
output = self.postprocess_output(output)
return output
@classmethod
def example_run(cls, inp):
from utils.audio import save_wav
#set_hparams(print_hparams=False)
infer_ins = cls(hp)
out = infer_ins.infer_once(inp)
os.makedirs('infer_out', exist_ok=True)
save_wav(out, f'infer_out/{hp["text"]}.wav', hp['audio_sample_rate'])
print(f'Save at infer_out/{hp["text"]}.wav.')
def asr(self, file):
sample_rate = self.hparams['audio_sample_rate']
audio_input, source_sample_rate = sf.read(file)
# Resample the wav if needed
if sample_rate is not None and source_sample_rate != sample_rate:
audio_input = librosa.resample(audio_input, source_sample_rate, sample_rate)
# pad input values and return pt tensor
input_values = self.asr_processor(audio_input, sampling_rate=sample_rate, return_tensors="pt").input_values
# retrieve logits & take argmax
logits = self.asr_model(input_values.cuda()).logits
predicted_ids = torch.argmax(logits, dim=-1)
# transcribe
transcription = self.asr_processor.decode(predicted_ids[0])
transcription = transcription.rstrip(punctuation)
return audio_input, transcription | EXA-1-master | exa/models/AudioGPT/NeuralSeq/inference/tts/base_tts_infer.py |
EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/__init__.py |
|
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""STFT-based Loss modules."""
import librosa
import torch
from modules.parallel_wavegan.losses import LogSTFTMagnitudeLoss, SpectralConvergengeLoss, stft
class STFTLoss(torch.nn.Module):
"""STFT loss module."""
def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window",
use_mel_loss=False):
"""Initialize STFT loss module."""
super(STFTLoss, self).__init__()
self.fft_size = fft_size
self.shift_size = shift_size
self.win_length = win_length
self.window = getattr(torch, window)(win_length)
self.spectral_convergenge_loss = SpectralConvergengeLoss()
self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss()
self.use_mel_loss = use_mel_loss
self.mel_basis = None
def forward(self, x, y):
"""Calculate forward propagation.
Args:
x (Tensor): Predicted signal (B, T).
y (Tensor): Groundtruth signal (B, T).
Returns:
Tensor: Spectral convergence loss value.
Tensor: Log STFT magnitude loss value.
"""
x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window)
y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window)
if self.use_mel_loss:
if self.mel_basis is None:
self.mel_basis = torch.from_numpy(librosa.filters.mel(22050, self.fft_size, 80)).cuda().T
x_mag = x_mag @ self.mel_basis
y_mag = y_mag @ self.mel_basis
sc_loss = self.spectral_convergenge_loss(x_mag, y_mag)
mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag)
return sc_loss, mag_loss
class MultiResolutionSTFTLoss(torch.nn.Module):
"""Multi resolution STFT loss module."""
def __init__(self,
fft_sizes=[1024, 2048, 512],
hop_sizes=[120, 240, 50],
win_lengths=[600, 1200, 240],
window="hann_window",
use_mel_loss=False):
"""Initialize Multi resolution STFT loss module.
Args:
fft_sizes (list): List of FFT sizes.
hop_sizes (list): List of hop sizes.
win_lengths (list): List of window lengths.
window (str): Window function type.
"""
super(MultiResolutionSTFTLoss, self).__init__()
assert len(fft_sizes) == len(hop_sizes) == len(win_lengths)
self.stft_losses = torch.nn.ModuleList()
for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths):
self.stft_losses += [STFTLoss(fs, ss, wl, window, use_mel_loss)]
def forward(self, x, y):
"""Calculate forward propagation.
Args:
x (Tensor): Predicted signal (B, T).
y (Tensor): Groundtruth signal (B, T).
Returns:
Tensor: Multi resolution spectral convergence loss value.
Tensor: Multi resolution log STFT magnitude loss value.
"""
sc_loss = 0.0
mag_loss = 0.0
for f in self.stft_losses:
sc_l, mag_l = f(x, y)
sc_loss += sc_l
mag_loss += mag_l
sc_loss /= len(self.stft_losses)
mag_loss /= len(self.stft_losses)
return sc_loss, mag_loss
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/stft_loss.py |
EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/__init__.py |
|
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""STFT-based Loss modules."""
import torch
import torch.nn.functional as F
def stft(x, fft_size, hop_size, win_length, window):
"""Perform STFT and convert to magnitude spectrogram.
Args:
x (Tensor): Input signal tensor (B, T).
fft_size (int): FFT size.
hop_size (int): Hop size.
win_length (int): Window length.
window (str): Window function type.
Returns:
Tensor: Magnitude spectrogram (B, #frames, fft_size // 2 + 1).
"""
x_stft = torch.stft(x, fft_size, hop_size, win_length, window)
real = x_stft[..., 0]
imag = x_stft[..., 1]
# NOTE(kan-bayashi): clamp is needed to avoid nan or inf
return torch.sqrt(torch.clamp(real ** 2 + imag ** 2, min=1e-7)).transpose(2, 1)
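# Illustrative sketch (added for clarity, not part of the original file): the returned
# magnitude spectrogram has shape (B, #frames, fft_size // 2 + 1).
def _example_stft_shape():
    x = torch.randn(2, 16000)
    window = torch.hann_window(600)
    return stft(x, fft_size=1024, hop_size=120, win_length=600, window=window).shape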
class SpectralConvergengeLoss(torch.nn.Module):
"""Spectral convergence loss module."""
def __init__(self):
"""Initilize spectral convergence loss module."""
super(SpectralConvergengeLoss, self).__init__()
def forward(self, x_mag, y_mag):
"""Calculate forward propagation.
Args:
x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
Returns:
Tensor: Spectral convergence loss value.
"""
return torch.norm(y_mag - x_mag, p="fro") / torch.norm(y_mag, p="fro")
class LogSTFTMagnitudeLoss(torch.nn.Module):
"""Log STFT magnitude loss module."""
def __init__(self):
"""Initilize los STFT magnitude loss module."""
super(LogSTFTMagnitudeLoss, self).__init__()
def forward(self, x_mag, y_mag):
"""Calculate forward propagation.
Args:
x_mag (Tensor): Magnitude spectrogram of predicted signal (B, #frames, #freq_bins).
y_mag (Tensor): Magnitude spectrogram of groundtruth signal (B, #frames, #freq_bins).
Returns:
Tensor: Log STFT magnitude loss value.
"""
return F.l1_loss(torch.log(y_mag), torch.log(x_mag))
class STFTLoss(torch.nn.Module):
"""STFT loss module."""
def __init__(self, fft_size=1024, shift_size=120, win_length=600, window="hann_window"):
"""Initialize STFT loss module."""
super(STFTLoss, self).__init__()
self.fft_size = fft_size
self.shift_size = shift_size
self.win_length = win_length
self.window = getattr(torch, window)(win_length)
self.spectral_convergenge_loss = SpectralConvergengeLoss()
self.log_stft_magnitude_loss = LogSTFTMagnitudeLoss()
def forward(self, x, y):
"""Calculate forward propagation.
Args:
x (Tensor): Predicted signal (B, T).
y (Tensor): Groundtruth signal (B, T).
Returns:
Tensor: Spectral convergence loss value.
Tensor: Log STFT magnitude loss value.
"""
x_mag = stft(x, self.fft_size, self.shift_size, self.win_length, self.window)
y_mag = stft(y, self.fft_size, self.shift_size, self.win_length, self.window)
sc_loss = self.spectral_convergenge_loss(x_mag, y_mag)
mag_loss = self.log_stft_magnitude_loss(x_mag, y_mag)
return sc_loss, mag_loss
class MultiResolutionSTFTLoss(torch.nn.Module):
"""Multi resolution STFT loss module."""
def __init__(self,
fft_sizes=[1024, 2048, 512],
hop_sizes=[120, 240, 50],
win_lengths=[600, 1200, 240],
window="hann_window"):
"""Initialize Multi resolution STFT loss module.
Args:
fft_sizes (list): List of FFT sizes.
hop_sizes (list): List of hop sizes.
win_lengths (list): List of window lengths.
window (str): Window function type.
"""
super(MultiResolutionSTFTLoss, self).__init__()
assert len(fft_sizes) == len(hop_sizes) == len(win_lengths)
self.stft_losses = torch.nn.ModuleList()
for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths):
self.stft_losses += [STFTLoss(fs, ss, wl, window)]
def forward(self, x, y):
"""Calculate forward propagation.
Args:
x (Tensor): Predicted signal (B, T).
y (Tensor): Groundtruth signal (B, T).
Returns:
Tensor: Multi resolution spectral convergence loss value.
Tensor: Multi resolution log STFT magnitude loss value.
"""
sc_loss = 0.0
mag_loss = 0.0
for f in self.stft_losses:
sc_l, mag_l = f(x, y)
sc_loss += sc_l
mag_loss += mag_l
sc_loss /= len(self.stft_losses)
mag_loss /= len(self.stft_losses)
return sc_loss, mag_loss
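# Illustrative sketch (added for clarity, not part of the original file): typical usage
# on batches of predicted and ground-truth waveforms.
def _example_multi_resolution_stft_loss():
    criterion = MultiResolutionSTFTLoss()
    y_hat, y = torch.randn(4, 22050), torch.randn(4, 22050)
    sc_loss, mag_loss = criterion(y_hat, y)
    return sc_loss + mag_loss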
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/losses/stft_loss.py |
from .stft_loss import * # NOQA
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/losses/__init__.py |
# -*- coding: utf-8 -*-
# Copyright 2020 MINH ANH (@dathudeptrai)
# MIT License (https://opensource.org/licenses/MIT)
"""Tensorflow Layer modules complatible with pytorch."""
import tensorflow as tf
class TFReflectionPad1d(tf.keras.layers.Layer):
"""Tensorflow ReflectionPad1d module."""
def __init__(self, padding_size):
"""Initialize TFReflectionPad1d module.
Args:
padding_size (int): Padding size.
"""
super(TFReflectionPad1d, self).__init__()
self.padding_size = padding_size
@tf.function
def call(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, T, 1, C).
Returns:
Tensor: Padded tensor (B, T + 2 * padding_size, 1, C).
"""
return tf.pad(x, [[0, 0], [self.padding_size, self.padding_size], [0, 0], [0, 0]], "REFLECT")
class TFConvTranspose1d(tf.keras.layers.Layer):
"""Tensorflow ConvTranspose1d module."""
def __init__(self, channels, kernel_size, stride, padding):
"""Initialize TFConvTranspose1d( module.
Args:
channels (int): Number of channels.
kernel_size (int): kernel size.
strides (int): Stride width.
padding (str): Padding type ("same" or "valid").
"""
super(TFConvTranspose1d, self).__init__()
self.conv1d_transpose = tf.keras.layers.Conv2DTranspose(
filters=channels,
kernel_size=(kernel_size, 1),
strides=(stride, 1),
padding=padding,
)
@tf.function
def call(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, T, 1, C).
Returns:
Tensors: Output tensor (B, T', 1, C').
"""
x = self.conv1d_transpose(x)
return x
class TFResidualStack(tf.keras.layers.Layer):
"""Tensorflow ResidualStack module."""
def __init__(self,
kernel_size,
channels,
dilation,
bias,
nonlinear_activation,
nonlinear_activation_params,
padding,
):
"""Initialize TFResidualStack module.
Args:
kernel_size (int): Kernel size.
channels (int): Number of channels.
dilation (int): Dilation size.
bias (bool): Whether to add bias parameter in convolution layers.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function.
padding (str): Padding type ("same" or "valid").
"""
super(TFResidualStack, self).__init__()
self.block = [
getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params),
TFReflectionPad1d(dilation),
tf.keras.layers.Conv2D(
filters=channels,
kernel_size=(kernel_size, 1),
dilation_rate=(dilation, 1),
use_bias=bias,
padding="valid",
),
getattr(tf.keras.layers, nonlinear_activation)(**nonlinear_activation_params),
tf.keras.layers.Conv2D(filters=channels, kernel_size=1, use_bias=bias)
]
self.shortcut = tf.keras.layers.Conv2D(filters=channels, kernel_size=1, use_bias=bias)
@tf.function
def call(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, T, 1, C).
Returns:
Tensor: Output tensor (B, T, 1, C).
"""
_x = tf.identity(x)
for i, layer in enumerate(self.block):
_x = layer(_x)
shortcut = self.shortcut(x)
return shortcut + _x
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/layers/tf_layers.py |
# -*- coding: utf-8 -*-
# Copyright 2020 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Causal convolusion layer modules."""
import torch
class CausalConv1d(torch.nn.Module):
"""CausalConv1d module with customized initialization."""
def __init__(self, in_channels, out_channels, kernel_size,
dilation=1, bias=True, pad="ConstantPad1d", pad_params={"value": 0.0}):
"""Initialize CausalConv1d module."""
super(CausalConv1d, self).__init__()
self.pad = getattr(torch.nn, pad)((kernel_size - 1) * dilation, **pad_params)
self.conv = torch.nn.Conv1d(in_channels, out_channels, kernel_size,
dilation=dilation, bias=bias)
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, in_channels, T).
Returns:
Tensor: Output tensor (B, out_channels, T).
"""
return self.conv(self.pad(x))[:, :, :x.size(2)]
class CausalConvTranspose1d(torch.nn.Module):
"""CausalConvTranspose1d module with customized initialization."""
def __init__(self, in_channels, out_channels, kernel_size, stride, bias=True):
"""Initialize CausalConvTranspose1d module."""
super(CausalConvTranspose1d, self).__init__()
self.deconv = torch.nn.ConvTranspose1d(
in_channels, out_channels, kernel_size, stride, bias=bias)
self.stride = stride
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, in_channels, T_in).
Returns:
Tensor: Output tensor (B, out_channels, T_out).
"""
return self.deconv(x)[:, :, :-self.stride]
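# Illustrative sketch (added for clarity, not part of the original file): the causal conv
# keeps the time length, while the causal transposed conv upsamples it by `stride`.
def _example_causal_shapes():
    x = torch.randn(1, 4, 100)
    conv = CausalConv1d(4, 8, kernel_size=3, dilation=2)
    deconv = CausalConvTranspose1d(8, 4, kernel_size=4, stride=2)
    return conv(x).shape, deconv(conv(x)).shape  # (1, 8, 100), (1, 4, 200)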
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/layers/causal_conv.py |
from .causal_conv import * # NOQA
from .pqmf import * # NOQA
from .residual_block import * # NOQA
from modules.parallel_wavegan.layers.residual_stack import * # NOQA
from .upsample import * # NOQA
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/layers/__init__.py |
# -*- coding: utf-8 -*-
"""Upsampling module.
This code is modified from https://github.com/r9y9/wavenet_vocoder.
"""
import numpy as np
import torch
import torch.nn.functional as F
from . import Conv1d
class Stretch2d(torch.nn.Module):
"""Stretch2d module."""
def __init__(self, x_scale, y_scale, mode="nearest"):
"""Initialize Stretch2d module.
Args:
x_scale (int): X scaling factor (Time axis in spectrogram).
y_scale (int): Y scaling factor (Frequency axis in spectrogram).
mode (str): Interpolation mode.
"""
super(Stretch2d, self).__init__()
self.x_scale = x_scale
self.y_scale = y_scale
self.mode = mode
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, C, F, T).
Returns:
Tensor: Interpolated tensor (B, C, F * y_scale, T * x_scale),
"""
return F.interpolate(
x, scale_factor=(self.y_scale, self.x_scale), mode=self.mode)
class Conv2d(torch.nn.Conv2d):
"""Conv2d module with customized initialization."""
def __init__(self, *args, **kwargs):
"""Initialize Conv2d module."""
super(Conv2d, self).__init__(*args, **kwargs)
def reset_parameters(self):
"""Reset parameters."""
self.weight.data.fill_(1. / np.prod(self.kernel_size))
if self.bias is not None:
torch.nn.init.constant_(self.bias, 0.0)
class UpsampleNetwork(torch.nn.Module):
"""Upsampling network module."""
def __init__(self,
upsample_scales,
nonlinear_activation=None,
nonlinear_activation_params={},
interpolate_mode="nearest",
freq_axis_kernel_size=1,
use_causal_conv=False,
):
"""Initialize upsampling network module.
Args:
upsample_scales (list): List of upsampling scales.
nonlinear_activation (str): Activation function name.
nonlinear_activation_params (dict): Arguments for specified activation function.
interpolate_mode (str): Interpolation mode.
freq_axis_kernel_size (int): Kernel size in the direction of frequency axis.
"""
super(UpsampleNetwork, self).__init__()
self.use_causal_conv = use_causal_conv
self.up_layers = torch.nn.ModuleList()
for scale in upsample_scales:
# interpolation layer
stretch = Stretch2d(scale, 1, interpolate_mode)
self.up_layers += [stretch]
# conv layer
assert (freq_axis_kernel_size - 1) % 2 == 0, "freq_axis_kernel_size must be an odd number."
freq_axis_padding = (freq_axis_kernel_size - 1) // 2
kernel_size = (freq_axis_kernel_size, scale * 2 + 1)
if use_causal_conv:
padding = (freq_axis_padding, scale * 2)
else:
padding = (freq_axis_padding, scale)
conv = Conv2d(1, 1, kernel_size=kernel_size, padding=padding, bias=False)
self.up_layers += [conv]
# nonlinear
if nonlinear_activation is not None:
nonlinear = getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)
self.up_layers += [nonlinear]
def forward(self, c):
"""Calculate forward propagation.
Args:
c : Input tensor (B, C, T).
Returns:
Tensor: Upsampled tensor (B, C, T'), where T' = T * prod(upsample_scales).
"""
c = c.unsqueeze(1) # (B, 1, C, T)
for f in self.up_layers:
if self.use_causal_conv and isinstance(f, Conv2d):
c = f(c)[..., :c.size(-1)]
else:
c = f(c)
return c.squeeze(1) # (B, C, T')
class ConvInUpsampleNetwork(torch.nn.Module):
"""Convolution + upsampling network module."""
def __init__(self,
upsample_scales,
nonlinear_activation=None,
nonlinear_activation_params={},
interpolate_mode="nearest",
freq_axis_kernel_size=1,
aux_channels=80,
aux_context_window=0,
use_causal_conv=False
):
"""Initialize convolution + upsampling network module.
Args:
upsample_scales (list): List of upsampling scales.
nonlinear_activation (str): Activation function name.
nonlinear_activation_params (dict): Arguments for specified activation function.
mode (str): Interpolation mode.
freq_axis_kernel_size (int): Kernel size in the direction of frequency axis.
aux_channels (int): Number of channels of pre-convolutional layer.
aux_context_window (int): Context window size of the pre-convolutional layer.
use_causal_conv (bool): Whether to use causal structure.
"""
super(ConvInUpsampleNetwork, self).__init__()
self.aux_context_window = aux_context_window
self.use_causal_conv = use_causal_conv and aux_context_window > 0
# To capture wide-context information in conditional features
kernel_size = aux_context_window + 1 if use_causal_conv else 2 * aux_context_window + 1
# NOTE(kan-bayashi): Here do not use padding because the input is already padded
self.conv_in = Conv1d(aux_channels, aux_channels, kernel_size=kernel_size, bias=False)
self.upsample = UpsampleNetwork(
upsample_scales=upsample_scales,
nonlinear_activation=nonlinear_activation,
nonlinear_activation_params=nonlinear_activation_params,
interpolate_mode=interpolate_mode,
freq_axis_kernel_size=freq_axis_kernel_size,
use_causal_conv=use_causal_conv,
)
def forward(self, c):
"""Calculate forward propagation.
Args:
c : Input tensor (B, C, T').
Returns:
Tensor: Upsampled tensor (B, C, T),
where T = (T' - aux_context_window * 2) * prod(upsample_scales).
Note:
The length of inputs considers the context window size.
"""
c_ = self.conv_in(c)
c = c_[:, :, :-self.aux_context_window] if self.use_causal_conv else c_
return self.upsample(c)
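# Illustrative sketch (added for clarity, not part of the original file): upsampling a
# mel spectrogram to the waveform rate, assuming hop_size = 256 = 4 * 4 * 4 * 4.
def _example_upsample_network():
    c = torch.randn(1, 80, 50)  # (B, aux_channels, frames)
    upsample = UpsampleNetwork(upsample_scales=[4, 4, 4, 4])
    return upsample(c).shape  # (1, 80, 50 * 256)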
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/layers/upsample.py |
# -*- coding: utf-8 -*-
"""Residual block module in WaveNet.
This code is modified from https://github.com/r9y9/wavenet_vocoder.
"""
import math
import torch
import torch.nn.functional as F
class Conv1d(torch.nn.Conv1d):
"""Conv1d module with customized initialization."""
def __init__(self, *args, **kwargs):
"""Initialize Conv1d module."""
super(Conv1d, self).__init__(*args, **kwargs)
def reset_parameters(self):
"""Reset parameters."""
torch.nn.init.kaiming_normal_(self.weight, nonlinearity="relu")
if self.bias is not None:
torch.nn.init.constant_(self.bias, 0.0)
class Conv1d1x1(Conv1d):
"""1x1 Conv1d with customized initialization."""
def __init__(self, in_channels, out_channels, bias):
"""Initialize 1x1 Conv1d module."""
super(Conv1d1x1, self).__init__(in_channels, out_channels,
kernel_size=1, padding=0,
dilation=1, bias=bias)
class ResidualBlock(torch.nn.Module):
"""Residual block module in WaveNet."""
def __init__(self,
kernel_size=3,
residual_channels=64,
gate_channels=128,
skip_channels=64,
aux_channels=80,
dropout=0.0,
dilation=1,
bias=True,
use_causal_conv=False
):
"""Initialize ResidualBlock module.
Args:
kernel_size (int): Kernel size of dilation convolution layer.
residual_channels (int): Number of channels for residual connection.
skip_channels (int): Number of channels for skip connection.
aux_channels (int): Local conditioning channels i.e. auxiliary input dimension.
dropout (float): Dropout probability.
dilation (int): Dilation factor.
bias (bool): Whether to add bias parameter in convolution layers.
            use_causal_conv (bool): Whether to use causal convolution.
"""
super(ResidualBlock, self).__init__()
self.dropout = dropout
# no future time stamps available
if use_causal_conv:
padding = (kernel_size - 1) * dilation
else:
            assert (kernel_size - 1) % 2 == 0, "Even kernel sizes are not supported."
padding = (kernel_size - 1) // 2 * dilation
self.use_causal_conv = use_causal_conv
# dilation conv
self.conv = Conv1d(residual_channels, gate_channels, kernel_size,
padding=padding, dilation=dilation, bias=bias)
# local conditioning
if aux_channels > 0:
self.conv1x1_aux = Conv1d1x1(aux_channels, gate_channels, bias=False)
else:
self.conv1x1_aux = None
# conv output is split into two groups
gate_out_channels = gate_channels // 2
self.conv1x1_out = Conv1d1x1(gate_out_channels, residual_channels, bias=bias)
self.conv1x1_skip = Conv1d1x1(gate_out_channels, skip_channels, bias=bias)
def forward(self, x, c):
"""Calculate forward propagation.
Args:
x (Tensor): Input tensor (B, residual_channels, T).
c (Tensor): Local conditioning auxiliary tensor (B, aux_channels, T).
Returns:
Tensor: Output tensor for residual connection (B, residual_channels, T).
Tensor: Output tensor for skip connection (B, skip_channels, T).
"""
residual = x
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.conv(x)
        # remove future time steps if causal convolution is used
x = x[:, :, :residual.size(-1)] if self.use_causal_conv else x
        # split into two parts for gated activation
splitdim = 1
xa, xb = x.split(x.size(splitdim) // 2, dim=splitdim)
# local conditioning
if c is not None:
assert self.conv1x1_aux is not None
c = self.conv1x1_aux(c)
ca, cb = c.split(c.size(splitdim) // 2, dim=splitdim)
xa, xb = xa + ca, xb + cb
x = torch.tanh(xa) * torch.sigmoid(xb)
# for skip connection
s = self.conv1x1_skip(x)
# for residual connection
x = (self.conv1x1_out(x) + residual) * math.sqrt(0.5)
return x, s
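

# A minimal shape check (illustrative; the sizes are assumptions). The block keeps the
# time axis and returns one tensor for the residual path and one for the skip path.
if __name__ == "__main__":
    _block = ResidualBlock()
    _x = torch.randn(2, 64, 100)    # (B, residual_channels, T)
    _c = torch.randn(2, 80, 100)    # (B, aux_channels, T)
    _res, _skip = _block(_x, _c)
    print(_res.shape, _skip.shape)  # expected: both (2, 64, 100)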
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/layers/residual_block.py |
# -*- coding: utf-8 -*-
# Copyright 2020 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Pseudo QMF modules."""
import numpy as np
import torch
import torch.nn.functional as F
from scipy.signal import kaiser
def design_prototype_filter(taps=62, cutoff_ratio=0.15, beta=9.0):
"""Design prototype filter for PQMF.
This method is based on `A Kaiser window approach for the design of prototype
filters of cosine modulated filterbanks`_.
Args:
taps (int): The number of filter taps.
cutoff_ratio (float): Cut-off frequency ratio.
beta (float): Beta coefficient for kaiser window.
Returns:
        ndarray: Impulse response of prototype filter (taps + 1,).
.. _`A Kaiser window approach for the design of prototype filters of cosine modulated filterbanks`:
https://ieeexplore.ieee.org/abstract/document/681427
"""
# check the arguments are valid
    assert taps % 2 == 0, "The number of taps must be an even number."
assert 0.0 < cutoff_ratio < 1.0, "Cutoff ratio must be > 0.0 and < 1.0."
# make initial filter
omega_c = np.pi * cutoff_ratio
with np.errstate(invalid='ignore'):
h_i = np.sin(omega_c * (np.arange(taps + 1) - 0.5 * taps)) \
/ (np.pi * (np.arange(taps + 1) - 0.5 * taps))
h_i[taps // 2] = np.cos(0) * cutoff_ratio # fix nan due to indeterminate form
# apply kaiser window
w = kaiser(taps + 1, beta)
h = h_i * w
return h
class PQMF(torch.nn.Module):
"""PQMF module.
This module is based on `Near-perfect-reconstruction pseudo-QMF banks`_.
.. _`Near-perfect-reconstruction pseudo-QMF banks`:
https://ieeexplore.ieee.org/document/258122
"""
def __init__(self, subbands=4, taps=62, cutoff_ratio=0.15, beta=9.0):
"""Initilize PQMF module.
Args:
subbands (int): The number of subbands.
taps (int): The number of filter taps.
cutoff_ratio (float): Cut-off frequency ratio.
beta (float): Beta coefficient for kaiser window.
"""
super(PQMF, self).__init__()
# define filter coefficient
h_proto = design_prototype_filter(taps, cutoff_ratio, beta)
h_analysis = np.zeros((subbands, len(h_proto)))
h_synthesis = np.zeros((subbands, len(h_proto)))
for k in range(subbands):
h_analysis[k] = 2 * h_proto * np.cos(
(2 * k + 1) * (np.pi / (2 * subbands)) *
(np.arange(taps + 1) - ((taps - 1) / 2)) +
(-1) ** k * np.pi / 4)
h_synthesis[k] = 2 * h_proto * np.cos(
(2 * k + 1) * (np.pi / (2 * subbands)) *
(np.arange(taps + 1) - ((taps - 1) / 2)) -
(-1) ** k * np.pi / 4)
# convert to tensor
analysis_filter = torch.from_numpy(h_analysis).float().unsqueeze(1)
synthesis_filter = torch.from_numpy(h_synthesis).float().unsqueeze(0)
        # register coefficients as buffer
self.register_buffer("analysis_filter", analysis_filter)
self.register_buffer("synthesis_filter", synthesis_filter)
# filter for downsampling & upsampling
updown_filter = torch.zeros((subbands, subbands, subbands)).float()
for k in range(subbands):
updown_filter[k, k, 0] = 1.0
self.register_buffer("updown_filter", updown_filter)
self.subbands = subbands
# keep padding info
self.pad_fn = torch.nn.ConstantPad1d(taps // 2, 0.0)
def analysis(self, x):
"""Analysis with PQMF.
Args:
x (Tensor): Input tensor (B, 1, T).
Returns:
Tensor: Output tensor (B, subbands, T // subbands).
"""
x = F.conv1d(self.pad_fn(x), self.analysis_filter)
return F.conv1d(x, self.updown_filter, stride=self.subbands)
def synthesis(self, x):
"""Synthesis with PQMF.
Args:
x (Tensor): Input tensor (B, subbands, T // subbands).
Returns:
Tensor: Output tensor (B, 1, T).
"""
x = F.conv_transpose1d(x, self.updown_filter * self.subbands, stride=self.subbands)
return F.conv1d(self.pad_fn(x), self.synthesis_filter)
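

# A minimal round-trip sketch (illustrative; the signal is random noise, not speech).
# Analysis splits the waveform into `subbands` critically sampled bands, and synthesis
# maps them back to a near-perfect reconstruction of the same length.
if __name__ == "__main__":
    _pqmf = PQMF(subbands=4)
    _x = torch.randn(1, 1, 1024)       # (B, 1, T) with T divisible by subbands
    _bands = _pqmf.analysis(_x)        # expected: (1, 4, 256)
    _x_hat = _pqmf.synthesis(_bands)   # expected: (1, 1, 1024)
    print(_bands.shape, _x_hat.shape)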
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/layers/pqmf.py |
# -*- coding: utf-8 -*-
# Copyright 2020 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Residual stack module in MelGAN."""
import torch
from . import CausalConv1d
class ResidualStack(torch.nn.Module):
"""Residual stack module introduced in MelGAN."""
def __init__(self,
kernel_size=3,
channels=32,
dilation=1,
bias=True,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"negative_slope": 0.2},
pad="ReflectionPad1d",
pad_params={},
use_causal_conv=False,
):
"""Initialize ResidualStack module.
Args:
kernel_size (int): Kernel size of dilation convolution layer.
channels (int): Number of channels of convolution layers.
dilation (int): Dilation factor.
bias (bool): Whether to add bias parameter in convolution layers.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function.
pad (str): Padding function module name before dilated convolution layer.
pad_params (dict): Hyperparameters for padding function.
use_causal_conv (bool): Whether to use causal convolution.
"""
super(ResidualStack, self).__init__()
        # define residual stack part
if not use_causal_conv:
            assert (kernel_size - 1) % 2 == 0, "Even kernel sizes are not supported."
self.stack = torch.nn.Sequential(
getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
getattr(torch.nn, pad)((kernel_size - 1) // 2 * dilation, **pad_params),
torch.nn.Conv1d(channels, channels, kernel_size, dilation=dilation, bias=bias),
getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
torch.nn.Conv1d(channels, channels, 1, bias=bias),
)
else:
self.stack = torch.nn.Sequential(
getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
CausalConv1d(channels, channels, kernel_size, dilation=dilation,
bias=bias, pad=pad, pad_params=pad_params),
getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
torch.nn.Conv1d(channels, channels, 1, bias=bias),
)
        # define extra layer for skip connection
self.skip_layer = torch.nn.Conv1d(channels, channels, 1, bias=bias)
def forward(self, c):
"""Calculate forward propagation.
Args:
c (Tensor): Input tensor (B, channels, T).
Returns:
            Tensor: Output tensor (B, channels, T).
"""
return self.stack(c) + self.skip_layer(c)
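

# A minimal shape check (illustrative; the sizes are assumptions). The stack is shape
# preserving, so its output can be chained with further stacks or upsampling layers.
if __name__ == "__main__":
    _stack = ResidualStack(channels=32, dilation=3)
    _c = torch.randn(4, 32, 200)
    print(_stack(_c).shape)  # expected: (4, 32, 200)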
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/layers/residual_stack.py |
from .utils import * # NOQA
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/utils/__init__.py |
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Utility functions."""
import fnmatch
import logging
import os
import sys
import h5py
import numpy as np
def find_files(root_dir, query="*.wav", include_root_dir=True):
"""Find files recursively.
Args:
        root_dir (str): Root directory to search.
query (str): Query to find.
include_root_dir (bool): If False, root_dir name is not included.
Returns:
list: List of found filenames.
"""
files = []
for root, dirnames, filenames in os.walk(root_dir, followlinks=True):
for filename in fnmatch.filter(filenames, query):
files.append(os.path.join(root, filename))
if not include_root_dir:
files = [file_.replace(root_dir + "/", "") for file_ in files]
return files
def read_hdf5(hdf5_name, hdf5_path):
"""Read hdf5 dataset.
Args:
hdf5_name (str): Filename of hdf5 file.
hdf5_path (str): Dataset name in hdf5 file.
Return:
any: Dataset values.
"""
if not os.path.exists(hdf5_name):
logging.error(f"There is no such a hdf5 file ({hdf5_name}).")
sys.exit(1)
hdf5_file = h5py.File(hdf5_name, "r")
if hdf5_path not in hdf5_file:
logging.error(f"There is no such a data in hdf5 file. ({hdf5_path})")
sys.exit(1)
hdf5_data = hdf5_file[hdf5_path][()]
hdf5_file.close()
return hdf5_data
def write_hdf5(hdf5_name, hdf5_path, write_data, is_overwrite=True):
"""Write dataset to hdf5.
Args:
hdf5_name (str): Hdf5 dataset filename.
hdf5_path (str): Dataset path in hdf5.
write_data (ndarray): Data to write.
is_overwrite (bool): Whether to overwrite dataset.
"""
# convert to numpy array
write_data = np.array(write_data)
# check folder existence
folder_name, _ = os.path.split(hdf5_name)
if not os.path.exists(folder_name) and len(folder_name) != 0:
os.makedirs(folder_name)
# check hdf5 existence
if os.path.exists(hdf5_name):
# if already exists, open with r+ mode
hdf5_file = h5py.File(hdf5_name, "r+")
# check dataset existence
if hdf5_path in hdf5_file:
if is_overwrite:
logging.warning("Dataset in hdf5 file already exists. "
"recreate dataset in hdf5.")
hdf5_file.__delitem__(hdf5_path)
else:
logging.error("Dataset in hdf5 file already exists. "
"if you want to overwrite, please set is_overwrite = True.")
hdf5_file.close()
sys.exit(1)
else:
# if not exists, open with w mode
hdf5_file = h5py.File(hdf5_name, "w")
# write data to hdf5
hdf5_file.create_dataset(hdf5_path, data=write_data)
hdf5_file.flush()
hdf5_file.close()
class HDF5ScpLoader(object):
"""Loader class for a fests.scp file of hdf5 file.
Examples:
key1 /some/path/a.h5:feats
key2 /some/path/b.h5:feats
key3 /some/path/c.h5:feats
key4 /some/path/d.h5:feats
...
>>> loader = HDF5ScpLoader("hdf5.scp")
>>> array = loader["key1"]
key1 /some/path/a.h5
key2 /some/path/b.h5
key3 /some/path/c.h5
key4 /some/path/d.h5
...
>>> loader = HDF5ScpLoader("hdf5.scp", "feats")
>>> array = loader["key1"]
"""
def __init__(self, feats_scp, default_hdf5_path="feats"):
"""Initialize HDF5 scp loader.
Args:
feats_scp (str): Kaldi-style feats.scp file with hdf5 format.
            default_hdf5_path (str): Path in hdf5 file. Not used if the scp already contains the path info.
"""
self.default_hdf5_path = default_hdf5_path
with open(feats_scp) as f:
lines = [line.replace("\n", "") for line in f.readlines()]
self.data = {}
for line in lines:
key, value = line.split()
self.data[key] = value
def get_path(self, key):
"""Get hdf5 file path for a given key."""
return self.data[key]
def __getitem__(self, key):
"""Get ndarray for a given key."""
p = self.data[key]
if ":" in p:
return read_hdf5(*p.split(":"))
else:
return read_hdf5(p, self.default_hdf5_path)
def __len__(self):
"""Return the length of the scp file."""
return len(self.data)
def __iter__(self):
"""Return the iterator of the scp file."""
return iter(self.data)
def keys(self):
"""Return the keys of the scp file."""
return self.data.keys()
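

# A minimal round-trip sketch (illustrative; the file name and array contents are
# arbitrary): write an array with write_hdf5 and read it back with read_hdf5.
if __name__ == "__main__":
    import tempfile
    _path = os.path.join(tempfile.mkdtemp(), "example.h5")
    write_hdf5(_path, "feats", np.random.randn(10, 80))
    print(read_hdf5(_path, "feats").shape)  # expected: (10, 80)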
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/utils/utils.py |
from .melgan import * # NOQA
from .parallel_wavegan import * # NOQA
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/models/__init__.py |
# -*- coding: utf-8 -*-
# Copyright 2020 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""MelGAN Modules."""
import logging
import numpy as np
import torch
from modules.parallel_wavegan.layers import CausalConv1d
from modules.parallel_wavegan.layers import CausalConvTranspose1d
from modules.parallel_wavegan.layers import ResidualStack
class MelGANGenerator(torch.nn.Module):
"""MelGAN generator module."""
def __init__(self,
in_channels=80,
out_channels=1,
kernel_size=7,
channels=512,
bias=True,
upsample_scales=[8, 8, 2, 2],
stack_kernel_size=3,
stacks=3,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"negative_slope": 0.2},
pad="ReflectionPad1d",
pad_params={},
use_final_nonlinear_activation=True,
use_weight_norm=True,
use_causal_conv=False,
):
"""Initialize MelGANGenerator module.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
kernel_size (int): Kernel size of initial and final conv layer.
channels (int): Initial number of channels for conv layer.
bias (bool): Whether to add bias parameter in convolution layers.
upsample_scales (list): List of upsampling scales.
stack_kernel_size (int): Kernel size of dilated conv layers in residual stack.
stacks (int): Number of stacks in a single residual stack.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function.
pad (str): Padding function module name before dilated convolution layer.
pad_params (dict): Hyperparameters for padding function.
            use_final_nonlinear_activation (bool): Whether to apply a final activation (Tanh) to the output layer.
use_weight_norm (bool): Whether to use weight norm.
If set to true, it will be applied to all of the conv layers.
use_causal_conv (bool): Whether to use causal convolution.
"""
super(MelGANGenerator, self).__init__()
# check hyper parameters is valid
assert channels >= np.prod(upsample_scales)
assert channels % (2 ** len(upsample_scales)) == 0
if not use_causal_conv:
            assert (kernel_size - 1) % 2 == 0, "Even kernel sizes are not supported."
# add initial layer
layers = []
if not use_causal_conv:
layers += [
getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params),
torch.nn.Conv1d(in_channels, channels, kernel_size, bias=bias),
]
else:
layers += [
CausalConv1d(in_channels, channels, kernel_size,
bias=bias, pad=pad, pad_params=pad_params),
]
for i, upsample_scale in enumerate(upsample_scales):
# add upsampling layer
layers += [getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)]
if not use_causal_conv:
layers += [
torch.nn.ConvTranspose1d(
channels // (2 ** i),
channels // (2 ** (i + 1)),
upsample_scale * 2,
stride=upsample_scale,
padding=upsample_scale // 2 + upsample_scale % 2,
output_padding=upsample_scale % 2,
bias=bias,
)
]
else:
layers += [
CausalConvTranspose1d(
channels // (2 ** i),
channels // (2 ** (i + 1)),
upsample_scale * 2,
stride=upsample_scale,
bias=bias,
)
]
# add residual stack
for j in range(stacks):
layers += [
ResidualStack(
kernel_size=stack_kernel_size,
channels=channels // (2 ** (i + 1)),
dilation=stack_kernel_size ** j,
bias=bias,
nonlinear_activation=nonlinear_activation,
nonlinear_activation_params=nonlinear_activation_params,
pad=pad,
pad_params=pad_params,
use_causal_conv=use_causal_conv,
)
]
# add final layer
layers += [getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params)]
if not use_causal_conv:
layers += [
getattr(torch.nn, pad)((kernel_size - 1) // 2, **pad_params),
torch.nn.Conv1d(channels // (2 ** (i + 1)), out_channels, kernel_size, bias=bias),
]
else:
layers += [
CausalConv1d(channels // (2 ** (i + 1)), out_channels, kernel_size,
bias=bias, pad=pad, pad_params=pad_params),
]
if use_final_nonlinear_activation:
layers += [torch.nn.Tanh()]
# define the model as a single function
self.melgan = torch.nn.Sequential(*layers)
# apply weight norm
if use_weight_norm:
self.apply_weight_norm()
# reset parameters
self.reset_parameters()
def forward(self, c):
"""Calculate forward propagation.
Args:
c (Tensor): Input tensor (B, channels, T).
Returns:
            Tensor: Output tensor (B, 1, T * prod(upsample_scales)).
"""
return self.melgan(c)
def remove_weight_norm(self):
"""Remove weight normalization module from all of the layers."""
def _remove_weight_norm(m):
try:
logging.debug(f"Weight norm is removed from {m}.")
torch.nn.utils.remove_weight_norm(m)
except ValueError: # this module didn't have weight norm
return
self.apply(_remove_weight_norm)
def apply_weight_norm(self):
"""Apply weight normalization module from all of the layers."""
def _apply_weight_norm(m):
if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
torch.nn.utils.weight_norm(m)
logging.debug(f"Weight norm is applied to {m}.")
self.apply(_apply_weight_norm)
def reset_parameters(self):
"""Reset parameters.
This initialization follows official implementation manner.
https://github.com/descriptinc/melgan-neurips/blob/master/spec2wav/modules.py
"""
def _reset_parameters(m):
if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
m.weight.data.normal_(0.0, 0.02)
logging.debug(f"Reset parameters in {m}.")
self.apply(_reset_parameters)
class MelGANDiscriminator(torch.nn.Module):
"""MelGAN discriminator module."""
def __init__(self,
in_channels=1,
out_channels=1,
kernel_sizes=[5, 3],
channels=16,
max_downsample_channels=1024,
bias=True,
downsample_scales=[4, 4, 4, 4],
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"negative_slope": 0.2},
pad="ReflectionPad1d",
pad_params={},
):
"""Initilize MelGAN discriminator module.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer,
and the first and the second kernel sizes will be used for the last two layers.
For example if kernel_sizes = [5, 3], the first layer kernel size will be 5 * 3 = 15,
the last two layers' kernel size will be 5 and 3, respectively.
channels (int): Initial number of channels for conv layer.
max_downsample_channels (int): Maximum number of channels for downsampling layers.
bias (bool): Whether to add bias parameter in convolution layers.
downsample_scales (list): List of downsampling scales.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function.
pad (str): Padding function module name before dilated convolution layer.
pad_params (dict): Hyperparameters for padding function.
"""
super(MelGANDiscriminator, self).__init__()
self.layers = torch.nn.ModuleList()
# check kernel size is valid
assert len(kernel_sizes) == 2
assert kernel_sizes[0] % 2 == 1
assert kernel_sizes[1] % 2 == 1
# add first layer
self.layers += [
torch.nn.Sequential(
getattr(torch.nn, pad)((np.prod(kernel_sizes) - 1) // 2, **pad_params),
torch.nn.Conv1d(in_channels, channels, np.prod(kernel_sizes), bias=bias),
getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
)
]
# add downsample layers
in_chs = channels
for downsample_scale in downsample_scales:
out_chs = min(in_chs * downsample_scale, max_downsample_channels)
self.layers += [
torch.nn.Sequential(
torch.nn.Conv1d(
in_chs, out_chs,
kernel_size=downsample_scale * 10 + 1,
stride=downsample_scale,
padding=downsample_scale * 5,
groups=in_chs // 4,
bias=bias,
),
getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
)
]
in_chs = out_chs
# add final layers
out_chs = min(in_chs * 2, max_downsample_channels)
self.layers += [
torch.nn.Sequential(
torch.nn.Conv1d(
in_chs, out_chs, kernel_sizes[0],
padding=(kernel_sizes[0] - 1) // 2,
bias=bias,
),
getattr(torch.nn, nonlinear_activation)(**nonlinear_activation_params),
)
]
self.layers += [
torch.nn.Conv1d(
out_chs, out_channels, kernel_sizes[1],
padding=(kernel_sizes[1] - 1) // 2,
bias=bias,
),
]
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, 1, T).
Returns:
List: List of output tensors of each layer.
"""
outs = []
for f in self.layers:
x = f(x)
outs += [x]
return outs
class MelGANMultiScaleDiscriminator(torch.nn.Module):
"""MelGAN multi-scale discriminator module."""
def __init__(self,
in_channels=1,
out_channels=1,
scales=3,
downsample_pooling="AvgPool1d",
# follow the official implementation setting
downsample_pooling_params={
"kernel_size": 4,
"stride": 2,
"padding": 1,
"count_include_pad": False,
},
kernel_sizes=[5, 3],
channels=16,
max_downsample_channels=1024,
bias=True,
downsample_scales=[4, 4, 4, 4],
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"negative_slope": 0.2},
pad="ReflectionPad1d",
pad_params={},
use_weight_norm=True,
):
"""Initilize MelGAN multi-scale discriminator module.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
            scales (int): Number of multi-scale discriminators.
            downsample_pooling (str): Pooling module name for downsampling of the inputs.
            downsample_pooling_params (dict): Parameters for the above pooling module.
            kernel_sizes (list): List of two kernel sizes. The prod will be used for the first conv layer,
and the first and the second kernel sizes will be used for the last two layers.
channels (int): Initial number of channels for conv layer.
max_downsample_channels (int): Maximum number of channels for downsampling layers.
bias (bool): Whether to add bias parameter in convolution layers.
downsample_scales (list): List of downsampling scales.
nonlinear_activation (str): Activation function module name.
nonlinear_activation_params (dict): Hyperparameters for activation function.
pad (str): Padding function module name before dilated convolution layer.
pad_params (dict): Hyperparameters for padding function.
            use_weight_norm (bool): Whether to use weight norm.
"""
super(MelGANMultiScaleDiscriminator, self).__init__()
self.discriminators = torch.nn.ModuleList()
# add discriminators
for _ in range(scales):
self.discriminators += [
MelGANDiscriminator(
in_channels=in_channels,
out_channels=out_channels,
kernel_sizes=kernel_sizes,
channels=channels,
max_downsample_channels=max_downsample_channels,
bias=bias,
downsample_scales=downsample_scales,
nonlinear_activation=nonlinear_activation,
nonlinear_activation_params=nonlinear_activation_params,
pad=pad,
pad_params=pad_params,
)
]
self.pooling = getattr(torch.nn, downsample_pooling)(**downsample_pooling_params)
# apply weight norm
if use_weight_norm:
self.apply_weight_norm()
# reset parameters
self.reset_parameters()
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, 1, T).
Returns:
List: List of list of each discriminator outputs, which consists of each layer output tensors.
"""
outs = []
for f in self.discriminators:
outs += [f(x)]
x = self.pooling(x)
return outs
def remove_weight_norm(self):
"""Remove weight normalization module from all of the layers."""
def _remove_weight_norm(m):
try:
logging.debug(f"Weight norm is removed from {m}.")
torch.nn.utils.remove_weight_norm(m)
except ValueError: # this module didn't have weight norm
return
self.apply(_remove_weight_norm)
def apply_weight_norm(self):
"""Apply weight normalization module from all of the layers."""
def _apply_weight_norm(m):
if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
torch.nn.utils.weight_norm(m)
logging.debug(f"Weight norm is applied to {m}.")
self.apply(_apply_weight_norm)
def reset_parameters(self):
"""Reset parameters.
This initialization follows official implementation manner.
https://github.com/descriptinc/melgan-neurips/blob/master/spec2wav/modules.py
"""
def _reset_parameters(m):
if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.ConvTranspose1d):
m.weight.data.normal_(0.0, 0.02)
logging.debug(f"Reset parameters in {m}.")
self.apply(_reset_parameters)
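

# A minimal usage sketch (illustrative; batch size and frame count are assumptions).
# The generator upsamples a mel-spectrogram by prod(upsample_scales) = 256 frames to
# samples; the multi-scale discriminator returns, per scale, the per-layer feature maps.
if __name__ == "__main__":
    _gen = MelGANGenerator()
    _mel = torch.randn(1, 80, 10)                  # (B, in_channels, T)
    _wav = _gen(_mel)                              # expected: (1, 1, 10 * 256)
    _outs = MelGANMultiScaleDiscriminator()(_wav)
    print(_wav.shape, len(_outs), len(_outs[0]))   # expected: (1, 1, 2560), 3 scales, 7 layer outputs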
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/models/melgan.py |
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Parallel WaveGAN Modules."""
import logging
import math
import torch
from torch import nn
from modules.parallel_wavegan.layers import Conv1d
from modules.parallel_wavegan.layers import Conv1d1x1
from modules.parallel_wavegan.layers import ResidualBlock
from modules.parallel_wavegan.layers import upsample
from modules.parallel_wavegan import models
class ParallelWaveGANGenerator(torch.nn.Module):
"""Parallel WaveGAN Generator module."""
def __init__(self,
in_channels=1,
out_channels=1,
kernel_size=3,
layers=30,
stacks=3,
residual_channels=64,
gate_channels=128,
skip_channels=64,
aux_channels=80,
aux_context_window=2,
dropout=0.0,
bias=True,
use_weight_norm=True,
use_causal_conv=False,
upsample_conditional_features=True,
upsample_net="ConvInUpsampleNetwork",
upsample_params={"upsample_scales": [4, 4, 4, 4]},
use_pitch_embed=False,
):
"""Initialize Parallel WaveGAN Generator module.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
kernel_size (int): Kernel size of dilated convolution.
layers (int): Number of residual block layers.
stacks (int): Number of stacks i.e., dilation cycles.
residual_channels (int): Number of channels in residual conv.
gate_channels (int): Number of channels in gated conv.
skip_channels (int): Number of channels in skip conv.
aux_channels (int): Number of channels for auxiliary feature conv.
aux_context_window (int): Context window size for auxiliary feature.
dropout (float): Dropout rate. 0.0 means no dropout applied.
bias (bool): Whether to use bias parameter in conv layer.
use_weight_norm (bool): Whether to use weight norm.
If set to true, it will be applied to all of the conv layers.
use_causal_conv (bool): Whether to use causal structure.
upsample_conditional_features (bool): Whether to use upsampling network.
upsample_net (str): Upsampling network architecture.
upsample_params (dict): Upsampling network parameters.
"""
super(ParallelWaveGANGenerator, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.aux_channels = aux_channels
self.layers = layers
self.stacks = stacks
self.kernel_size = kernel_size
# check the number of layers and stacks
assert layers % stacks == 0
layers_per_stack = layers // stacks
# define first convolution
self.first_conv = Conv1d1x1(in_channels, residual_channels, bias=True)
# define conv + upsampling network
if upsample_conditional_features:
upsample_params.update({
"use_causal_conv": use_causal_conv,
})
if upsample_net == "MelGANGenerator":
assert aux_context_window == 0
upsample_params.update({
"use_weight_norm": False, # not to apply twice
"use_final_nonlinear_activation": False,
})
self.upsample_net = getattr(models, upsample_net)(**upsample_params)
else:
if upsample_net == "ConvInUpsampleNetwork":
upsample_params.update({
"aux_channels": aux_channels,
"aux_context_window": aux_context_window,
})
self.upsample_net = getattr(upsample, upsample_net)(**upsample_params)
else:
self.upsample_net = None
# define residual blocks
self.conv_layers = torch.nn.ModuleList()
for layer in range(layers):
dilation = 2 ** (layer % layers_per_stack)
conv = ResidualBlock(
kernel_size=kernel_size,
residual_channels=residual_channels,
gate_channels=gate_channels,
skip_channels=skip_channels,
aux_channels=aux_channels,
dilation=dilation,
dropout=dropout,
bias=bias,
use_causal_conv=use_causal_conv,
)
self.conv_layers += [conv]
# define output layers
self.last_conv_layers = torch.nn.ModuleList([
torch.nn.ReLU(inplace=True),
Conv1d1x1(skip_channels, skip_channels, bias=True),
torch.nn.ReLU(inplace=True),
Conv1d1x1(skip_channels, out_channels, bias=True),
])
self.use_pitch_embed = use_pitch_embed
if use_pitch_embed:
self.pitch_embed = nn.Embedding(300, aux_channels, 0)
self.c_proj = nn.Linear(2 * aux_channels, aux_channels)
# apply weight norm
if use_weight_norm:
self.apply_weight_norm()
def forward(self, x, c=None, pitch=None, **kwargs):
"""Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, C_in, T).
c (Tensor): Local conditioning auxiliary features (B, C ,T').
pitch (Tensor): Local conditioning pitch (B, T').
Returns:
Tensor: Output tensor (B, C_out, T)
"""
# perform upsampling
if c is not None and self.upsample_net is not None:
if self.use_pitch_embed:
p = self.pitch_embed(pitch)
c = self.c_proj(torch.cat([c.transpose(1, 2), p], -1)).transpose(1, 2)
c = self.upsample_net(c)
assert c.size(-1) == x.size(-1), (c.size(-1), x.size(-1))
# encode to hidden representation
x = self.first_conv(x)
skips = 0
for f in self.conv_layers:
x, h = f(x, c)
skips += h
skips *= math.sqrt(1.0 / len(self.conv_layers))
# apply final layers
x = skips
for f in self.last_conv_layers:
x = f(x)
return x
def remove_weight_norm(self):
"""Remove weight normalization module from all of the layers."""
def _remove_weight_norm(m):
try:
logging.debug(f"Weight norm is removed from {m}.")
torch.nn.utils.remove_weight_norm(m)
except ValueError: # this module didn't have weight norm
return
self.apply(_remove_weight_norm)
def apply_weight_norm(self):
"""Apply weight normalization module from all of the layers."""
def _apply_weight_norm(m):
if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d):
torch.nn.utils.weight_norm(m)
logging.debug(f"Weight norm is applied to {m}.")
self.apply(_apply_weight_norm)
@staticmethod
def _get_receptive_field_size(layers, stacks, kernel_size,
dilation=lambda x: 2 ** x):
assert layers % stacks == 0
layers_per_cycle = layers // stacks
dilations = [dilation(i % layers_per_cycle) for i in range(layers)]
return (kernel_size - 1) * sum(dilations) + 1
@property
def receptive_field_size(self):
"""Return receptive field size."""
return self._get_receptive_field_size(self.layers, self.stacks, self.kernel_size)
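

# Worked example (illustrative, derived from the defaults above): with layers=30,
# stacks=3 and kernel_size=3, each cycle uses dilations 1, 2, ..., 512, so
# sum(dilations) = 3 * 1023 = 3069 and the receptive field is (3 - 1) * 3069 + 1 = 6139.
if __name__ == "__main__":
    assert ParallelWaveGANGenerator._get_receptive_field_size(30, 3, 3) == 6139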
class ParallelWaveGANDiscriminator(torch.nn.Module):
"""Parallel WaveGAN Discriminator module."""
def __init__(self,
in_channels=1,
out_channels=1,
kernel_size=3,
layers=10,
conv_channels=64,
dilation_factor=1,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"negative_slope": 0.2},
bias=True,
use_weight_norm=True,
):
"""Initialize Parallel WaveGAN Discriminator module.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
            kernel_size (int): Kernel size of conv layers.
            layers (int): Number of conv layers.
            conv_channels (int): Number of channels in conv layers.
dilation_factor (int): Dilation factor. For example, if dilation_factor = 2,
the dilation will be 2, 4, 8, ..., and so on.
nonlinear_activation (str): Nonlinear function after each conv.
nonlinear_activation_params (dict): Nonlinear function parameters
bias (bool): Whether to use bias parameter in conv.
            use_weight_norm (bool): Whether to use weight norm.
If set to true, it will be applied to all of the conv layers.
"""
super(ParallelWaveGANDiscriminator, self).__init__()
        assert (kernel_size - 1) % 2 == 0, "Even kernel sizes are not supported."
assert dilation_factor > 0, "Dilation factor must be > 0."
self.conv_layers = torch.nn.ModuleList()
conv_in_channels = in_channels
for i in range(layers - 1):
if i == 0:
dilation = 1
else:
dilation = i if dilation_factor == 1 else dilation_factor ** i
conv_in_channels = conv_channels
padding = (kernel_size - 1) // 2 * dilation
conv_layer = [
Conv1d(conv_in_channels, conv_channels,
kernel_size=kernel_size, padding=padding,
dilation=dilation, bias=bias),
getattr(torch.nn, nonlinear_activation)(inplace=True, **nonlinear_activation_params)
]
self.conv_layers += conv_layer
padding = (kernel_size - 1) // 2
last_conv_layer = Conv1d(
conv_in_channels, out_channels,
kernel_size=kernel_size, padding=padding, bias=bias)
self.conv_layers += [last_conv_layer]
# apply weight norm
if use_weight_norm:
self.apply_weight_norm()
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, 1, T).
Returns:
Tensor: Output tensor (B, 1, T)
"""
for f in self.conv_layers:
x = f(x)
return x
def apply_weight_norm(self):
"""Apply weight normalization module from all of the layers."""
def _apply_weight_norm(m):
if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d):
torch.nn.utils.weight_norm(m)
logging.debug(f"Weight norm is applied to {m}.")
self.apply(_apply_weight_norm)
def remove_weight_norm(self):
"""Remove weight normalization module from all of the layers."""
def _remove_weight_norm(m):
try:
logging.debug(f"Weight norm is removed from {m}.")
torch.nn.utils.remove_weight_norm(m)
except ValueError: # this module didn't have weight norm
return
self.apply(_remove_weight_norm)
class ResidualParallelWaveGANDiscriminator(torch.nn.Module):
"""Parallel WaveGAN Discriminator module."""
def __init__(self,
in_channels=1,
out_channels=1,
kernel_size=3,
layers=30,
stacks=3,
residual_channels=64,
gate_channels=128,
skip_channels=64,
dropout=0.0,
bias=True,
use_weight_norm=True,
use_causal_conv=False,
nonlinear_activation="LeakyReLU",
nonlinear_activation_params={"negative_slope": 0.2},
):
"""Initialize Parallel WaveGAN Discriminator module.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
kernel_size (int): Kernel size of dilated convolution.
layers (int): Number of residual block layers.
stacks (int): Number of stacks i.e., dilation cycles.
residual_channels (int): Number of channels in residual conv.
gate_channels (int): Number of channels in gated conv.
skip_channels (int): Number of channels in skip conv.
dropout (float): Dropout rate. 0.0 means no dropout applied.
bias (bool): Whether to use bias parameter in conv.
use_weight_norm (bool): Whether to use weight norm.
If set to true, it will be applied to all of the conv layers.
use_causal_conv (bool): Whether to use causal structure.
            nonlinear_activation (str): Nonlinear activation function name.
            nonlinear_activation_params (dict): Nonlinear activation function parameters.
"""
super(ResidualParallelWaveGANDiscriminator, self).__init__()
        assert (kernel_size - 1) % 2 == 0, "Even kernel sizes are not supported."
self.in_channels = in_channels
self.out_channels = out_channels
self.layers = layers
self.stacks = stacks
self.kernel_size = kernel_size
# check the number of layers and stacks
assert layers % stacks == 0
layers_per_stack = layers // stacks
# define first convolution
self.first_conv = torch.nn.Sequential(
Conv1d1x1(in_channels, residual_channels, bias=True),
getattr(torch.nn, nonlinear_activation)(
inplace=True, **nonlinear_activation_params),
)
# define residual blocks
self.conv_layers = torch.nn.ModuleList()
for layer in range(layers):
dilation = 2 ** (layer % layers_per_stack)
conv = ResidualBlock(
kernel_size=kernel_size,
residual_channels=residual_channels,
gate_channels=gate_channels,
skip_channels=skip_channels,
aux_channels=-1,
dilation=dilation,
dropout=dropout,
bias=bias,
use_causal_conv=use_causal_conv,
)
self.conv_layers += [conv]
# define output layers
self.last_conv_layers = torch.nn.ModuleList([
getattr(torch.nn, nonlinear_activation)(
inplace=True, **nonlinear_activation_params),
Conv1d1x1(skip_channels, skip_channels, bias=True),
getattr(torch.nn, nonlinear_activation)(
inplace=True, **nonlinear_activation_params),
Conv1d1x1(skip_channels, out_channels, bias=True),
])
# apply weight norm
if use_weight_norm:
self.apply_weight_norm()
def forward(self, x):
"""Calculate forward propagation.
Args:
x (Tensor): Input noise signal (B, 1, T).
Returns:
Tensor: Output tensor (B, 1, T)
"""
x = self.first_conv(x)
skips = 0
for f in self.conv_layers:
x, h = f(x, None)
skips += h
skips *= math.sqrt(1.0 / len(self.conv_layers))
# apply final layers
x = skips
for f in self.last_conv_layers:
x = f(x)
return x
def apply_weight_norm(self):
"""Apply weight normalization module from all of the layers."""
def _apply_weight_norm(m):
if isinstance(m, torch.nn.Conv1d) or isinstance(m, torch.nn.Conv2d):
torch.nn.utils.weight_norm(m)
logging.debug(f"Weight norm is applied to {m}.")
self.apply(_apply_weight_norm)
def remove_weight_norm(self):
"""Remove weight normalization module from all of the layers."""
def _remove_weight_norm(m):
try:
logging.debug(f"Weight norm is removed from {m}.")
torch.nn.utils.remove_weight_norm(m)
except ValueError: # this module didn't have weight norm
return
self.apply(_remove_weight_norm)
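

# A minimal end-to-end sketch (illustrative; shapes and settings below are assumptions,
# not a training configuration): generate a waveform from noise plus a mel-spectrogram
# and score it with the (non-residual) discriminator.
if __name__ == "__main__":
    _gen = ParallelWaveGANGenerator()                # defaults: aux_context_window=2, 4x4x4x4 upsampling
    _c = torch.randn(1, 80, 20)                      # (B, aux_channels, T'), T' includes the context window
    _x = torch.randn(1, 1, (20 - 2 * 2) * 256)       # noise whose length matches the upsampled features
    _wav = _gen(_x, _c)                              # expected: (1, 1, 4096)
    _score = ParallelWaveGANDiscriminator()(_wav)    # expected: (1, 1, 4096)
    print(_wav.shape, _score.shape)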
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/models/parallel_wavegan.py |
import torch
import numpy as np
import sys
import torch.nn.functional as torch_nn_func
class SineGen(torch.nn.Module):
""" Definition of sine generator
SineGen(samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False)
samp_rate: sampling rate in Hz
harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine-waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
    flag_for_pulse: this SineGen is used inside PulseGen (default False)
Note: when flag_for_pulse is True, the first time step of a voiced
segment is always sin(np.pi) or cos(0)
"""
def __init__(self, samp_rate, harmonic_num=0,
sine_amp=0.1, noise_std=0.003,
voiced_threshold=0,
flag_for_pulse=False):
super(SineGen, self).__init__()
self.sine_amp = sine_amp
self.noise_std = noise_std
self.harmonic_num = harmonic_num
self.dim = self.harmonic_num + 1
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
self.flag_for_pulse = flag_for_pulse
def _f02uv(self, f0):
# generate uv signal
uv = torch.ones_like(f0)
uv = uv * (f0 > self.voiced_threshold)
return uv
def _f02sine(self, f0_values):
""" f0_values: (batchsize, length, dim)
where dim indicates fundamental tone and overtones
"""
        # convert to F0 in rad. The integer part n can be ignored
# because 2 * np.pi * n doesn't affect phase
rad_values = (f0_values / self.sampling_rate) % 1
# initial phase noise (no noise for fundamental component)
rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \
device=f0_values.device)
rand_ini[:, 0] = 0
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
        # instantaneous phase sine[t] = sin(2*pi \sum_{i=1}^{t} rad)
if not self.flag_for_pulse:
# for normal case
# To prevent torch.cumsum numerical overflow,
# it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
# Buffer tmp_over_one_idx indicates the time step to add -1.
# This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi
tmp_over_one = torch.cumsum(rad_values, 1) % 1
tmp_over_one_idx = (tmp_over_one[:, 1:, :] -
tmp_over_one[:, :-1, :]) < 0
cumsum_shift = torch.zeros_like(rad_values)
cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1)
* 2 * np.pi)
else:
# If necessary, make sure that the first time step of every
# voiced segments is sin(pi) or cos(0)
# This is used for pulse-train generation
# identify the last time step in unvoiced segments
uv = self._f02uv(f0_values)
uv_1 = torch.roll(uv, shifts=-1, dims=1)
uv_1[:, -1, :] = 1
u_loc = (uv < 1) * (uv_1 > 0)
            # get the instantaneous phase
tmp_cumsum = torch.cumsum(rad_values, dim=1)
# different batch needs to be processed differently
for idx in range(f0_values.shape[0]):
temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
# stores the accumulation of i.phase within
# each voiced segments
tmp_cumsum[idx, :, :] = 0
tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
# rad_values - tmp_cumsum: remove the accumulation of i.phase
# within the previous voiced segment.
i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
# get the sines
sines = torch.cos(i_phase * 2 * np.pi)
return sines
def forward(self, f0):
""" sine_tensor, uv = forward(f0)
input F0: tensor(batchsize=1, length, dim=1)
f0 for unvoiced steps should be 0
output sine_tensor: tensor(batchsize=1, length, dim)
output uv: tensor(batchsize=1, length, 1)
"""
with torch.no_grad():
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim,
device=f0.device)
# fundamental component
f0_buf[:, :, 0] = f0[:, :, 0]
for idx in np.arange(self.harmonic_num):
# idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
# generate sine waveforms
sine_waves = self._f02sine(f0_buf) * self.sine_amp
# generate uv signal
# uv = torch.ones(f0.shape)
# uv = uv * (f0 > self.voiced_threshold)
uv = self._f02uv(f0)
# noise: for unvoiced should be similar to sine_amp
# std = self.sine_amp/3 -> max value ~ self.sine_amp
# . for voiced regions is self.noise_std
noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
noise = noise_amp * torch.randn_like(sine_waves)
# first: set the unvoiced part to 0 by uv
# then: additive noise
sine_waves = sine_waves * uv + noise
return sine_waves, uv, noise
class PulseGen(torch.nn.Module):
""" Definition of Pulse train generator
    There are many ways to implement a pulse generator.
    Here, PulseGen is based on SineGen with flag_for_pulse=True.
"""
def __init__(self, samp_rate, pulse_amp = 0.1,
noise_std = 0.003, voiced_threshold = 0):
super(PulseGen, self).__init__()
self.pulse_amp = pulse_amp
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
self.noise_std = noise_std
self.l_sinegen = SineGen(self.sampling_rate, harmonic_num=0, \
sine_amp=self.pulse_amp, noise_std=0, \
voiced_threshold=self.voiced_threshold, \
flag_for_pulse=True)
def forward(self, f0):
""" Pulse train generator
pulse_train, uv = forward(f0)
input F0: tensor(batchsize=1, length, dim=1)
f0 for unvoiced steps should be 0
output pulse_train: tensor(batchsize=1, length, dim)
output uv: tensor(batchsize=1, length, 1)
        Note: self.l_sinegen doesn't make sure that the initial phase of
a voiced segment is np.pi, the first pulse in a voiced segment
may not be at the first time step within a voiced segment
"""
with torch.no_grad():
sine_wav, uv, noise = self.l_sinegen(f0)
# sine without additive noise
pure_sine = sine_wav - noise
# step t corresponds to a pulse if
# sine[t] > sine[t+1] & sine[t] > sine[t-1]
# & sine[t-1], sine[t+1], and sine[t] are voiced
# or
# sine[t] is voiced, sine[t-1] is unvoiced
# we use torch.roll to simulate sine[t+1] and sine[t-1]
sine_1 = torch.roll(pure_sine, shifts=1, dims=1)
uv_1 = torch.roll(uv, shifts=1, dims=1)
uv_1[:, 0, :] = 0
sine_2 = torch.roll(pure_sine, shifts=-1, dims=1)
uv_2 = torch.roll(uv, shifts=-1, dims=1)
uv_2[:, -1, :] = 0
loc = (pure_sine > sine_1) * (pure_sine > sine_2) \
* (uv_1 > 0) * (uv_2 > 0) * (uv > 0) \
+ (uv_1 < 1) * (uv > 0)
# pulse train without noise
pulse_train = pure_sine * loc
# additive noise to pulse train
# note that noise from sinegen is zero in voiced regions
pulse_noise = torch.randn_like(pure_sine) * self.noise_std
# with additive noise on pulse, and unvoiced regions
pulse_train += pulse_noise * loc + pulse_noise * (1 - uv)
return pulse_train, sine_wav, uv, pulse_noise
class SignalsConv1d(torch.nn.Module):
""" Filtering input signal with time invariant filter
Note: FIRFilter conducted filtering given fixed FIR weight
SignalsConv1d convolves two signals
Note: this is based on torch.nn.functional.conv1d
"""
def __init__(self):
super(SignalsConv1d, self).__init__()
def forward(self, signal, system_ir):
""" output = forward(signal, system_ir)
signal: (batchsize, length1, dim)
system_ir: (length2, dim)
output: (batchsize, length1, dim)
"""
if signal.shape[-1] != system_ir.shape[-1]:
print("Error: SignalsConv1d expects shape:")
print("signal (batchsize, length1, dim)")
print("system_id (batchsize, length2, dim)")
print("But received signal: {:s}".format(str(signal.shape)))
print(" system_ir: {:s}".format(str(system_ir.shape)))
sys.exit(1)
padding_length = system_ir.shape[0] - 1
groups = signal.shape[-1]
# pad signal on the left
signal_pad = torch_nn_func.pad(signal.permute(0, 2, 1), \
(padding_length, 0))
# prepare system impulse response as (dim, 1, length2)
# also flip the impulse response
ir = torch.flip(system_ir.unsqueeze(1).permute(2, 1, 0), \
dims=[2])
# convolute
output = torch_nn_func.conv1d(signal_pad, ir, groups=groups)
return output.permute(0, 2, 1)
class CyclicNoiseGen_v1(torch.nn.Module):
""" CyclicnoiseGen_v1
Cyclic noise with a single parameter of beta.
Pytorch v1 implementation assumes f_t is also fixed
"""
def __init__(self, samp_rate,
noise_std=0.003, voiced_threshold=0):
super(CyclicNoiseGen_v1, self).__init__()
self.samp_rate = samp_rate
self.noise_std = noise_std
self.voiced_threshold = voiced_threshold
self.l_pulse = PulseGen(samp_rate, pulse_amp=1.0,
noise_std=noise_std,
voiced_threshold=voiced_threshold)
self.l_conv = SignalsConv1d()
def noise_decay(self, beta, f0mean):
""" decayed_noise = noise_decay(beta, f0mean)
decayed_noise = n[t]exp(-t * f_mean / beta / samp_rate)
beta: (dim=1) or (batchsize=1, 1, dim=1)
f0mean (batchsize=1, 1, dim=1)
decayed_noise (batchsize=1, length, dim=1)
"""
with torch.no_grad():
# exp(-1.0 n / T) < 0.01 => n > -log(0.01)*T = 4.60*T
# truncate the noise when decayed by -40 dB
length = 4.6 * self.samp_rate / f0mean
length = length.int()
time_idx = torch.arange(0, length, device=beta.device)
time_idx = time_idx.unsqueeze(0).unsqueeze(2)
time_idx = time_idx.repeat(beta.shape[0], 1, beta.shape[2])
noise = torch.randn(time_idx.shape, device=beta.device)
# due to Pytorch implementation, use f0_mean as the f0 factor
decay = torch.exp(-time_idx * f0mean / beta / self.samp_rate)
return noise * self.noise_std * decay
def forward(self, f0s, beta):
""" Producde cyclic-noise
"""
# pulse train
pulse_train, sine_wav, uv, noise = self.l_pulse(f0s)
pure_pulse = pulse_train - noise
# decayed_noise (length, dim=1)
if (uv < 1).all():
# all unvoiced
cyc_noise = torch.zeros_like(sine_wav)
else:
f0mean = f0s[uv > 0].mean()
decayed_noise = self.noise_decay(beta, f0mean)[0, :, :]
# convolute
cyc_noise = self.l_conv(pure_pulse, decayed_noise)
        # add noise in unvoiced segments
cyc_noise = cyc_noise + noise * (1.0 - uv)
return cyc_noise, pulse_train, sine_wav, uv, noise
class SineGen(torch.nn.Module):
""" Definition of sine generator
SineGen(samp_rate, harmonic_num = 0,
sine_amp = 0.1, noise_std = 0.003,
voiced_threshold = 0,
flag_for_pulse=False)
samp_rate: sampling rate in Hz
harmonic_num: number of harmonic overtones (default 0)
    sine_amp: amplitude of sine-waveform (default 0.1)
    noise_std: std of Gaussian noise (default 0.003)
    voiced_threshold: F0 threshold for U/V classification (default 0)
    flag_for_pulse: this SineGen is used inside PulseGen (default False)
Note: when flag_for_pulse is True, the first time step of a voiced
segment is always sin(np.pi) or cos(0)
"""
def __init__(self, samp_rate, harmonic_num=0,
sine_amp=0.1, noise_std=0.003,
voiced_threshold=0,
flag_for_pulse=False):
super(SineGen, self).__init__()
self.sine_amp = sine_amp
self.noise_std = noise_std
self.harmonic_num = harmonic_num
self.dim = self.harmonic_num + 1
self.sampling_rate = samp_rate
self.voiced_threshold = voiced_threshold
self.flag_for_pulse = flag_for_pulse
def _f02uv(self, f0):
# generate uv signal
uv = torch.ones_like(f0)
uv = uv * (f0 > self.voiced_threshold)
return uv
def _f02sine(self, f0_values):
""" f0_values: (batchsize, length, dim)
where dim indicates fundamental tone and overtones
"""
        # convert to F0 in rad. The integer part n can be ignored
# because 2 * np.pi * n doesn't affect phase
rad_values = (f0_values / self.sampling_rate) % 1
# initial phase noise (no noise for fundamental component)
rand_ini = torch.rand(f0_values.shape[0], f0_values.shape[2], \
device=f0_values.device)
rand_ini[:, 0] = 0
rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini
        # instantaneous phase sine[t] = sin(2*pi \sum_{i=1}^{t} rad)
if not self.flag_for_pulse:
# for normal case
# To prevent torch.cumsum numerical overflow,
# it is necessary to add -1 whenever \sum_k=1^n rad_value_k > 1.
# Buffer tmp_over_one_idx indicates the time step to add -1.
# This will not change F0 of sine because (x-1) * 2*pi = x * 2*pi
tmp_over_one = torch.cumsum(rad_values, 1) % 1
tmp_over_one_idx = (tmp_over_one[:, 1:, :] -
tmp_over_one[:, :-1, :]) < 0
cumsum_shift = torch.zeros_like(rad_values)
cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0
sines = torch.sin(torch.cumsum(rad_values + cumsum_shift, dim=1)
* 2 * np.pi)
else:
# If necessary, make sure that the first time step of every
# voiced segments is sin(pi) or cos(0)
# This is used for pulse-train generation
# identify the last time step in unvoiced segments
uv = self._f02uv(f0_values)
uv_1 = torch.roll(uv, shifts=-1, dims=1)
uv_1[:, -1, :] = 1
u_loc = (uv < 1) * (uv_1 > 0)
            # get the instantaneous phase
tmp_cumsum = torch.cumsum(rad_values, dim=1)
# different batch needs to be processed differently
for idx in range(f0_values.shape[0]):
temp_sum = tmp_cumsum[idx, u_loc[idx, :, 0], :]
temp_sum[1:, :] = temp_sum[1:, :] - temp_sum[0:-1, :]
# stores the accumulation of i.phase within
# each voiced segments
tmp_cumsum[idx, :, :] = 0
tmp_cumsum[idx, u_loc[idx, :, 0], :] = temp_sum
# rad_values - tmp_cumsum: remove the accumulation of i.phase
# within the previous voiced segment.
i_phase = torch.cumsum(rad_values - tmp_cumsum, dim=1)
# get the sines
sines = torch.cos(i_phase * 2 * np.pi)
return sines
def forward(self, f0):
""" sine_tensor, uv = forward(f0)
input F0: tensor(batchsize=1, length, dim=1)
f0 for unvoiced steps should be 0
output sine_tensor: tensor(batchsize=1, length, dim)
output uv: tensor(batchsize=1, length, 1)
"""
with torch.no_grad():
f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, \
device=f0.device)
# fundamental component
f0_buf[:, :, 0] = f0[:, :, 0]
for idx in np.arange(self.harmonic_num):
# idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic
f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * (idx + 2)
# generate sine waveforms
sine_waves = self._f02sine(f0_buf) * self.sine_amp
# generate uv signal
# uv = torch.ones(f0.shape)
# uv = uv * (f0 > self.voiced_threshold)
uv = self._f02uv(f0)
# noise: for unvoiced should be similar to sine_amp
# std = self.sine_amp/3 -> max value ~ self.sine_amp
# . for voiced regions is self.noise_std
noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3
noise = noise_amp * torch.randn_like(sine_waves)
# first: set the unvoiced part to 0 by uv
# then: additive noise
sine_waves = sine_waves * uv + noise
return sine_waves, uv, noise
class SourceModuleCycNoise_v1(torch.nn.Module):
""" SourceModuleCycNoise_v1
SourceModule(sampling_rate, noise_std=0.003, voiced_threshod=0)
sampling_rate: sampling_rate in Hz
noise_std: std of Gaussian noise (default: 0.003)
voiced_threshold: threshold to set U/V given F0 (default: 0)
cyc, noise, uv = SourceModuleCycNoise_v1(F0_upsampled, beta)
F0_upsampled (batchsize, length, 1)
beta (1)
cyc (batchsize, length, 1)
noise (batchsize, length, 1)
uv (batchsize, length, 1)
"""
def __init__(self, sampling_rate, noise_std=0.003, voiced_threshod=0):
super(SourceModuleCycNoise_v1, self).__init__()
self.sampling_rate = sampling_rate
self.noise_std = noise_std
self.l_cyc_gen = CyclicNoiseGen_v1(sampling_rate, noise_std,
voiced_threshod)
def forward(self, f0_upsamped, beta):
"""
cyc, noise, uv = SourceModuleCycNoise_v1(F0, beta)
F0_upsampled (batchsize, length, 1)
beta (1)
cyc (batchsize, length, 1)
noise (batchsize, length, 1)
uv (batchsize, length, 1)
"""
# source for harmonic branch
cyc, pulse, sine, uv, add_noi = self.l_cyc_gen(f0_upsamped, beta)
# source for noise branch, in the same shape as uv
noise = torch.randn_like(uv) * self.noise_std / 3
return cyc, noise, uv
class SourceModuleHnNSF(torch.nn.Module):
""" SourceModule for hn-nsf
SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshod=0)
sampling_rate: sampling_rate in Hz
harmonic_num: number of harmonic above F0 (default: 0)
sine_amp: amplitude of sine source signal (default: 0.1)
add_noise_std: std of additive Gaussian noise (default: 0.003)
note that amplitude of noise in unvoiced is decided
by sine_amp
    voiced_threshold: threshold to set U/V given F0 (default: 0)
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
noise_source (batchsize, length 1)
uv (batchsize, length, 1)
"""
def __init__(self, sampling_rate, harmonic_num=0, sine_amp=0.1,
add_noise_std=0.003, voiced_threshod=0):
super(SourceModuleHnNSF, self).__init__()
self.sine_amp = sine_amp
self.noise_std = add_noise_std
# to produce sine waveforms
self.l_sin_gen = SineGen(sampling_rate, harmonic_num,
sine_amp, add_noise_std, voiced_threshod)
# to merge source harmonics into a single excitation
self.l_linear = torch.nn.Linear(harmonic_num + 1, 1)
self.l_tanh = torch.nn.Tanh()
def forward(self, x):
"""
Sine_source, noise_source = SourceModuleHnNSF(F0_sampled)
F0_sampled (batchsize, length, 1)
Sine_source (batchsize, length, 1)
noise_source (batchsize, length 1)
"""
# source for harmonic branch
sine_wavs, uv, _ = self.l_sin_gen(x)
sine_merge = self.l_tanh(self.l_linear(sine_wavs))
# source for noise branch, in the same shape as uv
noise = torch.randn_like(uv) * self.sine_amp / 3
return sine_merge, noise, uv
if __name__ == '__main__':
source = SourceModuleCycNoise_v1(24000)
x = torch.randn(16, 25600, 1)
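    # A minimal smoke test (illustrative; the synthetic F0 contour below is an
    # assumption, zeros would mark unvoiced frames).
    f0 = torch.rand(16, 25600, 1) * 200 + 100
    sine, noise, uv = SourceModuleHnNSF(24000)(f0)
    print(sine.shape, noise.shape, uv.shape)  # expected: each (16, 25600, 1)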
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/models/source.py |
from torch.optim import * # NOQA
from .radam import * # NOQA
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/optimizers/__init__.py |
# -*- coding: utf-8 -*-
"""RAdam optimizer.
This code is derived from https://github.com/LiyuanLucasLiu/RAdam.
"""
import math
import torch
from torch.optim.optimizer import Optimizer
class RAdam(Optimizer):
"""Rectified Adam optimizer."""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0):
"""Initilize RAdam optimizer."""
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
self.buffer = [[None, None, None] for ind in range(10)]
super(RAdam, self).__init__(params, defaults)
def __setstate__(self, state):
"""Set state."""
super(RAdam, self).__setstate__(state)
def step(self, closure=None):
"""Run one step."""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('RAdam does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
exp_avg.mul_(beta1).add_(1 - beta1, grad)
state['step'] += 1
buffered = self.buffer[int(state['step'] % 10)]
if state['step'] == buffered[0]:
N_sma, step_size = buffered[1], buffered[2]
else:
buffered[0] = state['step']
beta2_t = beta2 ** state['step']
N_sma_max = 2 / (1 - beta2) - 1
N_sma = N_sma_max - 2 * state['step'] * beta2_t / (1 - beta2_t)
buffered[1] = N_sma
# more conservative since it's an approximated value
if N_sma >= 5:
step_size = math.sqrt(
(1 - beta2_t) * (N_sma - 4) / (N_sma_max - 4) * (N_sma - 2) / N_sma * N_sma_max / (N_sma_max - 2)) / (1 - beta1 ** state['step']) # NOQA
else:
step_size = 1.0 / (1 - beta1 ** state['step'])
buffered[2] = step_size
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
# more conservative since it's an approximated value
if N_sma >= 5:
denom = exp_avg_sq.sqrt().add_(group['eps'])
p_data_fp32.addcdiv_(-step_size * group['lr'], exp_avg, denom)
else:
p_data_fp32.add_(-step_size * group['lr'], exp_avg)
p.data.copy_(p_data_fp32)
return loss
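

# A minimal usage sketch (illustrative; the model and data are placeholders). Note that
# the update above uses the legacy positional add_/addcmul_/addcdiv_ overloads, so
# recent PyTorch releases may emit deprecation warnings.
if __name__ == "__main__":
    _model = torch.nn.Linear(4, 1)
    _opt = RAdam(_model.parameters(), lr=1e-3)
    _loss = _model(torch.randn(8, 4)).pow(2).mean()
    _loss.backward()
    _opt.step()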
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/parallel_wavegan/optimizers/radam.py |
from modules.commons.common_layers import *
from modules.commons.common_layers import Embedding
from modules.fastspeech.tts_modules import FastspeechDecoder, DurationPredictor, LengthRegulator, PitchPredictor, \
EnergyPredictor, FastspeechEncoder
from utils.cwt import cwt2f0
from utils.hparams import hparams
from utils.pitch_utils import f0_to_coarse, denorm_f0, norm_f0
from modules.fastspeech.fs2 import FastSpeech2
class FastspeechMIDIEncoder(FastspeechEncoder):
def forward_embedding(self, txt_tokens, midi_embedding, midi_dur_embedding, slur_embedding):
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(txt_tokens)
x = x + midi_embedding + midi_dur_embedding + slur_embedding
if hparams['use_pos_embed']:
if hparams.get('rel_pos') is not None and hparams['rel_pos']:
x = self.embed_positions(x)
else:
positions = self.embed_positions(txt_tokens)
x = x + positions
x = F.dropout(x, p=self.dropout, training=self.training)
return x
def forward(self, txt_tokens, midi_embedding, midi_dur_embedding, slur_embedding):
"""
:param txt_tokens: [B, T]
:return: {
'encoder_out': [T x B x C]
}
"""
encoder_padding_mask = txt_tokens.eq(self.padding_idx).data
x = self.forward_embedding(txt_tokens, midi_embedding, midi_dur_embedding, slur_embedding) # [B, T, H]
x = super(FastspeechEncoder, self).forward(x, encoder_padding_mask)
return x
FS_ENCODERS = {
'fft': lambda hp, embed_tokens, d: FastspeechMIDIEncoder(
embed_tokens, hp['hidden_size'], hp['enc_layers'], hp['enc_ffn_kernel_size'],
num_heads=hp['num_heads']),
}
class FastSpeech2MIDI(FastSpeech2):
def __init__(self, dictionary, out_dims=None):
super().__init__(dictionary, out_dims)
del self.encoder
self.encoder = FS_ENCODERS[hparams['encoder_type']](hparams, self.encoder_embed_tokens, self.dictionary)
self.midi_embed = Embedding(300, self.hidden_size, self.padding_idx)
self.midi_dur_layer = Linear(1, self.hidden_size)
self.is_slur_embed = Embedding(2, self.hidden_size)
def forward(self, txt_tokens, mel2ph=None, spk_embed=None,
ref_mels=None, f0=None, uv=None, energy=None, skip_decoder=False,
spk_embed_dur_id=None, spk_embed_f0_id=None, infer=False, **kwargs):
ret = {}
midi_embedding = self.midi_embed(kwargs['pitch_midi'])
midi_dur_embedding, slur_embedding = 0, 0
if kwargs.get('midi_dur') is not None:
midi_dur_embedding = self.midi_dur_layer(kwargs['midi_dur'][:, :, None]) # [B, T, 1] -> [B, T, H]
if kwargs.get('is_slur') is not None:
slur_embedding = self.is_slur_embed(kwargs['is_slur'])
encoder_out = self.encoder(txt_tokens, midi_embedding, midi_dur_embedding, slur_embedding) # [B, T, C]
src_nonpadding = (txt_tokens > 0).float()[:, :, None]
# add ref style embed
# Not implemented
# variance encoder
var_embed = 0
# encoder_out_dur denotes encoder outputs for duration predictor
# in speech adaptation, duration predictor use old speaker embedding
if hparams['use_spk_embed']:
spk_embed_dur = spk_embed_f0 = spk_embed = self.spk_embed_proj(spk_embed)[:, None, :]
elif hparams['use_spk_id']:
spk_embed_id = spk_embed
if spk_embed_dur_id is None:
spk_embed_dur_id = spk_embed_id
if spk_embed_f0_id is None:
spk_embed_f0_id = spk_embed_id
spk_embed = self.spk_embed_proj(spk_embed_id)[:, None, :]
spk_embed_dur = spk_embed_f0 = spk_embed
if hparams['use_split_spk_id']:
spk_embed_dur = self.spk_embed_dur(spk_embed_dur_id)[:, None, :]
spk_embed_f0 = self.spk_embed_f0(spk_embed_f0_id)[:, None, :]
else:
spk_embed_dur = spk_embed_f0 = spk_embed = 0
# add dur
dur_inp = (encoder_out + var_embed + spk_embed_dur) * src_nonpadding
mel2ph = self.add_dur(dur_inp, mel2ph, txt_tokens, ret)
decoder_inp = F.pad(encoder_out, [0, 0, 1, 0])
mel2ph_ = mel2ph[..., None].repeat([1, 1, encoder_out.shape[-1]])
decoder_inp_origin = decoder_inp = torch.gather(decoder_inp, 1, mel2ph_) # [B, T, H]
tgt_nonpadding = (mel2ph > 0).float()[:, :, None]
# add pitch and energy embed
pitch_inp = (decoder_inp_origin + var_embed + spk_embed_f0) * tgt_nonpadding
if hparams['use_pitch_embed']:
pitch_inp_ph = (encoder_out + var_embed + spk_embed_f0) * src_nonpadding
decoder_inp = decoder_inp + self.add_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out=pitch_inp_ph)
if hparams['use_energy_embed']:
decoder_inp = decoder_inp + self.add_energy(pitch_inp, energy, ret)
ret['decoder_inp'] = decoder_inp = (decoder_inp + spk_embed) * tgt_nonpadding
if skip_decoder:
return ret
ret['mel_out'] = self.run_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs)
return ret
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/diffsinger_midi/fs2.py |
import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.onnx.operators
import torch.nn.functional as F
import utils
class Reshape(nn.Module):
def __init__(self, *args):
super(Reshape, self).__init__()
self.shape = args
def forward(self, x):
return x.view(self.shape)
class Permute(nn.Module):
def __init__(self, *args):
super(Permute, self).__init__()
self.args = args
def forward(self, x):
return x.permute(self.args)
class LinearNorm(torch.nn.Module):
def __init__(self, in_dim, out_dim, bias=True, w_init_gain='linear'):
super(LinearNorm, self).__init__()
self.linear_layer = torch.nn.Linear(in_dim, out_dim, bias=bias)
torch.nn.init.xavier_uniform_(
self.linear_layer.weight,
gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, x):
return self.linear_layer(x)
class ConvNorm(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1,
padding=None, dilation=1, bias=True, w_init_gain='linear'):
super(ConvNorm, self).__init__()
if padding is None:
assert (kernel_size % 2 == 1)
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = torch.nn.Conv1d(in_channels, out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
bias=bias)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain))
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
def Embedding(num_embeddings, embedding_dim, padding_idx=None):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
if padding_idx is not None:
nn.init.constant_(m.weight[padding_idx], 0)
return m
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):
if not export and torch.cuda.is_available():
try:
from apex.normalization import FusedLayerNorm
return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
except ImportError:
pass
return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.)
return m
class SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1024):
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.weights = SinusoidalPositionalEmbedding.get_embedding(
init_size,
embedding_dim,
padding_idx,
)
self.register_buffer('_float_tensor', torch.FloatTensor(1))
@staticmethod
def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb
def forward(self, input, incremental_state=None, timestep=None, positions=None, **kwargs):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input.shape[:2]
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
# recompute/expand embeddings if needed
self.weights = SinusoidalPositionalEmbedding.get_embedding(
max_pos,
self.embedding_dim,
self.padding_idx,
)
self.weights = self.weights.to(self._float_tensor)
if incremental_state is not None:
# positions is the same for every token when decoding a single step
pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
positions = utils.make_positions(input, self.padding_idx) if positions is None else positions
return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()
def max_positions(self):
"""Maximum number of supported positions."""
return int(1e5) # an arbitrary large number
class ConvTBC(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, padding=0):
super(ConvTBC, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.padding = padding
self.weight = torch.nn.Parameter(torch.Tensor(
self.kernel_size, in_channels, out_channels))
self.bias = torch.nn.Parameter(torch.Tensor(out_channels))
def forward(self, input):
return torch.conv_tbc(input.contiguous(), self.weight, self.bias, self.padding)
class MultiheadAttention(nn.Module):
def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True,
add_bias_kv=False, add_zero_attn=False, self_attention=False,
encoder_decoder_attention=False):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and ' \
'value to be of the same size'
if self.qkv_same_dim:
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
else:
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.enable_torch_version = False
if hasattr(F, "multi_head_attention_forward"):
self.enable_torch_version = True
else:
self.enable_torch_version = False
self.last_attn_probs = None
def reset_parameters(self):
if self.qkv_same_dim:
nn.init.xavier_uniform_(self.in_proj_weight)
else:
nn.init.xavier_uniform_(self.k_proj_weight)
nn.init.xavier_uniform_(self.v_proj_weight)
nn.init.xavier_uniform_(self.q_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.)
nn.init.constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query, key, value,
key_padding_mask=None,
incremental_state=None,
need_weights=True,
static_kv=False,
attn_mask=None,
before_softmax=False,
need_head_weights=False,
enc_dec_attn_constraint_mask=None,
reset_attn_weight=None
):
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
                averaged over heads (default: True).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if self.enable_torch_version and incremental_state is None and not static_kv and reset_attn_weight is None:
if self.qkv_same_dim:
return F.multi_head_attention_forward(query, key, value,
self.embed_dim, self.num_heads,
self.in_proj_weight,
self.in_proj_bias, self.bias_k, self.bias_v,
self.add_zero_attn, self.dropout,
self.out_proj.weight, self.out_proj.bias,
self.training, key_padding_mask, need_weights,
attn_mask)
else:
return F.multi_head_attention_forward(query, key, value,
self.embed_dim, self.num_heads,
torch.empty([0]),
self.in_proj_bias, self.bias_k, self.bias_v,
self.add_zero_attn, self.dropout,
self.out_proj.weight, self.out_proj.bias,
self.training, key_padding_mask, need_weights,
attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight,
k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight)
if incremental_state is not None:
print('Not implemented error.')
exit()
else:
saved_state = None
if self.self_attention:
# self-attention
q, k, v = self.in_proj_qkv(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.in_proj_q(query)
if key is None:
assert value is None
k = v = None
else:
k = self.in_proj_k(key)
v = self.in_proj_v(key)
else:
q = self.in_proj_q(query)
k = self.in_proj_k(key)
v = self.in_proj_v(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
if saved_state is not None:
print('Not implemented error.')
exit()
src_len = k.size(1)
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]):
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
if len(attn_mask.shape) == 2:
attn_mask = attn_mask.unsqueeze(0)
elif len(attn_mask.shape) == 3:
attn_mask = attn_mask[:, None].repeat([1, self.num_heads, 1, 1]).reshape(
bsz * self.num_heads, tgt_len, src_len)
attn_weights = attn_weights + attn_mask
if enc_dec_attn_constraint_mask is not None: # bs x head x L_kv
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
enc_dec_attn_constraint_mask.unsqueeze(2).bool(),
-1e9,
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
-1e9,
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_logits = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
if before_softmax:
return attn_weights, v
attn_weights_float = utils.softmax(attn_weights, dim=-1)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)
if reset_attn_weight is not None:
if reset_attn_weight:
self.last_attn_probs = attn_probs.detach()
else:
assert self.last_attn_probs is not None
attn_probs = self.last_attn_probs
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
if need_weights:
attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
else:
attn_weights = None
return attn, (attn_weights, attn_logits)
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_q(self, query):
if self.qkv_same_dim:
return self._in_proj(query, end=self.embed_dim)
else:
bias = self.in_proj_bias
if bias is not None:
bias = bias[:self.embed_dim]
return F.linear(query, self.q_proj_weight, bias)
def in_proj_k(self, key):
if self.qkv_same_dim:
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
else:
weight = self.k_proj_weight
bias = self.in_proj_bias
if bias is not None:
bias = bias[self.embed_dim:2 * self.embed_dim]
return F.linear(key, weight, bias)
def in_proj_v(self, value):
if self.qkv_same_dim:
return self._in_proj(value, start=2 * self.embed_dim)
else:
weight = self.v_proj_weight
bias = self.in_proj_bias
if bias is not None:
bias = bias[2 * self.embed_dim:]
return F.linear(value, weight, bias)
def _in_proj(self, input, start=0, end=None):
weight = self.in_proj_weight
bias = self.in_proj_bias
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
return attn_weights
class Swish(torch.autograd.Function):
@staticmethod
def forward(ctx, i):
result = i * torch.sigmoid(i)
ctx.save_for_backward(i)
return result
@staticmethod
def backward(ctx, grad_output):
        i = ctx.saved_tensors[0]
sigmoid_i = torch.sigmoid(i)
return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class CustomSwish(nn.Module):
def forward(self, input_tensor):
return Swish.apply(input_tensor)
class TransformerFFNLayer(nn.Module):
def __init__(self, hidden_size, filter_size, padding="SAME", kernel_size=1, dropout=0., act='gelu'):
super().__init__()
self.kernel_size = kernel_size
self.dropout = dropout
self.act = act
if padding == 'SAME':
self.ffn_1 = nn.Conv1d(hidden_size, filter_size, kernel_size, padding=kernel_size // 2)
elif padding == 'LEFT':
self.ffn_1 = nn.Sequential(
nn.ConstantPad1d((kernel_size - 1, 0), 0.0),
nn.Conv1d(hidden_size, filter_size, kernel_size)
)
self.ffn_2 = Linear(filter_size, hidden_size)
if self.act == 'swish':
self.swish_fn = CustomSwish()
def forward(self, x, incremental_state=None):
# x: T x B x C
if incremental_state is not None:
assert incremental_state is None, 'Nar-generation does not allow this.'
exit(1)
x = self.ffn_1(x.permute(1, 2, 0)).permute(2, 0, 1)
x = x * self.kernel_size ** -0.5
if incremental_state is not None:
x = x[-1:]
if self.act == 'gelu':
x = F.gelu(x)
if self.act == 'relu':
x = F.relu(x)
if self.act == 'swish':
x = self.swish_fn(x)
x = F.dropout(x, self.dropout, training=self.training)
x = self.ffn_2(x)
return x
class BatchNorm1dTBC(nn.Module):
def __init__(self, c):
super(BatchNorm1dTBC, self).__init__()
self.bn = nn.BatchNorm1d(c)
def forward(self, x):
"""
:param x: [T, B, C]
:return: [T, B, C]
"""
x = x.permute(1, 2, 0) # [B, C, T]
x = self.bn(x) # [B, C, T]
x = x.permute(2, 0, 1) # [T, B, C]
return x
class EncSALayer(nn.Module):
def __init__(self, c, num_heads, dropout, attention_dropout=0.1,
relu_dropout=0.1, kernel_size=9, padding='SAME', norm='ln', act='gelu'):
super().__init__()
self.c = c
self.dropout = dropout
self.num_heads = num_heads
if num_heads > 0:
if norm == 'ln':
self.layer_norm1 = LayerNorm(c)
elif norm == 'bn':
self.layer_norm1 = BatchNorm1dTBC(c)
self.self_attn = MultiheadAttention(
self.c, num_heads, self_attention=True, dropout=attention_dropout, bias=False,
)
if norm == 'ln':
self.layer_norm2 = LayerNorm(c)
elif norm == 'bn':
self.layer_norm2 = BatchNorm1dTBC(c)
self.ffn = TransformerFFNLayer(
c, 4 * c, kernel_size=kernel_size, dropout=relu_dropout, padding=padding, act=act)
def forward(self, x, encoder_padding_mask=None, **kwargs):
layer_norm_training = kwargs.get('layer_norm_training', None)
if layer_norm_training is not None:
self.layer_norm1.training = layer_norm_training
self.layer_norm2.training = layer_norm_training
if self.num_heads > 0:
residual = x
x = self.layer_norm1(x)
            x, _ = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask
)
x = F.dropout(x, self.dropout, training=self.training)
x = residual + x
x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
residual = x
x = self.layer_norm2(x)
x = self.ffn(x)
x = F.dropout(x, self.dropout, training=self.training)
x = residual + x
x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
return x
class DecSALayer(nn.Module):
def __init__(self, c, num_heads, dropout, attention_dropout=0.1, relu_dropout=0.1, kernel_size=9, act='gelu'):
super().__init__()
self.c = c
self.dropout = dropout
self.layer_norm1 = LayerNorm(c)
self.self_attn = MultiheadAttention(
c, num_heads, self_attention=True, dropout=attention_dropout, bias=False
)
self.layer_norm2 = LayerNorm(c)
self.encoder_attn = MultiheadAttention(
c, num_heads, encoder_decoder_attention=True, dropout=attention_dropout, bias=False,
)
self.layer_norm3 = LayerNorm(c)
self.ffn = TransformerFFNLayer(
c, 4 * c, padding='LEFT', kernel_size=kernel_size, dropout=relu_dropout, act=act)
def forward(
self,
x,
encoder_out=None,
encoder_padding_mask=None,
incremental_state=None,
self_attn_mask=None,
self_attn_padding_mask=None,
attn_out=None,
reset_attn_weight=None,
**kwargs,
):
layer_norm_training = kwargs.get('layer_norm_training', None)
if layer_norm_training is not None:
self.layer_norm1.training = layer_norm_training
self.layer_norm2.training = layer_norm_training
self.layer_norm3.training = layer_norm_training
residual = x
x = self.layer_norm1(x)
x, _ = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
attn_mask=self_attn_mask
)
x = F.dropout(x, self.dropout, training=self.training)
x = residual + x
residual = x
x = self.layer_norm2(x)
if encoder_out is not None:
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
enc_dec_attn_constraint_mask=None, #utils.get_incremental_state(self, incremental_state, 'enc_dec_attn_constraint_mask'),
reset_attn_weight=reset_attn_weight
)
attn_logits = attn[1]
else:
assert attn_out is not None
x = self.encoder_attn.in_proj_v(attn_out.transpose(0, 1))
attn_logits = None
x = F.dropout(x, self.dropout, training=self.training)
x = residual + x
residual = x
x = self.layer_norm3(x)
x = self.ffn(x, incremental_state=incremental_state)
x = F.dropout(x, self.dropout, training=self.training)
x = residual + x
# if len(attn_logits.size()) > 3:
# indices = attn_logits.softmax(-1).max(-1).values.sum(-1).argmax(-1)
# attn_logits = attn_logits.gather(1,
# indices[:, None, None, None].repeat(1, 1, attn_logits.size(-2), attn_logits.size(-1))).squeeze(1)
return x, attn_logits
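# A minimal shape-check sketch, assuming this module's own imports resolve; it exercises only
# classes defined above, and the sizes, head count and dropout are illustrative placeholders.
if __name__ == "__main__":
    T, B, C = 20, 2, 64
    layer = EncSALayer(c=C, num_heads=2, dropout=0.1)
    x = torch.randn(T, B, C)                            # [T, B, C] features
    padding_mask = torch.zeros(B, T, dtype=torch.bool)  # no padded positions
    y = layer(x, encoder_padding_mask=padding_mask)
    print(y.shape)  # expected: torch.Size([20, 2, 64])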
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/commons/common_layers.py |
# '''
# https://github.com/One-sixth/ms_ssim_pytorch/blob/master/ssim.py
# '''
#
# import torch
# import torch.jit
# import torch.nn.functional as F
#
#
# @torch.jit.script
# def create_window(window_size: int, sigma: float, channel: int):
# '''
# Create 1-D gauss kernel
# :param window_size: the size of gauss kernel
# :param sigma: sigma of normal distribution
# :param channel: input channel
# :return: 1D kernel
# '''
# coords = torch.arange(window_size, dtype=torch.float)
# coords -= window_size // 2
#
# g = torch.exp(-(coords ** 2) / (2 * sigma ** 2))
# g /= g.sum()
#
# g = g.reshape(1, 1, 1, -1).repeat(channel, 1, 1, 1)
# return g
#
#
# @torch.jit.script
# def _gaussian_filter(x, window_1d, use_padding: bool):
# '''
# Blur input with 1-D kernel
# :param x: batch of tensors to be blured
# :param window_1d: 1-D gauss kernel
# :param use_padding: padding image before conv
# :return: blured tensors
# '''
# C = x.shape[1]
# padding = 0
# if use_padding:
# window_size = window_1d.shape[3]
# padding = window_size // 2
# out = F.conv2d(x, window_1d, stride=1, padding=(0, padding), groups=C)
# out = F.conv2d(out, window_1d.transpose(2, 3), stride=1, padding=(padding, 0), groups=C)
# return out
#
#
# @torch.jit.script
# def ssim(X, Y, window, data_range: float, use_padding: bool = False):
# '''
# Calculate ssim index for X and Y
# :param X: images [B, C, H, N_bins]
# :param Y: images [B, C, H, N_bins]
# :param window: 1-D gauss kernel
# :param data_range: value range of input images. (usually 1.0 or 255)
# :param use_padding: padding image before conv
# :return:
# '''
#
# K1 = 0.01
# K2 = 0.03
# compensation = 1.0
#
# C1 = (K1 * data_range) ** 2
# C2 = (K2 * data_range) ** 2
#
# mu1 = _gaussian_filter(X, window, use_padding)
# mu2 = _gaussian_filter(Y, window, use_padding)
# sigma1_sq = _gaussian_filter(X * X, window, use_padding)
# sigma2_sq = _gaussian_filter(Y * Y, window, use_padding)
# sigma12 = _gaussian_filter(X * Y, window, use_padding)
#
# mu1_sq = mu1.pow(2)
# mu2_sq = mu2.pow(2)
# mu1_mu2 = mu1 * mu2
#
# sigma1_sq = compensation * (sigma1_sq - mu1_sq)
# sigma2_sq = compensation * (sigma2_sq - mu2_sq)
# sigma12 = compensation * (sigma12 - mu1_mu2)
#
# cs_map = (2 * sigma12 + C2) / (sigma1_sq + sigma2_sq + C2)
# # Fixed the issue that the negative value of cs_map caused ms_ssim to output Nan.
# cs_map = cs_map.clamp_min(0.)
# ssim_map = ((2 * mu1_mu2 + C1) / (mu1_sq + mu2_sq + C1)) * cs_map
#
# ssim_val = ssim_map.mean(dim=(1, 2, 3)) # reduce along CHW
# cs = cs_map.mean(dim=(1, 2, 3))
#
# return ssim_val, cs
#
#
# @torch.jit.script
# def ms_ssim(X, Y, window, data_range: float, weights, use_padding: bool = False, eps: float = 1e-8):
# '''
# interface of ms-ssim
# :param X: a batch of images, (N,C,H,W)
# :param Y: a batch of images, (N,C,H,W)
# :param window: 1-D gauss kernel
# :param data_range: value range of input images. (usually 1.0 or 255)
# :param weights: weights for different levels
# :param use_padding: padding image before conv
#     :param eps: used to avoid NaN gradients.
# :return:
# '''
# levels = weights.shape[0]
# cs_vals = []
# ssim_vals = []
# for _ in range(levels):
# ssim_val, cs = ssim(X, Y, window=window, data_range=data_range, use_padding=use_padding)
#         # Used to fix an issue: when c = a ** b and a is 0, c.backward() would make a.grad become inf.
# ssim_val = ssim_val.clamp_min(eps)
# cs = cs.clamp_min(eps)
# cs_vals.append(cs)
#
# ssim_vals.append(ssim_val)
# padding = (X.shape[2] % 2, X.shape[3] % 2)
# X = F.avg_pool2d(X, kernel_size=2, stride=2, padding=padding)
# Y = F.avg_pool2d(Y, kernel_size=2, stride=2, padding=padding)
#
# cs_vals = torch.stack(cs_vals, dim=0)
# ms_ssim_val = torch.prod((cs_vals[:-1] ** weights[:-1].unsqueeze(1)) * (ssim_vals[-1] ** weights[-1]), dim=0)
# return ms_ssim_val
#
#
# class SSIM(torch.jit.ScriptModule):
# __constants__ = ['data_range', 'use_padding']
#
# def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False):
# '''
# :param window_size: the size of gauss kernel
# :param window_sigma: sigma of normal distribution
# :param data_range: value range of input images. (usually 1.0 or 255)
# :param channel: input channels (default: 3)
# :param use_padding: padding image before conv
# '''
# super().__init__()
# assert window_size % 2 == 1, 'Window size must be odd.'
# window = create_window(window_size, window_sigma, channel)
# self.register_buffer('window', window)
# self.data_range = data_range
# self.use_padding = use_padding
#
# @torch.jit.script_method
# def forward(self, X, Y):
# r = ssim(X, Y, window=self.window, data_range=self.data_range, use_padding=self.use_padding)
# return r[0]
#
#
# class MS_SSIM(torch.jit.ScriptModule):
# __constants__ = ['data_range', 'use_padding', 'eps']
#
# def __init__(self, window_size=11, window_sigma=1.5, data_range=255., channel=3, use_padding=False, weights=None,
# levels=None, eps=1e-8):
# '''
# class for ms-ssim
# :param window_size: the size of gauss kernel
# :param window_sigma: sigma of normal distribution
# :param data_range: value range of input images. (usually 1.0 or 255)
# :param channel: input channels
# :param use_padding: padding image before conv
# :param weights: weights for different levels. (default [0.0448, 0.2856, 0.3001, 0.2363, 0.1333])
# :param levels: number of downsampling
#         :param eps: used to fix an issue: when c = a ** b and a is 0, c.backward() would make a.grad become inf.
# '''
# super().__init__()
# assert window_size % 2 == 1, 'Window size must be odd.'
# self.data_range = data_range
# self.use_padding = use_padding
# self.eps = eps
#
# window = create_window(window_size, window_sigma, channel)
# self.register_buffer('window', window)
#
# if weights is None:
# weights = [0.0448, 0.2856, 0.3001, 0.2363, 0.1333]
# weights = torch.tensor(weights, dtype=torch.float)
#
# if levels is not None:
# weights = weights[:levels]
# weights = weights / weights.sum()
#
# self.register_buffer('weights', weights)
#
# @torch.jit.script_method
# def forward(self, X, Y):
# return ms_ssim(X, Y, window=self.window, data_range=self.data_range, weights=self.weights,
# use_padding=self.use_padding, eps=self.eps)
#
#
# if __name__ == '__main__':
# print('Simple Test')
# im = torch.randint(0, 255, (5, 3, 256, 256), dtype=torch.float, device='cuda')
# img1 = im / 255
# img2 = img1 * 0.5
#
# losser = SSIM(data_range=1.).cuda()
# loss = losser(img1, img2).mean()
#
# losser2 = MS_SSIM(data_range=1.).cuda()
# loss2 = losser2(img1, img2).mean()
#
# print(loss.item())
# print(loss2.item())
#
# if __name__ == '__main__':
# print('Training Test')
# import cv2
# import torch.optim
# import numpy as np
# import imageio
# import time
#
# out_test_video = False
#     # It is best not to write a GIF directly (it gets very large); write an mkv file first and convert it to GIF with ffmpeg.
# video_use_gif = False
#
# im = cv2.imread('test_img1.jpg', 1)
# t_im = torch.from_numpy(im).cuda().permute(2, 0, 1).float()[None] / 255.
#
# if out_test_video:
# if video_use_gif:
# fps = 0.5
# out_wh = (im.shape[1] // 2, im.shape[0] // 2)
# suffix = '.gif'
# else:
# fps = 5
# out_wh = (im.shape[1], im.shape[0])
# suffix = '.mkv'
# video_last_time = time.perf_counter()
# video = imageio.get_writer('ssim_test' + suffix, fps=fps)
#
#     # Test SSIM
# print('Training SSIM')
# rand_im = torch.randint_like(t_im, 0, 255, dtype=torch.float32) / 255.
# rand_im.requires_grad = True
# optim = torch.optim.Adam([rand_im], 0.003, eps=1e-8)
# losser = SSIM(data_range=1., channel=t_im.shape[1]).cuda()
# ssim_score = 0
# while ssim_score < 0.999:
# optim.zero_grad()
# loss = losser(rand_im, t_im)
# (-loss).sum().backward()
# ssim_score = loss.item()
# optim.step()
# r_im = np.transpose(rand_im.detach().cpu().numpy().clip(0, 1) * 255, [0, 2, 3, 1]).astype(np.uint8)[0]
# r_im = cv2.putText(r_im, 'ssim %f' % ssim_score, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
#
# if out_test_video:
# if time.perf_counter() - video_last_time > 1. / fps:
# video_last_time = time.perf_counter()
# out_frame = cv2.cvtColor(r_im, cv2.COLOR_BGR2RGB)
# out_frame = cv2.resize(out_frame, out_wh, interpolation=cv2.INTER_AREA)
# if isinstance(out_frame, cv2.UMat):
# out_frame = out_frame.get()
# video.append_data(out_frame)
#
# cv2.imshow('ssim', r_im)
# cv2.setWindowTitle('ssim', 'ssim %f' % ssim_score)
# cv2.waitKey(1)
#
# if out_test_video:
# video.close()
#
#     # Test MS-SSIM
# if out_test_video:
# if video_use_gif:
# fps = 0.5
# out_wh = (im.shape[1] // 2, im.shape[0] // 2)
# suffix = '.gif'
# else:
# fps = 5
# out_wh = (im.shape[1], im.shape[0])
# suffix = '.mkv'
# video_last_time = time.perf_counter()
# video = imageio.get_writer('ms_ssim_test' + suffix, fps=fps)
#
# print('Training MS_SSIM')
# rand_im = torch.randint_like(t_im, 0, 255, dtype=torch.float32) / 255.
# rand_im.requires_grad = True
# optim = torch.optim.Adam([rand_im], 0.003, eps=1e-8)
# losser = MS_SSIM(data_range=1., channel=t_im.shape[1]).cuda()
# ssim_score = 0
# while ssim_score < 0.999:
# optim.zero_grad()
# loss = losser(rand_im, t_im)
# (-loss).sum().backward()
# ssim_score = loss.item()
# optim.step()
# r_im = np.transpose(rand_im.detach().cpu().numpy().clip(0, 1) * 255, [0, 2, 3, 1]).astype(np.uint8)[0]
# r_im = cv2.putText(r_im, 'ms_ssim %f' % ssim_score, (10, 30), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
#
# if out_test_video:
# if time.perf_counter() - video_last_time > 1. / fps:
# video_last_time = time.perf_counter()
# out_frame = cv2.cvtColor(r_im, cv2.COLOR_BGR2RGB)
# out_frame = cv2.resize(out_frame, out_wh, interpolation=cv2.INTER_AREA)
# if isinstance(out_frame, cv2.UMat):
# out_frame = out_frame.get()
# video.append_data(out_frame)
#
# cv2.imshow('ms_ssim', r_im)
# cv2.setWindowTitle('ms_ssim', 'ms_ssim %f' % ssim_score)
# cv2.waitKey(1)
#
# if out_test_video:
# video.close()
"""
Adapted from https://github.com/Po-Hsun-Su/pytorch-ssim
"""
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
return gauss / gauss.sum()
def create_window(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
def _ssim(img1, img2, window, window_size, channel, size_average=True):
mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
C1 = 0.01 ** 2
C2 = 0.03 ** 2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1)
class SSIM(torch.nn.Module):
def __init__(self, window_size=11, size_average=True):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = 1
self.window = create_window(window_size, self.channel)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.data.type() == img1.data.type():
window = self.window
else:
window = create_window(self.window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
self.window = window
self.channel = channel
return _ssim(img1, img2, window, self.window_size, channel, self.size_average)
window = None
def ssim(img1, img2, window_size=11, size_average=True):
(_, channel, _, _) = img1.size()
global window
if window is None:
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1.get_device())
window = window.type_as(img1)
return _ssim(img1, img2, window, window_size, channel, size_average)
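# A minimal usage sketch of the active SSIM code above: it scores a random image against a
# noisy copy of itself. Shapes and the noise level are illustrative only.
if __name__ == '__main__':
    torch.manual_seed(0)
    img1 = torch.rand(2, 1, 64, 64)                            # [B, C, H, W] in [0, 1]
    img2 = (img1 + 0.05 * torch.randn_like(img1)).clamp(0, 1)  # lightly corrupted copy
    print('SSIM module:  ', SSIM(window_size=11)(img1, img2).item())
    print('ssim function:', ssim(img1, img2, window_size=11).item())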
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/commons/ssim.py |
import torch
from torch import nn
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
n_channels_int = n_channels[0]
in_act = input_a + input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
class WN(torch.nn.Module):
def __init__(self, hidden_size, kernel_size, dilation_rate, n_layers, c_cond=0,
p_dropout=0, share_cond_layers=False, is_BTC=False):
super(WN, self).__init__()
assert (kernel_size % 2 == 1)
assert (hidden_size % 2 == 0)
self.is_BTC = is_BTC
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = c_cond
self.p_dropout = p_dropout
self.share_cond_layers = share_cond_layers
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if c_cond != 0 and not share_cond_layers:
cond_layer = torch.nn.Conv1d(c_cond, 2 * hidden_size * n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
for i in range(n_layers):
dilation = dilation_rate ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(hidden_size, 2 * hidden_size, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_size
else:
res_skip_channels = hidden_size
res_skip_layer = torch.nn.Conv1d(hidden_size, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, nonpadding=None, cond=None):
if self.is_BTC:
x = x.transpose(1, 2)
cond = cond.transpose(1, 2) if cond is not None else None
nonpadding = nonpadding.transpose(1, 2) if nonpadding is not None else None
if nonpadding is None:
nonpadding = 1
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_size])
if cond is not None and not self.share_cond_layers:
cond = self.cond_layer(cond)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
x_in = self.drop(x_in)
if cond is not None:
cond_offset = i * 2 * self.hidden_size
cond_l = cond[:, cond_offset:cond_offset + 2 * self.hidden_size, :]
else:
cond_l = torch.zeros_like(x_in)
acts = fused_add_tanh_sigmoid_multiply(x_in, cond_l, n_channels_tensor)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
x = (x + res_skip_acts[:, :self.hidden_size, :]) * nonpadding
output = output + res_skip_acts[:, self.hidden_size:, :]
else:
output = output + res_skip_acts
output = output * nonpadding
if self.is_BTC:
output = output.transpose(1, 2)
return output
def remove_weight_norm(self):
def remove_weight_norm(m):
try:
nn.utils.remove_weight_norm(m)
except ValueError: # this module didn't have weight norm
return
self.apply(remove_weight_norm)
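# A minimal shape-check sketch for the WN block above, assuming only torch; the sizes and the
# random conditioning tensor are illustrative (no real mel/speaker features are involved).
if __name__ == "__main__":
    B, T, H, C_COND = 2, 50, 64, 80
    wn = WN(hidden_size=H, kernel_size=3, dilation_rate=2, n_layers=4, c_cond=C_COND, is_BTC=True)
    x = torch.randn(B, T, H)          # [B, T, H] input features
    cond = torch.randn(B, T, C_COND)  # [B, T, C_cond] conditioning signal
    nonpadding = torch.ones(B, T, 1)  # no padded frames
    y = wn(x, nonpadding=nonpadding, cond=cond)
    print(y.shape)  # expected: torch.Size([2, 50, 64])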
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/commons/wavenet.py |
import math
import torch
from torch import nn
from torch.nn import functional as F
from utils.hparams import hparams
from modules.commons.common_layers import Embedding
from utils.tts_utils import group_hidden_by_segs, expand_word2ph
import transformers
def convert_pad_shape(pad_shape):
l = pad_shape[::-1]
pad_shape = [item for sublist in l for item in sublist]
return pad_shape
def shift_1d(x):
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [1, 0]]))[:, :, :-1]
return x
def sequence_mask(length, max_length=None):
if max_length is None:
max_length = length.max()
x = torch.arange(max_length, dtype=length.dtype, device=length.device)
return x.unsqueeze(0) < length.unsqueeze(1)
class Encoder(nn.Module):
def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0.,
window_size=None, block_length=None, pre_ln=False, **kwargs):
super().__init__()
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.window_size = window_size
self.block_length = block_length
self.pre_ln = pre_ln
self.drop = nn.Dropout(p_dropout)
self.attn_layers = nn.ModuleList()
self.norm_layers_1 = nn.ModuleList()
self.ffn_layers = nn.ModuleList()
self.norm_layers_2 = nn.ModuleList()
for i in range(self.n_layers):
self.attn_layers.append(
MultiHeadAttention(hidden_channels, hidden_channels, n_heads, window_size=window_size,
p_dropout=p_dropout, block_length=block_length))
self.norm_layers_1.append(LayerNorm(hidden_channels))
self.ffn_layers.append(
FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout))
self.norm_layers_2.append(LayerNorm(hidden_channels))
if pre_ln:
self.last_ln = LayerNorm(hidden_channels)
def forward(self, x, x_mask):
attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1)
for i in range(self.n_layers):
x = x * x_mask
x_ = x
if self.pre_ln:
x = self.norm_layers_1[i](x)
y = self.attn_layers[i](x, x, attn_mask)
y = self.drop(y)
x = x_ + y
if not self.pre_ln:
x = self.norm_layers_1[i](x)
x_ = x
if self.pre_ln:
x = self.norm_layers_2[i](x)
y = self.ffn_layers[i](x, x_mask)
y = self.drop(y)
x = x_ + y
if not self.pre_ln:
x = self.norm_layers_2[i](x)
if self.pre_ln:
x = self.last_ln(x)
x = x * x_mask
return x
class MultiHeadAttention(nn.Module):
def __init__(self, channels, out_channels, n_heads, window_size=None, heads_share=True, p_dropout=0.,
block_length=None, proximal_bias=False, proximal_init=False):
super().__init__()
assert channels % n_heads == 0
self.channels = channels
self.out_channels = out_channels
self.n_heads = n_heads
self.window_size = window_size
self.heads_share = heads_share
self.block_length = block_length
self.proximal_bias = proximal_bias
self.p_dropout = p_dropout
self.attn = None
self.k_channels = channels // n_heads
self.conv_q = nn.Conv1d(channels, channels, 1)
self.conv_k = nn.Conv1d(channels, channels, 1)
self.conv_v = nn.Conv1d(channels, channels, 1)
if window_size is not None:
n_heads_rel = 1 if heads_share else n_heads
rel_stddev = self.k_channels ** -0.5
self.emb_rel_k = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
self.emb_rel_v = nn.Parameter(torch.randn(n_heads_rel, window_size * 2 + 1, self.k_channels) * rel_stddev)
self.conv_o = nn.Conv1d(channels, out_channels, 1)
self.drop = nn.Dropout(p_dropout)
nn.init.xavier_uniform_(self.conv_q.weight)
nn.init.xavier_uniform_(self.conv_k.weight)
if proximal_init:
self.conv_k.weight.data.copy_(self.conv_q.weight.data)
self.conv_k.bias.data.copy_(self.conv_q.bias.data)
nn.init.xavier_uniform_(self.conv_v.weight)
def forward(self, x, c, attn_mask=None):
q = self.conv_q(x)
k = self.conv_k(c)
v = self.conv_v(c)
x, self.attn = self.attention(q, k, v, mask=attn_mask)
x = self.conv_o(x)
return x
def attention(self, query, key, value, mask=None):
# reshape [b, d, t] -> [b, n_h, t, d_k]
b, d, t_s, t_t = (*key.size(), query.size(2))
query = query.view(b, self.n_heads, self.k_channels, t_t).transpose(2, 3)
key = key.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
value = value.view(b, self.n_heads, self.k_channels, t_s).transpose(2, 3)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels)
if self.window_size is not None:
assert t_s == t_t, "Relative attention is only available for self-attention."
key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s)
rel_logits = self._matmul_with_relative_keys(query, key_relative_embeddings)
rel_logits = self._relative_position_to_absolute_position(rel_logits)
scores_local = rel_logits / math.sqrt(self.k_channels)
scores = scores + scores_local
if self.proximal_bias:
assert t_s == t_t, "Proximal bias is only available for self-attention."
scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e4)
if self.block_length is not None:
block_mask = torch.ones_like(scores).triu(-self.block_length).tril(self.block_length)
scores = scores * block_mask + -1e4 * (1 - block_mask)
p_attn = F.softmax(scores, dim=-1) # [b, n_h, t_t, t_s]
p_attn = self.drop(p_attn)
output = torch.matmul(p_attn, value)
if self.window_size is not None:
relative_weights = self._absolute_position_to_relative_position(p_attn)
value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s)
output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings)
output = output.transpose(2, 3).contiguous().view(b, d, t_t) # [b, n_h, t_t, d_k] -> [b, d, t_t]
return output, p_attn
def _matmul_with_relative_values(self, x, y):
"""
x: [b, h, l, m]
y: [h or 1, m, d]
ret: [b, h, l, d]
"""
ret = torch.matmul(x, y.unsqueeze(0))
return ret
def _matmul_with_relative_keys(self, x, y):
"""
x: [b, h, l, d]
y: [h or 1, m, d]
ret: [b, h, l, m]
"""
ret = torch.matmul(x, y.unsqueeze(0).transpose(-2, -1))
return ret
def _get_relative_embeddings(self, relative_embeddings, length):
max_relative_position = 2 * self.window_size + 1
# Pad first before slice to avoid using cond ops.
pad_length = max(length - (self.window_size + 1), 0)
slice_start_position = max((self.window_size + 1) - length, 0)
slice_end_position = slice_start_position + 2 * length - 1
if pad_length > 0:
padded_relative_embeddings = F.pad(
relative_embeddings,
convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]))
else:
padded_relative_embeddings = relative_embeddings
used_relative_embeddings = padded_relative_embeddings[:, slice_start_position:slice_end_position]
return used_relative_embeddings
def _relative_position_to_absolute_position(self, x):
"""
x: [b, h, l, 2*l-1]
ret: [b, h, l, l]
"""
batch, heads, length, _ = x.size()
# Concat columns of pad to shift from relative to absolute indexing.
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, 1]]))
# Concat extra elements so to add up to shape (len+1, 2*len-1).
x_flat = x.view([batch, heads, length * 2 * length])
x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [0, length - 1]]))
# Reshape and slice out the padded elements.
x_final = x_flat.view([batch, heads, length + 1, 2 * length - 1])[:, :, :length, length - 1:]
return x_final
def _absolute_position_to_relative_position(self, x):
"""
x: [b, h, l, l]
ret: [b, h, l, 2*l-1]
"""
batch, heads, length, _ = x.size()
        # pad along column
x = F.pad(x, convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length - 1]]))
x_flat = x.view([batch, heads, length ** 2 + length * (length - 1)])
# add 0's in the beginning that will skew the elements after reshape
x_flat = F.pad(x_flat, convert_pad_shape([[0, 0], [0, 0], [length, 0]]))
x_final = x_flat.view([batch, heads, length, 2 * length])[:, :, :, 1:]
return x_final
def _attention_bias_proximal(self, length):
"""Bias for self-attention to encourage attention to close positions.
Args:
length: an integer scalar.
Returns:
a Tensor with shape [1, 1, length, length]
"""
r = torch.arange(length, dtype=torch.float32)
diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1)
return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0)
class FFN(nn.Module):
def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.filter_channels = filter_channels
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.activation = activation
self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2)
self.conv_2 = nn.Conv1d(filter_channels, out_channels, 1)
self.drop = nn.Dropout(p_dropout)
def forward(self, x, x_mask):
x = self.conv_1(x * x_mask)
if self.activation == "gelu":
x = x * torch.sigmoid(1.702 * x)
else:
x = torch.relu(x)
x = self.drop(x)
x = self.conv_2(x * x_mask)
return x * x_mask
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-4):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
n_dims = len(x.shape)
mean = torch.mean(x, 1, keepdim=True)
variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
x = (x - mean) * torch.rsqrt(variance + self.eps)
shape = [1, -1] + [1] * (n_dims - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
class ConvReluNorm(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
assert n_layers > 1, "Number of layers should be larger than 0."
self.conv_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
self.norm_layers.append(LayerNorm(hidden_channels))
self.relu_drop = nn.Sequential(
nn.ReLU(),
nn.Dropout(p_dropout))
for _ in range(n_layers - 1):
self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
self.norm_layers.append(LayerNorm(hidden_channels))
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask):
x_org = x
for i in range(self.n_layers):
x = self.conv_layers[i](x * x_mask)
x = self.norm_layers[i](x)
x = self.relu_drop(x)
x = x_org + self.proj(x)
return x * x_mask
class RelTransformerEncoder(nn.Module):
def __init__(self,
n_vocab,
out_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout=0.0,
window_size=4,
block_length=None,
prenet=True,
pre_ln=True,
):
super().__init__()
self.n_vocab = n_vocab
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.window_size = window_size
self.block_length = block_length
self.prenet = prenet
if n_vocab > 0:
self.emb = Embedding(n_vocab, hidden_channels, padding_idx=0)
if prenet:
self.pre = ConvReluNorm(hidden_channels, hidden_channels, hidden_channels,
kernel_size=5, n_layers=3, p_dropout=0)
self.encoder = Encoder(
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout,
window_size=window_size,
block_length=block_length,
pre_ln=pre_ln,
)
def forward(self, x, x_mask=None):
if self.n_vocab > 0:
x_lengths = (x > 0).long().sum(-1)
x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
else:
x_lengths = (x.abs().sum(-1) > 0).long().sum(-1)
x = torch.transpose(x, 1, -1) # [b, h, t]
x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
if self.prenet:
x = self.pre(x, x_mask)
x = self.encoder(x, x_mask)
return x.transpose(1, 2)
class Pooler(nn.Module):
"""
Parameter-free poolers to get the sentence embedding
'cls': [CLS] representation with BERT/RoBERTa's MLP pooler.
'cls_before_pooler': [CLS] representation without the original MLP pooler.
'avg': average of the last layers' hidden states at each token.
'avg_top2': average of the last two layers.
'avg_first_last': average of the first and the last layers.
"""
def __init__(self, pooler_type):
super().__init__()
self.pooler_type = pooler_type
assert self.pooler_type in ["cls", "cls_before_pooler", "avg", "avg_top2", "avg_first_last"], "unrecognized pooling type %s" % self.pooler_type
def forward(self, attention_mask, outputs):
last_hidden = outputs.last_hidden_state
pooler_output = outputs.pooler_output
hidden_states = outputs.hidden_states
if self.pooler_type in ['cls_before_pooler', 'cls']:
return last_hidden[:, 0]
elif self.pooler_type == "avg":
return ((last_hidden * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1))
elif self.pooler_type == "avg_first_last":
first_hidden = hidden_states[0]
last_hidden = hidden_states[-1]
pooled_result = ((first_hidden + last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)
return pooled_result
elif self.pooler_type == "avg_top2":
second_last_hidden = hidden_states[-2]
last_hidden = hidden_states[-1]
pooled_result = ((last_hidden + second_last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(-1).unsqueeze(-1)
return pooled_result
else:
raise NotImplementedError
class Similarity(nn.Module):
"""
Dot product or cosine similarity
"""
def __init__(self, temp):
super().__init__()
self.temp = temp
self.cos = nn.CosineSimilarity(dim=-1)
self.record = None
self.pos_avg = 0.0
self.neg_avg = 0.0
def forward(self, x, y):
sim = self.cos(x, y)
self.record = sim.detach() # [64,64]
min_size = min(self.record.shape[0], self.record.shape[1]) # 64
num_item = self.record.shape[0] * self.record.shape[1] # 4096
self.pos_avg = self.record.diag().sum() / min_size
if num_item - min_size == 0:
self.neg_avg = (self.record.sum() - self.record.diag().sum()) / 1
return sim / self.temp
        if torch.any(torch.isnan(self.record)).item():
            print("self.record contains NaN when computing neg_avg")
        if torch.any(torch.isnan(self.record.diag())).item():
            print("self.record.diag() contains NaN when computing neg_avg")
self.neg_avg = (self.record.sum() - self.record.diag().sum()) / (num_item - min_size)
return sim / self.temp
class BertPredictionHeadTransform(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.transform_act_fn = F.gelu
self.LayerNorm = nn.LayerNorm(hidden_size, eps=1e-12)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, hid_dim, out_dim):
super().__init__()
self.transform = BertPredictionHeadTransform(hid_dim)
self.decoder = nn.Linear(hid_dim, out_dim, bias=False)
self.bias = nn.Parameter(torch.zeros(out_dim))
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
# V2_2
# Changed add to concat.
# Now supports finetuning BERT.
# grad_bert=0.1 & trainable_block_idx=0
class BERTRelTransformerEncoder(nn.Module):
def __init__(self,
n_vocab,
out_channels,
hidden_channels,
filter_channels,
n_heads,
n_layers,
kernel_size,
p_dropout=0.0,
window_size=4,
block_length=None,
prenet=True,
pre_ln=True,
):
super().__init__()
self.n_vocab = n_vocab
self.out_channels = out_channels
self.hidden_channels = hidden_channels
self.filter_channels = filter_channels
self.n_heads = n_heads
self.n_layers = n_layers
self.kernel_size = kernel_size
self.p_dropout = p_dropout
self.window_size = window_size
self.block_length = block_length
self.prenet = prenet
if n_vocab > 0:
self.emb = Embedding(n_vocab, hidden_channels, padding_idx=0)
if prenet:
self.pre = ConvReluNorm(hidden_channels, hidden_channels, hidden_channels,
kernel_size=5, n_layers=3, p_dropout=0)
self.encoder1 = Encoder(
hidden_channels,
filter_channels,
n_heads,
n_layers//2,
kernel_size,
p_dropout,
window_size=window_size,
block_length=block_length,
pre_ln=pre_ln,
)
self.encoder2 = Encoder(
hidden_channels,
filter_channels,
n_heads,
n_layers - n_layers//2,
kernel_size,
p_dropout,
window_size=window_size,
block_length=block_length,
pre_ln=pre_ln,
)
if hparams['ds_name'] in ['ljspeech', 'libritts', 'librispeech']:
model_name = 'bert-base-uncased'
elif hparams['ds_name'] in ['biaobei', 'wenetspeech']:
model_name = 'bert-base-chinese'
else:
raise NotImplementedError()
self.tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
config = transformers.AutoConfig.from_pretrained(model_name)
if hparams.get("load_bert_from_pretrained", True):
print("Load BERT from pretrained model ...")
self.bert = transformers.AutoModel.from_pretrained(model_name,config=config)
trainable_start_block = hparams.get("bert_trainable_start_block", 0)
else:
print("Initialize BERT from scratch!")
self.bert = transformers.BertModel(config=config)
trainable_start_block = 0
for k, v in self.bert.named_parameters():
if 'embeddings' in k:
v.requires_grad = False
elif 'encoder.layer' in k:
block_idx = int(k.split(".")[2])
if block_idx < trainable_start_block:
v.requires_grad = False
else:
v.requires_grad = True
elif 'cls' in k:
v.requires_grad = True
else:
print("Unhandled key: {}, set to requires_grad...".format(k))
v.requires_grad = True
self.bert_combine = nn.Sequential(*[
nn.Conv1d(768 + hidden_channels, hidden_channels, 3, 1, 1),
nn.ReLU(),
])
self.pooler = Pooler("avg")
self.sim = Similarity(temp=0.05)
def forward(self, x, x_mask=None, bert_feats=None, ph2word=None, **kwargs):
if self.n_vocab > 0:
x_lengths = (x > 0).long().sum(-1)
x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h]
else:
x_lengths = (x.abs().sum(-1) > 0).long().sum(-1)
x = torch.transpose(x, 1, -1) # [b, h, t]
x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype)
if self.prenet:
x = self.pre(x, x_mask)
x = self.encoder1(x, x_mask)
bert_outputs = self.bert(bert_feats['bert_input_ids'],
attention_mask=bert_feats['bert_attention_mask'],
token_type_ids=bert_feats['bert_token_type_ids'],
output_hidden_states=True)
bert_num_blocks = hparams.get("bert_num_blocks", 12) # total 1+12blocks in bert
bert_embedding = bert_outputs['hidden_states'][bert_num_blocks]
# bert_embedding = bert_outputs['last_hidden_state']
grad_bert = hparams.get("grad_bert", 0.1)
bert_embedding = bert_embedding.detach() * (1-grad_bert) + bert_embedding * grad_bert
bert_word_embedding, _ = group_hidden_by_segs(bert_embedding, bert_feats['bert_token2word'], bert_feats['bert_token2word'].max().item())
bert_ph_embedding = expand_word2ph(bert_word_embedding, ph2word)
bert_ph_embedding = bert_ph_embedding.transpose(1,2)
x = torch.cat([x, bert_ph_embedding], dim=1)
x = self.bert_combine(x)
x = self.encoder2(x, x_mask)
return x.transpose(1, 2)
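# --- Hedged usage sketch (illustrative addition, not part of the original file). ---
# The mixing `h.detach() * (1 - grad_bert) + h * grad_bert` used in forward() keeps
# the forward value of the BERT features unchanged while scaling the gradient that
# flows back into BERT by `grad_bert`. A minimal self-check of that property:
def _demo_grad_bert_scaling(grad_bert=0.1):
    h = torch.ones(4, requires_grad=True)
    mixed = h.detach() * (1 - grad_bert) + h * grad_bert
    mixed.sum().backward()
    # forward value unchanged, gradient scaled by grad_bert
    assert torch.allclose(mixed, h.detach())
    assert torch.allclose(h.grad, torch.full_like(h, grad_bert))
    return h.grad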
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/commons/rel_transformer.py |
import math
import torch
from torch import nn
from torch.nn import Parameter, Linear
from modules.commons.common_layers import LayerNorm, Embedding
from utils.tts_utils import get_incremental_state, set_incremental_state, softmax, make_positions
import torch.nn.functional as F
DEFAULT_MAX_SOURCE_POSITIONS = 2000
DEFAULT_MAX_TARGET_POSITIONS = 2000
class SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1024):
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.weights = SinusoidalPositionalEmbedding.get_embedding(
init_size,
embedding_dim,
padding_idx,
)
self.register_buffer('_float_tensor', torch.FloatTensor(1))
@staticmethod
def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb
def forward(self, input, incremental_state=None, timestep=None, positions=None, **kwargs):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input.shape[:2]
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
# recompute/expand embeddings if needed
self.weights = SinusoidalPositionalEmbedding.get_embedding(
max_pos,
self.embedding_dim,
self.padding_idx,
)
self.weights = self.weights.to(self._float_tensor)
if incremental_state is not None:
# positions is the same for every token when decoding a single step
pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
positions = make_positions(input, self.padding_idx) if positions is None else positions
return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()
def max_positions(self):
"""Maximum number of supported positions."""
return int(1e5) # an arbitrary large number
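# --- Hedged usage sketch (illustrative addition, not part of the original file). ---
# Builds the sinusoidal table and queries positions for a padded batch of token ids;
# rows at the padding index receive the zero vector.
def _demo_sinusoidal_positional_embedding():
    emb = SinusoidalPositionalEmbedding(embedding_dim=8, padding_idx=0, init_size=16)
    tokens = torch.tensor([[5, 6, 7, 0],
                           [5, 6, 0, 0]])           # 0 marks padding
    pos = emb(tokens)                               # [B, T, embedding_dim]
    assert pos.shape == (2, 4, 8)
    assert torch.equal(pos[0, -1], torch.zeros(8))  # padded position -> zero vector
    return pos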
class TransformerFFNLayer(nn.Module):
def __init__(self, hidden_size, filter_size, padding="SAME", kernel_size=1, dropout=0., act='gelu'):
super().__init__()
self.kernel_size = kernel_size
self.dropout = dropout
self.act = act
if padding == 'SAME':
self.ffn_1 = nn.Conv1d(hidden_size, filter_size, kernel_size, padding=kernel_size // 2)
elif padding == 'LEFT':
self.ffn_1 = nn.Sequential(
nn.ConstantPad1d((kernel_size - 1, 0), 0.0),
nn.Conv1d(hidden_size, filter_size, kernel_size)
)
self.ffn_2 = Linear(filter_size, hidden_size)
def forward(self, x, incremental_state=None):
# x: T x B x C
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if 'prev_input' in saved_state:
prev_input = saved_state['prev_input']
x = torch.cat((prev_input, x), dim=0)
x = x[-self.kernel_size:]
saved_state['prev_input'] = x
self._set_input_buffer(incremental_state, saved_state)
x = self.ffn_1(x.permute(1, 2, 0)).permute(2, 0, 1)
x = x * self.kernel_size ** -0.5
if incremental_state is not None:
x = x[-1:]
if self.act == 'gelu':
x = F.gelu(x)
if self.act == 'relu':
x = F.relu(x)
x = F.dropout(x, self.dropout, training=self.training)
x = self.ffn_2(x)
return x
def _get_input_buffer(self, incremental_state):
return get_incremental_state(
self,
incremental_state,
'f',
) or {}
def _set_input_buffer(self, incremental_state, buffer):
set_incremental_state(
self,
incremental_state,
'f',
buffer,
)
def clear_buffer(self, incremental_state):
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if 'prev_input' in saved_state:
del saved_state['prev_input']
self._set_input_buffer(incremental_state, saved_state)
class MultiheadAttention(nn.Module):
def __init__(self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0., bias=True,
add_bias_kv=False, add_zero_attn=False, self_attention=False,
encoder_decoder_attention=False):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, 'Self-attention requires query, key and ' \
'value to be of the same size'
if self.qkv_same_dim:
self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
else:
self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
if bias:
self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
else:
self.register_parameter('in_proj_bias', None)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
self.enable_torch_version = False
if hasattr(F, "multi_head_attention_forward"):
self.enable_torch_version = True
else:
self.enable_torch_version = False
self.last_attn_probs = None
def reset_parameters(self):
if self.qkv_same_dim:
nn.init.xavier_uniform_(self.in_proj_weight)
else:
nn.init.xavier_uniform_(self.k_proj_weight)
nn.init.xavier_uniform_(self.v_proj_weight)
nn.init.xavier_uniform_(self.q_proj_weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.in_proj_bias is not None:
nn.init.constant_(self.in_proj_bias, 0.)
nn.init.constant_(self.out_proj.bias, 0.)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query, key, value,
key_padding_mask=None,
incremental_state=None,
need_weights=True,
static_kv=False,
attn_mask=None,
before_softmax=False,
need_head_weights=False,
enc_dec_attn_constraint_mask=None,
reset_attn_weight=None
):
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
                averaged over heads (default: True).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if self.enable_torch_version and incremental_state is None and not static_kv and reset_attn_weight is None:
if self.qkv_same_dim:
return F.multi_head_attention_forward(query, key, value,
self.embed_dim, self.num_heads,
self.in_proj_weight,
self.in_proj_bias, self.bias_k, self.bias_v,
self.add_zero_attn, self.dropout,
self.out_proj.weight, self.out_proj.bias,
self.training, key_padding_mask, need_weights,
attn_mask)
else:
return F.multi_head_attention_forward(query, key, value,
self.embed_dim, self.num_heads,
torch.empty([0]),
self.in_proj_bias, self.bias_k, self.bias_v,
self.add_zero_attn, self.dropout,
self.out_proj.weight, self.out_proj.bias,
self.training, key_padding_mask, need_weights,
attn_mask, use_separate_proj_weight=True,
q_proj_weight=self.q_proj_weight,
k_proj_weight=self.k_proj_weight,
v_proj_weight=self.v_proj_weight)
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if 'prev_key' in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
# self-attention
q, k, v = self.in_proj_qkv(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.in_proj_q(query)
if key is None:
assert value is None
k = v = None
else:
k = self.in_proj_k(key)
v = self.in_proj_v(key)
else:
q = self.in_proj_q(query)
k = self.in_proj_k(key)
v = self.in_proj_v(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)
q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
if k is not None:
k = k.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
if v is not None:
v = v.contiguous().view(-1, bsz * self.num_heads, self.head_dim).transpose(0, 1)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if 'prev_key' in saved_state:
prev_key = saved_state['prev_key'].view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
k = torch.cat((prev_key, k), dim=1)
if 'prev_value' in saved_state:
prev_value = saved_state['prev_value'].view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
v = torch.cat((prev_value, v), dim=1)
if 'prev_key_padding_mask' in saved_state and saved_state['prev_key_padding_mask'] is not None:
prev_key_padding_mask = saved_state['prev_key_padding_mask']
if static_kv:
key_padding_mask = prev_key_padding_mask
else:
key_padding_mask = torch.cat((prev_key_padding_mask, key_padding_mask), dim=1)
saved_state['prev_key'] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state['prev_value'] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state['prev_key_padding_mask'] = key_padding_mask
self._set_input_buffer(incremental_state, saved_state)
src_len = k.size(1)
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.shape == torch.Size([]):
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as(key_padding_mask)], dim=1)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
if len(attn_mask.shape) == 2:
attn_mask = attn_mask.unsqueeze(0)
elif len(attn_mask.shape) == 3:
attn_mask = attn_mask[:, None].repeat([1, self.num_heads, 1, 1]).reshape(
bsz * self.num_heads, tgt_len, src_len)
attn_weights = attn_weights + attn_mask
if enc_dec_attn_constraint_mask is not None: # bs x head x L_kv
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
enc_dec_attn_constraint_mask.unsqueeze(2).bool(),
-1e8,
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2),
-1e8,
)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_logits = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
if before_softmax:
return attn_weights, v
attn_weights_float = softmax(attn_weights, dim=-1)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = F.dropout(attn_weights_float.type_as(attn_weights), p=self.dropout, training=self.training)
if reset_attn_weight is not None:
if reset_attn_weight:
self.last_attn_probs = attn_probs.detach()
else:
assert self.last_attn_probs is not None
attn_probs = self.last_attn_probs
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
if need_weights:
attn_weights = attn_weights_float.view(bsz, self.num_heads, tgt_len, src_len).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
else:
attn_weights = None
return attn, (attn_weights, attn_logits)
def in_proj_qkv(self, query):
return self._in_proj(query).chunk(3, dim=-1)
def in_proj_q(self, query):
if self.qkv_same_dim:
return self._in_proj(query, end=self.embed_dim)
else:
bias = self.in_proj_bias
if bias is not None:
bias = bias[:self.embed_dim]
return F.linear(query, self.q_proj_weight, bias)
def in_proj_k(self, key):
if self.qkv_same_dim:
return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)
else:
weight = self.k_proj_weight
bias = self.in_proj_bias
if bias is not None:
bias = bias[self.embed_dim:2 * self.embed_dim]
return F.linear(key, weight, bias)
def in_proj_v(self, value):
if self.qkv_same_dim:
return self._in_proj(value, start=2 * self.embed_dim)
else:
weight = self.v_proj_weight
bias = self.in_proj_bias
if bias is not None:
bias = bias[2 * self.embed_dim:]
return F.linear(value, weight, bias)
def _in_proj(self, input, start=0, end=None):
weight = self.in_proj_weight
bias = self.in_proj_bias
weight = weight[start:end, :]
if bias is not None:
bias = bias[start:end]
return F.linear(input, weight, bias)
def _get_input_buffer(self, incremental_state):
return get_incremental_state(
self,
incremental_state,
'attn_state',
) or {}
def _set_input_buffer(self, incremental_state, buffer):
set_incremental_state(
self,
incremental_state,
'attn_state',
buffer,
)
def apply_sparse_mask(self, attn_weights, tgt_len, src_len, bsz):
return attn_weights
def clear_buffer(self, incremental_state=None):
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if 'prev_key' in saved_state:
del saved_state['prev_key']
if 'prev_value' in saved_state:
del saved_state['prev_value']
self._set_input_buffer(incremental_state, saved_state)
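# --- Hedged usage sketch (illustrative addition, not part of the original file). ---
# Inputs follow the fairseq convention [T, B, C]; `key_padding_mask` is [B, T] with
# True at padded key positions.
def _demo_multihead_attention():
    attn = MultiheadAttention(embed_dim=16, num_heads=4, self_attention=True)
    x = torch.randn(5, 2, 16)                        # [T, B, C]
    key_padding_mask = torch.zeros(2, 5, dtype=torch.bool)
    key_padding_mask[1, 3:] = True                   # last two keys of sample 1 are padding
    out = attn(query=x, key=x, value=x, key_padding_mask=key_padding_mask)[0]
    assert out.shape == (5, 2, 16)
    return out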
class EncSALayer(nn.Module):
def __init__(self, c, num_heads, dropout, attention_dropout=0.1,
relu_dropout=0.1, kernel_size=9, padding='SAME', act='gelu'):
super().__init__()
self.c = c
self.dropout = dropout
self.num_heads = num_heads
if num_heads > 0:
self.layer_norm1 = LayerNorm(c)
self.self_attn = MultiheadAttention(
self.c, num_heads, self_attention=True, dropout=attention_dropout, bias=False)
self.layer_norm2 = LayerNorm(c)
self.ffn = TransformerFFNLayer(
c, 4 * c, kernel_size=kernel_size, dropout=relu_dropout, padding=padding, act=act)
def forward(self, x, encoder_padding_mask=None, **kwargs):
layer_norm_training = kwargs.get('layer_norm_training', None)
if layer_norm_training is not None:
self.layer_norm1.training = layer_norm_training
self.layer_norm2.training = layer_norm_training
if self.num_heads > 0:
residual = x
x = self.layer_norm1(x)
x, _, = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask
)
x = F.dropout(x, self.dropout, training=self.training)
x = residual + x
x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
residual = x
x = self.layer_norm2(x)
x = self.ffn(x)
x = F.dropout(x, self.dropout, training=self.training)
x = residual + x
x = x * (1 - encoder_padding_mask.float()).transpose(0, 1)[..., None]
return x
class DecSALayer(nn.Module):
def __init__(self, c, num_heads, dropout, attention_dropout=0.1, relu_dropout=0.1,
kernel_size=9, act='gelu'):
super().__init__()
self.c = c
self.dropout = dropout
self.layer_norm1 = LayerNorm(c)
self.self_attn = MultiheadAttention(
c, num_heads, self_attention=True, dropout=attention_dropout, bias=False
)
self.layer_norm2 = LayerNorm(c)
self.encoder_attn = MultiheadAttention(
c, num_heads, encoder_decoder_attention=True, dropout=attention_dropout, bias=False,
)
self.layer_norm3 = LayerNorm(c)
self.ffn = TransformerFFNLayer(
c, 4 * c, padding='LEFT', kernel_size=kernel_size, dropout=relu_dropout, act=act)
def forward(
self,
x,
encoder_out=None,
encoder_padding_mask=None,
incremental_state=None,
self_attn_mask=None,
self_attn_padding_mask=None,
attn_out=None,
reset_attn_weight=None,
**kwargs,
):
layer_norm_training = kwargs.get('layer_norm_training', None)
if layer_norm_training is not None:
self.layer_norm1.training = layer_norm_training
self.layer_norm2.training = layer_norm_training
self.layer_norm3.training = layer_norm_training
residual = x
x = self.layer_norm1(x)
x, _ = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
attn_mask=self_attn_mask
)
x = F.dropout(x, self.dropout, training=self.training)
x = residual + x
attn_logits = None
if encoder_out is not None or attn_out is not None:
residual = x
x = self.layer_norm2(x)
if encoder_out is not None:
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
enc_dec_attn_constraint_mask=get_incremental_state(self, incremental_state,
'enc_dec_attn_constraint_mask'),
reset_attn_weight=reset_attn_weight
)
attn_logits = attn[1]
elif attn_out is not None:
x = self.encoder_attn.in_proj_v(attn_out)
if encoder_out is not None or attn_out is not None:
x = F.dropout(x, self.dropout, training=self.training)
x = residual + x
residual = x
x = self.layer_norm3(x)
x = self.ffn(x, incremental_state=incremental_state)
x = F.dropout(x, self.dropout, training=self.training)
x = residual + x
return x, attn_logits
def clear_buffer(self, input, encoder_out=None, encoder_padding_mask=None, incremental_state=None):
self.encoder_attn.clear_buffer(incremental_state)
self.ffn.clear_buffer(incremental_state)
def set_buffer(self, name, tensor, incremental_state):
return set_incremental_state(self, incremental_state, name, tensor)
class TransformerEncoderLayer(nn.Module):
def __init__(self, hidden_size, dropout, kernel_size=9, num_heads=2):
super().__init__()
self.hidden_size = hidden_size
self.dropout = dropout
self.num_heads = num_heads
self.op = EncSALayer(
hidden_size, num_heads, dropout=dropout,
attention_dropout=0.0, relu_dropout=dropout,
kernel_size=kernel_size)
def forward(self, x, **kwargs):
return self.op(x, **kwargs)
class TransformerDecoderLayer(nn.Module):
def __init__(self, hidden_size, dropout, kernel_size=9, num_heads=2):
super().__init__()
self.hidden_size = hidden_size
self.dropout = dropout
self.num_heads = num_heads
self.op = DecSALayer(
hidden_size, num_heads, dropout=dropout,
attention_dropout=0.0, relu_dropout=dropout,
kernel_size=kernel_size)
def forward(self, x, **kwargs):
return self.op(x, **kwargs)
def clear_buffer(self, *args):
return self.op.clear_buffer(*args)
def set_buffer(self, *args):
return self.op.set_buffer(*args)
class FFTBlocks(nn.Module):
def __init__(self, hidden_size, num_layers, ffn_kernel_size=9, dropout=0.0,
num_heads=2, use_pos_embed=True, use_last_norm=True,
use_pos_embed_alpha=True):
super().__init__()
self.num_layers = num_layers
embed_dim = self.hidden_size = hidden_size
self.dropout = dropout
self.use_pos_embed = use_pos_embed
self.use_last_norm = use_last_norm
if use_pos_embed:
self.max_source_positions = DEFAULT_MAX_TARGET_POSITIONS
self.padding_idx = 0
self.pos_embed_alpha = nn.Parameter(torch.Tensor([1])) if use_pos_embed_alpha else 1
self.embed_positions = SinusoidalPositionalEmbedding(
embed_dim, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS,
)
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerEncoderLayer(self.hidden_size, self.dropout,
kernel_size=ffn_kernel_size, num_heads=num_heads)
for _ in range(self.num_layers)
])
if self.use_last_norm:
self.layer_norm = nn.LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(self, x, padding_mask=None, attn_mask=None, return_hiddens=False):
"""
:param x: [B, T, C]
:param padding_mask: [B, T]
:return: [B, T, C] or [L, B, T, C]
"""
padding_mask = x.abs().sum(-1).eq(0).data if padding_mask is None else padding_mask
nonpadding_mask_TB = 1 - padding_mask.transpose(0, 1).float()[:, :, None] # [T, B, 1]
if self.use_pos_embed:
positions = self.pos_embed_alpha * self.embed_positions(x[..., 0])
x = x + positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1) * nonpadding_mask_TB
hiddens = []
for layer in self.layers:
x = layer(x, encoder_padding_mask=padding_mask, attn_mask=attn_mask) * nonpadding_mask_TB
hiddens.append(x)
if self.use_last_norm:
x = self.layer_norm(x) * nonpadding_mask_TB
if return_hiddens:
x = torch.stack(hiddens, 0) # [L, T, B, C]
x = x.transpose(1, 2) # [L, B, T, C]
else:
x = x.transpose(0, 1) # [B, T, C]
return x
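# --- Hedged usage sketch (illustrative addition, not part of the original file). ---
# Runs a small FFT-block stack over a padded [B, T, C] batch; frames that are all
# zeros are treated as padding when no explicit padding_mask is given.
def _demo_fft_blocks():
    model = FFTBlocks(hidden_size=32, num_layers=2, num_heads=2)
    x = torch.randn(2, 10, 32)
    x[1, 6:] = 0.0                                   # frames 6.. of sample 1 are padding
    y = model(x)                                     # [B, T, C]
    assert y.shape == (2, 10, 32)
    return y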
class FastSpeechEncoder(FFTBlocks):
def __init__(self, dict_size, hidden_size=256, num_layers=4, kernel_size=9, num_heads=2,
dropout=0.0):
super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads,
use_pos_embed=False, dropout=dropout) # use_pos_embed_alpha for compatibility
self.embed_tokens = Embedding(dict_size, hidden_size, 0)
self.embed_scale = math.sqrt(hidden_size)
self.padding_idx = 0
self.embed_positions = SinusoidalPositionalEmbedding(
hidden_size, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS,
)
def forward(self, txt_tokens, attn_mask=None):
"""
:param txt_tokens: [B, T]
:return: {
'encoder_out': [B x T x C]
}
"""
encoder_padding_mask = txt_tokens.eq(self.padding_idx).data
x = self.forward_embedding(txt_tokens) # [B, T, H]
if self.num_layers > 0:
x = super(FastSpeechEncoder, self).forward(x, encoder_padding_mask, attn_mask=attn_mask)
return x
def forward_embedding(self, txt_tokens):
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(txt_tokens)
if self.use_pos_embed:
positions = self.embed_positions(txt_tokens)
x = x + positions
x = F.dropout(x, p=self.dropout, training=self.training)
return x
class FastSpeechDecoder(FFTBlocks):
def __init__(self, hidden_size=256, num_layers=4, kernel_size=9, num_heads=2):
super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads)
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/commons/transformer.py |
import torch
import torch.nn.functional as F
def build_word_mask(x2word, y2word):
return (x2word[:, :, None] == y2word[:, None, :]).long()
def mel2ph_to_mel2word(mel2ph, ph2word):
mel2word = (ph2word - 1).gather(1, (mel2ph - 1).clamp(min=0)) + 1
mel2word = mel2word * (mel2ph > 0).long()
return mel2word
def clip_mel2token_to_multiple(mel2token, frames_multiple):
max_frames = mel2token.shape[1] // frames_multiple * frames_multiple
mel2token = mel2token[:, :max_frames]
return mel2token
def expand_states(h, mel2token):
h = F.pad(h, [0, 0, 1, 0])
mel2token_ = mel2token[..., None].repeat([1, 1, h.shape[-1]])
h = torch.gather(h, 1, mel2token_) # [B, T, H]
return h
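# --- Hedged usage sketch (illustrative addition, not part of the original file). ---
# `expand_states` repeats token-level states along the time axis according to a
# 1-based mel2token alignment; index 0 marks padded frames and maps to a zero vector.
def _demo_expand_states():
    h = torch.arange(1, 7, dtype=torch.float).view(1, 3, 2)   # [B=1, T_txt=3, H=2]
    mel2token = torch.tensor([[1, 1, 2, 3, 3, 0]])            # [B, T_mel]
    out = expand_states(h, mel2token)                         # [B, T_mel, H]
    assert out.shape == (1, 6, 2)
    assert torch.equal(out[0, 0], h[0, 0])                    # frame 0 copies token 1
    assert torch.equal(out[0, -1], torch.zeros(2))            # padded frame -> zeros
    return out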
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/commons/align_ops.py |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.commons.common_layers import Embedding
from modules.fastspeech.tts_modules import LayerNorm
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
def init_weights_func(m):
classname = m.__class__.__name__
if classname.find("Conv1d") != -1:
torch.nn.init.xavier_uniform_(m.weight)
class ResidualBlock(nn.Module):
"""Implements conv->PReLU->norm n-times"""
def __init__(self, channels, kernel_size, dilation, n=2, norm_type='bn', dropout=0.0,
c_multiple=2, ln_eps=1e-12):
super(ResidualBlock, self).__init__()
if norm_type == 'bn':
norm_builder = lambda: nn.BatchNorm1d(channels)
elif norm_type == 'in':
norm_builder = lambda: nn.InstanceNorm1d(channels, affine=True)
elif norm_type == 'gn':
norm_builder = lambda: nn.GroupNorm(8, channels)
elif norm_type == 'ln':
norm_builder = lambda: LayerNorm(channels, dim=1, eps=ln_eps)
else:
norm_builder = lambda: nn.Identity()
self.blocks = [
nn.Sequential(
norm_builder(),
nn.Conv1d(channels, c_multiple * channels, kernel_size, dilation=dilation,
padding=(dilation * (kernel_size - 1)) // 2),
LambdaLayer(lambda x: x * kernel_size ** -0.5),
nn.GELU(),
nn.Conv1d(c_multiple * channels, channels, 1, dilation=dilation),
)
for i in range(n)
]
self.blocks = nn.ModuleList(self.blocks)
self.dropout = dropout
def forward(self, x):
nonpadding = (x.abs().sum(1) > 0).float()[:, None, :]
for b in self.blocks:
x_ = b(x)
if self.dropout > 0 and self.training:
x_ = F.dropout(x_, self.dropout, training=self.training)
x = x + x_
x = x * nonpadding
return x
class ConvBlocks(nn.Module):
"""Decodes the expanded phoneme encoding into spectrograms"""
def __init__(self, hidden_size, out_dims, dilations, kernel_size,
norm_type='ln', layers_in_block=2, c_multiple=2,
dropout=0.0, ln_eps=1e-5,
init_weights=True, is_BTC=True, num_layers=None, post_net_kernel=3):
super(ConvBlocks, self).__init__()
self.is_BTC = is_BTC
if num_layers is not None:
dilations = [1] * num_layers
self.res_blocks = nn.Sequential(
*[ResidualBlock(hidden_size, kernel_size, d,
n=layers_in_block, norm_type=norm_type, c_multiple=c_multiple,
dropout=dropout, ln_eps=ln_eps)
for d in dilations],
)
if norm_type == 'bn':
norm = nn.BatchNorm1d(hidden_size)
elif norm_type == 'in':
norm = nn.InstanceNorm1d(hidden_size, affine=True)
elif norm_type == 'gn':
norm = nn.GroupNorm(8, hidden_size)
elif norm_type == 'ln':
norm = LayerNorm(hidden_size, dim=1, eps=ln_eps)
self.last_norm = norm
self.post_net1 = nn.Conv1d(hidden_size, out_dims, kernel_size=post_net_kernel,
padding=post_net_kernel // 2)
if init_weights:
self.apply(init_weights_func)
def forward(self, x, nonpadding=None):
"""
:param x: [B, T, H]
:return: [B, T, H]
"""
if self.is_BTC:
x = x.transpose(1, 2)
if nonpadding is None:
nonpadding = (x.abs().sum(1) > 0).float()[:, None, :]
elif self.is_BTC:
nonpadding = nonpadding.transpose(1, 2)
x = self.res_blocks(x) * nonpadding
x = self.last_norm(x) * nonpadding
x = self.post_net1(x) * nonpadding
if self.is_BTC:
x = x.transpose(1, 2)
return x
class TextConvEncoder(ConvBlocks):
def __init__(self, dict_size, hidden_size, out_dims, dilations, kernel_size,
norm_type='ln', layers_in_block=2, c_multiple=2,
dropout=0.0, ln_eps=1e-5, init_weights=True, num_layers=None, post_net_kernel=3):
super().__init__(hidden_size, out_dims, dilations, kernel_size,
norm_type, layers_in_block, c_multiple,
dropout, ln_eps, init_weights, num_layers=num_layers,
post_net_kernel=post_net_kernel)
self.embed_tokens = Embedding(dict_size, hidden_size, 0)
self.embed_scale = math.sqrt(hidden_size)
def forward(self, txt_tokens):
"""
:param txt_tokens: [B, T]
:return: {
'encoder_out': [B x T x C]
}
"""
x = self.embed_scale * self.embed_tokens(txt_tokens)
return super().forward(x)
class ConditionalConvBlocks(ConvBlocks):
def __init__(self, hidden_size, c_cond, c_out, dilations, kernel_size,
norm_type='ln', layers_in_block=2, c_multiple=2,
dropout=0.0, ln_eps=1e-5, init_weights=True, is_BTC=True, num_layers=None):
super().__init__(hidden_size, c_out, dilations, kernel_size,
norm_type, layers_in_block, c_multiple,
dropout, ln_eps, init_weights, is_BTC=False, num_layers=num_layers)
self.g_prenet = nn.Conv1d(c_cond, hidden_size, 3, padding=1)
self.is_BTC_ = is_BTC
if init_weights:
self.g_prenet.apply(init_weights_func)
def forward(self, x, cond, nonpadding=None):
if self.is_BTC_:
x = x.transpose(1, 2)
cond = cond.transpose(1, 2)
if nonpadding is not None:
nonpadding = nonpadding.transpose(1, 2)
if nonpadding is None:
nonpadding = x.abs().sum(1)[:, None]
x = x + self.g_prenet(cond)
x = x * nonpadding
x = super(ConditionalConvBlocks, self).forward(x) # input needs to be BTC
if self.is_BTC_:
x = x.transpose(1, 2)
return x
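# --- Hedged usage sketch (illustrative addition, not part of the original file). ---
# A small ConvBlocks stack over a [B, T, H] input; frames that are all zeros are
# treated as padding and stay zeroed in the output.
def _demo_conv_blocks():
    model = ConvBlocks(hidden_size=16, out_dims=8, dilations=[1, 2, 4], kernel_size=3)
    x = torch.randn(2, 12, 16)
    x[1, 8:] = 0.0
    y = model(x)                                     # [B, T, out_dims]
    assert y.shape == (2, 12, 8)
    assert torch.equal(y[1, 8:], torch.zeros(4, 8))  # padded frames remain zero
    return y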
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/commons/conv.py |
import math
import torch
class PositionalEncoding(torch.nn.Module):
"""Positional encoding.
Args:
d_model (int): Embedding dimension.
dropout_rate (float): Dropout rate.
max_len (int): Maximum input length.
reverse (bool): Whether to reverse the input position.
"""
def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):
"""Construct an PositionalEncoding object."""
super(PositionalEncoding, self).__init__()
self.d_model = d_model
self.reverse = reverse
self.xscale = math.sqrt(self.d_model)
self.dropout = torch.nn.Dropout(p=dropout_rate)
self.pe = None
self.extend_pe(torch.tensor(0.0).expand(1, max_len))
def extend_pe(self, x):
"""Reset the positional encodings."""
if self.pe is not None:
if self.pe.size(1) >= x.size(1):
if self.pe.dtype != x.dtype or self.pe.device != x.device:
self.pe = self.pe.to(dtype=x.dtype, device=x.device)
return
pe = torch.zeros(x.size(1), self.d_model)
if self.reverse:
position = torch.arange(
x.size(1) - 1, -1, -1.0, dtype=torch.float32
).unsqueeze(1)
else:
position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, self.d_model, 2, dtype=torch.float32)
* -(math.log(10000.0) / self.d_model)
)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.pe = pe.to(device=x.device, dtype=x.dtype)
def forward(self, x: torch.Tensor):
"""Add positional encoding.
Args:
x (torch.Tensor): Input tensor (batch, time, `*`).
Returns:
torch.Tensor: Encoded tensor (batch, time, `*`).
"""
self.extend_pe(x)
x = x * self.xscale + self.pe[:, : x.size(1)]
return self.dropout(x)
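# --- Hedged usage sketch (illustrative addition, not part of the original file). ---
# Adds sinusoidal positional encodings to a [B, T, d_model] sequence: the input is
# scaled by sqrt(d_model) and PE[:, :T] is added.
def _demo_positional_encoding():
    pos_enc = PositionalEncoding(d_model=16, dropout_rate=0.0, max_len=100)
    x = torch.randn(2, 20, 16)
    y = pos_enc(x)
    assert y.shape == (2, 20, 16)
    return y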
class ScaledPositionalEncoding(PositionalEncoding):
"""Scaled positional encoding module.
See Sec. 3.2 https://arxiv.org/abs/1809.08895
Args:
d_model (int): Embedding dimension.
dropout_rate (float): Dropout rate.
max_len (int): Maximum input length.
"""
def __init__(self, d_model, dropout_rate, max_len=5000):
"""Initialize class."""
super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len)
self.alpha = torch.nn.Parameter(torch.tensor(1.0))
def reset_parameters(self):
"""Reset parameters."""
self.alpha.data = torch.tensor(1.0)
def forward(self, x):
"""Add positional encoding.
Args:
x (torch.Tensor): Input tensor (batch, time, `*`).
Returns:
torch.Tensor: Encoded tensor (batch, time, `*`).
"""
self.extend_pe(x)
x = x + self.alpha * self.pe[:, : x.size(1)]
return self.dropout(x)
class RelPositionalEncoding(PositionalEncoding):
"""Relative positional encoding module.
See : Appendix B in https://arxiv.org/abs/1901.02860
Args:
d_model (int): Embedding dimension.
dropout_rate (float): Dropout rate.
max_len (int): Maximum input length.
"""
def __init__(self, d_model, dropout_rate, max_len=5000):
"""Initialize class."""
super().__init__(d_model, dropout_rate, max_len, reverse=True)
def forward(self, x):
"""Compute positional encoding.
Args:
x (torch.Tensor): Input tensor (batch, time, `*`).
Returns:
torch.Tensor: Encoded tensor (batch, time, `*`).
torch.Tensor: Positional embedding tensor (1, time, `*`).
"""
self.extend_pe(x)
x = x * self.xscale
pos_emb = self.pe[:, : x.size(1)]
return self.dropout(x) + self.dropout(pos_emb) | EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/commons/espnet_positional_embedding.py |
import scipy
from torch.nn import functional as F
import torch
from torch import nn
import numpy as np
from modules.commons.wavenet import WN
from modules.glow import utils
class ActNorm(nn.Module):
def __init__(self, channels, ddi=False, **kwargs):
super().__init__()
self.channels = channels
self.initialized = not ddi
self.logs = nn.Parameter(torch.zeros(1, channels, 1))
self.bias = nn.Parameter(torch.zeros(1, channels, 1))
def forward(self, x, x_mask=None, reverse=False, **kwargs):
if x_mask is None:
x_mask = torch.ones(x.size(0), 1, x.size(2)).to(device=x.device, dtype=x.dtype)
x_len = torch.sum(x_mask, [1, 2])
if not self.initialized:
self.initialize(x, x_mask)
self.initialized = True
if reverse:
z = (x - self.bias) * torch.exp(-self.logs) * x_mask
logdet = torch.sum(-self.logs) * x_len
else:
z = (self.bias + torch.exp(self.logs) * x) * x_mask
logdet = torch.sum(self.logs) * x_len # [b]
return z, logdet
def store_inverse(self):
pass
def set_ddi(self, ddi):
self.initialized = not ddi
def initialize(self, x, x_mask):
with torch.no_grad():
denom = torch.sum(x_mask, [0, 2])
m = torch.sum(x * x_mask, [0, 2]) / denom
m_sq = torch.sum(x * x * x_mask, [0, 2]) / denom
v = m_sq - (m ** 2)
logs = 0.5 * torch.log(torch.clamp_min(v, 1e-6))
bias_init = (-m * torch.exp(-logs)).view(*self.bias.shape).to(dtype=self.bias.dtype)
logs_init = (-logs).view(*self.logs.shape).to(dtype=self.logs.dtype)
self.bias.data.copy_(bias_init)
self.logs.data.copy_(logs_init)
class InvConvNear(nn.Module):
def __init__(self, channels, n_split=4, no_jacobian=False, lu=True, n_sqz=2, **kwargs):
super().__init__()
assert (n_split % 2 == 0)
self.channels = channels
self.n_split = n_split
self.n_sqz = n_sqz
self.no_jacobian = no_jacobian
w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split).normal_())[0]
if torch.det(w_init) < 0:
w_init[:, 0] = -1 * w_init[:, 0]
self.lu = lu
if lu:
# LU decomposition can slightly speed up the inverse
np_p, np_l, np_u = scipy.linalg.lu(w_init)
np_s = np.diag(np_u)
np_sign_s = np.sign(np_s)
np_log_s = np.log(np.abs(np_s))
np_u = np.triu(np_u, k=1)
l_mask = np.tril(np.ones(w_init.shape, dtype=float), -1)
eye = np.eye(*w_init.shape, dtype=float)
self.register_buffer('p', torch.Tensor(np_p.astype(float)))
self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(float)))
self.l = nn.Parameter(torch.Tensor(np_l.astype(float)), requires_grad=True)
self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(float)), requires_grad=True)
self.u = nn.Parameter(torch.Tensor(np_u.astype(float)), requires_grad=True)
self.register_buffer('l_mask', torch.Tensor(l_mask))
self.register_buffer('eye', torch.Tensor(eye))
else:
self.weight = nn.Parameter(w_init)
def forward(self, x, x_mask=None, reverse=False, **kwargs):
b, c, t = x.size()
assert (c % self.n_split == 0)
if x_mask is None:
x_mask = 1
x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t
else:
x_len = torch.sum(x_mask, [1, 2])
x = x.view(b, self.n_sqz, c // self.n_split, self.n_split // self.n_sqz, t)
x = x.permute(0, 1, 3, 2, 4).contiguous().view(b, self.n_split, c // self.n_split, t)
if self.lu:
self.weight, log_s = self._get_weight()
logdet = log_s.sum()
logdet = logdet * (c / self.n_split) * x_len
else:
logdet = torch.logdet(self.weight) * (c / self.n_split) * x_len # [b]
if reverse:
if hasattr(self, "weight_inv"):
weight = self.weight_inv
else:
weight = torch.inverse(self.weight.float()).to(dtype=self.weight.dtype)
logdet = -logdet
else:
weight = self.weight
if self.no_jacobian:
logdet = 0
weight = weight.view(self.n_split, self.n_split, 1, 1)
z = F.conv2d(x, weight)
z = z.view(b, self.n_sqz, self.n_split // self.n_sqz, c // self.n_split, t)
z = z.permute(0, 1, 3, 2, 4).contiguous().view(b, c, t) * x_mask
return z, logdet
def _get_weight(self):
l, log_s, u = self.l, self.log_s, self.u
l = l * self.l_mask + self.eye
u = u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(log_s))
weight = torch.matmul(self.p, torch.matmul(l, u))
return weight, log_s
def store_inverse(self):
weight, _ = self._get_weight()
self.weight_inv = torch.inverse(weight.float()).to(next(self.parameters()).device)
class InvConv(nn.Module):
def __init__(self, channels, no_jacobian=False, lu=True, **kwargs):
super().__init__()
w_shape = [channels, channels]
w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype(float)
LU_decomposed = lu
if not LU_decomposed:
# Sample a random orthogonal matrix:
self.register_parameter("weight", nn.Parameter(torch.Tensor(w_init)))
else:
np_p, np_l, np_u = scipy.linalg.lu(w_init)
np_s = np.diag(np_u)
np_sign_s = np.sign(np_s)
np_log_s = np.log(np.abs(np_s))
np_u = np.triu(np_u, k=1)
l_mask = np.tril(np.ones(w_shape, dtype=float), -1)
eye = np.eye(*w_shape, dtype=float)
self.register_buffer('p', torch.Tensor(np_p.astype(float)))
self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(float)))
self.l = nn.Parameter(torch.Tensor(np_l.astype(float)))
self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(float)))
self.u = nn.Parameter(torch.Tensor(np_u.astype(float)))
self.l_mask = torch.Tensor(l_mask)
self.eye = torch.Tensor(eye)
self.w_shape = w_shape
self.LU = LU_decomposed
self.weight = None
def get_weight(self, device, reverse):
w_shape = self.w_shape
self.p = self.p.to(device)
self.sign_s = self.sign_s.to(device)
self.l_mask = self.l_mask.to(device)
self.eye = self.eye.to(device)
l = self.l * self.l_mask + self.eye
u = self.u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(self.log_s))
dlogdet = self.log_s.sum()
if not reverse:
w = torch.matmul(self.p, torch.matmul(l, u))
else:
l = torch.inverse(l.double()).float()
u = torch.inverse(u.double()).float()
w = torch.matmul(u, torch.matmul(l, self.p.inverse()))
return w.view(w_shape[0], w_shape[1], 1), dlogdet
def forward(self, x, x_mask=None, reverse=False, **kwargs):
"""
log-det = log|abs(|W|)| * pixels
"""
b, c, t = x.size()
if x_mask is None:
x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t
else:
x_len = torch.sum(x_mask, [1, 2])
logdet = 0
if not reverse:
weight, dlogdet = self.get_weight(x.device, reverse)
z = F.conv1d(x, weight)
if logdet is not None:
logdet = logdet + dlogdet * x_len
return z, logdet
else:
if self.weight is None:
weight, dlogdet = self.get_weight(x.device, reverse)
else:
weight, dlogdet = self.weight, self.dlogdet
z = F.conv1d(x, weight)
if logdet is not None:
logdet = logdet - dlogdet * x_len
return z, logdet
def store_inverse(self):
self.weight, self.dlogdet = self.get_weight('cuda', reverse=True)
class CouplingBlock(nn.Module):
def __init__(self, in_channels, hidden_channels, kernel_size, dilation_rate, n_layers,
gin_channels=0, p_dropout=0, sigmoid_scale=False, wn=None):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.sigmoid_scale = sigmoid_scale
start = torch.nn.Conv1d(in_channels // 2, hidden_channels, 1)
start = torch.nn.utils.weight_norm(start)
self.start = start
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(hidden_channels, in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
self.wn = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels, p_dropout)
if wn is not None:
self.wn.in_layers = wn.in_layers
self.wn.res_skip_layers = wn.res_skip_layers
def forward(self, x, x_mask=None, reverse=False, g=None, **kwargs):
if x_mask is None:
x_mask = 1
x_0, x_1 = x[:, :self.in_channels // 2], x[:, self.in_channels // 2:]
x = self.start(x_0) * x_mask
x = self.wn(x, x_mask, g)
out = self.end(x)
z_0 = x_0
m = out[:, :self.in_channels // 2, :]
logs = out[:, self.in_channels // 2:, :]
if self.sigmoid_scale:
logs = torch.log(1e-6 + torch.sigmoid(logs + 2))
if reverse:
z_1 = (x_1 - m) * torch.exp(-logs) * x_mask
logdet = torch.sum(-logs * x_mask, [1, 2])
else:
z_1 = (m + torch.exp(logs) * x_1) * x_mask
logdet = torch.sum(logs * x_mask, [1, 2])
z = torch.cat([z_0, z_1], 1)
return z, logdet
def store_inverse(self):
self.wn.remove_weight_norm()
class Glow(nn.Module):
def __init__(self,
in_channels,
hidden_channels,
kernel_size,
dilation_rate,
n_blocks,
n_layers,
p_dropout=0.,
n_split=4,
n_sqz=2,
sigmoid_scale=False,
gin_channels=0,
inv_conv_type='near',
share_cond_layers=False,
share_wn_layers=0,
):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_blocks = n_blocks
self.n_layers = n_layers
self.p_dropout = p_dropout
self.n_split = n_split
self.n_sqz = n_sqz
self.sigmoid_scale = sigmoid_scale
self.gin_channels = gin_channels
self.share_cond_layers = share_cond_layers
if gin_channels != 0 and share_cond_layers:
cond_layer = torch.nn.Conv1d(gin_channels * n_sqz, 2 * hidden_channels * n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
wn = None
self.flows = nn.ModuleList()
for b in range(n_blocks):
self.flows.append(ActNorm(channels=in_channels * n_sqz))
if inv_conv_type == 'near':
self.flows.append(InvConvNear(channels=in_channels * n_sqz, n_split=n_split, n_sqz=n_sqz))
if inv_conv_type == 'invconv':
self.flows.append(InvConv(channels=in_channels * n_sqz))
if share_wn_layers > 0:
if b % share_wn_layers == 0:
wn = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels * n_sqz,
p_dropout, share_cond_layers)
self.flows.append(
CouplingBlock(
in_channels * n_sqz,
hidden_channels,
kernel_size=kernel_size,
dilation_rate=dilation_rate,
n_layers=n_layers,
gin_channels=gin_channels * n_sqz,
p_dropout=p_dropout,
sigmoid_scale=sigmoid_scale,
wn=wn
))
def forward(self, x, x_mask=None, g=None, reverse=False, return_hiddens=False):
logdet_tot = 0
if not reverse:
flows = self.flows
else:
flows = reversed(self.flows)
if return_hiddens:
hs = []
if self.n_sqz > 1:
x, x_mask_ = utils.squeeze(x, x_mask, self.n_sqz)
if g is not None:
g, _ = utils.squeeze(g, x_mask, self.n_sqz)
x_mask = x_mask_
if self.share_cond_layers and g is not None:
g = self.cond_layer(g)
for f in flows:
x, logdet = f(x, x_mask, g=g, reverse=reverse)
if return_hiddens:
hs.append(x)
logdet_tot += logdet
if self.n_sqz > 1:
x, x_mask = utils.unsqueeze(x, x_mask, self.n_sqz)
if return_hiddens:
return x, logdet_tot, hs
return x, logdet_tot
def store_inverse(self):
def remove_weight_norm(m):
try:
nn.utils.remove_weight_norm(m)
except ValueError: # this module didn't have weight norm
return
self.apply(remove_weight_norm)
for f in self.flows:
f.store_inverse()
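# --- Hedged usage sketch (illustrative addition, not part of the original file). ---
# A tiny Glow stack mapping mel-like features [B, C, T] to latents and back; the
# hyperparameters below are toy values, not the project defaults. The reverse pass
# recovers the input up to numerical error from the inverted 1x1 convolutions.
def _demo_glow_roundtrip():
    glow = Glow(in_channels=8, hidden_channels=16, kernel_size=3,
                dilation_rate=1, n_blocks=2, n_layers=2)
    glow.eval()
    x = torch.randn(2, 8, 12)
    x_mask = torch.ones(2, 1, 12)
    with torch.no_grad():
        z, logdet = glow(x, x_mask)                   # data -> latent
        x_rec, _ = glow(z, x_mask, reverse=True)      # latent -> data
    assert z.shape == x.shape and x_rec.shape == x.shape
    return (x - x_rec).abs().max()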
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/commons/normalizing_flow/glow_modules.py |
import torch
from torch import nn
from modules.commons.conv import ConditionalConvBlocks
from modules.commons.wavenet import WN
class FlipLayer(nn.Module):
def forward(self, x, nonpadding, cond=None, reverse=False):
x = torch.flip(x, [1])
return x
class CouplingLayer(nn.Module):
def __init__(self, c_in, hidden_size, kernel_size, n_layers, p_dropout=0, c_in_g=0, nn_type='wn'):
super().__init__()
self.channels = c_in
self.hidden_size = hidden_size
self.kernel_size = kernel_size
self.n_layers = n_layers
self.c_half = c_in // 2
self.pre = nn.Conv1d(self.c_half, hidden_size, 1)
if nn_type == 'wn':
self.enc = WN(hidden_size, kernel_size, 1, n_layers, p_dropout=p_dropout,
c_cond=c_in_g)
elif nn_type == 'conv':
self.enc = ConditionalConvBlocks(
hidden_size, c_in_g, hidden_size, None, kernel_size,
layers_in_block=1, is_BTC=False, num_layers=n_layers)
self.post = nn.Conv1d(hidden_size, self.c_half, 1)
def forward(self, x, nonpadding, cond=None, reverse=False):
x0, x1 = x[:, :self.c_half], x[:, self.c_half:]
x_ = self.pre(x0) * nonpadding
x_ = self.enc(x_, nonpadding=nonpadding, cond=cond)
m = self.post(x_)
x1 = m + x1 if not reverse else x1 - m
x = torch.cat([x0, x1], 1)
return x * nonpadding
class ResFlow(nn.Module):
def __init__(self,
c_in,
hidden_size,
kernel_size,
n_flow_layers,
n_flow_steps=4,
c_cond=0,
nn_type='wn'):
super().__init__()
self.flows = nn.ModuleList()
for i in range(n_flow_steps):
self.flows.append(
CouplingLayer(c_in, hidden_size, kernel_size, n_flow_layers, c_in_g=c_cond, nn_type=nn_type))
self.flows.append(FlipLayer())
def forward(self, x, nonpadding, cond=None, reverse=False):
for flow in (self.flows if not reverse else reversed(self.flows)):
x = flow(x, nonpadding, cond=cond, reverse=reverse)
return x
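# --- Hedged usage sketch (illustrative addition, not part of the original file). ---
# The additive coupling layers make ResFlow exactly invertible (up to float rounding),
# which a forward/reverse round trip can verify.
def _demo_res_flow_invertibility():
    flow = ResFlow(c_in=8, hidden_size=16, kernel_size=3, n_flow_layers=2, n_flow_steps=2)
    flow.eval()
    x = torch.randn(2, 8, 10)
    nonpadding = torch.ones(2, 1, 10)
    with torch.no_grad():
        z = flow(x, nonpadding)
        x_rec = flow(z, nonpadding, reverse=True)
    assert torch.allclose(x, x_rec, atol=1e-5)
    return x_rec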
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/commons/normalizing_flow/res_flow.py |
import torch
def squeeze(x, x_mask=None, n_sqz=2):
b, c, t = x.size()
t = (t // n_sqz) * n_sqz
x = x[:, :, :t]
x_sqz = x.view(b, c, t // n_sqz, n_sqz)
x_sqz = x_sqz.permute(0, 3, 1, 2).contiguous().view(b, c * n_sqz, t // n_sqz)
if x_mask is not None:
x_mask = x_mask[:, :, n_sqz - 1::n_sqz]
else:
x_mask = torch.ones(b, 1, t // n_sqz).to(device=x.device, dtype=x.dtype)
return x_sqz * x_mask, x_mask
def unsqueeze(x, x_mask=None, n_sqz=2):
b, c, t = x.size()
x_unsqz = x.view(b, n_sqz, c // n_sqz, t)
x_unsqz = x_unsqz.permute(0, 2, 3, 1).contiguous().view(b, c // n_sqz, t * n_sqz)
if x_mask is not None:
x_mask = x_mask.unsqueeze(-1).repeat(1, 1, 1, n_sqz).view(b, 1, t * n_sqz)
else:
x_mask = torch.ones(b, 1, t * n_sqz).to(device=x.device, dtype=x.dtype)
return x_unsqz * x_mask, x_mask
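# --- Hedged usage sketch (illustrative addition, not part of the original file). ---
# `squeeze` folds groups of n_sqz frames into the channel axis; `unsqueeze` undoes it
# exactly when the time length is a multiple of n_sqz.
def _demo_squeeze_roundtrip():
    x = torch.randn(2, 4, 10)
    x_mask = torch.ones(2, 1, 10)
    x_sqz, mask_sqz = squeeze(x, x_mask, n_sqz=2)     # [2, 8, 5], [2, 1, 5]
    x_rec, _ = unsqueeze(x_sqz, mask_sqz, n_sqz=2)    # [2, 4, 10]
    assert x_sqz.shape == (2, 8, 5) and x_rec.shape == (2, 4, 10)
    assert torch.allclose(x_rec, x)
    return x_rec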
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/commons/normalizing_flow/utils.py |
import numpy as np
import torch
import torch.utils.data
from librosa.filters import mel as librosa_mel_fn
from scipy.io.wavfile import read
MAX_WAV_VALUE = 32768.0
def load_wav(full_path):
sampling_rate, data = read(full_path)
return data, sampling_rate
def dynamic_range_compression(x, C=1, clip_val=1e-5):
return np.log(np.clip(x, a_min=clip_val, a_max=None) * C)
def dynamic_range_decompression(x, C=1):
return np.exp(x) / C
def dynamic_range_compression_torch(x, C=1, clip_val=1e-5):
return torch.log(torch.clamp(x, min=clip_val) * C)
def dynamic_range_decompression_torch(x, C=1):
return torch.exp(x) / C
def spectral_normalize_torch(magnitudes):
output = dynamic_range_compression_torch(magnitudes)
return output
def spectral_de_normalize_torch(magnitudes):
output = dynamic_range_decompression_torch(magnitudes)
return output
mel_basis = {}
hann_window = {}
def mel_spectrogram(y, hparams, center=False, complex=False):
    # Expected keys in `hparams`:
    #   fft_size: FFT length; shorter windows are zero-padded up to this size
    #   audio_num_mel_bins / audio_sample_rate: number of mel bins and sampling rate
    #   hop_size: frame shift in samples (e.g. 275 ~= 12.5 ms at 22050 Hz)
    #   win_size: window length in samples (e.g. 1100 ~= 50 ms at 22050 Hz; use fft_size if None)
    #   fmin: lowest mel frequency (~55 Hz suits male voices; ~95 Hz helps remove noise for female voices)
    #   fmax: highest mel frequency (increase/reduce depending on the data)
n_fft = hparams['fft_size']
num_mels = hparams['audio_num_mel_bins']
sampling_rate = hparams['audio_sample_rate']
hop_size = hparams['hop_size']
win_size = hparams['win_size']
fmin = hparams['fmin']
fmax = hparams['fmax']
y = y.clamp(min=-1., max=1.)
global mel_basis, hann_window
    if str(fmax) + '_' + str(y.device) not in mel_basis:
mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)
mel_basis[str(fmax) + '_' + str(y.device)] = torch.from_numpy(mel).float().to(y.device)
hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device)
y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),
mode='reflect')
y = y.squeeze(1)
spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[str(y.device)],
center=center, pad_mode='reflect', normalized=False, onesided=True)
if not complex:
spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9))
spec = torch.matmul(mel_basis[str(fmax) + '_' + str(y.device)], spec)
spec = spectral_normalize_torch(spec)
else:
B, C, T, _ = spec.shape
spec = spec.transpose(1, 2) # [B, T, n_fft, 2]
return spec
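# --- Hedged usage sketch (illustrative addition, not part of the original file). ---
# The hparams keys below mirror what mel_spectrogram() reads; the values are merely
# plausible 22.05 kHz settings, not the project defaults.
def _demo_mel_spectrogram():
    hparams = {'fft_size': 1024, 'audio_num_mel_bins': 80, 'audio_sample_rate': 22050,
               'hop_size': 256, 'win_size': 1024, 'fmin': 55, 'fmax': 8000}
    y = torch.randn(1, 22050).clamp(-1., 1.)           # one second of dummy audio, [B, samples]
    mel = mel_spectrogram(y, hparams)                   # [B, n_mels, T_frames]
    assert mel.shape[:2] == (1, 80)
    return mel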
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/hifigan/mel_utils.py |
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from modules.parallel_wavegan.layers import UpsampleNetwork, ConvInUpsampleNetwork
from modules.parallel_wavegan.models.source import SourceModuleHnNSF
import numpy as np
LRELU_SLOPE = 0.1
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
def apply_weight_norm(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
weight_norm(m)
def get_padding(kernel_size, dilation=1):
return int((kernel_size * dilation - dilation) / 2)
class ResBlock1(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.h = h
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1)))
])
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class ResBlock2(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
super(ResBlock2, self).__init__()
self.h = h
self.convs = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1])))
])
self.convs.apply(init_weights)
def forward(self, x):
for c in self.convs:
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class Conv1d1x1(Conv1d):
"""1x1 Conv1d with customized initialization."""
def __init__(self, in_channels, out_channels, bias):
"""Initialize 1x1 Conv1d module."""
super(Conv1d1x1, self).__init__(in_channels, out_channels,
kernel_size=1, padding=0,
dilation=1, bias=bias)
class HifiGanGenerator(torch.nn.Module):
def __init__(self, h, c_out=1):
super(HifiGanGenerator, self).__init__()
self.h = h
self.num_kernels = len(h['resblock_kernel_sizes'])
self.num_upsamples = len(h['upsample_rates'])
if h['use_pitch_embed']:
self.harmonic_num = 8
self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(h['upsample_rates']))
self.m_source = SourceModuleHnNSF(
sampling_rate=h['audio_sample_rate'],
harmonic_num=self.harmonic_num)
self.noise_convs = nn.ModuleList()
self.conv_pre = weight_norm(Conv1d(80, h['upsample_initial_channel'], 7, 1, padding=3))
resblock = ResBlock1 if h['resblock'] == '1' else ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(h['upsample_rates'], h['upsample_kernel_sizes'])):
c_cur = h['upsample_initial_channel'] // (2 ** (i + 1))
self.ups.append(weight_norm(
ConvTranspose1d(c_cur * 2, c_cur, k, u, padding=(k - u) // 2)))
if h['use_pitch_embed']:
if i + 1 < len(h['upsample_rates']):
stride_f0 = np.prod(h['upsample_rates'][i + 1:])
self.noise_convs.append(Conv1d(
1, c_cur, kernel_size=stride_f0 * 2, stride=stride_f0, padding=stride_f0 // 2))
else:
self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1))
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = h['upsample_initial_channel'] // (2 ** (i + 1))
for j, (k, d) in enumerate(zip(h['resblock_kernel_sizes'], h['resblock_dilation_sizes'])):
self.resblocks.append(resblock(h, ch, k, d))
self.conv_post = weight_norm(Conv1d(ch, c_out, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def forward(self, x, f0=None):
if f0 is not None:
# harmonic-source signal, noise-source signal, uv flag
f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2)
har_source, noi_source, uv = self.m_source(f0)
har_source = har_source.transpose(1, 2)
x = self.conv_pre(x)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
x = self.ups[i](x)
if f0 is not None:
x_source = self.noise_convs[i](har_source)
x = x + x_source
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i * self.num_kernels + j](x)
else:
xs += self.resblocks[i * self.num_kernels + j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
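# --- Hedged usage sketch (illustrative addition, not part of the original file). ---
# The config keys below mirror what HifiGanGenerator reads; the values are a small toy
# setup, not released-checkpoint hyperparameters. The generator upsamples an 80-bin
# mel spectrogram by prod(upsample_rates) samples per frame.
def _demo_hifigan_generator():
    h = {'resblock': '1',
         'resblock_kernel_sizes': [3, 7, 11],
         'resblock_dilation_sizes': [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
         'upsample_rates': [8, 8, 2, 2],
         'upsample_kernel_sizes': [16, 16, 4, 4],
         'upsample_initial_channel': 128,
         'use_pitch_embed': False}
    gen = HifiGanGenerator(h)
    mel = torch.randn(1, 80, 32)                      # [B, n_mels, T_frames]
    wav = gen(mel)                                    # [B, 1, T_frames * 8 * 8 * 2 * 2]
    assert wav.shape == (1, 1, 32 * 8 * 8 * 2 * 2)
    return wav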
class DiscriminatorP(torch.nn.Module):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False, use_cond=False, c_in=1):
super(DiscriminatorP, self).__init__()
self.use_cond = use_cond
if use_cond:
from utils.hparams import hparams
t = hparams['hop_size']
self.cond_net = torch.nn.ConvTranspose1d(80, 1, t * 2, stride=t, padding=t // 2)
c_in = 2
self.period = period
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
self.convs = nn.ModuleList([
norm_f(Conv2d(c_in, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
])
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x, mel):
fmap = []
if self.use_cond:
x_mel = self.cond_net(mel)
x = torch.cat([x_mel, x], 1)
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
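# MultiPeriodDiscriminator applies DiscriminatorP with the prime periods 2, 3, 5, 7 and 11,
# so each sub-discriminator sees the waveform folded into 2-D at a different periodicity
# (the multi-period discriminator design from HiFi-GAN).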
class MultiPeriodDiscriminator(torch.nn.Module):
def __init__(self, use_cond=False, c_in=1):
super(MultiPeriodDiscriminator, self).__init__()
self.discriminators = nn.ModuleList([
DiscriminatorP(2, use_cond=use_cond, c_in=c_in),
DiscriminatorP(3, use_cond=use_cond, c_in=c_in),
DiscriminatorP(5, use_cond=use_cond, c_in=c_in),
DiscriminatorP(7, use_cond=use_cond, c_in=c_in),
DiscriminatorP(11, use_cond=use_cond, c_in=c_in),
])
def forward(self, y, y_hat, mel=None):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(y, mel)
y_d_g, fmap_g = d(y_hat, mel)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorS(torch.nn.Module):
def __init__(self, use_spectral_norm=False, use_cond=False, upsample_rates=None, c_in=1):
super(DiscriminatorS, self).__init__()
self.use_cond = use_cond
if use_cond:
t = np.prod(upsample_rates)
self.cond_net = torch.nn.ConvTranspose1d(80, 1, t * 2, stride=t, padding=t // 2)
c_in = 2
        norm_f = weight_norm if not use_spectral_norm else spectral_norm
self.convs = nn.ModuleList([
norm_f(Conv1d(c_in, 128, 15, 1, padding=7)),
norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),
norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),
norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
])
self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
def forward(self, x, mel):
if self.use_cond:
x_mel = self.cond_net(mel)
x = torch.cat([x_mel, x], 1)
fmap = []
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiScaleDiscriminator(torch.nn.Module):
def __init__(self, use_cond=False, c_in=1):
super(MultiScaleDiscriminator, self).__init__()
from utils.hparams import hparams
self.discriminators = nn.ModuleList([
DiscriminatorS(use_spectral_norm=True, use_cond=use_cond,
upsample_rates=[4, 4, hparams['hop_size'] // 16],
c_in=c_in),
DiscriminatorS(use_cond=use_cond,
upsample_rates=[4, 4, hparams['hop_size'] // 32],
c_in=c_in),
DiscriminatorS(use_cond=use_cond,
upsample_rates=[4, 4, hparams['hop_size'] // 64],
c_in=c_in),
])
self.meanpools = nn.ModuleList([
AvgPool1d(4, 2, padding=1),
AvgPool1d(4, 2, padding=1)
])
def forward(self, y, y_hat, mel=None):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
if i != 0:
y = self.meanpools[i - 1](y)
y_hat = self.meanpools[i - 1](y_hat)
y_d_r, fmap_r = d(y, mel)
y_d_g, fmap_g = d(y_hat, mel)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
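# MultiScaleDiscriminator evaluates the raw waveform plus two average-pooled (x2 and x4)
# versions, following the multi-scale discriminator design of MelGAN/HiFi-GAN; only the
# first sub-discriminator uses spectral norm, the others use weight norm.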
def feature_loss(fmap_r, fmap_g):
loss = 0
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
loss += torch.mean(torch.abs(rl - gl))
return loss * 2
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
r_losses = 0
g_losses = 0
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1 - dr) ** 2)
g_loss = torch.mean(dg ** 2)
r_losses += r_loss
g_losses += g_loss
r_losses = r_losses / len(disc_real_outputs)
g_losses = g_losses / len(disc_real_outputs)
return r_losses, g_losses
def cond_discriminator_loss(outputs):
loss = 0
for dg in outputs:
g_loss = torch.mean(dg ** 2)
loss += g_loss
loss = loss / len(outputs)
return loss
def generator_loss(disc_outputs):
loss = 0
for dg in disc_outputs:
l = torch.mean((1 - dg) ** 2)
loss += l
loss = loss / len(disc_outputs)
return loss
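# A minimal, self-contained usage sketch of how the LSGAN-style criteria above are typically
# combined during adversarial vocoder training. The tensors below are random placeholders for
# discriminator outputs and feature maps, and the relative weighting of the feature-matching
# term is a training hyperparameter rather than anything fixed by this module.
if __name__ == '__main__':
    _real_outs = [torch.rand(2, 10) for _ in range(3)]
    _fake_outs = [torch.rand(2, 10) for _ in range(3)]
    _fmap_r = [[torch.rand(2, 4, 50), torch.rand(2, 8, 25)] for _ in range(3)]
    _fmap_g = [[torch.rand(2, 4, 50), torch.rand(2, 8, 25)] for _ in range(3)]
    _r_loss, _g_loss_d = discriminator_loss(_real_outs, _fake_outs)
    _disc_loss = _r_loss + _g_loss_d
    _gen_loss = generator_loss(_fake_outs) + feature_loss(_fmap_r, _fmap_g)
    print('disc loss:', _disc_loss.item(), 'gen loss:', _gen_loss.item())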
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/hifigan/hifigan.py |
import math
import random
from functools import partial
from inspect import isfunction
from pathlib import Path
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from tqdm import tqdm
from einops import rearrange
from modules.fastspeech.fs2 import FastSpeech2
from modules.diffsinger_midi.fs2 import FastSpeech2MIDI
from utils.hparams import hparams
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def cycle(dl):
while True:
for data in dl:
yield data
def num_to_groups(num, divisor):
groups = num // divisor
remainder = num % divisor
arr = [divisor] * groups
if remainder > 0:
arr.append(remainder)
return arr
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
device = x.device
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
emb = x[:, None] * emb[None, :]
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb
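# Mish activation (Misra, 2019): f(x) = x * tanh(softplus(x)); a smooth, non-monotonic
# alternative to ReLU used in the blocks below.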
class Mish(nn.Module):
def forward(self, x):
return x * torch.tanh(F.softplus(x))
class Upsample(nn.Module):
def __init__(self, dim):
super().__init__()
self.conv = nn.ConvTranspose2d(dim, dim, 4, 2, 1)
def forward(self, x):
return self.conv(x)
class Downsample(nn.Module):
def __init__(self, dim):
super().__init__()
self.conv = nn.Conv2d(dim, dim, 3, 2, 1)
def forward(self, x):
return self.conv(x)
class Rezero(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
self.g = nn.Parameter(torch.zeros(1))
def forward(self, x):
return self.fn(x) * self.g
# building block modules
class Block(nn.Module):
def __init__(self, dim, dim_out, groups=8):
super().__init__()
self.block = nn.Sequential(
nn.Conv2d(dim, dim_out, 3, padding=1),
nn.GroupNorm(groups, dim_out),
Mish()
)
def forward(self, x):
return self.block(x)
class ResnetBlock(nn.Module):
def __init__(self, dim, dim_out, *, time_emb_dim, groups=8):
super().__init__()
self.mlp = nn.Sequential(
Mish(),
nn.Linear(time_emb_dim, dim_out)
)
self.block1 = Block(dim, dim_out)
self.block2 = Block(dim_out, dim_out)
self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else nn.Identity()
def forward(self, x, time_emb):
h = self.block1(x)
h += self.mlp(time_emb)[:, :, None, None]
h = self.block2(h)
return h + self.res_conv(x)
class LinearAttention(nn.Module):
def __init__(self, dim, heads=4, dim_head=32):
super().__init__()
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
self.to_out = nn.Conv2d(hidden_dim, dim, 1)
def forward(self, x):
b, c, h, w = x.shape
qkv = self.to_qkv(x)
q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads=self.heads, qkv=3)
k = k.softmax(dim=-1)
context = torch.einsum('bhdn,bhen->bhde', k, v)
out = torch.einsum('bhde,bhdn->bhen', context, q)
out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w)
return self.to_out(out)
# gaussian diffusion trainer class
def extract(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t)
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
def noise_like(shape, device, repeat=False):
repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
noise = lambda: torch.randn(shape, device=device)
return repeat_noise() if repeat else noise()
def cosine_beta_schedule(timesteps, s=0.008):
"""
cosine schedule
as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
"""
steps = timesteps + 1
x = np.linspace(0, steps, steps)
alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return np.clip(betas, a_min=0, a_max=0.999)
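# cosine_beta_schedule follows Nichol & Dhariwal (2021): alpha_bar(t) = cos((t/T + s) / (1 + s) * pi/2)^2
# and beta_t = 1 - alpha_bar(t) / alpha_bar(t-1); the clip at 0.999 avoids a degenerate final step.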
class GaussianDiffusion(nn.Module):
def __init__(self, phone_encoder, out_dims, denoise_fn,
timesteps=1000, loss_type='l1', betas=None, spec_min=None, spec_max=None):
super().__init__()
self.denoise_fn = denoise_fn
if hparams.get('use_midi') is not None and hparams['use_midi']:
self.fs2 = FastSpeech2MIDI(phone_encoder, out_dims)
else:
self.fs2 = FastSpeech2(phone_encoder, out_dims)
self.fs2.decoder = None
self.mel_bins = out_dims
if exists(betas):
betas = betas.detach().cpu().numpy() if isinstance(betas, torch.Tensor) else betas
else:
betas = cosine_beta_schedule(timesteps)
alphas = 1. - betas
alphas_cumprod = np.cumprod(alphas, axis=0)
alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
self.loss_type = loss_type
to_torch = partial(torch.tensor, dtype=torch.float32)
self.register_buffer('betas', to_torch(betas))
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
self.register_buffer('posterior_variance', to_torch(posterior_variance))
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
self.register_buffer('posterior_mean_coef1', to_torch(
betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
self.register_buffer('posterior_mean_coef2', to_torch(
(1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
self.register_buffer('spec_min', torch.FloatTensor(spec_min)[None, None, :hparams['keep_bins']])
self.register_buffer('spec_max', torch.FloatTensor(spec_max)[None, None, :hparams['keep_bins']])
def q_mean_variance(self, x_start, t):
mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
variance = extract(1. - self.alphas_cumprod, t, x_start.shape)
log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return mean, variance, log_variance
def predict_start_from_noise(self, x_t, t, noise):
return (
extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
)
def q_posterior(self, x_start, x_t, t):
posterior_mean = (
extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, x, t, cond, clip_denoised: bool):
noise_pred = self.denoise_fn(x, t, cond=cond)
x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred)
if clip_denoised:
x_recon.clamp_(-1., 1.)
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
return model_mean, posterior_variance, posterior_log_variance
@torch.no_grad()
def p_sample(self, x, t, cond, clip_denoised=True, repeat_noise=False):
b, *_, device = *x.shape, x.device
model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, cond=cond, clip_denoised=clip_denoised)
noise = noise_like(x.shape, device, repeat_noise)
# no noise when t == 0
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
return (
extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
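    # q_sample draws from the closed-form forward process q(x_t | x_0) = N(sqrt(alpha_bar_t) * x_0,
    # (1 - alpha_bar_t) * I), i.e. x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps.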
def p_losses(self, x_start, t, cond, noise=None, nonpadding=None):
noise = default(noise, lambda: torch.randn_like(x_start))
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
x_recon = self.denoise_fn(x_noisy, t, cond)
if self.loss_type == 'l1':
if nonpadding is not None:
loss = ((noise - x_recon).abs() * nonpadding.unsqueeze(1)).mean()
else:
# print('are you sure w/o nonpadding?')
loss = (noise - x_recon).abs().mean()
elif self.loss_type == 'l2':
loss = F.mse_loss(noise, x_recon)
else:
raise NotImplementedError()
return loss
def forward(self, txt_tokens, mel2ph=None, spk_embed=None,
ref_mels=None, f0=None, uv=None, energy=None, infer=False):
b, *_, device = *txt_tokens.shape, txt_tokens.device
ret = self.fs2(txt_tokens, mel2ph, spk_embed, ref_mels, f0, uv, energy,
skip_decoder=True, infer=infer)
cond = ret['decoder_inp'].transpose(1, 2)
if not infer:
t = torch.randint(0, self.num_timesteps, (b,), device=device).long()
x = ref_mels
x = self.norm_spec(x)
x = x.transpose(1, 2)[:, None, :, :] # [B, 1, M, T]
nonpadding = (mel2ph != 0).float()
ret['diff_loss'] = self.p_losses(x, t, cond, nonpadding=nonpadding)
else:
t = self.num_timesteps
shape = (cond.shape[0], 1, self.mel_bins, cond.shape[2])
x = torch.randn(shape, device=device)
for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t):
x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond)
x = x[:, 0].transpose(1, 2)
ret['mel_out'] = self.denorm_spec(x)
return ret
def norm_spec(self, x):
return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1
def denorm_spec(self, x):
return (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min
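    # norm_spec/denorm_spec map mel values linearly between [spec_min, spec_max] and [-1, 1],
    # which matches the clamp range used in p_mean_variance when clip_denoised=True.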
def cwt2f0_norm(self, cwt_spec, mean, std, mel2ph):
return self.fs2.cwt2f0_norm(cwt_spec, mean, std, mel2ph)
def out2mel(self, x):
return x
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/diff/diffusion.py |
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from math import sqrt
from .diffusion import Mish
from utils.hparams import hparams
Linear = nn.Linear
ConvTranspose2d = nn.ConvTranspose2d
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
def override(self, attrs):
if isinstance(attrs, dict):
self.__dict__.update(**attrs)
elif isinstance(attrs, (list, tuple, set)):
for attr in attrs:
self.override(attr)
elif attrs is not None:
raise NotImplementedError
return self
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
device = x.device
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
emb = x[:, None] * emb[None, :]
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb
def Conv1d(*args, **kwargs):
layer = nn.Conv1d(*args, **kwargs)
nn.init.kaiming_normal_(layer.weight)
return layer
@torch.jit.script
def silu(x):
return x * torch.sigmoid(x)
class ResidualBlock(nn.Module):
def __init__(self, encoder_hidden, residual_channels, dilation):
super().__init__()
self.dilated_conv = Conv1d(residual_channels, 2 * residual_channels, 3, padding=dilation, dilation=dilation)
self.diffusion_projection = Linear(residual_channels, residual_channels)
self.conditioner_projection = Conv1d(encoder_hidden, 2 * residual_channels, 1)
self.output_projection = Conv1d(residual_channels, 2 * residual_channels, 1)
def forward(self, x, conditioner, diffusion_step):
diffusion_step = self.diffusion_projection(diffusion_step).unsqueeze(-1)
conditioner = self.conditioner_projection(conditioner)
y = x + diffusion_step
y = self.dilated_conv(y) + conditioner
gate, filter = torch.chunk(y, 2, dim=1)
y = torch.sigmoid(gate) * torch.tanh(filter)
y = self.output_projection(y)
residual, skip = torch.chunk(y, 2, dim=1)
return (x + residual) / sqrt(2.0), skip
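# ResidualBlock uses the WaveNet-style gated unit sigmoid(gate) * tanh(filter), conditioned on
# the encoder output and the diffusion-step embedding; the residual path is scaled by 1/sqrt(2)
# to keep the activation variance roughly constant across layers.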
class DiffNet(nn.Module):
def __init__(self, in_dims=80):
super().__init__()
self.params = params = AttrDict(
# Model params
encoder_hidden=hparams['hidden_size'],
residual_layers=hparams['residual_layers'],
residual_channels=hparams['residual_channels'],
dilation_cycle_length=hparams['dilation_cycle_length'],
)
self.input_projection = Conv1d(in_dims, params.residual_channels, 1)
self.diffusion_embedding = SinusoidalPosEmb(params.residual_channels)
dim = params.residual_channels
self.mlp = nn.Sequential(
nn.Linear(dim, dim * 4),
Mish(),
nn.Linear(dim * 4, dim)
)
self.residual_layers = nn.ModuleList([
ResidualBlock(params.encoder_hidden, params.residual_channels, 2 ** (i % params.dilation_cycle_length))
for i in range(params.residual_layers)
])
self.skip_projection = Conv1d(params.residual_channels, params.residual_channels, 1)
self.output_projection = Conv1d(params.residual_channels, in_dims, 1)
nn.init.zeros_(self.output_projection.weight)
def forward(self, spec, diffusion_step, cond):
"""
:param spec: [B, 1, M, T]
:param diffusion_step: [B, 1]
:param cond: [B, M, T]
:return:
"""
x = spec[:, 0]
x = self.input_projection(x) # x [B, residual_channel, T]
x = F.relu(x)
diffusion_step = self.diffusion_embedding(diffusion_step)
diffusion_step = self.mlp(diffusion_step)
skip = []
for layer_id, layer in enumerate(self.residual_layers):
x, skip_connection = layer(x, cond, diffusion_step)
skip.append(skip_connection)
x = torch.sum(torch.stack(skip), dim=0) / sqrt(len(self.residual_layers))
x = self.skip_projection(x)
x = F.relu(x)
x = self.output_projection(x) # [B, 80, T]
return x[:, None, :, :]
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/diff/net.py |
import math
import random
from collections import deque
from functools import partial
from inspect import isfunction
from pathlib import Path
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from tqdm import tqdm
from einops import rearrange
from modules.fastspeech.fs2 import FastSpeech2
from modules.diffsinger_midi.fs2 import FastSpeech2MIDI
from utils.hparams import hparams
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
# gaussian diffusion trainer class
def extract(a, t, x_shape):
b, *_ = t.shape
out = a.gather(-1, t)
return out.reshape(b, *((1,) * (len(x_shape) - 1)))
def noise_like(shape, device, repeat=False):
repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
noise = lambda: torch.randn(shape, device=device)
return repeat_noise() if repeat else noise()
def linear_beta_schedule(timesteps, max_beta=hparams.get('max_beta', 0.01)):
"""
linear schedule
"""
betas = np.linspace(1e-4, max_beta, timesteps)
return betas
def cosine_beta_schedule(timesteps, s=0.008):
"""
cosine schedule
as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
"""
steps = timesteps + 1
x = np.linspace(0, steps, steps)
alphas_cumprod = np.cos(((x / steps) + s) / (1 + s) * np.pi * 0.5) ** 2
alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
return np.clip(betas, a_min=0, a_max=0.999)
beta_schedule = {
"cosine": cosine_beta_schedule,
"linear": linear_beta_schedule,
}
class GaussianDiffusion(nn.Module):
def __init__(self, phone_encoder, out_dims, denoise_fn,
timesteps=1000, K_step=1000, loss_type=hparams.get('diff_loss_type', 'l1'), betas=None, spec_min=None, spec_max=None):
super().__init__()
self.denoise_fn = denoise_fn
if hparams.get('use_midi') is not None and hparams['use_midi']:
self.fs2 = FastSpeech2MIDI(phone_encoder, out_dims)
else:
self.fs2 = FastSpeech2(phone_encoder, out_dims)
self.mel_bins = out_dims
if exists(betas):
betas = betas.detach().cpu().numpy() if isinstance(betas, torch.Tensor) else betas
else:
if 'schedule_type' in hparams.keys():
betas = beta_schedule[hparams['schedule_type']](timesteps)
else:
betas = cosine_beta_schedule(timesteps)
alphas = 1. - betas
alphas_cumprod = np.cumprod(alphas, axis=0)
alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
self.K_step = K_step
self.loss_type = loss_type
self.noise_list = deque(maxlen=4)
to_torch = partial(torch.tensor, dtype=torch.float32)
self.register_buffer('betas', to_torch(betas))
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
self.register_buffer('posterior_variance', to_torch(posterior_variance))
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
self.register_buffer('posterior_mean_coef1', to_torch(
betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
self.register_buffer('posterior_mean_coef2', to_torch(
(1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
self.register_buffer('spec_min', torch.FloatTensor(spec_min)[None, None, :hparams['keep_bins']])
self.register_buffer('spec_max', torch.FloatTensor(spec_max)[None, None, :hparams['keep_bins']])
def q_mean_variance(self, x_start, t):
mean = extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start
variance = extract(1. - self.alphas_cumprod, t, x_start.shape)
log_variance = extract(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return mean, variance, log_variance
def predict_start_from_noise(self, x_t, t, noise):
return (
extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
)
def q_posterior(self, x_start, x_t, t):
posterior_mean = (
extract(self.posterior_mean_coef1, t, x_t.shape) * x_start +
extract(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, x, t, cond, clip_denoised: bool):
noise_pred = self.denoise_fn(x, t, cond=cond)
x_recon = self.predict_start_from_noise(x, t=t, noise=noise_pred)
if clip_denoised:
x_recon.clamp_(-1., 1.)
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
return model_mean, posterior_variance, posterior_log_variance
@torch.no_grad()
def p_sample(self, x, t, cond, clip_denoised=True, repeat_noise=False):
b, *_, device = *x.shape, x.device
model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, cond=cond, clip_denoised=clip_denoised)
noise = noise_like(x.shape, device, repeat_noise)
# no noise when t == 0
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
@torch.no_grad()
def p_sample_plms(self, x, t, interval, cond, clip_denoised=True, repeat_noise=False):
"""
Use the PLMS method from [Pseudo Numerical Methods for Diffusion Models on Manifolds](https://arxiv.org/abs/2202.09778).
"""
def get_x_pred(x, noise_t, t):
a_t = extract(self.alphas_cumprod, t, x.shape)
if t[0] < interval:
a_prev = torch.ones_like(a_t)
else:
a_prev = extract(self.alphas_cumprod, torch.max(t-interval, torch.zeros_like(t)), x.shape)
a_t_sq, a_prev_sq = a_t.sqrt(), a_prev.sqrt()
x_delta = (a_prev - a_t) * ((1 / (a_t_sq * (a_t_sq + a_prev_sq))) * x - 1 / (a_t_sq * (((1 - a_prev) * a_t).sqrt() + ((1 - a_t) * a_prev).sqrt())) * noise_t)
x_pred = x + x_delta
return x_pred
noise_list = self.noise_list
noise_pred = self.denoise_fn(x, t, cond=cond)
if len(noise_list) == 0:
x_pred = get_x_pred(x, noise_pred, t)
noise_pred_prev = self.denoise_fn(x_pred, max(t-interval, 0), cond=cond)
noise_pred_prime = (noise_pred + noise_pred_prev) / 2
elif len(noise_list) == 1:
noise_pred_prime = (3 * noise_pred - noise_list[-1]) / 2
elif len(noise_list) == 2:
noise_pred_prime = (23 * noise_pred - 16 * noise_list[-1] + 5 * noise_list[-2]) / 12
elif len(noise_list) >= 3:
noise_pred_prime = (55 * noise_pred - 59 * noise_list[-1] + 37 * noise_list[-2] - 9 * noise_list[-3]) / 24
x_prev = get_x_pred(x, noise_pred_prime, t)
noise_list.append(noise_pred)
return x_prev
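    # The noise_list branches above apply 1st- to 4th-order linear multistep (Adams-Bashforth style)
    # combinations of the current and previous noise predictions, which is how PLMS reuses past
    # network evaluations to take larger sampling steps.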
def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
return (
extract(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
)
def p_losses(self, x_start, t, cond, noise=None, nonpadding=None):
noise = default(noise, lambda: torch.randn_like(x_start))
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
x_recon = self.denoise_fn(x_noisy, t, cond)
if self.loss_type == 'l1':
if nonpadding is not None:
loss = ((noise - x_recon).abs() * nonpadding.unsqueeze(1)).mean()
else:
# print('are you sure w/o nonpadding?')
loss = (noise - x_recon).abs().mean()
elif self.loss_type == 'l2':
loss = F.mse_loss(noise, x_recon)
else:
raise NotImplementedError()
return loss
def forward(self, txt_tokens, mel2ph=None, spk_embed=None,
ref_mels=None, f0=None, uv=None, energy=None, infer=False, **kwargs):
b, *_, device = *txt_tokens.shape, txt_tokens.device
ret = self.fs2(txt_tokens, mel2ph, spk_embed, ref_mels, f0, uv, energy,
skip_decoder=(not infer), infer=infer, **kwargs)
cond = ret['decoder_inp'].transpose(1, 2)
if not infer:
t = torch.randint(0, self.K_step, (b,), device=device).long()
x = ref_mels
x = self.norm_spec(x)
x = x.transpose(1, 2)[:, None, :, :] # [B, 1, M, T]
ret['diff_loss'] = self.p_losses(x, t, cond)
# nonpadding = (mel2ph != 0).float()
# ret['diff_loss'] = self.p_losses(x, t, cond, nonpadding=nonpadding)
else:
ret['fs2_mel'] = ret['mel_out']
fs2_mels = ret['mel_out']
t = self.K_step
fs2_mels = self.norm_spec(fs2_mels)
fs2_mels = fs2_mels.transpose(1, 2)[:, None, :, :]
x = self.q_sample(x_start=fs2_mels, t=torch.tensor([t - 1], device=device).long())
if hparams.get('gaussian_start') is not None and hparams['gaussian_start']:
                print('===> gaussian start.')
shape = (cond.shape[0], 1, self.mel_bins, cond.shape[2])
x = torch.randn(shape, device=device)
if hparams.get('pndm_speedup'):
print('===> pndm speed:', hparams['pndm_speedup'])
self.noise_list = deque(maxlen=4)
iteration_interval = hparams['pndm_speedup']
for i in tqdm(reversed(range(0, t, iteration_interval)), desc='sample time step',
total=t // iteration_interval):
x = self.p_sample_plms(x, torch.full((b,), i, device=device, dtype=torch.long), iteration_interval,
cond)
else:
for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t):
x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond)
x = x[:, 0].transpose(1, 2)
if mel2ph is not None: # for singing
ret['mel_out'] = self.denorm_spec(x) * ((mel2ph > 0).float()[:, :, None])
else:
ret['mel_out'] = self.denorm_spec(x)
return ret
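    # Shallow-diffusion inference (as in DiffSinger): instead of starting from pure Gaussian noise,
    # the FastSpeech2 mel prediction is diffused forward to step K_step - 1 with q_sample and then
    # denoised from there; 'gaussian_start' switches back to a full Gaussian start, and
    # 'pndm_speedup' enables the PLMS sampler above.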
def norm_spec(self, x):
return (x - self.spec_min) / (self.spec_max - self.spec_min) * 2 - 1
def denorm_spec(self, x):
return (x + 1) / 2 * (self.spec_max - self.spec_min) + self.spec_min
def cwt2f0_norm(self, cwt_spec, mean, std, mel2ph):
return self.fs2.cwt2f0_norm(cwt_spec, mean, std, mel2ph)
def out2mel(self, x):
return x
class OfflineGaussianDiffusion(GaussianDiffusion):
def forward(self, txt_tokens, mel2ph=None, spk_embed=None,
ref_mels=None, f0=None, uv=None, energy=None, infer=False, **kwargs):
b, *_, device = *txt_tokens.shape, txt_tokens.device
ret = self.fs2(txt_tokens, mel2ph, spk_embed, ref_mels, f0, uv, energy,
skip_decoder=True, infer=True, **kwargs)
cond = ret['decoder_inp'].transpose(1, 2)
fs2_mels = ref_mels[1]
ref_mels = ref_mels[0]
if not infer:
t = torch.randint(0, self.K_step, (b,), device=device).long()
x = ref_mels
x = self.norm_spec(x)
x = x.transpose(1, 2)[:, None, :, :] # [B, 1, M, T]
ret['diff_loss'] = self.p_losses(x, t, cond)
else:
t = self.K_step
fs2_mels = self.norm_spec(fs2_mels)
fs2_mels = fs2_mels.transpose(1, 2)[:, None, :, :]
x = self.q_sample(x_start=fs2_mels, t=torch.tensor([t - 1], device=device).long())
if hparams.get('gaussian_start') is not None and hparams['gaussian_start']:
                print('===> gaussian start.')
shape = (cond.shape[0], 1, self.mel_bins, cond.shape[2])
x = torch.randn(shape, device=device)
for i in tqdm(reversed(range(0, t)), desc='sample time step', total=t):
x = self.p_sample(x, torch.full((b,), i, device=device, dtype=torch.long), cond)
x = x[:, 0].transpose(1, 2)
ret['mel_out'] = self.denorm_spec(x)
return ret
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/diff/shallow_diffusion_tts.py |
from modules.fastspeech.tts_modules import FastspeechDecoder
# from modules.fastspeech.fast_tacotron import DecoderRNN
# from modules.fastspeech.speedy_speech.speedy_speech import ConvBlocks
# from modules.fastspeech.conformer.conformer import ConformerDecoder
import torch
from torch.nn import functional as F
import torch.nn as nn
import math
from utils.hparams import hparams
from .diffusion import Mish
Linear = nn.Linear
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
device = x.device
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
emb = x[:, None] * emb[None, :]
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb
def Conv1d(*args, **kwargs):
layer = nn.Conv1d(*args, **kwargs)
nn.init.kaiming_normal_(layer.weight)
return layer
class FFT(FastspeechDecoder):
def __init__(self, hidden_size=None, num_layers=None, kernel_size=None, num_heads=None):
super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads)
dim = hparams['residual_channels']
self.input_projection = Conv1d(hparams['audio_num_mel_bins'], dim, 1)
self.diffusion_embedding = SinusoidalPosEmb(dim)
self.mlp = nn.Sequential(
nn.Linear(dim, dim * 4),
Mish(),
nn.Linear(dim * 4, dim)
)
self.get_mel_out = Linear(hparams['hidden_size'], 80, bias=True)
self.get_decode_inp = Linear(hparams['hidden_size'] + dim + dim,
                                     hparams['hidden_size'])  # hs + dim + dim -> hs
def forward(self, spec, diffusion_step, cond, padding_mask=None, attn_mask=None, return_hiddens=False):
"""
:param spec: [B, 1, 80, T]
:param diffusion_step: [B, 1]
:param cond: [B, M, T]
:return:
"""
x = spec[:, 0]
x = self.input_projection(x).permute([0, 2, 1]) # [B, T, residual_channel]
diffusion_step = self.diffusion_embedding(diffusion_step)
diffusion_step = self.mlp(diffusion_step) # [B, dim]
cond = cond.permute([0, 2, 1]) # [B, T, M]
seq_len = cond.shape[1] # [T_mel]
time_embed = diffusion_step[:, None, :] # [B, 1, dim]
        time_embed = time_embed.repeat([1, seq_len, 1])  # [B, T, dim]
decoder_inp = torch.cat([x, cond, time_embed], dim=-1) # [B, T, dim + H + dim]
decoder_inp = self.get_decode_inp(decoder_inp) # [B, T, H]
x = decoder_inp
'''
Required x: [B, T, C]
:return: [B, T, C] or [L, B, T, C]
'''
padding_mask = x.abs().sum(-1).eq(0).data if padding_mask is None else padding_mask
nonpadding_mask_TB = 1 - padding_mask.transpose(0, 1).float()[:, :, None] # [T, B, 1]
if self.use_pos_embed:
positions = self.pos_embed_alpha * self.embed_positions(x[..., 0])
x = x + positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1) * nonpadding_mask_TB
hiddens = []
for layer in self.layers:
x = layer(x, encoder_padding_mask=padding_mask, attn_mask=attn_mask) * nonpadding_mask_TB
hiddens.append(x)
if self.use_last_norm:
x = self.layer_norm(x) * nonpadding_mask_TB
if return_hiddens:
x = torch.stack(hiddens, 0) # [L, T, B, C]
x = x.transpose(1, 2) # [L, B, T, C]
else:
x = x.transpose(0, 1) # [B, T, C]
x = self.get_mel_out(x).permute([0, 2, 1]) # [B, 80, T]
return x[:, None, :, :] | EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/diff/candidate_decoder.py |
import matplotlib
matplotlib.use('Agg')
from data_gen.tts.data_gen_utils import get_pitch
from modules.fastspeech.tts_modules import mel2ph_to_dur
import matplotlib.pyplot as plt
from utils import audio
from utils.pitch_utils import norm_interp_f0, denorm_f0, f0_to_coarse
from vocoders.base_vocoder import get_vocoder_cls
import json
from utils.plot import spec_to_figure
from utils.hparams import hparams
import torch
import torch.optim
import torch.nn.functional as F
import torch.utils.data
from modules.GenerSpeech.task.dataset import GenerSpeech_dataset
from modules.GenerSpeech.model.generspeech import GenerSpeech
import torch.distributions
import numpy as np
from utils.tts_utils import select_attn
import utils
import os
from tasks.tts.fs2 import FastSpeech2Task
class GenerSpeechTask(FastSpeech2Task):
def __init__(self):
super(GenerSpeechTask, self).__init__()
self.dataset_cls = GenerSpeech_dataset
def build_tts_model(self):
self.model = GenerSpeech(self.phone_encoder)
def build_model(self):
self.build_tts_model()
if hparams['load_ckpt'] != '':
self.load_ckpt(hparams['load_ckpt'], strict=False)
utils.num_params(self.model)
return self.model
def run_model(self, model, sample, return_output=False):
txt_tokens = sample['txt_tokens'] # [B, T_t]
target = sample['mels'] # [B, T_s, 80]
mel2ph = sample['mel2ph'] # [B, T_s]
mel2word = sample['mel2word']
f0 = sample['f0'] # [B, T_s]
uv = sample['uv'] # [B, T_s] 0/1
spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
emo_embed = sample.get('emo_embed')
output = model(txt_tokens, mel2ph=mel2ph, ref_mel2ph=mel2ph, ref_mel2word=mel2word, spk_embed=spk_embed, emo_embed=emo_embed,
ref_mels=target, f0=f0, uv=uv, tgt_mels=target, global_steps=self.global_step, infer=False)
losses = {}
losses['postflow'] = output['postflow']
if self.global_step > hparams['forcing']:
losses['gloss'] = (output['gloss_utter'] + output['gloss_ph'] + output['gloss_word']) / 3
if self.global_step > hparams['vq_start']:
losses['vq_loss'] = (output['vq_loss_utter'] + output['vq_loss_ph'] + output['vq_loss_word']) / 3
losses['ppl_utter'] = output['ppl_utter']
losses['ppl_ph'] = output['ppl_ph']
losses['ppl_word'] = output['ppl_word']
self.add_mel_loss(output['mel_out'], target, losses)
self.add_dur_loss(output['dur'], mel2ph, txt_tokens, losses=losses)
if hparams['use_pitch_embed']:
self.add_pitch_loss(output, sample, losses)
output['select_attn'] = select_attn(output['attn_ph'])
if not return_output:
return losses
else:
return losses, output
def validation_step(self, sample, batch_idx):
outputs = {}
outputs['losses'] = {}
outputs['losses'], model_out = self.run_model(self.model, sample, return_output=True)
outputs['total_loss'] = sum(outputs['losses'].values())
outputs['nsamples'] = sample['nsamples']
encdec_attn = model_out['select_attn']
mel_out = self.model.out2mel(model_out['mel_out'])
outputs = utils.tensors_to_scalars(outputs)
if self.global_step % hparams['valid_infer_interval'] == 0 \
and batch_idx < hparams['num_valid_plots']:
vmin = hparams['mel_vmin']
vmax = hparams['mel_vmax']
self.plot_mel(batch_idx, sample['mels'], mel_out)
self.plot_dur(batch_idx, sample, model_out)
if hparams['use_pitch_embed']:
self.plot_pitch(batch_idx, sample, model_out)
if self.vocoder is None:
self.vocoder = get_vocoder_cls(hparams)()
if self.global_step > 0:
spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
emo_embed = sample.get('emo_embed')
ref_mels = sample['mels']
mel2ph = sample['mel2ph'] # [B, T_s]
mel2word = sample['mel2word']
# with gt duration
model_out = self.model(sample['txt_tokens'], mel2ph=mel2ph, ref_mel2ph=mel2ph, ref_mel2word=mel2word, spk_embed=spk_embed,
emo_embed=emo_embed, ref_mels=ref_mels, global_steps=self.global_step, infer=True)
wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu())
self.logger.add_audio(f'wav_gtdur_{batch_idx}', wav_pred, self.global_step,
hparams['audio_sample_rate'])
self.logger.add_figure(f'ali_{batch_idx}', spec_to_figure(encdec_attn[0]), self.global_step)
self.logger.add_figure(
f'mel_gtdur_{batch_idx}',
spec_to_figure(model_out['mel_out'][0], vmin, vmax), self.global_step)
# with pred duration
model_out = self.model(sample['txt_tokens'], ref_mel2ph=mel2ph, ref_mel2word=mel2word, spk_embed=spk_embed, emo_embed=emo_embed, ref_mels=ref_mels,
global_steps=self.global_step, infer=True)
self.logger.add_figure(
f'mel_{batch_idx}',
spec_to_figure(model_out['mel_out'][0], vmin, vmax), self.global_step)
wav_pred = self.vocoder.spec2wav(model_out['mel_out'][0].cpu())
self.logger.add_audio(f'wav_{batch_idx}', wav_pred, self.global_step, hparams['audio_sample_rate'])
# gt wav
if self.global_step <= hparams['valid_infer_interval']:
mel_gt = sample['mels'][0].cpu()
wav_gt = self.vocoder.spec2wav(mel_gt)
                    self.logger.add_audio(f'wav_gt_{batch_idx}', wav_gt, self.global_step, hparams['audio_sample_rate'])
return outputs
############
# infer
############
def test_step(self, sample, batch_idx):
spk_embed = sample.get('spk_embed') if not hparams['use_spk_id'] else sample.get('spk_ids')
emo_embed = sample.get('emo_embed')
txt_tokens = sample['txt_tokens']
mel2ph, uv, f0 = None, None, None
ref_mel2word = sample['mel2word']
ref_mel2ph = sample['mel2ph']
ref_mels = sample['mels']
if hparams['use_gt_dur']:
mel2ph = sample['mel2ph']
if hparams['use_gt_f0']:
f0 = sample['f0']
uv = sample['uv']
global_steps = 200000
run_model = lambda: self.model(
txt_tokens, spk_embed=spk_embed, emo_embed=emo_embed, mel2ph=mel2ph, ref_mel2ph=ref_mel2ph, ref_mel2word=ref_mel2word,
f0=f0, uv=uv, ref_mels=ref_mels, global_steps=global_steps, infer=True)
outputs = run_model()
sample['outputs'] = self.model.out2mel(outputs['mel_out'])
sample['mel2ph_pred'] = outputs['mel2ph']
if hparams['use_pitch_embed']:
sample['f0'] = denorm_f0(sample['f0'], sample['uv'], hparams)
if hparams['pitch_type'] == 'ph':
sample['f0'] = torch.gather(F.pad(sample['f0'], [1, 0]), 1, sample['mel2ph'])
sample['f0_pred'] = outputs.get('f0_denorm')
return self.after_infer(sample)
def after_infer(self, predictions, sil_start_frame=0):
predictions = utils.unpack_dict_to_list(predictions)
assert len(predictions) == 1, 'Only support batch_size=1 in inference.'
prediction = predictions[0]
prediction = utils.tensors_to_np(prediction)
item_name = prediction.get('item_name')
text = prediction.get('text')
ph_tokens = prediction.get('txt_tokens')
mel_gt = prediction["mels"]
mel2ph_gt = prediction.get("mel2ph")
mel2ph_gt = mel2ph_gt if mel2ph_gt is not None else None
mel_pred = prediction["outputs"]
mel2ph_pred = prediction.get("mel2ph_pred")
f0_gt = prediction.get("f0")
f0_pred = prediction.get("f0_pred")
str_phs = None
if self.phone_encoder is not None and 'txt_tokens' in prediction:
str_phs = self.phone_encoder.decode(prediction['txt_tokens'], strip_padding=True)
if 'encdec_attn' in prediction:
encdec_attn = prediction['encdec_attn'] # (1, Tph, Tmel)
encdec_attn = encdec_attn[encdec_attn.max(-1).sum(-1).argmax(-1)]
txt_lengths = prediction.get('txt_lengths')
encdec_attn = encdec_attn.T[:, :txt_lengths]
else:
encdec_attn = None
wav_pred = self.vocoder.spec2wav(mel_pred, f0=f0_pred)
wav_pred[:sil_start_frame * hparams['hop_size']] = 0
gen_dir = self.gen_dir
base_fn = f'[{self.results_id:06d}][{item_name}][%s]'
# if text is not None:
# base_fn += text.replace(":", "%3A")[:80]
base_fn = base_fn.replace(' ', '_')
if not hparams['profile_infer']:
os.makedirs(gen_dir, exist_ok=True)
os.makedirs(f'{gen_dir}/wavs', exist_ok=True)
os.makedirs(f'{gen_dir}/plot', exist_ok=True)
if hparams.get('save_mel_npy', False):
os.makedirs(f'{gen_dir}/npy', exist_ok=True)
if 'encdec_attn' in prediction:
os.makedirs(f'{gen_dir}/attn_plot', exist_ok=True)
self.saving_results_futures.append(
self.saving_result_pool.apply_async(self.save_result, args=[
wav_pred, mel_pred, base_fn % 'TTS', gen_dir, str_phs, mel2ph_pred, encdec_attn]))
if mel_gt is not None and hparams['save_gt']:
wav_gt = self.vocoder.spec2wav(mel_gt, f0=f0_gt)
self.saving_results_futures.append(
self.saving_result_pool.apply_async(self.save_result, args=[
wav_gt, mel_gt, base_fn % 'Ref', gen_dir, str_phs, mel2ph_gt]))
if hparams['save_f0']:
import matplotlib.pyplot as plt
f0_pred_, _ = get_pitch(wav_pred, mel_pred, hparams)
f0_gt_, _ = get_pitch(wav_gt, mel_gt, hparams)
fig = plt.figure()
plt.plot(f0_pred_, label=r'$\hat{f_0}$')
plt.plot(f0_gt_, label=r'$f_0$')
plt.legend()
plt.tight_layout()
plt.savefig(f'{gen_dir}/plot/[F0][{item_name}]{text}.png', format='png')
plt.close(fig)
print(f"Pred_shape: {mel_pred.shape}, gt_shape: {mel_gt.shape}")
self.results_id += 1
return {
'item_name': item_name,
'text': text,
'ph_tokens': self.phone_encoder.decode(ph_tokens.tolist()),
'wav_fn_pred': base_fn % 'TTS',
'wav_fn_gt': base_fn % 'Ref',
}
@staticmethod
def save_result(wav_out, mel, base_fn, gen_dir, str_phs=None, mel2ph=None, alignment=None):
audio.save_wav(wav_out, f'{gen_dir}/wavs/{base_fn}.wav', hparams['audio_sample_rate'],
norm=hparams['out_wav_norm'])
fig = plt.figure(figsize=(14, 10))
spec_vmin = hparams['mel_vmin']
spec_vmax = hparams['mel_vmax']
heatmap = plt.pcolor(mel.T, vmin=spec_vmin, vmax=spec_vmax)
fig.colorbar(heatmap)
f0, _ = get_pitch(wav_out, mel, hparams)
f0 = f0 / 10 * (f0 > 0)
plt.plot(f0, c='white', linewidth=1, alpha=0.6)
if mel2ph is not None and str_phs is not None:
decoded_txt = str_phs.split(" ")
dur = mel2ph_to_dur(torch.LongTensor(mel2ph)[None, :], len(decoded_txt))[0].numpy()
dur = [0] + list(np.cumsum(dur))
for i in range(len(dur) - 1):
shift = (i % 20) + 1
plt.text(dur[i], shift, decoded_txt[i])
plt.hlines(shift, dur[i], dur[i + 1], colors='b' if decoded_txt[i] != '|' else 'black')
plt.vlines(dur[i], 0, 5, colors='b' if decoded_txt[i] != '|' else 'black',
alpha=1, linewidth=1)
plt.tight_layout()
plt.savefig(f'{gen_dir}/plot/{base_fn}.png', format='png')
plt.close(fig)
if hparams.get('save_mel_npy', False):
np.save(f'{gen_dir}/npy/{base_fn}', mel)
if alignment is not None:
fig, ax = plt.subplots(figsize=(12, 16))
im = ax.imshow(alignment, aspect='auto', origin='lower',
interpolation='none')
ax.set_xticks(np.arange(0, alignment.shape[1], 5))
ax.set_yticks(np.arange(0, alignment.shape[0], 10))
ax.set_ylabel("$S_p$ index")
ax.set_xlabel("$H_c$ index")
fig.colorbar(im, ax=ax)
fig.savefig(f'{gen_dir}/attn_plot/{base_fn}_attn.png', format='png')
plt.close(fig)
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/GenerSpeech/task/generspeech.py |
import matplotlib
matplotlib.use('Agg')
from tasks.base_task import data_loader
from tasks.tts.fs2 import FastSpeech2Task
from tasks.tts.dataset_utils import FastSpeechDataset, BaseTTSDataset
import glob
import importlib
from utils.pitch_utils import norm_interp_f0, denorm_f0, f0_to_coarse
from inference.base_tts_infer import load_data_preprocessor
from data_gen.tts.emotion import inference as EmotionEncoder
from data_gen.tts.emotion.inference import embed_utterance as Embed_utterance
from data_gen.tts.emotion.inference import preprocess_wav
from tqdm import tqdm
from utils.hparams import hparams
from data_gen.tts.data_gen_utils import build_phone_encoder, build_word_encoder
import random
import torch
import torch.optim
import torch.nn.functional as F
import torch.utils.data
from utils.indexed_datasets import IndexedDataset
from resemblyzer import VoiceEncoder
import torch.distributions
import numpy as np
import utils
import os
class GenerSpeech_dataset(BaseTTSDataset):
def __init__(self, prefix, shuffle=False, test_items=None, test_sizes=None, data_dir=None):
super().__init__(prefix, shuffle, test_items, test_sizes, data_dir)
self.f0_mean, self.f0_std = hparams.get('f0_mean', None), hparams.get('f0_std', None)
if prefix == 'valid':
indexed_ds = IndexedDataset(f'{self.data_dir}/train')
sizes = np.load(f'{self.data_dir}/train_lengths.npy')
index = [i for i in range(len(indexed_ds))]
random.shuffle(index)
index = index[:300]
self.sizes = sizes[index]
self.indexed_ds = []
for i in index:
self.indexed_ds.append(indexed_ds[i])
self.avail_idxs = list(range(len(self.sizes)))
if hparams['min_frames'] > 0:
self.avail_idxs = [x for x in self.avail_idxs if self.sizes[x] >= hparams['min_frames']]
self.sizes = [self.sizes[i] for i in self.avail_idxs]
if prefix == 'test' and hparams['test_input_dir'] != '':
self.preprocessor, self.preprocess_args = load_data_preprocessor()
self.indexed_ds, self.sizes = self.load_test_inputs(hparams['test_input_dir'])
self.avail_idxs = [i for i, _ in enumerate(self.sizes)]
def load_test_inputs(self, test_input_dir):
inp_wav_paths = sorted(glob.glob(f'{test_input_dir}/*.wav') + glob.glob(f'{test_input_dir}/*.mp3'))
        binarizer_cls = hparams.get("binarizer_cls", 'data_gen.tts.base_binarizer.BaseBinarizer')
pkg = ".".join(binarizer_cls.split(".")[:-1])
cls_name = binarizer_cls.split(".")[-1]
binarizer_cls = getattr(importlib.import_module(pkg), cls_name)
phone_encoder = build_phone_encoder(hparams['binary_data_dir'])
word_encoder = build_word_encoder(hparams['binary_data_dir'])
voice_encoder = VoiceEncoder().cuda()
encoder = [phone_encoder, word_encoder]
sizes = []
items = []
EmotionEncoder.load_model(hparams['emotion_encoder_path'])
preprocessor, preprocess_args = self.preprocessor, self.preprocess_args
for wav_fn in tqdm(inp_wav_paths):
item_name = wav_fn[len(test_input_dir) + 1:].replace("/", "_")
spk_id = emotion = 0
item2tgfn = wav_fn.replace('.wav', '.TextGrid') # prepare textgrid alignment
txtpath = wav_fn.replace('.wav', '.txt') # prepare text
with open(txtpath, 'r') as f:
text_raw = f.readlines()
f.close()
ph, txt = preprocessor.txt_to_ph(preprocessor.txt_processor, text_raw[0], preprocess_args)
item = binarizer_cls.process_item(item_name, ph, txt, item2tgfn, wav_fn, spk_id, emotion, encoder, hparams['binarization_args'])
item['emo_embed'] = Embed_utterance(preprocess_wav(item['wav_fn']))
item['spk_embed'] = voice_encoder.embed_utterance(item['wav'])
items.append(item)
sizes.append(item['len'])
return items, sizes
def _get_item(self, index):
if hasattr(self, 'avail_idxs') and self.avail_idxs is not None:
index = self.avail_idxs[index]
if self.indexed_ds is None:
self.indexed_ds = IndexedDataset(f'{self.data_dir}/{self.prefix}')
return self.indexed_ds[index]
def __getitem__(self, index):
hparams = self.hparams
item = self._get_item(index)
assert len(item['mel']) == self.sizes[index], (len(item['mel']), self.sizes[index])
max_frames = hparams['max_frames']
spec = torch.Tensor(item['mel'])[:max_frames]
max_frames = spec.shape[0] // hparams['frames_multiple'] * hparams['frames_multiple']
spec = spec[:max_frames]
phone = torch.LongTensor(item['phone'][:hparams['max_input_tokens']])
sample = {
"id": index,
"item_name": item['item_name'],
"text": item['txt'],
"txt_token": phone,
"mel": spec,
"mel_nonpadding": spec.abs().sum(-1) > 0,
}
spec = sample['mel']
T = spec.shape[0]
sample['mel2ph'] = mel2ph = torch.LongTensor(item['mel2ph'])[:T] if 'mel2ph' in item else None
if hparams['use_pitch_embed']:
assert 'f0' in item
if hparams.get('normalize_pitch', False):
f0 = item["f0"]
                if (f0 > 0).any() and f0[f0 > 0].std() > 0:
f0[f0 > 0] = (f0[f0 > 0] - f0[f0 > 0].mean()) / f0[f0 > 0].std() * hparams['f0_std'] + \
hparams['f0_mean']
f0[f0 > 0] = f0[f0 > 0].clip(min=60, max=500)
pitch = f0_to_coarse(f0)
pitch = torch.LongTensor(pitch[:max_frames])
else:
pitch = torch.LongTensor(item.get("pitch"))[:max_frames] if "pitch" in item else None
f0, uv = norm_interp_f0(item["f0"][:max_frames], hparams)
uv = torch.FloatTensor(uv)
f0 = torch.FloatTensor(f0)
else:
f0 = uv = torch.zeros_like(mel2ph)
pitch = None
sample["f0"], sample["uv"], sample["pitch"] = f0, uv, pitch
sample["spk_embed"] = torch.Tensor(item['spk_embed'])
sample["emotion"] = item['emotion']
sample["emo_embed"] = torch.Tensor(item['emo_embed'])
if hparams.get('use_word', False):
sample["ph_words"] = item["ph_words"]
sample["word_tokens"] = torch.LongTensor(item["word_tokens"])
sample["mel2word"] = torch.LongTensor(item.get("mel2word"))[:max_frames]
sample["ph2word"] = torch.LongTensor(item['ph2word'][:hparams['max_input_tokens']])
return sample
def collater(self, samples):
if len(samples) == 0:
return {}
hparams = self.hparams
id = torch.LongTensor([s['id'] for s in samples])
item_names = [s['item_name'] for s in samples]
text = [s['text'] for s in samples]
txt_tokens = utils.collate_1d([s['txt_token'] for s in samples], 0)
mels = utils.collate_2d([s['mel'] for s in samples], 0.0)
txt_lengths = torch.LongTensor([s['txt_token'].numel() for s in samples])
mel_lengths = torch.LongTensor([s['mel'].shape[0] for s in samples])
batch = {
'id': id,
'item_name': item_names,
'nsamples': len(samples),
'text': text,
'txt_tokens': txt_tokens,
'txt_lengths': txt_lengths,
'mels': mels,
'mel_lengths': mel_lengths,
}
f0 = utils.collate_1d([s['f0'] for s in samples], 0.0)
pitch = utils.collate_1d([s['pitch'] for s in samples]) if samples[0]['pitch'] is not None else None
uv = utils.collate_1d([s['uv'] for s in samples])
mel2ph = utils.collate_1d([s['mel2ph'] for s in samples], 0.0) if samples[0]['mel2ph'] is not None else None
batch.update({
'mel2ph': mel2ph,
'pitch': pitch,
'f0': f0,
'uv': uv,
})
spk_embed = torch.stack([s['spk_embed'] for s in samples])
batch['spk_embed'] = spk_embed
emo_embed = torch.stack([s['emo_embed'] for s in samples])
batch['emo_embed'] = emo_embed
if hparams.get('use_word', False):
ph_words = [s['ph_words'] for s in samples]
batch['ph_words'] = ph_words
word_tokens = utils.collate_1d([s['word_tokens'] for s in samples], 0)
batch['word_tokens'] = word_tokens
mel2word = utils.collate_1d([s['mel2word'] for s in samples], 0)
batch['mel2word'] = mel2word
ph2word = utils.collate_1d([s['ph2word'] for s in samples], 0)
batch['ph2word'] = ph2word
return batch | EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/GenerSpeech/task/dataset.py |
import scipy
from torch.nn import functional as F
import torch
from torch import nn
import numpy as np
from modules.commons.common_layers import Permute
from modules.fastspeech.tts_modules import FFTBlocks
from modules.GenerSpeech.model.wavenet import fused_add_tanh_sigmoid_multiply, WN
class LayerNorm(nn.Module):
def __init__(self, channels, eps=1e-4):
super().__init__()
self.channels = channels
self.eps = eps
self.gamma = nn.Parameter(torch.ones(channels))
self.beta = nn.Parameter(torch.zeros(channels))
def forward(self, x):
n_dims = len(x.shape)
mean = torch.mean(x, 1, keepdim=True)
variance = torch.mean((x - mean) ** 2, 1, keepdim=True)
x = (x - mean) * torch.rsqrt(variance + self.eps)
shape = [1, -1] + [1] * (n_dims - 2)
x = x * self.gamma.view(*shape) + self.beta.view(*shape)
return x
class ConvReluNorm(nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.out_channels = out_channels
self.kernel_size = kernel_size
self.n_layers = n_layers
self.p_dropout = p_dropout
        assert n_layers > 1, "Number of layers should be larger than 1."
self.conv_layers = nn.ModuleList()
self.norm_layers = nn.ModuleList()
self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
self.norm_layers.append(LayerNorm(hidden_channels))
self.relu_drop = nn.Sequential(
nn.ReLU(),
nn.Dropout(p_dropout))
for _ in range(n_layers - 1):
self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2))
self.norm_layers.append(LayerNorm(hidden_channels))
self.proj = nn.Conv1d(hidden_channels, out_channels, 1)
self.proj.weight.data.zero_()
self.proj.bias.data.zero_()
def forward(self, x, x_mask):
x_org = x
for i in range(self.n_layers):
x = self.conv_layers[i](x * x_mask)
x = self.norm_layers[i](x)
x = self.relu_drop(x)
x = x_org + self.proj(x)
return x * x_mask
class ActNorm(nn.Module):  # activation normalization (per-channel affine) layer from Glow
def __init__(self, channels, ddi=False, **kwargs):
super().__init__()
self.channels = channels
self.initialized = not ddi
self.logs = nn.Parameter(torch.zeros(1, channels, 1))
self.bias = nn.Parameter(torch.zeros(1, channels, 1))
def forward(self, x, x_mask=None, reverse=False, **kwargs):
if x_mask is None:
x_mask = torch.ones(x.size(0), 1, x.size(2)).to(device=x.device, dtype=x.dtype)
x_len = torch.sum(x_mask, [1, 2])
if not self.initialized:
self.initialize(x, x_mask)
self.initialized = True
if reverse:
z = (x - self.bias) * torch.exp(-self.logs) * x_mask
logdet = torch.sum(-self.logs) * x_len
else:
z = (self.bias + torch.exp(self.logs) * x) * x_mask
logdet = torch.sum(self.logs) * x_len # [b]
return z, logdet
def store_inverse(self):
pass
def set_ddi(self, ddi):
self.initialized = not ddi
def initialize(self, x, x_mask):
with torch.no_grad():
denom = torch.sum(x_mask, [0, 2])
m = torch.sum(x * x_mask, [0, 2]) / denom
m_sq = torch.sum(x * x * x_mask, [0, 2]) / denom
v = m_sq - (m ** 2)
logs = 0.5 * torch.log(torch.clamp_min(v, 1e-6))
bias_init = (-m * torch.exp(-logs)).view(*self.bias.shape).to(dtype=self.bias.dtype)
logs_init = (-logs).view(*self.logs.shape).to(dtype=self.logs.dtype)
self.bias.data.copy_(bias_init)
self.logs.data.copy_(logs_init)
class InvConvNear(nn.Module):  # invertible convolution
def __init__(self, channels, n_split=4, no_jacobian=False, lu=True, n_sqz=2, **kwargs):
super().__init__()
assert (n_split % 2 == 0)
self.channels = channels
self.n_split = n_split
self.n_sqz = n_sqz
self.no_jacobian = no_jacobian
w_init = torch.qr(torch.FloatTensor(self.n_split, self.n_split).normal_())[0]
if torch.det(w_init) < 0:
w_init[:, 0] = -1 * w_init[:, 0]
self.lu = lu
if lu:
# LU decomposition can slightly speed up the inverse
np_p, np_l, np_u = scipy.linalg.lu(w_init)
np_s = np.diag(np_u)
np_sign_s = np.sign(np_s)
np_log_s = np.log(np.abs(np_s))
np_u = np.triu(np_u, k=1)
l_mask = np.tril(np.ones(w_init.shape, dtype=float), -1)
eye = np.eye(*w_init.shape, dtype=float)
self.register_buffer('p', torch.Tensor(np_p.astype(float)))
self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(float)))
self.l = nn.Parameter(torch.Tensor(np_l.astype(float)), requires_grad=True)
self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(float)), requires_grad=True)
self.u = nn.Parameter(torch.Tensor(np_u.astype(float)), requires_grad=True)
self.register_buffer('l_mask', torch.Tensor(l_mask))
self.register_buffer('eye', torch.Tensor(eye))
else:
self.weight = nn.Parameter(w_init)
def forward(self, x, x_mask=None, reverse=False, **kwargs):
b, c, t = x.size()
assert (c % self.n_split == 0)
if x_mask is None:
x_mask = 1
x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t
else:
x_len = torch.sum(x_mask, [1, 2])
x = x.view(b, self.n_sqz, c // self.n_split, self.n_split // self.n_sqz, t)
x = x.permute(0, 1, 3, 2, 4).contiguous().view(b, self.n_split, c // self.n_split, t)
if self.lu:
self.weight, log_s = self._get_weight()
logdet = log_s.sum()
logdet = logdet * (c / self.n_split) * x_len
else:
logdet = torch.logdet(self.weight) * (c / self.n_split) * x_len # [b]
if reverse:
if hasattr(self, "weight_inv"):
weight = self.weight_inv
else:
weight = torch.inverse(self.weight.float()).to(dtype=self.weight.dtype)
logdet = -logdet
else:
weight = self.weight
if self.no_jacobian:
logdet = 0
weight = weight.view(self.n_split, self.n_split, 1, 1)
z = F.conv2d(x, weight)
z = z.view(b, self.n_sqz, self.n_split // self.n_sqz, c // self.n_split, t)
z = z.permute(0, 1, 3, 2, 4).contiguous().view(b, c, t) * x_mask
return z, logdet
def _get_weight(self):
l, log_s, u = self.l, self.log_s, self.u
l = l * self.l_mask + self.eye
u = u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(log_s))
weight = torch.matmul(self.p, torch.matmul(l, u))
return weight, log_s
def store_inverse(self):
weight, _ = self._get_weight()
self.weight_inv = torch.inverse(weight.float()).to(next(self.parameters()).device)
class InvConv(nn.Module):
def __init__(self, channels, no_jacobian=False, lu=True, **kwargs):
super().__init__()
w_shape = [channels, channels]
w_init = np.linalg.qr(np.random.randn(*w_shape))[0].astype(float)
LU_decomposed = lu
if not LU_decomposed:
# Sample a random orthogonal matrix:
self.register_parameter("weight", nn.Parameter(torch.Tensor(w_init)))
else:
np_p, np_l, np_u = scipy.linalg.lu(w_init)
np_s = np.diag(np_u)
np_sign_s = np.sign(np_s)
np_log_s = np.log(np.abs(np_s))
np_u = np.triu(np_u, k=1)
l_mask = np.tril(np.ones(w_shape, dtype=float), -1)
eye = np.eye(*w_shape, dtype=float)
self.register_buffer('p', torch.Tensor(np_p.astype(float)))
self.register_buffer('sign_s', torch.Tensor(np_sign_s.astype(float)))
self.l = nn.Parameter(torch.Tensor(np_l.astype(float)))
self.log_s = nn.Parameter(torch.Tensor(np_log_s.astype(float)))
self.u = nn.Parameter(torch.Tensor(np_u.astype(float)))
self.l_mask = torch.Tensor(l_mask)
self.eye = torch.Tensor(eye)
self.w_shape = w_shape
self.LU = LU_decomposed
self.weight = None
def get_weight(self, device, reverse):
w_shape = self.w_shape
self.p = self.p.to(device)
self.sign_s = self.sign_s.to(device)
self.l_mask = self.l_mask.to(device)
self.eye = self.eye.to(device)
l = self.l * self.l_mask + self.eye
u = self.u * self.l_mask.transpose(0, 1).contiguous() + torch.diag(self.sign_s * torch.exp(self.log_s))
dlogdet = self.log_s.sum()
if not reverse:
w = torch.matmul(self.p, torch.matmul(l, u))
else:
l = torch.inverse(l.double()).float()
u = torch.inverse(u.double()).float()
w = torch.matmul(u, torch.matmul(l, self.p.inverse()))
return w.view(w_shape[0], w_shape[1], 1), dlogdet
def forward(self, x, x_mask=None, reverse=False, **kwargs):
"""
log-det = log|abs(|W|)| * pixels
"""
b, c, t = x.size()
if x_mask is None:
x_len = torch.ones((b,), dtype=x.dtype, device=x.device) * t
else:
x_len = torch.sum(x_mask, [1, 2])
logdet = 0
if not reverse:
weight, dlogdet = self.get_weight(x.device, reverse)
z = F.conv1d(x, weight)
if logdet is not None:
logdet = logdet + dlogdet * x_len
return z, logdet
else:
if self.weight is None:
weight, dlogdet = self.get_weight(x.device, reverse)
else:
weight, dlogdet = self.weight, self.dlogdet
z = F.conv1d(x, weight)
if logdet is not None:
logdet = logdet - dlogdet * x_len
return z, logdet
def store_inverse(self):
self.weight, self.dlogdet = self.get_weight('cuda', reverse=True)
class Flip(nn.Module):
def forward(self, x, *args, reverse=False, **kwargs):
x = torch.flip(x, [1])
logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device)
return x, logdet
def store_inverse(self):
pass
class CouplingBlock(nn.Module):  # affine coupling layer
def __init__(self, in_channels, hidden_channels, kernel_size, dilation_rate, n_layers,
gin_channels=0, p_dropout=0, sigmoid_scale=False,
share_cond_layers=False, wn=None):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.sigmoid_scale = sigmoid_scale
start = torch.nn.Conv1d(in_channels // 2, hidden_channels, 1)
start = torch.nn.utils.weight_norm(start)
self.start = start
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(hidden_channels, in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
self.wn = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels,
p_dropout, share_cond_layers)
if wn is not None:
self.wn.in_layers = wn.in_layers
self.wn.res_skip_layers = wn.res_skip_layers
def forward(self, x, x_mask=None, reverse=False, g=None, **kwargs):
if x_mask is None:
x_mask = 1
x_0, x_1 = x[:, :self.in_channels // 2], x[:, self.in_channels // 2:]
x = self.start(x_0) * x_mask
x = self.wn(x, x_mask, g)
out = self.end(x)
z_0 = x_0
m = out[:, :self.in_channels // 2, :]
logs = out[:, self.in_channels // 2:, :]
if self.sigmoid_scale:
logs = torch.log(1e-6 + torch.sigmoid(logs + 2))
if reverse:
z_1 = (x_1 - m) * torch.exp(-logs) * x_mask
logdet = torch.sum(-logs * x_mask, [1, 2])
else:
z_1 = (m + torch.exp(logs) * x_1) * x_mask
logdet = torch.sum(logs * x_mask, [1, 2])
z = torch.cat([z_0, z_1], 1)
return z, logdet
def store_inverse(self):
self.wn.remove_weight_norm()
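def _sketch_coupling_block_identity_at_init():
    # Minimal usage sketch (illustration only): because the "end" conv is zero-initialised,
    # a freshly built CouplingBlock predicts m = 0 and logs = 0, so it acts as the identity
    # with zero log-determinant. It assumes WN (from wavenet.py) is in scope here, as
    # CouplingBlock itself relies on it.
    block = CouplingBlock(in_channels=16, hidden_channels=32, kernel_size=3,
                          dilation_rate=1, n_layers=2)
    x = torch.randn(2, 16, 20)                               # [B, C, T]
    z, logdet = block(x)
    assert torch.allclose(z, x) and torch.allclose(logdet, torch.zeros(2))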
class GlowFFTBlocks(FFTBlocks):
def __init__(self, hidden_size=128, gin_channels=256, num_layers=2, ffn_kernel_size=5,
dropout=None, num_heads=4, use_pos_embed=True, use_last_norm=True,
norm='ln', use_pos_embed_alpha=True):
super().__init__(hidden_size, num_layers, ffn_kernel_size, dropout, num_heads, use_pos_embed,
use_last_norm, norm, use_pos_embed_alpha)
self.inp_proj = nn.Conv1d(hidden_size + gin_channels, hidden_size, 1)
def forward(self, x, x_mask=None, g=None):
"""
:param x: [B, C_x, T]
:param x_mask: [B, 1, T]
:param g: [B, C_g, T]
:return: [B, C_x, T]
"""
if g is not None:
x = self.inp_proj(torch.cat([x, g], 1))
x = x.transpose(1, 2)
x = super(GlowFFTBlocks, self).forward(x, x_mask[:, 0] == 0)
x = x.transpose(1, 2)
return x
class TransformerCouplingBlock(nn.Module):
def __init__(self, in_channels, hidden_channels, n_layers,
gin_channels=0, p_dropout=0, sigmoid_scale=False):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.sigmoid_scale = sigmoid_scale
start = torch.nn.Conv1d(in_channels // 2, hidden_channels, 1)
self.start = start
# Initializing last layer to 0 makes the affine coupling layers
# do nothing at first. This helps with training stability
end = torch.nn.Conv1d(hidden_channels, in_channels, 1)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = end
self.fft_blocks = GlowFFTBlocks(
hidden_size=hidden_channels,
ffn_kernel_size=3,
gin_channels=gin_channels,
num_layers=n_layers)
def forward(self, x, x_mask=None, reverse=False, g=None, **kwargs):
if x_mask is None:
x_mask = 1
x_0, x_1 = x[:, :self.in_channels // 2], x[:, self.in_channels // 2:]
x = self.start(x_0) * x_mask
x = self.fft_blocks(x, x_mask, g)
out = self.end(x)
z_0 = x_0
m = out[:, :self.in_channels // 2, :]
logs = out[:, self.in_channels // 2:, :]
if self.sigmoid_scale:
logs = torch.log(1e-6 + torch.sigmoid(logs + 2))
if reverse:
z_1 = (x_1 - m) * torch.exp(-logs) * x_mask
logdet = torch.sum(-logs * x_mask, [1, 2])
else:
z_1 = (m + torch.exp(logs) * x_1) * x_mask
logdet = torch.sum(logs * x_mask, [1, 2])
z = torch.cat([z_0, z_1], 1)
return z, logdet
def store_inverse(self):
pass
class FreqFFTCouplingBlock(nn.Module):
def __init__(self, in_channels, hidden_channels, n_layers,
gin_channels=0, p_dropout=0, sigmoid_scale=False):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.sigmoid_scale = sigmoid_scale
hs = hidden_channels
stride = 8
self.start = torch.nn.Conv2d(3, hs, kernel_size=stride * 2,
stride=stride, padding=stride // 2)
end = nn.ConvTranspose2d(hs, 2, kernel_size=stride, stride=stride)
end.weight.data.zero_()
end.bias.data.zero_()
self.end = nn.Sequential(
nn.Conv2d(hs * 3, hs, 3, 1, 1),
nn.ReLU(),
nn.GroupNorm(4, hs),
nn.Conv2d(hs, hs, 3, 1, 1),
end
)
self.fft_v = FFTBlocks(hidden_size=hs, ffn_kernel_size=1, num_layers=n_layers)
self.fft_h = nn.Sequential(
nn.Conv1d(hs, hs, 3, 1, 1),
nn.ReLU(),
nn.Conv1d(hs, hs, 3, 1, 1),
)
self.fft_g = nn.Sequential(
nn.Conv1d(
gin_channels - 160, hs, kernel_size=stride * 2, stride=stride, padding=stride // 2),
Permute(0, 2, 1),
FFTBlocks(hidden_size=hs, ffn_kernel_size=1, num_layers=n_layers),
Permute(0, 2, 1),
)
def forward(self, x, x_mask=None, reverse=False, g=None, **kwargs):
g_, _ = unsqueeze(g)
g_mel = g_[:, :80]
g_txt = g_[:, 80:]
g_mel, _ = squeeze(g_mel)
g_txt, _ = squeeze(g_txt) # [B, C, T]
if x_mask is None:
x_mask = 1
x_0, x_1 = x[:, :self.in_channels // 2], x[:, self.in_channels // 2:]
x = torch.stack([x_0, g_mel[:, :80], g_mel[:, 80:]], 1)
x = self.start(x) # [B, C, N_bins, T]
B, C, N_bins, T = x.shape
x_v = self.fft_v(x.permute(0, 3, 2, 1).reshape(B * T, N_bins, C))
x_v = x_v.reshape(B, T, N_bins, -1).permute(0, 3, 2, 1)
# x_v = x
x_h = self.fft_h(x.permute(0, 2, 1, 3).reshape(B * N_bins, C, T))
x_h = x_h.reshape(B, N_bins, -1, T).permute(0, 2, 1, 3)
# x_h = x
x_g = self.fft_g(g_txt)[:, :, None, :].repeat(1, 1, 10, 1)
x = torch.cat([x_v, x_h, x_g], 1)
out = self.end(x)
z_0 = x_0
m = out[:, 0]
logs = out[:, 1]
if self.sigmoid_scale:
logs = torch.log(1e-6 + torch.sigmoid(logs + 2))
if reverse:
z_1 = (x_1 - m) * torch.exp(-logs) * x_mask
logdet = torch.sum(-logs * x_mask, [1, 2])
else:
z_1 = (m + torch.exp(logs) * x_1) * x_mask
logdet = torch.sum(logs * x_mask, [1, 2])
z = torch.cat([z_0, z_1], 1)
return z, logdet
def store_inverse(self):
pass
class Glow(nn.Module):
def __init__(self,
in_channels,
hidden_channels,
kernel_size,
dilation_rate,
n_blocks,
n_layers,
p_dropout=0.,
n_split=4,
n_sqz=2,
sigmoid_scale=False,
gin_channels=0,
inv_conv_type='near',
share_cond_layers=False,
share_wn_layers=0,
):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_blocks = n_blocks
self.n_layers = n_layers
self.p_dropout = p_dropout
self.n_split = n_split
self.n_sqz = n_sqz
self.sigmoid_scale = sigmoid_scale
self.gin_channels = gin_channels
self.share_cond_layers = share_cond_layers
if gin_channels != 0 and share_cond_layers:
cond_layer = torch.nn.Conv1d(gin_channels * n_sqz, 2 * hidden_channels * n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
wn = None
self.flows = nn.ModuleList()
for b in range(n_blocks):
self.flows.append(ActNorm(channels=in_channels * n_sqz))
if inv_conv_type == 'near':
self.flows.append(InvConvNear(channels=in_channels * n_sqz, n_split=n_split, n_sqz=n_sqz))
if inv_conv_type == 'invconv':
self.flows.append(InvConv(channels=in_channels * n_sqz))
if share_wn_layers > 0:
if b % share_wn_layers == 0:
wn = WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels * n_sqz,
p_dropout, share_cond_layers)
self.flows.append(
CouplingBlock(
in_channels * n_sqz,
hidden_channels,
kernel_size=kernel_size,
dilation_rate=dilation_rate,
n_layers=n_layers,
gin_channels=gin_channels * n_sqz,
p_dropout=p_dropout,
sigmoid_scale=sigmoid_scale,
share_cond_layers=share_cond_layers,
wn=wn
))
def forward(self, x, x_mask=None, g=None, reverse=False, return_hiddens=False):
logdet_tot = 0
if not reverse:
flows = self.flows
else:
flows = reversed(self.flows)
if return_hiddens:
hs = []
if self.n_sqz > 1:
x, x_mask_ = squeeze(x, x_mask, self.n_sqz)
if g is not None:
g, _ = squeeze(g, x_mask, self.n_sqz)
x_mask = x_mask_
if self.share_cond_layers and g is not None:
g = self.cond_layer(g)
for f in flows:
x, logdet = f(x, x_mask, g=g, reverse=reverse)
if return_hiddens:
hs.append(x)
logdet_tot += logdet
if self.n_sqz > 1:
x, x_mask = unsqueeze(x, x_mask, self.n_sqz)
if return_hiddens:
return x, logdet_tot, hs
return x, logdet_tot
def store_inverse(self):
def remove_weight_norm(m):
try:
nn.utils.remove_weight_norm(m)
except ValueError: # this module didn't have weight norm
return
self.apply(remove_weight_norm)
for f in self.flows:
f.store_inverse()
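def _sketch_glow_shapes():
    # Minimal usage sketch (illustration only): a small unconditional Glow keeps the input
    # shape (time must be divisible by n_sqz) and returns a per-sample log-determinant;
    # calling it again with reverse=True should approximately invert the forward pass.
    # ActNorm (defined earlier in this file) is built with its defaults here.
    glow = Glow(in_channels=80, hidden_channels=64, kernel_size=3, dilation_rate=1,
                n_blocks=2, n_layers=2, n_split=4, n_sqz=2)
    x = torch.randn(2, 80, 40)                               # [B, 80, T]
    x_mask = torch.ones(2, 1, 40)
    z, logdet = glow(x, x_mask)                              # z: [2, 80, 40], logdet: [2]
    x_rec, _ = glow(z, x_mask, reverse=True)                 # approximate inverse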
class GlowV2(nn.Module):
def __init__(self,
in_channels=256,
hidden_channels=256,
kernel_size=3,
dilation_rate=1,
n_blocks=8,
n_layers=4,
p_dropout=0.,
n_split=4,
n_split_blocks=3,
sigmoid_scale=False,
gin_channels=0,
share_cond_layers=True):
super().__init__()
self.in_channels = in_channels
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_blocks = n_blocks
self.n_layers = n_layers
self.p_dropout = p_dropout
self.n_split = n_split
self.n_split_blocks = n_split_blocks
self.sigmoid_scale = sigmoid_scale
self.gin_channels = gin_channels
self.cond_layers = nn.ModuleList()
self.share_cond_layers = share_cond_layers
self.flows = nn.ModuleList()
in_channels = in_channels * 2
for l in range(n_split_blocks):
blocks = nn.ModuleList()
self.flows.append(blocks)
gin_channels = gin_channels * 2
if gin_channels != 0 and share_cond_layers:
cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
self.cond_layers.append(torch.nn.utils.weight_norm(cond_layer, name='weight'))
for b in range(n_blocks):
blocks.append(ActNorm(channels=in_channels))
blocks.append(InvConvNear(channels=in_channels, n_split=n_split))
blocks.append(CouplingBlock(
in_channels,
hidden_channels,
kernel_size=kernel_size,
dilation_rate=dilation_rate,
n_layers=n_layers,
gin_channels=gin_channels,
p_dropout=p_dropout,
sigmoid_scale=sigmoid_scale,
share_cond_layers=share_cond_layers))
def forward(self, x=None, x_mask=None, g=None, reverse=False, concat_zs=True,
noise_scale=0.66, return_hiddens=False):
logdet_tot = 0
if not reverse:
flows = self.flows
assert x_mask is not None
zs = []
if return_hiddens:
hs = []
for i, blocks in enumerate(flows):
x, x_mask = squeeze(x, x_mask)
g_ = None
if g is not None:
g, _ = squeeze(g)
if self.share_cond_layers:
g_ = self.cond_layers[i](g)
else:
g_ = g
for layer in blocks:
x, logdet = layer(x, x_mask=x_mask, g=g_, reverse=reverse)
if return_hiddens:
hs.append(x)
logdet_tot += logdet
if i == self.n_split_blocks - 1:
zs.append(x)
else:
x, z = torch.chunk(x, 2, 1)
zs.append(z)
if concat_zs:
zs = [z.reshape(x.shape[0], -1) for z in zs]
zs = torch.cat(zs, 1) # [B, C*T]
if return_hiddens:
return zs, logdet_tot, hs
return zs, logdet_tot
else:
flows = reversed(self.flows)
if x is not None:
assert isinstance(x, list)
zs = x
else:
B, _, T = g.shape
zs = self.get_prior(B, T, g.device, noise_scale)
zs_ori = zs
if g is not None:
g_, g = g, []
for i in range(len(self.flows)):
g_, _ = squeeze(g_)
g.append(self.cond_layers[i](g_) if self.share_cond_layers else g_)
else:
g = [None for _ in range(len(self.flows))]
if x_mask is not None:
x_masks = []
for i in range(len(self.flows)):
x_mask, _ = squeeze(x_mask)
x_masks.append(x_mask)
else:
x_masks = [None for _ in range(len(self.flows))]
x_masks = x_masks[::-1]
g = g[::-1]
zs = zs[::-1]
x = None
for i, blocks in enumerate(flows):
x = zs[i] if x is None else torch.cat([x, zs[i]], 1)
for layer in reversed(blocks):
                    x, logdet = layer(x, x_mask=x_masks[i], g=g[i], reverse=reverse)
logdet_tot += logdet
x, _ = unsqueeze(x)
return x, logdet_tot, zs_ori
def store_inverse(self):
for f in self.modules():
if hasattr(f, 'store_inverse') and f != self:
f.store_inverse()
def remove_weight_norm(m):
try:
nn.utils.remove_weight_norm(m)
except ValueError: # this module didn't have weight norm
return
self.apply(remove_weight_norm)
def get_prior(self, B, T, device, noise_scale=0.66):
C = 80
zs = []
for i in range(len(self.flows)):
C, T = C, T // 2
if i == self.n_split_blocks - 1:
zs.append(torch.randn(B, C * 2, T).to(device) * noise_scale)
else:
zs.append(torch.randn(B, C, T).to(device) * noise_scale)
return zs
def squeeze(x, x_mask=None, n_sqz=2):
b, c, t = x.size()
t = (t // n_sqz) * n_sqz
x = x[:, :, :t]
x_sqz = x.view(b, c, t // n_sqz, n_sqz)
x_sqz = x_sqz.permute(0, 3, 1, 2).contiguous().view(b, c * n_sqz, t // n_sqz)
if x_mask is not None:
x_mask = x_mask[:, :, n_sqz - 1::n_sqz]
else:
x_mask = torch.ones(b, 1, t // n_sqz).to(device=x.device, dtype=x.dtype)
return x_sqz * x_mask, x_mask
def unsqueeze(x, x_mask=None, n_sqz=2):
b, c, t = x.size()
x_unsqz = x.view(b, n_sqz, c // n_sqz, t)
x_unsqz = x_unsqz.permute(0, 2, 3, 1).contiguous().view(b, c // n_sqz, t * n_sqz)
if x_mask is not None:
x_mask = x_mask.unsqueeze(-1).repeat(1, 1, 1, n_sqz).view(b, 1, t * n_sqz)
else:
x_mask = torch.ones(b, 1, t * n_sqz).to(device=x.device, dtype=x.dtype)
return x_unsqz * x_mask, x_mask
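def _sketch_squeeze_roundtrip():
    # Minimal usage sketch (illustration only): squeeze folds every n_sqz frames into the
    # channel axis (T must be divisible by n_sqz for an exact round trip) and unsqueeze
    # undoes it.
    x = torch.randn(2, 4, 10)                                # [B, C, T]
    x_sqz, _ = squeeze(x, n_sqz=2)                           # [2, 8, 5]
    x_rec, _ = unsqueeze(x_sqz, n_sqz=2)                     # [2, 4, 10]
    assert torch.allclose(x_rec, x)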
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/GenerSpeech/model/glow_modules.py |
from modules.commons.common_layers import *
# @torch.jit.script
def fused_add_tanh_sigmoid_multiply(input_a, input_b, n_channels):
n_channels_int = n_channels[0]
in_act = input_a + input_b
t_act = torch.tanh(in_act[:, :n_channels_int, :])
s_act = torch.sigmoid(in_act[:, n_channels_int:, :])
acts = t_act * s_act
return acts
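def _sketch_fused_gate():
    # Minimal usage sketch (illustration only): the fused op is the usual WaveNet gate,
    # tanh over the first n_channels of (a + b) times sigmoid over the remaining channels.
    a, b = torch.randn(2, 8, 5), torch.randn(2, 8, 5)
    acts = fused_add_tanh_sigmoid_multiply(a, b, torch.IntTensor([4]))
    ref = torch.tanh((a + b)[:, :4]) * torch.sigmoid((a + b)[:, 4:])
    assert torch.allclose(acts, ref)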
class WN(torch.nn.Module):
def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0,
p_dropout=0, share_cond_layers=False):
super(WN, self).__init__()
assert (kernel_size % 2 == 1)
assert (hidden_channels % 2 == 0)
self.hidden_channels = hidden_channels
self.kernel_size = kernel_size
self.dilation_rate = dilation_rate
self.n_layers = n_layers
self.gin_channels = gin_channels
self.p_dropout = p_dropout
self.share_cond_layers = share_cond_layers
self.in_layers = torch.nn.ModuleList()
self.res_skip_layers = torch.nn.ModuleList()
self.drop = nn.Dropout(p_dropout)
if gin_channels != 0 and not share_cond_layers:
cond_layer = torch.nn.Conv1d(gin_channels, 2 * hidden_channels * n_layers, 1)
self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight')
for i in range(n_layers):
dilation = dilation_rate ** i
padding = int((kernel_size * dilation - dilation) / 2)
in_layer = torch.nn.Conv1d(hidden_channels, 2 * hidden_channels, kernel_size,
dilation=dilation, padding=padding)
in_layer = torch.nn.utils.weight_norm(in_layer, name='weight')
self.in_layers.append(in_layer)
# last one is not necessary
if i < n_layers - 1:
res_skip_channels = 2 * hidden_channels
else:
res_skip_channels = hidden_channels
res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1)
res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight')
self.res_skip_layers.append(res_skip_layer)
def forward(self, x, x_mask=None, g=None, **kwargs):
output = torch.zeros_like(x)
n_channels_tensor = torch.IntTensor([self.hidden_channels])
if g is not None and not self.share_cond_layers:
g = self.cond_layer(g)
for i in range(self.n_layers):
x_in = self.in_layers[i](x)
x_in = self.drop(x_in)
if g is not None:
cond_offset = i * 2 * self.hidden_channels
g_l = g[:, cond_offset:cond_offset + 2 * self.hidden_channels, :]
else:
g_l = torch.zeros_like(x_in)
acts = fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor)
res_skip_acts = self.res_skip_layers[i](acts)
if i < self.n_layers - 1:
x = (x + res_skip_acts[:, :self.hidden_channels, :]) * x_mask
output = output + res_skip_acts[:, self.hidden_channels:, :]
else:
output = output + res_skip_acts
return output * x_mask
def remove_weight_norm(self):
def remove_weight_norm(m):
try:
nn.utils.remove_weight_norm(m)
except ValueError: # this module didn't have weight norm
return
self.apply(remove_weight_norm)
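def _sketch_wn_usage():
    # Minimal usage sketch (illustration only): the stack preserves [B, hidden, T] and can
    # be conditioned on g through per-layer 1x1 projections when gin_channels > 0.
    wn = WN(hidden_channels=16, kernel_size=3, dilation_rate=1, n_layers=2, gin_channels=8)
    x = torch.randn(2, 16, 20)
    x_mask = torch.ones(2, 1, 20)
    g = torch.randn(2, 8, 20)
    y = wn(x, x_mask=x_mask, g=g)                            # [2, 16, 20]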
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/GenerSpeech/model/wavenet.py |
from torch import nn
import copy
import torch
from utils.hparams import hparams
from modules.GenerSpeech.model.wavenet import WN
import math
from modules.fastspeech.tts_modules import LayerNorm
import torch.nn.functional as F
from utils.tts_utils import group_hidden_by_segs, sequence_mask
from scipy.cluster.vq import kmeans2
class VQEmbeddingEMA(nn.Module):
def __init__(self, n_embeddings, embedding_dim, commitment_cost=0.25, decay=0.999, epsilon=1e-5,
print_vq_prob=False):
super(VQEmbeddingEMA, self).__init__()
self.commitment_cost = commitment_cost
self.n_embeddings = n_embeddings
self.decay = decay
self.epsilon = epsilon
self.print_vq_prob = print_vq_prob
self.register_buffer('data_initialized', torch.zeros(1))
init_bound = 1 / 512
embedding = torch.Tensor(n_embeddings, embedding_dim)
embedding.uniform_(-init_bound, init_bound)
self.register_buffer("embedding", embedding)
self.register_buffer("ema_count", torch.zeros(n_embeddings))
self.register_buffer("ema_weight", self.embedding.clone())
def encode(self, x):
B, T, _ = x.shape
M, D = self.embedding.size()
x_flat = x.detach().reshape(-1, D)
distances = torch.addmm(torch.sum(self.embedding ** 2, dim=1) +
torch.sum(x_flat ** 2, dim=1, keepdim=True),
x_flat, self.embedding.t(),
alpha=-2.0, beta=1.0) # [B*T_mel, N_vq]
indices = torch.argmin(distances.float(), dim=-1) # [B*T_mel]
quantized = F.embedding(indices, self.embedding)
quantized = quantized.view_as(x)
return x_flat, quantized, indices
def forward(self, x):
"""
:param x: [B, T, D]
:return: [B, T, D]
"""
B, T, _ = x.shape
M, D = self.embedding.size()
if self.training and self.data_initialized.item() == 0:
print('| running kmeans in VQVAE') # data driven initialization for the embeddings
x_flat = x.detach().reshape(-1, D)
rp = torch.randperm(x_flat.size(0))
kd = kmeans2(x_flat[rp].data.cpu().numpy(), self.n_embeddings, minit='points')
self.embedding.copy_(torch.from_numpy(kd[0]))
x_flat, quantized, indices = self.encode(x)
encodings = F.one_hot(indices, M).float()
self.ema_weight.copy_(torch.matmul(encodings.t(), x_flat))
self.ema_count.copy_(torch.sum(encodings, dim=0))
x_flat, quantized, indices = self.encode(x)
encodings = F.one_hot(indices, M).float()
indices = indices.reshape(B, T)
if self.training and self.data_initialized.item() != 0:
self.ema_count = self.decay * self.ema_count + (1 - self.decay) * torch.sum(encodings, dim=0)
n = torch.sum(self.ema_count)
self.ema_count = (self.ema_count + self.epsilon) / (n + M * self.epsilon) * n
dw = torch.matmul(encodings.t(), x_flat)
self.ema_weight = self.decay * self.ema_weight + (1 - self.decay) * dw
self.embedding = self.ema_weight / self.ema_count.unsqueeze(-1)
self.data_initialized.fill_(1)
e_latent_loss = F.mse_loss(x, quantized.detach(), reduction='none')
nonpadding = (x.abs().sum(-1) > 0).float()
e_latent_loss = (e_latent_loss.mean(-1) * nonpadding).sum() / nonpadding.sum()
loss = self.commitment_cost * e_latent_loss
quantized = x + (quantized - x).detach()
avg_probs = torch.mean(encodings, dim=0)
perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10)))
if self.print_vq_prob:
print("| VQ code avg_probs: ", avg_probs)
return quantized, loss, indices, perplexity
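def _sketch_vq_usage():
    # Minimal usage sketch (illustration only): in eval mode the module skips the k-means
    # initialisation and the EMA codebook update and just snaps each frame to its nearest
    # code, returning the quantised frames, the commitment loss, the code indices and the
    # codebook perplexity.
    vq = VQEmbeddingEMA(n_embeddings=8, embedding_dim=16).eval()
    x = torch.randn(2, 10, 16)                               # [B, T, D]
    quantized, commit_loss, indices, ppl = vq(x)             # [2, 10, 16], scalar, [2, 10], scalar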
class CrossAttenLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1):
super(CrossAttenLayer, self).__init__()
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout1 = nn.Dropout(dropout)
self.norm1 = nn.LayerNorm(d_model)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.dropout2 = nn.Dropout(dropout)
self.norm2 = nn.LayerNorm(d_model)
self.activation = nn.ReLU()
def forward(self, src, local_emotion, emotion_key_padding_mask=None, forcing=False):
# src: (Tph, B, 256) local_emotion: (Temo, B, 256) emotion_key_padding_mask: (B, Temo)
if forcing:
maxlength = src.shape[0]
k = local_emotion.shape[0] / src.shape[0]
lengths1 = torch.ceil(torch.tensor([i for i in range(maxlength)]).to(src.device) * k) + 1
lengths2 = torch.floor(torch.tensor([i for i in range(maxlength)]).to(src.device) * k) - 1
mask1 = sequence_mask(lengths1, local_emotion.shape[0])
mask2 = sequence_mask(lengths2, local_emotion.shape[0])
mask = mask1.float() - mask2.float()
attn_emo = mask.repeat(src.shape[1], 1, 1) # (B, Tph, Temo)
src2 = torch.matmul(local_emotion.permute(1, 2, 0), attn_emo.float().transpose(1, 2)).permute(2, 0, 1)
else:
src2, attn_emo = self.multihead_attn(src, local_emotion, local_emotion, key_padding_mask=emotion_key_padding_mask)
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.activation(self.linear1(src)))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src, attn_emo
class ProsodyAligner(nn.Module):
def __init__(self, num_layers, guided_sigma=0.3, guided_layers=None, norm=None):
super(ProsodyAligner, self).__init__()
self.layers = nn.ModuleList([CrossAttenLayer(d_model=hparams['hidden_size'], nhead=2) for _ in range(num_layers)])
self.num_layers = num_layers
self.norm = norm
self.guided_sigma = guided_sigma
self.guided_layers = guided_layers if guided_layers is not None else num_layers
def forward(self, src, local_emotion, src_key_padding_mask=None, emotion_key_padding_mask=None, forcing=False):
output = src
guided_loss = 0
attn_emo_list = []
for i, mod in enumerate(self.layers):
# output: (Tph, B, 256), global_emotion: (1, B, 256), local_emotion: (Temo, B, 256) mask: None, src_key_padding_mask: (B, Tph),
# emotion_key_padding_mask: (B, Temo)
output, attn_emo = mod(output, local_emotion, emotion_key_padding_mask=emotion_key_padding_mask, forcing=forcing)
attn_emo_list.append(attn_emo.unsqueeze(1))
# attn_emo: (B, Tph, Temo) attn: (B, Tph, Tph)
if i < self.guided_layers and src_key_padding_mask is not None:
s_length = (~src_key_padding_mask).float().sum(-1) # B
emo_length = (~emotion_key_padding_mask).float().sum(-1)
attn_w_emo = _make_guided_attention_mask(src_key_padding_mask.size(-1), s_length, emotion_key_padding_mask.size(-1), emo_length, self.guided_sigma)
g_loss_emo = attn_emo * attn_w_emo # N, L, S
non_padding_mask = (~src_key_padding_mask).unsqueeze(-1) & (~emotion_key_padding_mask).unsqueeze(1)
guided_loss = g_loss_emo[non_padding_mask].mean() + guided_loss
if self.norm is not None:
output = self.norm(output)
return output, guided_loss, attn_emo_list
def _make_guided_attention_mask(ilen, rilen, olen, rolen, sigma):
grid_x, grid_y = torch.meshgrid(torch.arange(ilen, device=rilen.device), torch.arange(olen, device=rolen.device))
grid_x = grid_x.unsqueeze(0).expand(rilen.size(0), -1, -1)
grid_y = grid_y.unsqueeze(0).expand(rolen.size(0), -1, -1)
rilen = rilen.unsqueeze(1).unsqueeze(1)
rolen = rolen.unsqueeze(1).unsqueeze(1)
return 1.0 - torch.exp(
-((grid_y.float() / rolen - grid_x.float() / rilen) ** 2) / (2 * (sigma ** 2))
)
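def _sketch_guided_attention_mask():
    # Minimal usage sketch (illustration only): the penalty is 0 on the length-normalised
    # diagonal and tends to 1 far from it, so multiplying it with the attention weights (as
    # ProsodyAligner does above) discourages non-monotonic style-to-content alignments.
    src_len = torch.tensor([6., 4.])                         # valid phoneme lengths
    ref_len = torch.tensor([12., 8.])                        # valid reference lengths
    w = _make_guided_attention_mask(6, src_len, 12, ref_len, sigma=0.3)
    # w: [2, 6, 12]; w[0, 0, 0] is 0 while w[0, 0, -1] is close to 1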
class LocalStyleAdaptor(nn.Module):
def __init__(self, hidden_size, num_vq_codes=64, padding_idx=0):
super(LocalStyleAdaptor, self).__init__()
self.encoder = ConvBlocks(80, hidden_size, [1] * 5, 5, dropout=hparams['vae_dropout'])
self.n_embed = num_vq_codes
self.vqvae = VQEmbeddingEMA(self.n_embed, hidden_size, commitment_cost=hparams['lambda_commit'])
self.wavenet = WN(hidden_channels=80, gin_channels=80, kernel_size=3, dilation_rate=1, n_layers=4)
self.padding_idx = padding_idx
self.hidden_size = hidden_size
def forward(self, ref_mels, mel2ph=None, no_vq=False):
"""
:param ref_mels: [B, T, 80]
:return: [B, 1, H]
"""
padding_mask = ref_mels[:, :, 0].eq(self.padding_idx).data
ref_mels = self.wavenet(ref_mels.transpose(1, 2), x_mask=(~padding_mask).unsqueeze(1).repeat([1, 80, 1])).transpose(1, 2)
if mel2ph is not None:
ref_ph, _ = group_hidden_by_segs(ref_mels, mel2ph, torch.max(mel2ph))
else:
ref_ph = ref_mels
prosody = self.encoder(ref_ph)
if no_vq:
return prosody
z, vq_loss, vq_tokens, ppl = self.vqvae(prosody)
vq_loss = vq_loss.mean()
return z, vq_loss, ppl
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class Conv1d(nn.Conv1d):
"""A wrapper around nn.Conv1d, that works on (batch, time, channels)"""
def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, dilation=1, groups=1, bias=True, padding=0):
super(Conv1d, self).__init__(in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, stride=stride, dilation=dilation,
groups=groups, bias=bias, padding=padding)
def forward(self, x):
return super().forward(x.transpose(2, 1)).transpose(2, 1)
def init_weights_func(m):
classname = m.__class__.__name__
if classname.find("Conv1d") != -1:
torch.nn.init.xavier_uniform_(m.weight)
class ResidualBlock(nn.Module):
"""Implements conv->PReLU->norm n-times"""
def __init__(self, channels, kernel_size, dilation, n=2, norm_type='bn', dropout=0.0,
c_multiple=2, ln_eps=1e-12):
super(ResidualBlock, self).__init__()
if norm_type == 'bn':
norm_builder = lambda: nn.BatchNorm1d(channels)
elif norm_type == 'in':
norm_builder = lambda: nn.InstanceNorm1d(channels, affine=True)
elif norm_type == 'gn':
norm_builder = lambda: nn.GroupNorm(8, channels)
elif norm_type == 'ln':
norm_builder = lambda: LayerNorm(channels, dim=1, eps=ln_eps)
else:
norm_builder = lambda: nn.Identity()
self.blocks = [
nn.Sequential(
norm_builder(),
nn.Conv1d(channels, c_multiple * channels, kernel_size, dilation=dilation,
padding=(dilation * (kernel_size - 1)) // 2),
LambdaLayer(lambda x: x * kernel_size ** -0.5),
nn.GELU(),
nn.Conv1d(c_multiple * channels, channels, 1, dilation=dilation),
)
for i in range(n)
]
self.blocks = nn.ModuleList(self.blocks)
self.dropout = dropout
def forward(self, x):
nonpadding = (x.abs().sum(1) > 0).float()[:, None, :]
for b in self.blocks:
x_ = b(x)
if self.dropout > 0 and self.training:
x_ = F.dropout(x_, self.dropout, training=self.training)
x = x + x_
x = x * nonpadding
return x
class Pad(nn.ZeroPad2d):
def __init__(self, kernel_size, dilation):
pad_total = dilation * (kernel_size - 1)
begin = pad_total // 2
end = pad_total - begin
super(Pad, self).__init__((begin, end, begin, end))
class ZeroTemporalPad(nn.ZeroPad2d):
"""Pad sequences to equal lentgh in the temporal dimension"""
def __init__(self, kernel_size, dilation, causal=False):
total_pad = (dilation * (kernel_size - 1))
if causal:
super(ZeroTemporalPad, self).__init__((total_pad, 0))
else:
begin = total_pad // 2
end = total_pad - begin
super(ZeroTemporalPad, self).__init__((begin, end))
class ConvBlocks(nn.Module):
"""Decodes the expanded phoneme encoding into spectrograms"""
def __init__(self, channels, out_dims, dilations, kernel_size,
norm_type='ln', layers_in_block=2, c_multiple=2,
dropout=0.0, ln_eps=1e-5, init_weights=True):
super(ConvBlocks, self).__init__()
self.res_blocks = nn.Sequential(
*[ResidualBlock(channels, kernel_size, d,
n=layers_in_block, norm_type=norm_type, c_multiple=c_multiple,
dropout=dropout, ln_eps=ln_eps)
for d in dilations],
)
if norm_type == 'bn':
norm = nn.BatchNorm1d(channels)
elif norm_type == 'in':
norm = nn.InstanceNorm1d(channels, affine=True)
elif norm_type == 'gn':
norm = nn.GroupNorm(8, channels)
elif norm_type == 'ln':
norm = LayerNorm(channels, dim=1, eps=ln_eps)
self.last_norm = norm
self.post_net1 = nn.Conv1d(channels, out_dims, kernel_size=3, padding=1)
if init_weights:
self.apply(init_weights_func)
def forward(self, x):
"""
:param x: [B, T, H]
:return: [B, T, H]
"""
x = x.transpose(1, 2)
nonpadding = (x.abs().sum(1) > 0).float()[:, None, :]
x = self.res_blocks(x) * nonpadding
x = self.last_norm(x) * nonpadding
x = self.post_net1(x) * nonpadding
return x.transpose(1, 2)
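def _sketch_conv_blocks():
    # Minimal usage sketch (illustration only): a stack of residual conv blocks over
    # [B, T, H] inputs; all-zero frames are treated as padding and stay zero.
    enc = ConvBlocks(channels=16, out_dims=32, dilations=[1, 1, 1], kernel_size=3)
    x = torch.randn(2, 20, 16)
    y = enc(x)                                               # [2, 20, 32]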
class TextConvEncoder(ConvBlocks):
def __init__(self, embed_tokens, channels, out_dims, dilations, kernel_size,
norm_type='ln', layers_in_block=2, c_multiple=2,
dropout=0.0, ln_eps=1e-5, init_weights=True):
super().__init__(channels, out_dims, dilations, kernel_size,
norm_type, layers_in_block, c_multiple,
dropout, ln_eps, init_weights)
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(channels)
def forward(self, txt_tokens):
"""
:param txt_tokens: [B, T]
:return: {
'encoder_out': [B x T x C]
}
"""
x = self.embed_scale * self.embed_tokens(txt_tokens)
return super().forward(x)
class ConditionalConvBlocks(ConvBlocks):
def __init__(self, channels, g_channels, out_dims, dilations, kernel_size,
norm_type='ln', layers_in_block=2, c_multiple=2,
dropout=0.0, ln_eps=1e-5, init_weights=True, is_BTC=True):
super().__init__(channels, out_dims, dilations, kernel_size,
norm_type, layers_in_block, c_multiple,
dropout, ln_eps, init_weights)
self.g_prenet = nn.Conv1d(g_channels, channels, 3, padding=1)
self.is_BTC = is_BTC
if init_weights:
self.g_prenet.apply(init_weights_func)
def forward(self, x, g, x_mask):
if self.is_BTC:
x = x.transpose(1, 2)
g = g.transpose(1, 2)
x_mask = x_mask.transpose(1, 2)
x = x + self.g_prenet(g)
x = x * x_mask
        x = x.transpose(1, 2)  # x is [B, C, T] at this point
x = super(ConditionalConvBlocks, self).forward(x) # input needs to be BTC
if not self.is_BTC:
x = x.transpose(1, 2)
return x
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/GenerSpeech/model/prosody_util.py |
import torch
from modules.GenerSpeech.model.glow_modules import Glow
from modules.fastspeech.tts_modules import PitchPredictor
import random
from modules.GenerSpeech.model.prosody_util import ProsodyAligner, LocalStyleAdaptor
from utils.pitch_utils import f0_to_coarse, denorm_f0
from modules.commons.common_layers import *
import torch.distributions as dist
from utils.hparams import hparams
from modules.GenerSpeech.model.mixstyle import MixStyle
from modules.fastspeech.fs2 import FastSpeech2
import json
from modules.fastspeech.tts_modules import DEFAULT_MAX_SOURCE_POSITIONS, DEFAULT_MAX_TARGET_POSITIONS
class GenerSpeech(FastSpeech2):
'''
GenerSpeech: Towards Style Transfer for Generalizable Out-Of-Domain Text-to-Speech
https://arxiv.org/abs/2205.07211
'''
def __init__(self, dictionary, out_dims=None):
super().__init__(dictionary, out_dims)
# Mixstyle
self.norm = MixStyle(p=0.5, alpha=0.1, eps=1e-6, hidden_size=self.hidden_size)
# emotion embedding
self.emo_embed_proj = Linear(256, self.hidden_size, bias=True)
# build prosody extractor
## frame level
self.prosody_extractor_utter = LocalStyleAdaptor(self.hidden_size, hparams['nVQ'], self.padding_idx)
self.l1_utter = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.align_utter = ProsodyAligner(num_layers=2)
## phoneme level
self.prosody_extractor_ph = LocalStyleAdaptor(self.hidden_size, hparams['nVQ'], self.padding_idx)
self.l1_ph = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.align_ph = ProsodyAligner(num_layers=2)
## word level
self.prosody_extractor_word = LocalStyleAdaptor(self.hidden_size, hparams['nVQ'], self.padding_idx)
self.l1_word = nn.Linear(self.hidden_size * 2, self.hidden_size)
self.align_word = ProsodyAligner(num_layers=2)
self.pitch_inpainter_predictor = PitchPredictor(
self.hidden_size, n_chans=self.hidden_size,
n_layers=3, dropout_rate=0.1, odim=2,
padding=hparams['ffn_padding'], kernel_size=hparams['predictor_kernel'])
# build attention layer
self.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
self.embed_positions = SinusoidalPositionalEmbedding(
self.hidden_size, self.padding_idx,
init_size=self.max_source_positions + self.padding_idx + 1,
)
# build post flow
cond_hs = 80
if hparams.get('use_txt_cond', True):
cond_hs = cond_hs + hparams['hidden_size']
cond_hs = cond_hs + hparams['hidden_size'] * 3 # for emo, spk embedding and prosody embedding
self.post_flow = Glow(
80, hparams['post_glow_hidden'], hparams['post_glow_kernel_size'], 1,
hparams['post_glow_n_blocks'], hparams['post_glow_n_block_layers'],
n_split=4, n_sqz=2,
gin_channels=cond_hs,
share_cond_layers=hparams['post_share_cond_layers'],
share_wn_layers=hparams['share_wn_layers'],
sigmoid_scale=hparams['sigmoid_scale']
)
self.prior_dist = dist.Normal(0, 1)
def forward(self, txt_tokens, mel2ph=None, ref_mel2ph=None, ref_mel2word=None, spk_embed=None, emo_embed=None, ref_mels=None,
f0=None, uv=None, skip_decoder=False, global_steps=0, infer=False, **kwargs):
ret = {}
encoder_out = self.encoder(txt_tokens) # [B, T, C]
src_nonpadding = (txt_tokens > 0).float()[:, :, None]
# add spk/emo embed
spk_embed = self.spk_embed_proj(spk_embed)[:, None, :]
emo_embed = self.emo_embed_proj(emo_embed)[:, None, :]
# add dur
dur_inp = (encoder_out + spk_embed + emo_embed) * src_nonpadding
mel2ph = self.add_dur(dur_inp, mel2ph, txt_tokens, ret)
tgt_nonpadding = (mel2ph > 0).float()[:, :, None]
decoder_inp = self.expand_states(encoder_out, mel2ph)
decoder_inp = self.norm(decoder_inp, spk_embed + emo_embed)
# add prosody VQ
ret['ref_mel2ph'] = ref_mel2ph
ret['ref_mel2word'] = ref_mel2word
prosody_utter_mel = self.get_prosody_utter(decoder_inp, ref_mels, ret, infer, global_steps)
prosody_ph_mel = self.get_prosody_ph(decoder_inp, ref_mels, ret, infer, global_steps)
prosody_word_mel = self.get_prosody_word(decoder_inp, ref_mels, ret, infer, global_steps)
# add pitch embed
pitch_inp_domain_agnostic = decoder_inp * tgt_nonpadding
pitch_inp_domain_specific = (decoder_inp + spk_embed + emo_embed + prosody_utter_mel + prosody_ph_mel + prosody_word_mel) * tgt_nonpadding
predicted_pitch = self.inpaint_pitch(pitch_inp_domain_agnostic, pitch_inp_domain_specific, f0, uv, mel2ph, ret)
# decode
decoder_inp = decoder_inp + spk_embed + emo_embed + predicted_pitch + prosody_utter_mel + prosody_ph_mel + prosody_word_mel
ret['decoder_inp'] = decoder_inp = decoder_inp * tgt_nonpadding
if skip_decoder:
return ret
ret['mel_out'] = self.run_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs)
# postflow
is_training = self.training
ret['x_mask'] = tgt_nonpadding
ret['spk_embed'] = spk_embed
ret['emo_embed'] = emo_embed
ret['ref_prosody'] = prosody_utter_mel + prosody_ph_mel + prosody_word_mel
self.run_post_glow(ref_mels, infer, is_training, ret)
return ret
def get_prosody_ph(self, encoder_out, ref_mels, ret, infer=False, global_steps=0):
# get VQ prosody
if global_steps > hparams['vq_start'] or infer:
prosody_embedding, loss, ppl = self.prosody_extractor_ph(ref_mels, ret['ref_mel2ph'], no_vq=False)
ret['vq_loss_ph'] = loss
ret['ppl_ph'] = ppl
else:
prosody_embedding = self.prosody_extractor_ph(ref_mels, ret['ref_mel2ph'], no_vq=True)
# add positional embedding
positions = self.embed_positions(prosody_embedding[:, :, 0])
prosody_embedding = self.l1_ph(torch.cat([prosody_embedding, positions], dim=-1))
# style-to-content attention
src_key_padding_mask = encoder_out[:, :, 0].eq(self.padding_idx).data
prosody_key_padding_mask = prosody_embedding[:, :, 0].eq(self.padding_idx).data
if global_steps < hparams['forcing']:
output, guided_loss, attn_emo = self.align_ph(encoder_out.transpose(0, 1), prosody_embedding.transpose(0, 1),
src_key_padding_mask, prosody_key_padding_mask, forcing=True)
else:
output, guided_loss, attn_emo = self.align_ph(encoder_out.transpose(0, 1), prosody_embedding.transpose(0, 1),
src_key_padding_mask, prosody_key_padding_mask, forcing=False)
ret['gloss_ph'] = guided_loss
ret['attn_ph'] = attn_emo
return output.transpose(0, 1)
def get_prosody_word(self, encoder_out, ref_mels, ret, infer=False, global_steps=0):
# get VQ prosody
if global_steps > hparams['vq_start'] or infer:
prosody_embedding, loss, ppl = self.prosody_extractor_word(ref_mels, ret['ref_mel2word'], no_vq=False)
ret['vq_loss_word'] = loss
ret['ppl_word'] = ppl
else:
prosody_embedding = self.prosody_extractor_word(ref_mels, ret['ref_mel2word'], no_vq=True)
# add positional embedding
positions = self.embed_positions(prosody_embedding[:, :, 0])
prosody_embedding = self.l1_word(torch.cat([prosody_embedding, positions], dim=-1))
# style-to-content attention
src_key_padding_mask = encoder_out[:, :, 0].eq(self.padding_idx).data
prosody_key_padding_mask = prosody_embedding[:, :, 0].eq(self.padding_idx).data
if global_steps < hparams['forcing']:
output, guided_loss, attn_emo = self.align_word(encoder_out.transpose(0, 1), prosody_embedding.transpose(0, 1),
src_key_padding_mask, prosody_key_padding_mask, forcing=True)
else:
output, guided_loss, attn_emo = self.align_word(encoder_out.transpose(0, 1), prosody_embedding.transpose(0, 1),
src_key_padding_mask, prosody_key_padding_mask, forcing=False)
ret['gloss_word'] = guided_loss
ret['attn_word'] = attn_emo
return output.transpose(0, 1)
def get_prosody_utter(self, encoder_out, ref_mels, ret, infer=False, global_steps=0):
# get VQ prosody
if global_steps > hparams['vq_start'] or infer:
prosody_embedding, loss, ppl = self.prosody_extractor_utter(ref_mels, no_vq=False)
ret['vq_loss_utter'] = loss
ret['ppl_utter'] = ppl
else:
prosody_embedding = self.prosody_extractor_utter(ref_mels, no_vq=True)
# add positional embedding
positions = self.embed_positions(prosody_embedding[:, :, 0])
prosody_embedding = self.l1_utter(torch.cat([prosody_embedding, positions], dim=-1))
# style-to-content attention
src_key_padding_mask = encoder_out[:, :, 0].eq(self.padding_idx).data
prosody_key_padding_mask = prosody_embedding[:, :, 0].eq(self.padding_idx).data
if global_steps < hparams['forcing']:
output, guided_loss, attn_emo = self.align_utter(encoder_out.transpose(0, 1), prosody_embedding.transpose(0, 1),
src_key_padding_mask, prosody_key_padding_mask, forcing=True)
else:
output, guided_loss, attn_emo = self.align_utter(encoder_out.transpose(0, 1), prosody_embedding.transpose(0, 1),
src_key_padding_mask, prosody_key_padding_mask, forcing=False)
ret['gloss_utter'] = guided_loss
ret['attn_utter'] = attn_emo
return output.transpose(0, 1)
def inpaint_pitch(self, pitch_inp_domain_agnostic, pitch_inp_domain_specific, f0, uv, mel2ph, ret):
if hparams['pitch_type'] == 'frame':
pitch_padding = mel2ph == 0
if hparams['predictor_grad'] != 1:
pitch_inp_domain_agnostic = pitch_inp_domain_agnostic.detach() + hparams['predictor_grad'] * (pitch_inp_domain_agnostic - pitch_inp_domain_agnostic.detach())
pitch_inp_domain_specific = pitch_inp_domain_specific.detach() + hparams['predictor_grad'] * (pitch_inp_domain_specific - pitch_inp_domain_specific.detach())
pitch_domain_agnostic = self.pitch_predictor(pitch_inp_domain_agnostic)
pitch_domain_specific = self.pitch_inpainter_predictor(pitch_inp_domain_specific)
pitch_pred = pitch_domain_agnostic + pitch_domain_specific
ret['pitch_pred'] = pitch_pred
use_uv = hparams['pitch_type'] == 'frame' and hparams['use_uv']
if f0 is None:
f0 = pitch_pred[:, :, 0] # [B, T]
if use_uv:
uv = pitch_pred[:, :, 1] > 0 # [B, T]
f0_denorm = denorm_f0(f0, uv if use_uv else None, hparams, pitch_padding=pitch_padding)
pitch = f0_to_coarse(f0_denorm) # start from 0 [B, T_txt]
ret['f0_denorm'] = f0_denorm
ret['f0_denorm_pred'] = denorm_f0(pitch_pred[:, :, 0], (pitch_pred[:, :, 1] > 0) if use_uv else None, hparams, pitch_padding=pitch_padding)
if hparams['pitch_type'] == 'ph':
pitch = torch.gather(F.pad(pitch, [1, 0]), 1, mel2ph)
ret['f0_denorm'] = torch.gather(F.pad(ret['f0_denorm'], [1, 0]), 1, mel2ph)
ret['f0_denorm_pred'] = torch.gather(F.pad(ret['f0_denorm_pred'], [1, 0]), 1, mel2ph)
pitch_embed = self.pitch_embed(pitch)
return pitch_embed
def run_post_glow(self, tgt_mels, infer, is_training, ret):
x_recon = ret['mel_out'].transpose(1, 2)
g = x_recon
B, _, T = g.shape
if hparams.get('use_txt_cond', True):
g = torch.cat([g, ret['decoder_inp'].transpose(1, 2)], 1)
g_spk_embed = ret['spk_embed'].repeat(1, T, 1).transpose(1, 2)
g_emo_embed = ret['emo_embed'].repeat(1, T, 1).transpose(1, 2)
l_ref_prosody = ret['ref_prosody'].transpose(1, 2)
g = torch.cat([g, g_spk_embed, g_emo_embed, l_ref_prosody], dim=1)
prior_dist = self.prior_dist
if not infer:
if is_training:
self.train()
x_mask = ret['x_mask'].transpose(1, 2)
y_lengths = x_mask.sum(-1)
g = g.detach()
tgt_mels = tgt_mels.transpose(1, 2)
z_postflow, ldj = self.post_flow(tgt_mels, x_mask, g=g)
ldj = ldj / y_lengths / 80
ret['z_pf'], ret['ldj_pf'] = z_postflow, ldj
ret['postflow'] = -prior_dist.log_prob(z_postflow).mean() - ldj.mean()
else:
x_mask = torch.ones_like(x_recon[:, :1, :])
z_post = prior_dist.sample(x_recon.shape).to(g.device) * hparams['noise_scale']
x_recon_, _ = self.post_flow(z_post, x_mask, g, reverse=True)
x_recon = x_recon_
ret['mel_out'] = x_recon.transpose(1, 2) | EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/GenerSpeech/model/generspeech.py |
from modules.commons.common_layers import *
import random
class MixStyle(nn.Module):
"""MixStyle.
Reference:
Zhou et al. Domain Generalization with MixStyle. ICLR 2021.
"""
def __init__(self, p=0.5, alpha=0.1, eps=1e-6, hidden_size=256):
"""
Args:
p (float): probability of using MixStyle.
alpha (float): parameter of the Beta distribution.
eps (float): scaling parameter to avoid numerical issues.
          hidden_size (int): channel size of the features and conditioning embedding.
"""
super().__init__()
self.p = p
self.beta = torch.distributions.Beta(alpha, alpha)
self.eps = eps
self.alpha = alpha
self._activated = True
self.hidden_size = hidden_size
self.affine_layer = LinearNorm(
hidden_size,
2 * hidden_size, # For both b (bias) g (gain)
)
def __repr__(self):
return f'MixStyle(p={self.p}, alpha={self.alpha}, eps={self.eps})'
def set_activation_status(self, status=True):
self._activated = status
def forward(self, x, spk_embed):
if not self.training or not self._activated:
return x
if random.random() > self.p:
return x
B = x.size(0)
mu, sig = torch.mean(x, dim=-1, keepdim=True), torch.std(x, dim=-1, keepdim=True)
x_normed = (x - mu) / (sig + 1e-6) # [B, T, H_m]
lmda = self.beta.sample((B, 1, 1))
lmda = lmda.to(x.device)
# Get Bias and Gain
mu1, sig1 = torch.split(self.affine_layer(spk_embed), self.hidden_size, dim=-1) # [B, 1, 2 * H_m] --> 2 * [B, 1, H_m]
# MixStyle
perm = torch.randperm(B)
mu2, sig2 = mu1[perm], sig1[perm]
mu_mix = mu1*lmda + mu2 * (1-lmda)
sig_mix = sig1*lmda + sig2 * (1-lmda)
        # Perform scaling and shifting
return sig_mix * x_normed + mu_mix # [B, T, H_m]
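def _sketch_mixstyle():
    # Minimal usage sketch (illustration only): in training mode with p=1.0 the layer always
    # mixes. Each frame of x is normalised over the hidden dimension and then re-styled with
    # a Beta-weighted mixture of the affine parameters predicted from the speaker embedding
    # and from a batch-shuffled copy of it. LinearNorm comes from common_layers above.
    layer = MixStyle(p=1.0, alpha=0.1, hidden_size=16)
    x = torch.randn(4, 20, 16)                               # [B, T, H]
    spk = torch.randn(4, 1, 16)                              # [B, 1, H]
    y = layer(x, spk)                                        # [4, 20, 16]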
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/GenerSpeech/model/mixstyle.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch import GatedGraphConv
def sequence_mask(lengths, maxlen, dtype=torch.bool):
if maxlen is None:
maxlen = lengths.max()
mask = ~(torch.ones((len(lengths), maxlen)).to(lengths.device).cumsum(dim=1).t() > lengths).t()
    mask = mask.type(dtype)
return mask
def group_hidden_by_segs(h, seg_ids, max_len):
"""
:param h: [B, T, H]
:param seg_ids: [B, T]
:return: h_ph: [B, T_ph, H]
"""
B, T, H = h.shape
h_gby_segs = h.new_zeros([B, max_len + 1, H]).scatter_add_(1, seg_ids[:, :, None].repeat([1, 1, H]), h)
all_ones = h.new_ones(h.shape[:2])
cnt_gby_segs = h.new_zeros([B, max_len + 1]).scatter_add_(1, seg_ids, all_ones).contiguous()
h_gby_segs = h_gby_segs[:, 1:]
cnt_gby_segs = cnt_gby_segs[:, 1:]
h_gby_segs = h_gby_segs / torch.clamp(cnt_gby_segs[:, :, None], min=1)
# assert h_gby_segs.shape[-1] == 192
return h_gby_segs
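def _sketch_group_hidden_by_segs():
    # Minimal usage sketch (illustration only): frames that share a segment id are
    # mean-pooled into one vector; id 0 is reserved for padding and dropped.
    h = torch.arange(12.).reshape(1, 6, 2)                   # [B=1, T=6, H=2]
    seg_ids = torch.tensor([[1, 1, 2, 2, 2, 0]])
    pooled = group_hidden_by_segs(h, seg_ids, max_len=2)     # [1, 2, 2]
    # pooled[0, 0] == [1., 2.] (mean of frames 0-1), pooled[0, 1] == [6., 7.] (frames 2-4)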
class GraphAuxEnc(nn.Module):
def __init__(self, in_dim, hid_dim, out_dim, n_iterations=5, n_edge_types=6):
super(GraphAuxEnc, self).__init__()
self.in_dim = in_dim
self.hid_dim = hid_dim
self.out_dim = out_dim
self.skip_connect = True
self.dropout_after_gae = False
self.ggc_1 = GatedGraphConv(in_feats=in_dim, out_feats=hid_dim
, n_steps=n_iterations, n_etypes=n_edge_types)
self.ggc_2 = GatedGraphConv(in_feats=hid_dim, out_feats=out_dim
, n_steps=n_iterations, n_etypes=n_edge_types)
self.dropout = nn.Dropout(p=0.5)
@staticmethod
def ph_encoding_to_word_encoding(ph_encoding, ph2word, word_len):
"""
ph_encoding: [batch, t_p, hid]
ph2word: tensor [batch, t_w]
word_len: tensor [batch]
"""
word_encoding_for_graph, batch_word_encoding, has_word_row_idx = GraphAuxEnc._process_ph_to_word_encoding(
ph_encoding,
ph2word,
word_len)
# [batch, t_w, hid]
return batch_word_encoding, word_encoding_for_graph
def pad_word_encoding_to_phoneme(self, word_encoding, ph2word, t_p):
return self._postprocess_word2ph(word_encoding, ph2word, t_p)
@staticmethod
def _process_ph_to_word_encoding(ph_encoding, ph2word, word_len=None):
"""
ph_encoding: [batch, t_p, hid]
ph2word: tensor [batch, t_w]
word_len: tensor [batch]
"""
word_len = word_len.reshape([-1,])
max_len = max(word_len)
num_nodes = sum(word_len)
batch_word_encoding = group_hidden_by_segs(ph_encoding, ph2word, max_len)
bs, t_p, hid = batch_word_encoding.shape
has_word_mask = sequence_mask(word_len, max_len) # [batch, t_p, 1]
word_encoding = batch_word_encoding.reshape([bs * t_p, hid])
has_word_row_idx = has_word_mask.reshape([-1])
word_encoding = word_encoding[has_word_row_idx]
assert word_encoding.shape[0] == num_nodes
return word_encoding, batch_word_encoding, has_word_row_idx
@staticmethod
def _postprocess_word2ph(word_encoding, ph2word, t_p):
word_encoding = F.pad(word_encoding,[0,0,1,0])
ph2word_ = ph2word[:, :, None].repeat([1, 1, word_encoding.shape[-1]])
out = torch.gather(word_encoding, 1, ph2word_) # [B, T, H]
return out
@staticmethod
def _repeat_one_sequence(x, d, T):
"""Repeat each frame according to duration."""
if d.sum() == 0:
d = d.fill_(1)
hid = x.shape[-1]
expanded_lst = [x_.repeat(int(d_), 1) for x_, d_ in zip(x, d) if d_ != 0]
expanded = torch.cat(expanded_lst, dim=0)
if T > expanded.shape[0]:
expanded = torch.cat([expanded, torch.zeros([T - expanded.shape[0], hid]).to(expanded.device)], dim=0)
return expanded
def word_forward(self, graph_lst, word_encoding, etypes_lst):
"""
word encoding in, word encoding out.
"""
batched_graph = dgl.batch(graph_lst)
inp = word_encoding
batched_etypes = torch.cat(etypes_lst) # [num_edges_in_batch, 1]
assert batched_graph.num_nodes() == inp.shape[0]
gcc1_out = self.ggc_1(batched_graph, inp, batched_etypes)
if self.dropout_after_gae:
gcc1_out = self.dropout(gcc1_out)
gcc2_out = self.ggc_2(batched_graph, gcc1_out, batched_etypes) # [num_nodes_in_batch, hin]
if self.dropout_after_gae:
gcc2_out = self.ggc_2(batched_graph, gcc2_out, batched_etypes)
if self.skip_connect:
assert self.in_dim == self.hid_dim and self.hid_dim == self.out_dim
gcc2_out = inp + gcc1_out + gcc2_out
word_len = torch.tensor([g.num_nodes() for g in graph_lst]).reshape([-1])
max_len = max(word_len)
has_word_mask = sequence_mask(word_len, max_len) # [batch, t_p, 1]
has_word_row_idx = has_word_mask.reshape([-1])
bs = len(graph_lst)
t_w = max([g.num_nodes() for g in graph_lst])
hid = word_encoding.shape[-1]
output = torch.zeros([bs * t_w, hid]).to(gcc2_out.device)
output[has_word_row_idx] = gcc2_out
output = output.reshape([bs, t_w, hid])
word_level_output = output
return torch.transpose(word_level_output, 1, 2)
def forward(self, graph_lst, ph_encoding, ph2word, etypes_lst, return_word_encoding=False):
"""
graph_lst: [list of dgl_graph]
ph_encoding: [batch, hid, t_p]
ph2word: [list of list[1,2,2,2,3,3,3]]
etypes_lst: [list of etypes]; etypes: torch.LongTensor
"""
t_p = ph_encoding.shape[-1]
ph_encoding = ph_encoding.transpose(1,2) # [batch, t_p, hid]
word_len = torch.tensor([g.num_nodes() for g in graph_lst]).reshape([-1])
batched_graph = dgl.batch(graph_lst)
inp, batched_word_encoding, has_word_row_idx = self._process_ph_to_word_encoding(ph_encoding, ph2word,
word_len=word_len) # [num_nodes_in_batch, in_dim]
bs, t_w, hid = batched_word_encoding.shape
batched_etypes = torch.cat(etypes_lst) # [num_edges_in_batch, 1]
gcc1_out = self.ggc_1(batched_graph, inp, batched_etypes)
gcc2_out = self.ggc_2(batched_graph, gcc1_out, batched_etypes) # [num_nodes_in_batch, hin]
# skip connection
gcc2_out = inp + gcc1_out + gcc2_out # [n_nodes, hid]
output = torch.zeros([bs * t_w, hid]).to(gcc2_out.device)
output[has_word_row_idx] = gcc2_out
output = output.reshape([bs, t_w, hid])
word_level_output = output
output = self._postprocess_word2ph(word_level_output, ph2word, t_p) # [batch, t_p, hid]
output = torch.transpose(output, 1, 2)
if return_word_encoding:
return output, torch.transpose(word_level_output, 1, 2)
else:
return output
if __name__ == '__main__':
# Unit Test for batching graphs
from modules.syntaspeech.syntactic_graph_buider import Sentence2GraphParser, plot_dgl_sentence_graph
parser = Sentence2GraphParser("en")
# Unit Test for English Graph Builder
text1 = "To be or not to be , that 's a question ."
text2 = "I love you . You love me . Mixue ice-scream and tea ."
graph1, etypes1 = parser.parse(text1)
graph2, etypes2 = parser.parse(text2)
batched_text = "<BOS> " + text1 + " <EOS>" + " " + "<BOS> " + text2 + " <EOS>"
batched_nodes = [graph1.num_nodes(), graph2.num_nodes()]
plot_dgl_sentence_graph(dgl.batch([graph1, graph2]), {i: w for i, w in enumerate(batched_text.split(" "))})
etypes_lst = [etypes1, etypes2]
# Unit Test for Graph Encoder forward
in_feats = 4
out_feats = 4
enc = GraphAuxEnc(in_dim=in_feats, hid_dim=in_feats, out_dim=out_feats)
ph2word = torch.tensor([
[1, 2, 3, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0],
[1, 2, 3, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
])
inp = torch.randn([2, in_feats, 17]) # [N_sentence, feat, ph_length]
graph_lst = [graph1, graph2]
out = enc(graph_lst, inp, ph2word, etypes_lst)
print(out.shape) # [N_sentence, feat, ph_length]
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/syntaspeech/syntactic_graph_encoder.py |
from copy import deepcopy
import torch
import dgl
import stanza
import networkx as nx
class Sentence2GraphParser:
def __init__(self, language='zh', use_gpu=False, download=False):
self.language = language
if download:
self.stanza_parser = stanza.Pipeline(lang=language, use_gpu=use_gpu)
else:
self.stanza_parser = stanza.Pipeline(lang=language, use_gpu=use_gpu, download_method=None)
def parse(self, clean_sentence=None, words=None, ph_words=None):
if self.language == 'zh':
assert words is not None and ph_words is not None
ret = self._parse_zh(words, ph_words)
elif self.language == 'en':
assert clean_sentence is not None
ret = self._parse_en(clean_sentence)
else:
raise NotImplementedError
return ret
def _parse_zh(self, words, ph_words, enable_backward_edge=True, enable_recur_edge=True,
enable_inter_sentence_edge=True, sequential_edge=False):
"""
        words: <List of str>, one item per Chinese character
        ph_words: <List of str>, one item per Chinese character, given as its phoneme string
Example:
text1 = '宝马配挂跛骡鞍,貂蝉怨枕董翁榻.'
words = ['<BOS>', '宝', '马', '配', '挂', '跛', '骡', '鞍', ','
, '貂', '蝉', '怨', '枕', '董', '翁', '榻', '<EOS>']
ph_words = ['<BOS>', 'b_ao3_|', 'm_a3_#', 'p_ei4_|', 'g_ua4_#',
'b_o3_#', 'l_uo2_|', 'an1', ',', 'd_iao1_|',
'ch_an2_#', 'van4_#', 'zh_en3_#', 'd_ong3_|', 'ueng1_#', 't_a4', '<EOS>']
"""
words, ph_words = words[1:-1], ph_words[1:-1] # delete <BOS> and <EOS>
for i, p_w in enumerate(ph_words):
if p_w == ',':
# change english ',' into chinese
# we found it necessary in stanza's dependency parsing
words[i], ph_words[i] = ',', ','
tmp_words = deepcopy(words)
num_added_space = 0
for i, p_w in enumerate(ph_words):
if p_w.endswith("#"):
# add a blank after the p_w with '#', to separate words
tmp_words.insert(num_added_space + i + 1, " ")
num_added_space += 1
if p_w in [',', ',']:
# add one blank before and after ', ', respectively
tmp_words.insert(num_added_space + i + 1, " ") # insert behind ',' first
tmp_words.insert(num_added_space + i, " ") # insert before
num_added_space += 2
clean_text = ''.join(tmp_words).strip()
parser_out = self.stanza_parser(clean_text)
idx_to_word = {i + 1: w for i, w in enumerate(words)}
vocab_nodes = {}
vocab_idx_offset = 0
for sentence in parser_out.sentences:
num_nodes_in_current_sentence = 0
for vocab_node in sentence.words:
num_nodes_in_current_sentence += 1
vocab_idx = vocab_node.id + vocab_idx_offset
vocab_text = vocab_node.text.replace(" ", "") # delete blank in vocab
vocab_nodes[vocab_idx] = vocab_text
vocab_idx_offset += num_nodes_in_current_sentence
# start vocab-to-word alignment
vocab_to_word = {}
current_word_idx = 1
for vocab_i in vocab_nodes.keys():
vocab_to_word[vocab_i] = []
for w_in_vocab_i in vocab_nodes[vocab_i]:
if w_in_vocab_i != idx_to_word[current_word_idx]:
raise ValueError("Word Mismatch!")
vocab_to_word[vocab_i].append(current_word_idx) # add a path (vocab_node_idx, word_global_idx)
current_word_idx += 1
# then we compute the vocab-level edges
if len(parser_out.sentences) > 5:
print("Detect more than 5 input sentence! pls check whether the sentence is too long!")
vocab_level_source_id, vocab_level_dest_id = [], []
vocab_level_edge_types = []
sentences_heads = []
vocab_id_offset = 0
# get forward edges
for s in parser_out.sentences:
for w in s.words:
w_idx = w.id + vocab_id_offset # it starts from 1, just same as binarizer
w_dest_idx = w.head + vocab_id_offset
if w.head == 0:
sentences_heads.append(w_idx)
continue
vocab_level_source_id.append(w_idx)
vocab_level_dest_id.append(w_dest_idx)
vocab_id_offset += len(s.words)
vocab_level_edge_types += [0] * len(vocab_level_source_id)
num_vocab = vocab_id_offset
# optional: get backward edges
if enable_backward_edge:
back_source, back_dest = deepcopy(vocab_level_dest_id), deepcopy(vocab_level_source_id)
vocab_level_source_id += back_source
vocab_level_dest_id += back_dest
vocab_level_edge_types += [1] * len(back_source)
# optional: get inter-sentence edges if num_sentences > 1
inter_sentence_source, inter_sentence_dest = [], []
if enable_inter_sentence_edge and len(sentences_heads) > 1:
def get_full_graph_edges(nodes):
tmp_edges = []
for i, node_i in enumerate(nodes):
for j, node_j in enumerate(nodes):
if i == j:
continue
tmp_edges.append((node_i, node_j))
return tmp_edges
tmp_edges = get_full_graph_edges(sentences_heads)
for (source, dest) in tmp_edges:
inter_sentence_source.append(source)
inter_sentence_dest.append(dest)
vocab_level_source_id += inter_sentence_source
vocab_level_dest_id += inter_sentence_dest
vocab_level_edge_types += [3] * len(inter_sentence_source)
if sequential_edge:
seq_source, seq_dest = list(range(1, num_vocab)) + list(range(num_vocab, 0, -1)), \
list(range(2, num_vocab + 1)) + list(range(num_vocab - 1, -1, -1))
vocab_level_source_id += seq_source
vocab_level_dest_id += seq_dest
vocab_level_edge_types += [4] * (num_vocab - 1) + [5] * (num_vocab - 1)
# Then, we use the vocab-level edges and the vocab-to-word path, to construct the word-level graph
num_word = len(words)
source_id, dest_id, edge_types = [], [], []
for (vocab_start, vocab_end, vocab_edge_type) in zip(vocab_level_source_id, vocab_level_dest_id,
vocab_level_edge_types):
# connect the first word in the vocab
word_start = min(vocab_to_word[vocab_start])
word_end = min(vocab_to_word[vocab_end])
source_id.append(word_start)
dest_id.append(word_end)
edge_types.append(vocab_edge_type)
# sequential connection in words
for word_indices_in_v in vocab_to_word.values():
for i, word_idx in enumerate(word_indices_in_v):
if i + 1 < len(word_indices_in_v):
source_id.append(word_idx)
dest_id.append(word_idx + 1)
edge_types.append(4)
if i - 1 >= 0:
source_id.append(word_idx)
dest_id.append(word_idx - 1)
edge_types.append(5)
# optional: get recurrent edges
if enable_recur_edge:
recur_source, recur_dest = list(range(1, num_word + 1)), list(range(1, num_word + 1))
source_id += recur_source
dest_id += recur_dest
edge_types += [2] * len(recur_source)
# add <BOS> and <EOS>
source_id += [0, num_word + 1, 1, num_word]
dest_id += [1, num_word, 0, num_word + 1]
edge_types += [4, 4, 5, 5] # 4 represents sequentially forward, 5 is sequential backward
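        # Edge-type summary used throughout this builder (derived from the code above):
        #   0 = dependency edge (dependent -> head), 1 = reversed dependency,
        #   2 = self-loop (recurrent), 3 = inter-sentence edge between sentence heads,
        #   4 = sequential forward, 5 = sequential backward.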
edges = (torch.LongTensor(source_id), torch.LongTensor(dest_id))
dgl_graph = dgl.graph(edges)
assert dgl_graph.num_edges() == len(edge_types)
return dgl_graph, torch.LongTensor(edge_types)
def _parse_en(self, clean_sentence, enable_backward_edge=True, enable_recur_edge=True,
enable_inter_sentence_edge=True, sequential_edge=False, consider_bos_for_index=True):
"""
clean_sentence: <str>, each word or punctuation should be separated by one blank.
"""
edge_types = [] # required for gated graph neural network
clean_sentence = clean_sentence.strip()
if clean_sentence.endswith((" .", " ,", " ;", " :", " ?", " !")):
clean_sentence = clean_sentence[:-2]
if clean_sentence.startswith(". "):
clean_sentence = clean_sentence[2:]
parser_out = self.stanza_parser(clean_sentence)
if len(parser_out.sentences) > 5:
print("Detect more than 5 input sentence! pls check whether the sentence is too long!")
print(clean_sentence)
source_id, dest_id = [], []
sentences_heads = []
word_id_offset = 0
# get forward edges
for s in parser_out.sentences:
for w in s.words:
                w_idx = w.id + word_id_offset  # indices start from 1, same as in the binarizer
w_dest_idx = w.head + word_id_offset
if w.head == 0:
sentences_heads.append(w_idx)
continue
source_id.append(w_idx)
dest_id.append(w_dest_idx)
word_id_offset += len(s.words)
num_word = word_id_offset
edge_types += [0] * len(source_id)
# optional: get backward edges
if enable_backward_edge:
back_source, back_dest = deepcopy(dest_id), deepcopy(source_id)
source_id += back_source
dest_id += back_dest
edge_types += [1] * len(back_source)
# optional: get recurrent edges
if enable_recur_edge:
recur_source, recur_dest = list(range(1, num_word + 1)), list(range(1, num_word + 1))
source_id += recur_source
dest_id += recur_dest
edge_types += [2] * len(recur_source)
# optional: get inter-sentence edges if num_sentences > 1
inter_sentence_source, inter_sentence_dest = [], []
if enable_inter_sentence_edge and len(sentences_heads) > 1:
def get_full_graph_edges(nodes):
tmp_edges = []
for i, node_i in enumerate(nodes):
for j, node_j in enumerate(nodes):
if i == j:
continue
tmp_edges.append((node_i, node_j))
return tmp_edges
tmp_edges = get_full_graph_edges(sentences_heads)
for (source, dest) in tmp_edges:
inter_sentence_source.append(source)
inter_sentence_dest.append(dest)
source_id += inter_sentence_source
dest_id += inter_sentence_dest
edge_types += [3] * len(inter_sentence_source)
# add <BOS> and <EOS>
source_id += [0, num_word + 1, 1, num_word]
dest_id += [1, num_word, 0, num_word + 1]
edge_types += [4, 4, 5, 5] # 4 represents sequentially forward, 5 is sequential backward
# optional: sequential edge
if sequential_edge:
            # num_word - 1 forward and num_word - 1 backward sequential edges,
            # matching the [4] / [5] edge-type counts added below
            seq_source, seq_dest = list(range(1, num_word)) + list(range(num_word, 1, -1)), \
                                   list(range(2, num_word + 1)) + list(range(num_word - 1, 0, -1))
source_id += seq_source
dest_id += seq_dest
edge_types += [4] * (num_word - 1) + [5] * (num_word - 1)
if consider_bos_for_index:
edges = (torch.LongTensor(source_id), torch.LongTensor(dest_id))
else:
edges = (torch.LongTensor(source_id) - 1, torch.LongTensor(dest_id) - 1)
dgl_graph = dgl.graph(edges)
assert dgl_graph.num_edges() == len(edge_types)
return dgl_graph, torch.LongTensor(edge_types)
def plot_dgl_sentence_graph(dgl_graph, labels):
"""
labels = {idx: word for idx,word in enumerate(sentence.split(" ")) }
"""
import matplotlib.pyplot as plt
nx_graph = dgl_graph.to_networkx()
pos = nx.random_layout(nx_graph)
nx.draw(nx_graph, pos, with_labels=False)
nx.draw_networkx_labels(nx_graph, pos, labels)
plt.show()
if __name__ == '__main__':
# Unit Test for Chinese Graph Builder
parser = Sentence2GraphParser("zh")
text1 = '宝马配挂跛骡鞍,貂蝉怨枕董翁榻.'
words = ['<BOS>', '宝', '马', '配', '挂', '跛', '骡', '鞍', ',', '貂', '蝉', '怨', '枕', '董', '翁', '榻', '<EOS>']
ph_words = ['<BOS>', 'b_ao3_|', 'm_a3_#', 'p_ei4_|', 'g_ua4_#', 'b_o3_#', 'l_uo2_|', 'an1', ',', 'd_iao1_|',
'ch_an2_#', 'van4_#', 'zh_en3_#', 'd_ong3_|', 'ueng1_#', 't_a4', '<EOS>']
graph1, etypes1 = parser.parse(text1, words, ph_words)
plot_dgl_sentence_graph(graph1, {i: w for i, w in enumerate(ph_words)})
# Unit Test for English Graph Builder
parser = Sentence2GraphParser("en")
text2 = "I love you . You love me . Mixue ice-scream and tea ."
graph2, etypes2 = parser.parse(text2)
plot_dgl_sentence_graph(graph2, {i: w for i, w in enumerate(("<BOS> " + text2 + " <EOS>").split(" "))})
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/syntaspeech/syntactic_graph_buider.py |
import numpy as np
import torch
import torch.nn as nn
class SingleWindowDisc(nn.Module):
def __init__(self, time_length, freq_length=80, kernel=(3, 3), c_in=1, hidden_size=128):
super().__init__()
padding = (kernel[0] // 2, kernel[1] // 2)
self.model = nn.ModuleList([
nn.Sequential(*[
nn.Conv2d(c_in, hidden_size, kernel, (2, 2), padding),
nn.LeakyReLU(0.2, inplace=True),
nn.Dropout2d(0.25),
nn.BatchNorm2d(hidden_size, 0.8)
]),
nn.Sequential(*[
nn.Conv2d(hidden_size, hidden_size, kernel, (2, 2), padding),
nn.LeakyReLU(0.2, inplace=True),
nn.Dropout2d(0.25),
nn.BatchNorm2d(hidden_size, 0.8)
]),
nn.Sequential(*[
nn.Conv2d(hidden_size, hidden_size, kernel, (2, 2), padding),
nn.LeakyReLU(0.2, inplace=True),
nn.Dropout2d(0.25),
]),
])
ds_size = (time_length // 2 ** 3, (freq_length + 7) // 2 ** 3)
self.adv_layer = nn.Linear(hidden_size * ds_size[0] * ds_size[1], 1)
def forward(self, x):
"""
:param x: [B, C, T, n_bins]
:return: validity: [B, 1], h: List of hiddens
"""
h = []
for l in self.model:
x = l(x)
h.append(x)
x = x.view(x.shape[0], -1)
validity = self.adv_layer(x) # [B, 1]
return validity, h
class MultiWindowDiscriminator(nn.Module):
def __init__(self, time_lengths, freq_length=80, kernel=(3, 3), c_in=1, hidden_size=128):
super(MultiWindowDiscriminator, self).__init__()
self.win_lengths = time_lengths
self.discriminators = nn.ModuleList()
for time_length in time_lengths:
self.discriminators += [SingleWindowDisc(time_length, freq_length, kernel, c_in=c_in, hidden_size=hidden_size)]
def forward(self, x, x_len, start_frames_wins=None):
        '''
        Args:
            x (tensor): input mel, (B, c_in, T, n_bins).
            x_len (tensor): length of each mel, (B,).
        Returns:
            validity (tensor): (B, 1), summed over all windows (or None if any window could not be clipped);
            start_frames_wins (list); h (list of hidden feature maps).
        '''
validity = []
if start_frames_wins is None:
start_frames_wins = [None] * len(self.discriminators)
h = []
for i, start_frames in zip(range(len(self.discriminators)), start_frames_wins):
x_clip, start_frames = self.clip(x, x_len, self.win_lengths[i], start_frames) # (B, win_length, C)
start_frames_wins[i] = start_frames
if x_clip is None:
continue
x_clip, h_ = self.discriminators[i](x_clip)
h += h_
validity.append(x_clip)
if len(validity) != len(self.discriminators):
return None, start_frames_wins, h
        validity = sum(validity)  # [B, 1]
return validity, start_frames_wins, h
def clip(self, x, x_len, win_length, start_frames=None):
        '''Randomly clip x to win_length along the time axis.
        Args:
            x (tensor): (B, c_in, T, n_bins).
            x_len (tensor): (B,).
            win_length (int): target clip length.
            start_frames (list, optional): fixed start frames; randomly sampled when None.
        Returns:
            (tensor, list): clipped mel (B, c_in, win_length, n_bins) and the start frames used.
        '''
T_start = 0
T_end = x_len.max() - win_length
if T_end < 0:
            return None, start_frames  # match the (x_clip, start_frames) unpacking in forward
T_end = T_end.item()
if start_frames is None:
start_frame = np.random.randint(low=T_start, high=T_end + 1)
start_frames = [start_frame] * x.size(0)
else:
start_frame = start_frames[0]
x_batch = x[:, :, start_frame: start_frame + win_length]
return x_batch, start_frames
class Discriminator(nn.Module):
def __init__(self, time_lengths=[32, 64, 128], freq_length=80, kernel=(3, 3), c_in=1,
hidden_size=128):
super(Discriminator, self).__init__()
self.time_lengths = time_lengths
self.discriminator = MultiWindowDiscriminator(
freq_length=freq_length,
time_lengths=time_lengths,
kernel=kernel,
c_in=c_in, hidden_size=hidden_size
)
def forward(self, x, start_frames_wins=None):
"""
:param x: [B, T, 80]
:param return_y_only:
:return:
"""
if len(x.shape) == 3:
x = x[:, None, :, :] # [B,1,T,80]
x_len = x.sum([1, -1]).ne(0).int().sum([-1])
ret = {'y_c': None, 'y': None}
ret['y'], start_frames_wins, ret['h'] = self.discriminator(
x, x_len, start_frames_wins=start_frames_wins)
ret['start_frames_wins'] = start_frames_wins
return ret
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/syntaspeech/multi_window_disc.py |
import math
import torch
from torch import nn
from torch.nn import Linear
from utils.hparams import hparams
from modules.commons.conv import ConvBlocks, ConditionalConvBlocks
from modules.commons.common_layers import Embedding
from modules.commons.rel_transformer import RelTransformerEncoder
from modules.commons.transformer import MultiheadAttention, FFTBlocks
from modules.commons.align_ops import clip_mel2token_to_multiple, build_word_mask, expand_states, mel2ph_to_mel2word
from modules.tts.fastspeech import FS_DECODERS, FastSpeech
from modules.portaspeech.fvae import SyntaFVAE, FVAE
from utils.nn.seq_utils import group_hidden_by_segs
from modules.fastspeech.tts_modules import SyntaDurationPredictor
class SinusoidalPosEmb(nn.Module):
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, x):
"""
:param x: [B, T]
:return: [B, T, H]
"""
device = x.device
half_dim = self.dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, device=device) * -emb)
emb = x[:, :, None] * emb[None, :]
emb = torch.cat((emb.sin(), emb.cos()), dim=-1)
return emb
class SyntaSpeech(FastSpeech):
def __init__(self, ph_dict_size, word_dict_size, out_dims=None):
super().__init__(ph_dict_size, out_dims)
# build linguistic encoder
if hparams['num_spk'] > 1:
self.spk_embed_proj = Embedding(hparams['num_spk'], self.hidden_size)
if hparams['use_word_encoder']:
self.word_encoder = RelTransformerEncoder(
word_dict_size, self.hidden_size, self.hidden_size, self.hidden_size, 2,
hparams['word_enc_layers'], hparams['enc_ffn_kernel_size'])
if hparams['dur_level'] == 'word':
if hparams['word_encoder_type'] == 'rel_fft':
self.ph2word_encoder = RelTransformerEncoder(
0, self.hidden_size, self.hidden_size, self.hidden_size, 2,
hparams['word_enc_layers'], hparams['enc_ffn_kernel_size'])
if hparams['word_encoder_type'] == 'fft':
self.ph2word_encoder = FFTBlocks(
self.hidden_size, hparams['word_enc_layers'], 1, num_heads=hparams['num_heads'])
self.sin_pos = SinusoidalPosEmb(self.hidden_size)
self.enc_pos_proj = nn.Linear(2 * self.hidden_size, self.hidden_size)
self.dec_query_proj = nn.Linear(2 * self.hidden_size, self.hidden_size)
self.dec_res_proj = nn.Linear(2 * self.hidden_size, self.hidden_size)
self.attn = MultiheadAttention(self.hidden_size, 1, encoder_decoder_attention=True, bias=False)
self.attn.enable_torch_version = False
if hparams['text_encoder_postnet']:
self.text_encoder_postnet = ConvBlocks(
self.hidden_size, self.hidden_size, [1] * 3, 5, layers_in_block=2)
else:
self.sin_pos = SinusoidalPosEmb(self.hidden_size)
predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size
self.dur_predictor = SyntaDurationPredictor(
self.hidden_size,
n_chans=predictor_hidden,
n_layers=hparams['dur_predictor_layers'],
dropout_rate=hparams['predictor_dropout'],
kernel_size=hparams['dur_predictor_kernel'])
# build VAE decoder
if hparams['use_fvae']:
del self.decoder
del self.mel_out
if hparams.get("use_gae_in_prior", True):
self.fvae = SyntaFVAE(
c_in_out=self.out_dims,
hidden_size=hparams['fvae_enc_dec_hidden'], c_latent=hparams['latent_size'],
kernel_size=hparams['fvae_kernel_size'],
enc_n_layers=hparams['fvae_enc_n_layers'],
dec_n_layers=hparams['fvae_dec_n_layers'],
c_cond=self.hidden_size,
use_prior_flow=hparams['use_prior_flow'],
flow_hidden=hparams['prior_flow_hidden'],
flow_kernel_size=hparams['prior_flow_kernel_size'],
flow_n_steps=hparams['prior_flow_n_blocks'],
strides=[hparams['fvae_strides']],
encoder_type=hparams['fvae_encoder_type'],
decoder_type=hparams['fvae_decoder_type'],
)
else:
self.fvae = FVAE(
c_in_out=self.out_dims,
hidden_size=hparams['fvae_enc_dec_hidden'], c_latent=hparams['latent_size'],
kernel_size=hparams['fvae_kernel_size'],
enc_n_layers=hparams['fvae_enc_n_layers'],
dec_n_layers=hparams['fvae_dec_n_layers'],
c_cond=self.hidden_size,
use_prior_flow=hparams['use_prior_flow'],
flow_hidden=hparams['prior_flow_hidden'],
flow_kernel_size=hparams['prior_flow_kernel_size'],
flow_n_steps=hparams['prior_flow_n_blocks'],
strides=[hparams['fvae_strides']],
encoder_type=hparams['fvae_encoder_type'],
decoder_type=hparams['fvae_decoder_type'],
)
else:
self.decoder = FS_DECODERS[hparams['decoder_type']](hparams)
self.mel_out = Linear(self.hidden_size, self.out_dims, bias=True)
if hparams['use_pitch_embed']:
self.pitch_embed = Embedding(300, self.hidden_size, 0)
if hparams['add_word_pos']:
self.word_pos_proj = Linear(self.hidden_size, self.hidden_size)
def build_embedding(self, dictionary, embed_dim):
num_embeddings = len(dictionary)
emb = Embedding(num_embeddings, embed_dim, self.padding_idx)
return emb
def forward(self, txt_tokens, word_tokens, ph2word, word_len, mel2word=None, mel2ph=None,
spk_embed=None, spk_id=None, pitch=None, infer=False, tgt_mels=None,
global_step=None, graph_lst=None, etypes_lst=None, *args, **kwargs):
if hparams['use_spk_embed']:
spk_embed = spk_embed
elif hparams['use_spk_id']:
spk_embed = self.spk_embed_proj(spk_id)[:, None, :]
else:
spk_embed = 0
ret = {}
style_embed = self.forward_style_embed(spk_embed, spk_id) # speaker embedding, [B, 1, C]
x, tgt_nonpadding = self.run_text_encoder(
txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, graph_lst=graph_lst, etypes_lst=etypes_lst, **kwargs)
        x = x + style_embed  # adding the style embedding here may be necessary for multi-speaker synthesis
x = x * tgt_nonpadding
ret['nonpadding'] = tgt_nonpadding
if hparams['use_pitch_embed']:
x = x + self.pitch_embed(pitch)
ret['decoder_inp'] = x
if infer and (mel2ph is None or mel2word is None):
mel2word = ret['mel2word']
ret['mel_out_fvae'] = ret['mel_out'] = self.run_decoder(x, tgt_nonpadding, ret, infer, tgt_mels, global_step,
mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst)
return ret
def run_text_encoder(self, txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, style_embed, ret, graph_lst, etypes_lst, **kwargs):
        word2word = torch.arange(word_len)[None, :].to(ph2word.device) + 1  # [1, T_word], word indices 1..T_word
src_nonpadding = (txt_tokens > 0).float()[:, :, None]
use_bert = hparams.get("use_bert") is True
if use_bert:
ph_encoder_out = self.encoder(txt_tokens, bert_feats=kwargs['bert_feats'], ph2word=ph2word,
graph_lst=graph_lst, etypes_lst=etypes_lst,
cl_feats=kwargs['cl_feats'], ret=ret) * src_nonpadding + style_embed
else:
ph_encoder_out = self.encoder(txt_tokens) * src_nonpadding + style_embed
if hparams['use_word_encoder']:
word_encoder_out = self.word_encoder(word_tokens) + style_embed
ph_encoder_out = ph_encoder_out + expand_states(word_encoder_out, ph2word)
dur_input = ph_encoder_out * src_nonpadding
if hparams['dur_level'] == 'word':
word_encoder_out = 0
h_ph_gb_word = group_hidden_by_segs(ph_encoder_out, ph2word, word_len)[0]
word_encoder_out = word_encoder_out + self.ph2word_encoder(h_ph_gb_word)
if hparams['use_word_encoder']:
word_encoder_out = word_encoder_out + self.word_encoder(word_tokens)
mel2word = self.forward_dur(dur_input, mel2word, ret, ph2word=ph2word, word_len=word_len, graph_lst=graph_lst, etypes_lst=etypes_lst)
mel2word = clip_mel2token_to_multiple(mel2word, hparams['frames_multiple'])
ret['mel2word'] = mel2word
tgt_nonpadding = (mel2word > 0).float()[:, :, None]
enc_pos = self.get_pos_embed(word2word, ph2word) # [B, T_ph, H]
dec_pos = self.get_pos_embed(word2word, mel2word) # [B, T_mel, H]
dec_word_mask = build_word_mask(mel2word, ph2word) # [B, T_mel, T_ph]
x, weight = self.attention(ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask)
if hparams['add_word_pos']:
x = x + self.word_pos_proj(dec_pos)
ret['attn'] = weight
else:
mel2ph = self.forward_dur(dur_input, mel2ph, ret)
mel2ph = clip_mel2token_to_multiple(mel2ph, hparams['frames_multiple'])
mel2word = mel2ph_to_mel2word(mel2ph, ph2word)
x = expand_states(ph_encoder_out, mel2ph)
if hparams['add_word_pos']:
dec_pos = self.get_pos_embed(word2word, mel2word) # [B, T_mel, H]
x = x + self.word_pos_proj(dec_pos)
tgt_nonpadding = (mel2ph > 0).float()[:, :, None]
if hparams['use_word_encoder']:
x = x + expand_states(word_encoder_out, mel2word)
return x, tgt_nonpadding
def attention(self, ph_encoder_out, enc_pos, word_encoder_out, dec_pos, mel2word, dec_word_mask):
ph_kv = self.enc_pos_proj(torch.cat([ph_encoder_out, enc_pos], -1))
word_enc_out_expend = expand_states(word_encoder_out, mel2word)
word_enc_out_expend = torch.cat([word_enc_out_expend, dec_pos], -1)
if hparams['text_encoder_postnet']:
word_enc_out_expend = self.dec_res_proj(word_enc_out_expend)
word_enc_out_expend = self.text_encoder_postnet(word_enc_out_expend)
dec_q = x_res = word_enc_out_expend
else:
dec_q = self.dec_query_proj(word_enc_out_expend)
x_res = self.dec_res_proj(word_enc_out_expend)
ph_kv, dec_q = ph_kv.transpose(0, 1), dec_q.transpose(0, 1)
x, (weight, _) = self.attn(dec_q, ph_kv, ph_kv, attn_mask=(1 - dec_word_mask) * -1e9)
x = x.transpose(0, 1)
x = x + x_res
return x, weight
def run_decoder(self, x, tgt_nonpadding, ret, infer, tgt_mels=None, global_step=0,
mel2word=None, ph2word=None, graph_lst=None, etypes_lst=None):
if not hparams['use_fvae']:
x = self.decoder(x)
x = self.mel_out(x)
ret['kl'] = 0
return x * tgt_nonpadding
else:
# x is the phoneme encoding
x = x.transpose(1, 2) # [B, H, T]
tgt_nonpadding_BHT = tgt_nonpadding.transpose(1, 2) # [B, H, T]
if infer:
z = self.fvae(cond=x, infer=True, mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst)
else:
tgt_mels = tgt_mels.transpose(1, 2) # [B, 80, T]
z, ret['kl'], ret['z_p'], ret['m_q'], ret['logs_q'] = self.fvae(
tgt_mels, tgt_nonpadding_BHT, cond=x, mel2word=mel2word, ph2word=ph2word, graph_lst=graph_lst, etypes_lst=etypes_lst)
if global_step < hparams['posterior_start_steps']:
z = torch.randn_like(z)
x_recon = self.fvae.decoder(z, nonpadding=tgt_nonpadding_BHT, cond=x).transpose(1, 2)
ret['pre_mel_out'] = x_recon
return x_recon
def forward_dur(self, dur_input, mel2word, ret, **kwargs):
"""
:param dur_input: [B, T_txt, H]
:param mel2ph: [B, T_mel]
:param txt_tokens: [B, T_txt]
:param ret:
:return:
"""
word_len = kwargs['word_len']
ph2word = kwargs['ph2word']
graph_lst = kwargs['graph_lst']
etypes_lst = kwargs['etypes_lst']
src_padding = dur_input.data.abs().sum(-1) == 0
dur_input = dur_input.detach() + hparams['predictor_grad'] * (dur_input - dur_input.detach())
dur = self.dur_predictor(dur_input, src_padding, ph2word, graph_lst, etypes_lst)
B, T_ph = ph2word.shape
dur = torch.zeros([B, word_len.max() + 1]).to(ph2word.device).scatter_add(1, ph2word, dur)
dur = dur[:, 1:]
ret['dur'] = dur
if mel2word is None:
mel2word = self.length_regulator(dur).detach()
return mel2word
def get_pos_embed(self, word2word, x2word):
x_pos = build_word_mask(word2word, x2word).float() # [B, T_word, T_ph]
x_pos = (x_pos.cumsum(-1) / x_pos.sum(-1).clamp(min=1)[..., None] * x_pos).sum(1)
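        # x_pos is now, for every position in x, its relative index inside its word,
        # normalized to (0, 1]; this word-internal position is what gets the sinusoidal embedding.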
x_pos = self.sin_pos(x_pos.float()) # [B, T_ph, H]
return x_pos
def store_inverse_all(self):
def remove_weight_norm(m):
try:
if hasattr(m, 'store_inverse'):
m.store_inverse()
nn.utils.remove_weight_norm(m)
except ValueError: # this module didn't have weight norm
return
self.apply(remove_weight_norm)
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/syntaspeech/syntaspeech.py |
from utils.hparams import hparams
from modules.commons.common_layers import *
from modules.commons.common_layers import Embedding
from modules.fastspeech.tts_modules import FastspeechDecoder, DurationPredictor, LengthRegulator, PitchPredictor, \
EnergyPredictor, FastspeechEncoder
from utils.cwt import cwt2f0
from utils.pitch_utils import f0_to_coarse, denorm_f0, norm_f0
import torch.nn as nn
from modules.commons.rel_transformer import RelTransformerEncoder, BERTRelTransformerEncoder
FS_ENCODERS = {
'fft': lambda hp, embed_tokens, d: FastspeechEncoder(
embed_tokens, hp['hidden_size'], hp['enc_layers'], hp['enc_ffn_kernel_size'],
num_heads=hp['num_heads']),
}
FS_DECODERS = {
'fft': lambda hp: FastspeechDecoder(
hp['hidden_size'], hp['dec_layers'], hp['dec_ffn_kernel_size'], hp['num_heads']),
}
class FastSpeech2(nn.Module):
def __init__(self, dictionary, out_dims=None):
super().__init__()
self.dictionary = dictionary
self.padding_idx = dictionary.pad()
self.enc_layers = hparams['enc_layers']
self.dec_layers = hparams['dec_layers']
self.hidden_size = hparams['hidden_size']
self.encoder_embed_tokens = self.build_embedding(self.dictionary, self.hidden_size)
if hparams.get("use_bert", False):
self.ph_encoder = BERTRelTransformerEncoder(len(self.dictionary), hparams['hidden_size'], hparams['hidden_size'],
hparams['ffn_hidden_size'], hparams['num_heads'], hparams['enc_layers'],
hparams['enc_ffn_kernel_size'], hparams['dropout'], prenet=hparams['enc_prenet'], pre_ln=hparams['enc_pre_ln'])
else:
self.encoder = FS_ENCODERS[hparams['encoder_type']](hparams, self.encoder_embed_tokens, self.dictionary)
self.decoder = FS_DECODERS[hparams['decoder_type']](hparams)
self.out_dims = hparams['audio_num_mel_bins'] if out_dims is None else out_dims
self.mel_out = Linear(self.hidden_size, self.out_dims, bias=True)
if hparams['use_spk_id']:
self.spk_embed_proj = Embedding(hparams['num_spk'] + 1, self.hidden_size)
if hparams['use_split_spk_id']:
self.spk_embed_f0 = Embedding(hparams['num_spk'] + 1, self.hidden_size)
self.spk_embed_dur = Embedding(hparams['num_spk'] + 1, self.hidden_size)
elif hparams['use_spk_embed']:
self.spk_embed_proj = Linear(256, self.hidden_size, bias=True)
predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size
self.dur_predictor = DurationPredictor(
self.hidden_size,
n_chans=predictor_hidden,
n_layers=hparams['dur_predictor_layers'],
dropout_rate=hparams['predictor_dropout'],
kernel_size=hparams['dur_predictor_kernel'])
self.length_regulator = LengthRegulator()
if hparams['use_pitch_embed']:
self.pitch_embed = Embedding(300, self.hidden_size, self.padding_idx)
self.pitch_predictor = PitchPredictor(
self.hidden_size,
n_chans=predictor_hidden,
n_layers=hparams['predictor_layers'],
dropout_rate=hparams['predictor_dropout'],
odim=2 if hparams['pitch_type'] == 'frame' else 1,
kernel_size=hparams['predictor_kernel'])
if hparams.get('use_energy_embed', False):
self.energy_embed = Embedding(256, self.hidden_size, self.padding_idx)
self.energy_predictor = EnergyPredictor(
self.hidden_size,
n_chans=predictor_hidden,
n_layers=hparams['predictor_layers'],
dropout_rate=hparams['predictor_dropout'], odim=1,
kernel_size=hparams['predictor_kernel'])
def build_embedding(self, dictionary, embed_dim):
num_embeddings = len(dictionary)
emb = Embedding(num_embeddings, embed_dim, self.padding_idx)
return emb
def forward(self, txt_tokens, mel2ph=None, spk_embed=None,
ref_mels=None, f0=None, uv=None, energy=None, skip_decoder=False,
spk_embed_dur_id=None, spk_embed_f0_id=None, infer=False, **kwargs):
ret = {}
if hparams.get("use_bert", False):
encoder_out = self.encoder(txt_tokens, bert_feats=kwargs['bert_feats'], ph2word=kwargs['ph2word'], ret=ret)
else:
encoder_out = self.encoder(txt_tokens) # [B, T, C]
src_nonpadding = (txt_tokens > 0).float()[:, :, None]
# add ref style embed
# Not implemented
# variance encoder
var_embed = 0
# encoder_out_dur denotes encoder outputs for duration predictor
# in speech adaptation, duration predictor use old speaker embedding
if hparams['use_spk_embed']:
spk_embed_dur = spk_embed_f0 = spk_embed = self.spk_embed_proj(spk_embed)[:, None, :]
elif hparams['use_spk_id']:
spk_embed_id = spk_embed
if spk_embed_dur_id is None:
spk_embed_dur_id = spk_embed_id
if spk_embed_f0_id is None:
spk_embed_f0_id = spk_embed_id
spk_embed = self.spk_embed_proj(spk_embed_id)[:, None, :]
spk_embed_dur = spk_embed_f0 = spk_embed
if hparams['use_split_spk_id']:
spk_embed_dur = self.spk_embed_dur(spk_embed_dur_id)[:, None, :]
spk_embed_f0 = self.spk_embed_f0(spk_embed_f0_id)[:, None, :]
else:
spk_embed_dur = spk_embed_f0 = spk_embed = 0
# add dur
dur_inp = (encoder_out + var_embed + spk_embed_dur) * src_nonpadding
mel2ph = self.add_dur(dur_inp, mel2ph, txt_tokens, ret)
decoder_inp = F.pad(encoder_out, [0, 0, 1, 0])
mel2ph_ = mel2ph[..., None].repeat([1, 1, encoder_out.shape[-1]])
decoder_inp_origin = decoder_inp = torch.gather(decoder_inp, 1, mel2ph_) # [B, T, H]
tgt_nonpadding = (mel2ph > 0).float()[:, :, None]
# add pitch and energy embed
pitch_inp = (decoder_inp_origin + var_embed + spk_embed_f0) * tgt_nonpadding
if hparams['use_pitch_embed']:
pitch_inp_ph = (encoder_out + var_embed + spk_embed_f0) * src_nonpadding
decoder_inp = decoder_inp + self.add_pitch(pitch_inp, f0, uv, mel2ph, ret, encoder_out=pitch_inp_ph)
if hparams.get('use_energy_embed', False):
decoder_inp = decoder_inp + self.add_energy(pitch_inp, energy, ret)
ret['decoder_inp'] = decoder_inp = (decoder_inp + spk_embed) * tgt_nonpadding
if skip_decoder:
return ret
ret['mel_out'] = self.run_decoder(decoder_inp, tgt_nonpadding, ret, infer=infer, **kwargs)
return ret
def add_dur(self, dur_input, mel2ph, txt_tokens, ret):
"""
:param dur_input: [B, T_txt, H]
:param mel2ph: [B, T_mel]
:param txt_tokens: [B, T_txt]
:param ret:
:return:
"""
src_padding = txt_tokens == 0
dur_input = dur_input.detach() + hparams['predictor_grad'] * (dur_input - dur_input.detach())
if mel2ph is None:
dur, xs = self.dur_predictor.inference(dur_input, src_padding)
ret['dur'] = xs
ret['dur_choice'] = dur
mel2ph = self.length_regulator(dur, src_padding).detach()
# from modules.fastspeech.fake_modules import FakeLengthRegulator
# fake_lr = FakeLengthRegulator()
# fake_mel2ph = fake_lr(dur, (1 - src_padding.long()).sum(-1))[..., 0].detach()
# print(mel2ph == fake_mel2ph)
else:
ret['dur'] = self.dur_predictor(dur_input, src_padding)
ret['mel2ph'] = mel2ph
return mel2ph
def add_energy(self, decoder_inp, energy, ret):
decoder_inp = decoder_inp.detach() + hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach())
ret['energy_pred'] = energy_pred = self.energy_predictor(decoder_inp)[:, :, 0]
if energy is None:
energy = energy_pred
energy = torch.clamp(energy * 256 // 4, max=255).long()
energy_embed = self.energy_embed(energy)
return energy_embed
def add_pitch(self, decoder_inp, f0, uv, mel2ph, ret, encoder_out=None):
if hparams['pitch_type'] == 'ph':
pitch_pred_inp = encoder_out.detach() + hparams['predictor_grad'] * (encoder_out - encoder_out.detach())
pitch_padding = encoder_out.sum().abs() == 0
ret['pitch_pred'] = pitch_pred = self.pitch_predictor(pitch_pred_inp)
if f0 is None:
f0 = pitch_pred[:, :, 0]
ret['f0_denorm'] = f0_denorm = denorm_f0(f0, None, hparams, pitch_padding=pitch_padding)
pitch = f0_to_coarse(f0_denorm) # start from 0 [B, T_txt]
pitch = F.pad(pitch, [1, 0])
pitch = torch.gather(pitch, 1, mel2ph) # [B, T_mel]
pitch_embed = self.pitch_embed(pitch)
return pitch_embed
decoder_inp = decoder_inp.detach() + hparams['predictor_grad'] * (decoder_inp - decoder_inp.detach())
pitch_padding = mel2ph == 0
if hparams['pitch_type'] == 'cwt':
pitch_padding = None
ret['cwt'] = cwt_out = self.cwt_predictor(decoder_inp)
stats_out = self.cwt_stats_layers(encoder_out[:, 0, :]) # [B, 2]
mean = ret['f0_mean'] = stats_out[:, 0]
std = ret['f0_std'] = stats_out[:, 1]
cwt_spec = cwt_out[:, :, :10]
if f0 is None:
std = std * hparams['cwt_std_scale']
f0 = self.cwt2f0_norm(cwt_spec, mean, std, mel2ph)
if hparams['use_uv']:
assert cwt_out.shape[-1] == 11
uv = cwt_out[:, :, -1] > 0
elif hparams['pitch_ar']:
ret['pitch_pred'] = pitch_pred = self.pitch_predictor(decoder_inp, f0 if self.training else None)
if f0 is None:
f0 = pitch_pred[:, :, 0]
else:
ret['pitch_pred'] = pitch_pred = self.pitch_predictor(decoder_inp)
if f0 is None:
f0 = pitch_pred[:, :, 0]
if hparams['use_uv'] and uv is None:
uv = pitch_pred[:, :, 1] > 0
ret['f0_denorm'] = f0_denorm = denorm_f0(f0, uv, hparams, pitch_padding=pitch_padding)
if pitch_padding is not None:
f0[pitch_padding] = 0
pitch = f0_to_coarse(f0_denorm) # start from 0
pitch_embed = self.pitch_embed(pitch)
return pitch_embed
def run_decoder(self, decoder_inp, tgt_nonpadding, ret, infer, **kwargs):
x = decoder_inp # [B, T, H]
x = self.decoder(x)
x = self.mel_out(x)
return x * tgt_nonpadding
def cwt2f0_norm(self, cwt_spec, mean, std, mel2ph):
f0 = cwt2f0(cwt_spec, mean, std, hparams['cwt_scales'])
f0 = torch.cat(
[f0] + [f0[:, -1:]] * (mel2ph.shape[1] - f0.shape[1]), 1)
f0_norm = norm_f0(f0, None, hparams)
return f0_norm
def out2mel(self, out):
return out
@staticmethod
def mel_norm(x):
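        # maps log-mel values in roughly [-5.5, 0.8] to [-1, 1]; mel_denorm below is the inverse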
return (x + 5.5) / (6.3 / 2) - 1
@staticmethod
def mel_denorm(x):
return (x + 1) * (6.3 / 2) - 5.5
def expand_states(self, h, mel2ph):
h = F.pad(h, [0, 0, 1, 0])
mel2ph_ = mel2ph[..., None].repeat([1, 1, h.shape[-1]])
h = torch.gather(h, 1, mel2ph_) # [B, T, H]
return h
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/fastspeech/fs2.py |
import logging
import math
import torch
import torch.nn as nn
from torch.nn import functional as F
from modules.commons.espnet_positional_embedding import RelPositionalEncoding
from modules.commons.common_layers import SinusoidalPositionalEmbedding, Linear, EncSALayer, DecSALayer, BatchNorm1dTBC
from utils.hparams import hparams
DEFAULT_MAX_SOURCE_POSITIONS = 2000
DEFAULT_MAX_TARGET_POSITIONS = 2000
class TransformerEncoderLayer(nn.Module):
def __init__(self, hidden_size, dropout, kernel_size=None, num_heads=2, norm='ln'):
super().__init__()
self.hidden_size = hidden_size
self.dropout = dropout
self.num_heads = num_heads
self.op = EncSALayer(
hidden_size, num_heads, dropout=dropout,
attention_dropout=0.0, relu_dropout=dropout,
kernel_size=kernel_size
if kernel_size is not None else hparams['enc_ffn_kernel_size'],
padding=hparams['ffn_padding'],
norm=norm, act=hparams['ffn_act'])
def forward(self, x, **kwargs):
return self.op(x, **kwargs)
######################
# fastspeech modules
######################
class LayerNorm(torch.nn.LayerNorm):
"""Layer normalization module.
:param int nout: output dim size
:param int dim: dimension to be normalized
"""
def __init__(self, nout, dim=-1, eps=1e-5):
"""Construct an LayerNorm object."""
super(LayerNorm, self).__init__(nout, eps=eps)
self.dim = dim
def forward(self, x):
"""Apply layer normalization.
:param torch.Tensor x: input tensor
:return: layer normalized tensor
:rtype torch.Tensor
"""
if self.dim == -1:
return super(LayerNorm, self).forward(x)
return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1)
class DurationPredictor(torch.nn.Module):
"""Duration predictor module.
This is a module of duration predictor described in `FastSpeech: Fast, Robust and Controllable Text to Speech`_.
The duration predictor predicts a duration of each frame in log domain from the hidden embeddings of encoder.
.. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:
https://arxiv.org/pdf/1905.09263.pdf
Note:
The calculation domain of outputs is different between in `forward` and in `inference`. In `forward`,
the outputs are calculated in log domain but in `inference`, those are calculated in linear domain.
"""
    def __init__(self, idim, odims=1, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0, padding='SAME'):
"""Initilize duration predictor module.
Args:
idim (int): Input dimension.
n_layers (int, optional): Number of convolutional layers.
n_chans (int, optional): Number of channels of convolutional layers.
kernel_size (int, optional): Kernel size of convolutional layers.
dropout_rate (float, optional): Dropout rate.
offset (float, optional): Offset value to avoid nan in log domain.
"""
super(DurationPredictor, self).__init__()
self.offset = offset
self.conv = torch.nn.ModuleList()
self.kernel_size = kernel_size
self.padding = padding
for idx in range(n_layers):
in_chans = idim if idx == 0 else n_chans
self.conv += [torch.nn.Sequential(
torch.nn.ConstantPad1d(((kernel_size - 1) // 2, (kernel_size - 1) // 2)
if padding == 'SAME'
else (kernel_size - 1, 0), 0),
torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=0),
torch.nn.ReLU(),
LayerNorm(n_chans, dim=1),
torch.nn.Dropout(dropout_rate)
)]
self.linear = torch.nn.Linear(n_chans, odims)
def _forward(self, xs, x_masks=None, is_inference=False):
xs = xs.transpose(1, -1) # (B, idim, Tmax)
for f in self.conv:
xs = f(xs) # (B, C, Tmax)
if x_masks is not None:
xs = xs * (1 - x_masks.float())[:, None, :]
xs = self.linear(xs.transpose(1, -1)) # [B, T, C]
xs = xs * (1 - x_masks.float())[:, :, None] # (B, T, C)
if is_inference:
return self.out2dur(xs), xs
else:
if hparams['dur_loss'] in ['mse']:
xs = xs.squeeze(-1) # (B, Tmax)
return xs
def out2dur(self, xs):
if hparams['dur_loss'] in ['mse']:
# NOTE: calculate in log domain
xs = xs.squeeze(-1) # (B, Tmax)
dur = torch.clamp(torch.round(xs.exp() - self.offset), min=0).long() # avoid negative value
elif hparams['dur_loss'] == 'mog':
            raise NotImplementedError
elif hparams['dur_loss'] == 'crf':
dur = torch.LongTensor(self.crf.decode(xs)).cuda()
return dur
def forward(self, xs, x_masks=None):
"""Calculate forward propagation.
Args:
xs (Tensor): Batch of input sequences (B, Tmax, idim).
x_masks (ByteTensor, optional): Batch of masks indicating padded part (B, Tmax).
Returns:
Tensor: Batch of predicted durations in log domain (B, Tmax).
"""
return self._forward(xs, x_masks, False)
def inference(self, xs, x_masks=None):
"""Inference duration.
Args:
xs (Tensor): Batch of input sequences (B, Tmax, idim).
x_masks (ByteTensor, optional): Batch of masks indicating padded part (B, Tmax).
Returns:
LongTensor: Batch of predicted durations in linear domain (B, Tmax).
"""
return self._forward(xs, x_masks, True)
class SyntaDurationPredictor(torch.nn.Module):
def __init__(self, idim, n_layers=2, n_chans=384, kernel_size=3, dropout_rate=0.1, offset=1.0):
super(SyntaDurationPredictor, self).__init__()
from modules.syntaspeech.syntactic_graph_encoder import GraphAuxEnc
self.graph_encoder = GraphAuxEnc(in_dim=idim, hid_dim=idim, out_dim=idim)
self.offset = offset
self.conv = torch.nn.ModuleList()
self.kernel_size = kernel_size
for idx in range(n_layers):
in_chans = idim if idx == 0 else n_chans
self.conv += [torch.nn.Sequential(
torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=kernel_size // 2),
torch.nn.ReLU(),
LayerNorm(n_chans, dim=1),
torch.nn.Dropout(dropout_rate)
)]
self.linear = nn.Sequential(torch.nn.Linear(n_chans, 1), nn.Softplus())
def forward(self, x, x_padding=None, ph2word=None, graph_lst=None, etypes_lst=None):
x = x.transpose(1, -1) # (B, idim, Tmax)
assert ph2word is not None and graph_lst is not None and etypes_lst is not None
x_graph = self.graph_encoder(graph_lst, x, ph2word, etypes_lst)
x = x + x_graph * 1.
for f in self.conv:
x = f(x) # (B, C, Tmax)
if x_padding is not None:
x = x * (1 - x_padding.float())[:, None, :]
x = self.linear(x.transpose(1, -1)) # [B, T, C]
x = x * (1 - x_padding.float())[:, :, None] # (B, T, C)
x = x[..., 0] # (B, Tmax)
return x
class LengthRegulator(torch.nn.Module):
def __init__(self, pad_value=0.0):
super(LengthRegulator, self).__init__()
self.pad_value = pad_value
def forward(self, dur, dur_padding=None, alpha=1.0):
"""
Example (no batch dim version):
1. dur = [2,2,3]
2. token_idx = [[1],[2],[3]], dur_cumsum = [2,4,7], dur_cumsum_prev = [0,2,4]
3. token_mask = [[1,1,0,0,0,0,0],
[0,0,1,1,0,0,0],
[0,0,0,0,1,1,1]]
4. token_idx * token_mask = [[1,1,0,0,0,0,0],
[0,0,2,2,0,0,0],
[0,0,0,0,3,3,3]]
5. (token_idx * token_mask).sum(0) = [1,1,2,2,3,3,3]
:param dur: Batch of durations of each frame (B, T_txt)
:param dur_padding: Batch of padding of each frame (B, T_txt)
:param alpha: duration rescale coefficient
:return:
mel2ph (B, T_speech)
"""
assert alpha > 0
dur = torch.round(dur.float() * alpha).long()
if dur_padding is not None:
dur = dur * (1 - dur_padding.long())
token_idx = torch.arange(1, dur.shape[1] + 1)[None, :, None].to(dur.device)
dur_cumsum = torch.cumsum(dur, 1)
dur_cumsum_prev = F.pad(dur_cumsum, [1, -1], mode='constant', value=0)
pos_idx = torch.arange(dur.sum(-1).max())[None, None].to(dur.device)
token_mask = (pos_idx >= dur_cumsum_prev[:, :, None]) & (pos_idx < dur_cumsum[:, :, None])
mel2ph = (token_idx * token_mask.long()).sum(1)
return mel2ph
class PitchPredictor(torch.nn.Module):
def __init__(self, idim, n_layers=5, n_chans=384, odim=2, kernel_size=5,
dropout_rate=0.1, padding='SAME'):
"""Initilize pitch predictor module.
Args:
idim (int): Input dimension.
n_layers (int, optional): Number of convolutional layers.
n_chans (int, optional): Number of channels of convolutional layers.
kernel_size (int, optional): Kernel size of convolutional layers.
dropout_rate (float, optional): Dropout rate.
"""
super(PitchPredictor, self).__init__()
self.conv = torch.nn.ModuleList()
self.kernel_size = kernel_size
self.padding = padding
for idx in range(n_layers):
in_chans = idim if idx == 0 else n_chans
self.conv += [torch.nn.Sequential(
torch.nn.ConstantPad1d(((kernel_size - 1) // 2, (kernel_size - 1) // 2)
if padding == 'SAME'
else (kernel_size - 1, 0), 0),
torch.nn.Conv1d(in_chans, n_chans, kernel_size, stride=1, padding=0),
torch.nn.ReLU(),
LayerNorm(n_chans, dim=1),
torch.nn.Dropout(dropout_rate)
)]
self.linear = torch.nn.Linear(n_chans, odim)
self.embed_positions = SinusoidalPositionalEmbedding(idim, 0, init_size=4096)
self.pos_embed_alpha = nn.Parameter(torch.Tensor([1]))
def forward(self, xs):
"""
:param xs: [B, T, H]
        :return: [B, T, odim]
"""
positions = self.pos_embed_alpha * self.embed_positions(xs[..., 0])
xs = xs + positions
xs = xs.transpose(1, -1) # (B, idim, Tmax)
for f in self.conv:
xs = f(xs) # (B, C, Tmax)
# NOTE: calculate in log domain
xs = self.linear(xs.transpose(1, -1)) # (B, Tmax, H)
return xs
class EnergyPredictor(PitchPredictor):
pass
def mel2ph_to_dur(mel2ph, T_txt, max_dur=None):
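    # Worked example: mel2ph = [[1, 1, 2, 2, 2, 3]] with T_txt = 3 gives dur = [[2, 3, 1]]
    # (number of mel frames assigned to each of the 3 phonemes; index 0 is the padding bucket).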
B, _ = mel2ph.shape
dur = mel2ph.new_zeros(B, T_txt + 1).scatter_add(1, mel2ph, torch.ones_like(mel2ph))
dur = dur[:, 1:]
if max_dur is not None:
dur = dur.clamp(max=max_dur)
return dur
class FFTBlocks(nn.Module):
def __init__(self, hidden_size, num_layers, ffn_kernel_size=9, dropout=None, num_heads=2,
use_pos_embed=True, use_last_norm=True, norm='ln', use_pos_embed_alpha=True):
super().__init__()
self.num_layers = num_layers
embed_dim = self.hidden_size = hidden_size
self.dropout = dropout if dropout is not None else hparams['dropout']
self.use_pos_embed = use_pos_embed
self.use_last_norm = use_last_norm
if use_pos_embed:
self.max_source_positions = DEFAULT_MAX_TARGET_POSITIONS
self.padding_idx = 0
self.pos_embed_alpha = nn.Parameter(torch.Tensor([1])) if use_pos_embed_alpha else 1
self.embed_positions = SinusoidalPositionalEmbedding(
embed_dim, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS,
)
self.layers = nn.ModuleList([])
self.layers.extend([
TransformerEncoderLayer(self.hidden_size, self.dropout,
kernel_size=ffn_kernel_size, num_heads=num_heads)
for _ in range(self.num_layers)
])
if self.use_last_norm:
if norm == 'ln':
self.layer_norm = nn.LayerNorm(embed_dim)
elif norm == 'bn':
self.layer_norm = BatchNorm1dTBC(embed_dim)
else:
self.layer_norm = None
def forward(self, x, padding_mask=None, attn_mask=None, return_hiddens=False):
"""
:param x: [B, T, C]
:param padding_mask: [B, T]
:return: [B, T, C] or [L, B, T, C]
"""
padding_mask = x.abs().sum(-1).eq(0).data if padding_mask is None else padding_mask
nonpadding_mask_TB = 1 - padding_mask.transpose(0, 1).float()[:, :, None] # [T, B, 1]
if self.use_pos_embed:
positions = self.pos_embed_alpha * self.embed_positions(x[..., 0])
x = x + positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1) * nonpadding_mask_TB
hiddens = []
for layer in self.layers:
x = layer(x, encoder_padding_mask=padding_mask, attn_mask=attn_mask) * nonpadding_mask_TB
hiddens.append(x)
if self.use_last_norm:
x = self.layer_norm(x) * nonpadding_mask_TB
if return_hiddens:
x = torch.stack(hiddens, 0) # [L, T, B, C]
x = x.transpose(1, 2) # [L, B, T, C]
else:
x = x.transpose(0, 1) # [B, T, C]
return x
class FastspeechEncoder(FFTBlocks):
def __init__(self, embed_tokens, hidden_size=None, num_layers=None, kernel_size=None, num_heads=2):
hidden_size = hparams['hidden_size'] if hidden_size is None else hidden_size
kernel_size = hparams['enc_ffn_kernel_size'] if kernel_size is None else kernel_size
num_layers = hparams['dec_layers'] if num_layers is None else num_layers
super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads,
use_pos_embed=False) # use_pos_embed_alpha for compatibility
self.embed_tokens = embed_tokens
self.embed_scale = math.sqrt(hidden_size)
self.padding_idx = 0
if hparams.get('rel_pos') is not None and hparams['rel_pos']:
self.embed_positions = RelPositionalEncoding(hidden_size, dropout_rate=0.0)
else:
self.embed_positions = SinusoidalPositionalEmbedding(
hidden_size, self.padding_idx, init_size=DEFAULT_MAX_TARGET_POSITIONS,
)
def forward(self, txt_tokens):
"""
:param txt_tokens: [B, T]
:return: {
'encoder_out': [T x B x C]
}
"""
encoder_padding_mask = txt_tokens.eq(self.padding_idx).data
x = self.forward_embedding(txt_tokens) # [B, T, H]
x = super(FastspeechEncoder, self).forward(x, encoder_padding_mask)
return x
def forward_embedding(self, txt_tokens):
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(txt_tokens)
if hparams['use_pos_embed']:
if hparams.get('rel_pos') is not None and hparams['rel_pos']:
x = self.embed_positions(x)
else:
positions = self.embed_positions(txt_tokens)
x = x + positions
x = F.dropout(x, p=self.dropout, training=self.training)
return x
class FastspeechDecoder(FFTBlocks):
def __init__(self, hidden_size=None, num_layers=None, kernel_size=None, num_heads=None):
num_heads = hparams['num_heads'] if num_heads is None else num_heads
hidden_size = hparams['hidden_size'] if hidden_size is None else hidden_size
kernel_size = hparams['dec_ffn_kernel_size'] if kernel_size is None else kernel_size
num_layers = hparams['dec_layers'] if num_layers is None else num_layers
super().__init__(hidden_size, num_layers, kernel_size, num_heads=num_heads)
| EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/fastspeech/tts_modules.py |
from modules.commons.common_layers import *
from utils.hparams import hparams
from modules.fastspeech.tts_modules import PitchPredictor
from utils.pitch_utils import denorm_f0
class Prenet(nn.Module):
def __init__(self, in_dim=80, out_dim=256, kernel=5, n_layers=3, strides=None):
super(Prenet, self).__init__()
padding = kernel // 2
self.layers = []
self.strides = strides if strides is not None else [1] * n_layers
for l in range(n_layers):
self.layers.append(nn.Sequential(
nn.Conv1d(in_dim, out_dim, kernel_size=kernel, padding=padding, stride=self.strides[l]),
nn.ReLU(),
nn.BatchNorm1d(out_dim)
))
in_dim = out_dim
self.layers = nn.ModuleList(self.layers)
self.out_proj = nn.Linear(out_dim, out_dim)
def forward(self, x):
"""
:param x: [B, T, 80]
:return: [L, B, T, H], [B, T, H]
"""
padding_mask = x.abs().sum(-1).eq(0).data # [B, T]
nonpadding_mask_TB = 1 - padding_mask.float()[:, None, :] # [B, 1, T]
x = x.transpose(1, 2)
hiddens = []
for i, l in enumerate(self.layers):
nonpadding_mask_TB = nonpadding_mask_TB[:, :, ::self.strides[i]]
x = l(x) * nonpadding_mask_TB
hiddens.append(x)
hiddens = torch.stack(hiddens, 0) # [L, B, H, T]
hiddens = hiddens.transpose(2, 3) # [L, B, T, H]
x = self.out_proj(x.transpose(1, 2)) # [B, T, H]
x = x * nonpadding_mask_TB.transpose(1, 2)
return hiddens, x
class ConvBlock(nn.Module):
def __init__(self, idim=80, n_chans=256, kernel_size=3, stride=1, norm='gn', dropout=0):
super().__init__()
self.conv = ConvNorm(idim, n_chans, kernel_size, stride=stride)
self.norm = norm
if self.norm == 'bn':
self.norm = nn.BatchNorm1d(n_chans)
elif self.norm == 'in':
self.norm = nn.InstanceNorm1d(n_chans, affine=True)
elif self.norm == 'gn':
self.norm = nn.GroupNorm(n_chans // 16, n_chans)
elif self.norm == 'ln':
self.norm = LayerNorm(n_chans // 16, n_chans)
elif self.norm == 'wn':
self.conv = torch.nn.utils.weight_norm(self.conv.conv)
self.dropout = nn.Dropout(dropout)
self.relu = nn.ReLU()
def forward(self, x):
"""
:param x: [B, C, T]
:return: [B, C, T]
"""
x = self.conv(x)
if not isinstance(self.norm, str):
if self.norm == 'none':
pass
elif self.norm == 'ln':
x = self.norm(x.transpose(1, 2)).transpose(1, 2)
else:
x = self.norm(x)
x = self.relu(x)
x = self.dropout(x)
return x
class ConvStacks(nn.Module):
def __init__(self, idim=80, n_layers=5, n_chans=256, odim=32, kernel_size=5, norm='gn',
dropout=0, strides=None, res=True):
super().__init__()
self.conv = torch.nn.ModuleList()
self.kernel_size = kernel_size
self.res = res
self.in_proj = Linear(idim, n_chans)
if strides is None:
strides = [1] * n_layers
else:
assert len(strides) == n_layers
for idx in range(n_layers):
self.conv.append(ConvBlock(
n_chans, n_chans, kernel_size, stride=strides[idx], norm=norm, dropout=dropout))
self.out_proj = Linear(n_chans, odim)
def forward(self, x, return_hiddens=False):
"""
:param x: [B, T, H]
        :return: [B, T, odim]
"""
x = self.in_proj(x)
x = x.transpose(1, -1) # (B, idim, Tmax)
hiddens = []
for f in self.conv:
x_ = f(x)
x = x + x_ if self.res else x_ # (B, C, Tmax)
hiddens.append(x)
x = x.transpose(1, -1)
x = self.out_proj(x) # (B, Tmax, H)
if return_hiddens:
hiddens = torch.stack(hiddens, 1) # [B, L, C, T]
return x, hiddens
return x
class PitchExtractor(nn.Module):
def __init__(self, n_mel_bins=80, conv_layers=2):
super().__init__()
self.hidden_size = hparams['hidden_size']
self.predictor_hidden = hparams['predictor_hidden'] if hparams['predictor_hidden'] > 0 else self.hidden_size
self.conv_layers = conv_layers
self.mel_prenet = Prenet(n_mel_bins, self.hidden_size, strides=[1, 1, 1])
if self.conv_layers > 0:
self.mel_encoder = ConvStacks(
idim=self.hidden_size, n_chans=self.hidden_size, odim=self.hidden_size, n_layers=self.conv_layers)
self.pitch_predictor = PitchPredictor(
self.hidden_size, n_chans=self.predictor_hidden,
n_layers=5, dropout_rate=0.1, odim=2,
padding=hparams['ffn_padding'], kernel_size=hparams['predictor_kernel'])
def forward(self, mel_input=None):
ret = {}
mel_hidden = self.mel_prenet(mel_input)[1]
if self.conv_layers > 0:
mel_hidden = self.mel_encoder(mel_hidden)
ret['pitch_pred'] = pitch_pred = self.pitch_predictor(mel_hidden)
pitch_padding = mel_input.abs().sum(-1) == 0
use_uv = hparams['pitch_type'] == 'frame' and hparams['use_uv']
ret['f0_denorm_pred'] = denorm_f0(
pitch_pred[:, :, 0], (pitch_pred[:, :, 1] > 0) if use_uv else None,
hparams, pitch_padding=pitch_padding)
return ret | EXA-1-master | exa/models/AudioGPT/NeuralSeq/modules/fastspeech/pe.py |
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from transformers import AutoModel
from .audio import get_audio_encoder
class Projection(nn.Module):
def __init__(self, d_in: int, d_out: int, p: float=0.5) -> None:
super().__init__()
self.linear1 = nn.Linear(d_in, d_out, bias=False)
self.linear2 = nn.Linear(d_out, d_out, bias=False)
self.layer_norm = nn.LayerNorm(d_out)
self.drop = nn.Dropout(p)
def forward(self, x: torch.Tensor) -> torch.Tensor:
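        # residual two-layer projection: LayerNorm(W1 x + Dropout(W2 GELU(W1 x)))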
embed1 = self.linear1(x)
embed2 = self.drop(self.linear2(F.gelu(embed1)))
embeds = self.layer_norm(embed1 + embed2)
return embeds
class AudioEncoder(nn.Module):
def __init__(self, audioenc_name:str, d_in: int, d_out: int, sample_rate: int, window_size: int,
hop_size: int, mel_bins: int, fmin: int, fmax: int, classes_num: int) -> None:
super().__init__()
audio_encoder = get_audio_encoder(audioenc_name)
self.base = audio_encoder(
sample_rate, window_size,
hop_size, mel_bins, fmin, fmax,
classes_num, d_in)
self.projection = Projection(d_in, d_out)
def forward(self, x):
out_dict = self.base(x)
audio_features, audio_classification_output = out_dict['embedding'], out_dict['clipwise_output']
projected_vec = self.projection(audio_features)
return projected_vec, audio_classification_output
class TextEncoder(nn.Module):
def __init__(self, d_out: int, text_model: str, transformer_embed_dim: int) -> None:
super().__init__()
self.base = AutoModel.from_pretrained(text_model)
self.projection = Projection(transformer_embed_dim, d_out)
def forward(self, x):
out = self.base(**x)[0]
out = out[:, 0, :] # get CLS token output
projected_vec = self.projection(out)
return projected_vec
class CLAP(nn.Module):
def __init__(self,
# audio
audioenc_name: str,
sample_rate: int,
window_size: int,
hop_size: int,
mel_bins: int,
fmin: int,
fmax: int,
classes_num: int,
out_emb: int,
# text
text_model: str,
transformer_embed_dim: int,
# common
d_proj: int,
):
super().__init__()
self.audio_encoder = AudioEncoder(
audioenc_name, out_emb, d_proj,
sample_rate, window_size, hop_size, mel_bins, fmin, fmax, classes_num)
self.caption_encoder = TextEncoder(
d_proj, text_model, transformer_embed_dim
)
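        # learnable temperature for the contrastive logits, initialized to ln(1/0.07) as in CLIP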
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
def forward(self, audio, text):
audio_embed, _ = self.audio_encoder(audio)
caption_embed = self.caption_encoder(text)
return caption_embed, audio_embed, self.logit_scale.exp() | EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/wav_evaluation/models/clap.py |
from . import clap
from . import audio
from . import utils | EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/wav_evaluation/models/__init__.py |
import argparse
import yaml
import sys
def read_config_as_args(config_path,args=None,is_config_str=False):
return_dict = {}
if config_path is not None:
if is_config_str:
yml_config = yaml.load(config_path, Loader=yaml.FullLoader)
else:
with open(config_path, "r") as f:
yml_config = yaml.load(f, Loader=yaml.FullLoader)
    if args is not None:
for k, v in yml_config.items():
if k in args.__dict__:
args.__dict__[k] = v
else:
sys.stderr.write("Ignored unknown parameter {} in yaml.\n".format(k))
else:
for k, v in yml_config.items():
return_dict[k] = v
    args = args if args is not None else return_dict
return argparse.Namespace(**args)
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/wav_evaluation/models/utils.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
def get_audio_encoder(name: str):
if name == "Cnn14":
return Cnn14
else:
raise Exception('The audio encoder name {} is incorrect or not supported'.format(name))
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.conv2 = nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception('Incorrect argument!')
return x
class ConvBlock5x5(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock5x5, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(5, 5), stride=(1, 1),
padding=(2, 2), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception('Incorrect argument!')
return x
class AttBlock(nn.Module):
def __init__(self, n_in, n_out, activation='linear', temperature=1.):
super(AttBlock, self).__init__()
self.activation = activation
self.temperature = temperature
self.att = nn.Conv1d(in_channels=n_in, out_channels=n_out, kernel_size=1, stride=1, padding=0, bias=True)
self.cla = nn.Conv1d(in_channels=n_in, out_channels=n_out, kernel_size=1, stride=1, padding=0, bias=True)
self.bn_att = nn.BatchNorm1d(n_out)
def forward(self, x):
# x: (n_samples, n_in, n_time)
norm_att = torch.softmax(torch.clamp(self.att(x), -10, 10), dim=-1)
cla = self.nonlinear_transform(self.cla(x))
x = torch.sum(norm_att * cla, dim=2)
return x, norm_att, cla
def nonlinear_transform(self, x):
if self.activation == 'linear':
return x
elif self.activation == 'sigmoid':
return torch.sigmoid(x)
class Cnn14(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num, out_emb):
super(Cnn14, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center, pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin, top_db=top_db,
freeze_parameters=True)
self.bn0 = nn.BatchNorm2d(64)
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
# out_emb is 2048 for best Cnn14
self.fc1 = nn.Linear(2048, out_emb, bias=True)
self.fc_audioset = nn.Linear(out_emb, classes_num, bias=True)
def forward(self, input, mixup_lambda=None):
"""
Input: (batch_size, data_length)
"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block5(x, pool_size=(2, 2), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block6(x, pool_size=(1, 1), pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = torch.mean(x, dim=3)
(x1, _) = torch.max(x, dim=2)
x2 = torch.mean(x, dim=2)
x = x1 + x2
x = F.dropout(x, p=0.5, training=self.training)
x = F.relu_(self.fc1(x))
embedding = F.dropout(x, p=0.5, training=self.training)
clipwise_output = torch.sigmoid(self.fc_audioset(x))
output_dict = {'clipwise_output': clipwise_output, 'embedding': embedding}
return output_dict | EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/wav_evaluation/models/audio.py |
import random
import torchaudio
from torch._six import string_classes
import collections
import re
import torch.nn.functional as F
import numpy as np
from transformers import AutoTokenizer
from wav_evaluation.models.utils import read_config_as_args
from wav_evaluation.models.clap import CLAP
import math
import torchaudio.transforms as T
import os
import torch
from importlib_resources import files
class CLAPWrapper():
"""
A class for interfacing CLAP model.
"""
    def __init__(self, model_fp, config_path, use_cuda=False):
self.np_str_obj_array_pattern = re.compile(r'[SaUO]')
self.file_path = os.path.realpath(__file__)
self.default_collate_err_msg_format = (
"default_collate: batch must contain tensors, numpy arrays, numbers, "
"dicts or lists; found {}")
with open(config_path,'r') as f:
self.config_as_str = f.read()
self.model_fp = model_fp
self.use_cuda = use_cuda
self.clap, self.tokenizer, self.args = self.load_clap()
def load_clap(self):
r"""Load CLAP model with args from config file"""
args = read_config_as_args(self.config_as_str, is_config_str=True)
if 'bert' in args.text_model:
self.token_keys = ['input_ids', 'token_type_ids', 'attention_mask']
else:
self.token_keys = ['input_ids', 'attention_mask']
clap = CLAP(
audioenc_name=args.audioenc_name,
sample_rate=args.sampling_rate,
window_size=args.window_size,
hop_size=args.hop_size,
mel_bins=args.mel_bins,
fmin=args.fmin,
fmax=args.fmax,
classes_num=args.num_classes,
out_emb=args.out_emb,
text_model=args.text_model,
transformer_embed_dim=args.transformer_embed_dim,
d_proj=args.d_proj
)
# Load pretrained weights for model
model_state_dict = torch.load(self.model_fp, map_location=torch.device('cpu'))['model']
clap.load_state_dict(model_state_dict)
clap.eval() # set clap in eval mode
tokenizer = AutoTokenizer.from_pretrained(args.text_model)
if self.use_cuda and torch.cuda.is_available():
clap = clap.cuda()
return clap, tokenizer, args
def default_collate(self, batch):
r"""Puts each data field into a tensor with outer dimension batch size"""
elem = batch[0]
elem_type = type(elem)
if isinstance(elem, torch.Tensor):
out = None
if torch.utils.data.get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap':
# array of string classes and object
if self.np_str_obj_array_pattern.search(elem.dtype.str) is not None:
raise TypeError(
self.default_collate_err_msg_format.format(elem.dtype))
return self.default_collate([torch.as_tensor(b) for b in batch])
elif elem.shape == (): # scalars
return torch.as_tensor(batch)
elif isinstance(elem, float):
return torch.tensor(batch, dtype=torch.float64)
elif isinstance(elem, int):
return torch.tensor(batch)
elif isinstance(elem, string_classes):
return batch
elif isinstance(elem, collections.abc.Mapping):
return {key: self.default_collate([d[key] for d in batch]) for key in elem}
elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple
return elem_type(*(self.default_collate(samples) for samples in zip(*batch)))
elif isinstance(elem, collections.abc.Sequence):
# check to make sure that the elements in batch have consistent size
it = iter(batch)
elem_size = len(next(it))
if not all(len(elem) == elem_size for elem in it):
raise RuntimeError(
'each element in list of batch should be of equal size')
transposed = zip(*batch)
return [self.default_collate(samples) for samples in transposed]
raise TypeError(self.default_collate_err_msg_format.format(elem_type))
def resample_and_duration(self,wav_sr,audio_duration,resample=False):
audio_time_series,sample_rate = wav_sr
resample_rate = self.args.sampling_rate
if resample:
resampler = T.Resample(sample_rate, resample_rate)
audio_time_series = resampler(audio_time_series)
audio_time_series = audio_time_series.reshape(-1)
# audio_time_series is shorter than predefined audio duration,
# so audio_time_series is extended
if audio_duration*sample_rate >= audio_time_series.shape[0]:
repeat_factor = int(np.ceil((audio_duration*sample_rate) /
audio_time_series.shape[0]))
# Repeat audio_time_series by repeat_factor to match audio_duration
audio_time_series = audio_time_series.repeat(repeat_factor)
# remove excess part of audio_time_series
audio_time_series = audio_time_series[0:audio_duration*sample_rate]
else:
# audio_time_series is longer than predefined audio duration,
# so audio_time_series is trimmed
start_index = random.randrange(
audio_time_series.shape[0] - audio_duration*sample_rate)
audio_time_series = audio_time_series[start_index:start_index +
audio_duration*sample_rate]
return torch.FloatTensor(audio_time_series)
def load_audio_into_tensor(self, audio_path, audio_duration, resample=False):
r"""Loads audio file and returns raw audio."""
# Randomly sample a segment of audio_duration from the clip or pad to match duration
audio_time_series, sample_rate = torchaudio.load(audio_path)
return self.resample_and_duration((audio_time_series, sample_rate),audio_duration,resample)
def preprocess_audio(self, audio_files, resample):
r"""Load list of audio files and return raw audio"""
audio_tensors = []
for audio_file in audio_files:
if isinstance(audio_file,str):
audio_tensor = self.load_audio_into_tensor(audio_file, self.args.duration, resample)
elif isinstance(audio_file,tuple):
audio_tensor = self.resample_and_duration(audio_file, self.args.duration, resample)
else:
                raise TypeError(f"type of audio_file is {type(audio_file)}, which is not supported")
audio_tensor = audio_tensor.reshape(
1, -1).cuda() if self.use_cuda and torch.cuda.is_available() else audio_tensor.reshape(1, -1)
audio_tensors.append(audio_tensor)
return self.default_collate(audio_tensors)
def preprocess_text(self, text_queries):
r"""Load list of class labels and return tokenized text"""
tokenized_texts = []
for ttext in text_queries:
tok = self.tokenizer.encode_plus(
text=ttext, add_special_tokens=True, max_length=self.args.text_len, padding="max_length", return_tensors="pt") # max_length=self.args.text_len, padding=True,
for key in self.token_keys:
tok[key] = tok[key].reshape(-1).cuda() if self.use_cuda and torch.cuda.is_available() else tok[key].reshape(-1)
tokenized_texts.append(tok)
return self.default_collate(tokenized_texts)
def get_text_embeddings(self, class_labels):
r"""Load list of class labels and return text embeddings"""
preprocessed_text = self.preprocess_text(class_labels)
text_embeddings = self._get_text_embeddings(preprocessed_text)
text_embeddings = text_embeddings/torch.norm(text_embeddings, dim=-1, keepdim=True)
return text_embeddings
def get_audio_embeddings(self, audio_files, resample):
r"""Load list of audio files and return a audio embeddings"""
preprocessed_audio = self.preprocess_audio(audio_files, resample)
audio_embeddings = self._get_audio_embeddings(preprocessed_audio)
audio_embeddings = audio_embeddings/torch.norm(audio_embeddings, dim=-1, keepdim=True)
return audio_embeddings
def _get_text_embeddings(self, preprocessed_text):
r"""Load preprocessed text and return text embeddings"""
with torch.no_grad():
text_embeddings = self.clap.caption_encoder(preprocessed_text)
text_embeddings = text_embeddings/torch.norm(text_embeddings, dim=-1, keepdim=True)
return text_embeddings
def _get_audio_embeddings(self, preprocessed_audio):
r"""Load preprocessed audio and return a audio embeddings"""
with torch.no_grad():
preprocessed_audio = preprocessed_audio.reshape(
preprocessed_audio.shape[0], preprocessed_audio.shape[2])
            # index [0] is the audio embedding; index [1] holds the output class probabilities
audio_embeddings = self.clap.audio_encoder(preprocessed_audio)[0]
audio_embeddings = audio_embeddings/torch.norm(audio_embeddings, dim=-1, keepdim=True)
return audio_embeddings
def compute_similarity(self, audio_embeddings, text_embeddings,use_logit_scale = True):
r"""Compute similarity between text and audio embeddings"""
if use_logit_scale:
logit_scale = self.clap.logit_scale.exp()
similarity = logit_scale*text_embeddings @ audio_embeddings.T
else:
similarity = text_embeddings @ audio_embeddings.T
return similarity.T
def cal_clap_score(self,txt,audio_path):
        text_embeddings = self.get_text_embeddings([txt])  # embeddings are already L2-normalized
        audio_embeddings = self.get_audio_embeddings([audio_path], resample=True)  # relatively slow: loads the audio and resamples it to 44100 Hz
score = self.compute_similarity(audio_embeddings, text_embeddings,use_logit_scale=False).squeeze().cpu().numpy()
return score
def _generic_batch_inference(self, func, *args):
r"""Process audio and/or text per batch"""
input_tmp = args[0]
batch_size = args[-1]
# args[0] has audio_files, args[1] has class_labels
inputs = [args[0], args[1]] if len(args) == 3 else [args[0]]
args0_len = len(args[0])
# compute text_embeddings once for all the audio_files batches
if len(inputs) == 2:
text_embeddings = self.get_text_embeddings(args[1])
inputs = [args[0], args[1], text_embeddings]
dataset_idx = 0
for _ in range(math.ceil(args0_len/batch_size)):
next_batch_idx = dataset_idx + batch_size
# batch size is bigger than available audio/text items
if next_batch_idx >= args0_len:
inputs[0] = input_tmp[dataset_idx:]
return func(*tuple(inputs))
else:
inputs[0] = input_tmp[dataset_idx:next_batch_idx]
yield func(*tuple(inputs))
dataset_idx = next_batch_idx
def get_audio_embeddings_per_batch(self, audio_files, batch_size):
r"""Load preprocessed audio and return a audio embeddings per batch"""
return self._generic_batch_inference(self.get_audio_embeddings, audio_files, batch_size)
def get_text_embeddings_per_batch(self, class_labels, batch_size):
r"""Load preprocessed text and return text embeddings per batch"""
return self._generic_batch_inference(self.get_text_embeddings, class_labels, batch_size)
def classify_audio_files_per_batch(self, audio_files, class_labels, batch_size):
r"""Compute classification probabilities for each audio recording in a batch and each class label"""
return self._generic_batch_inference(self.classify_audio_files, audio_files, class_labels, batch_size)
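# Illustrative usage sketch (not part of the original file). The checkpoint, config and wav paths
# below are placeholders; `cal_clap_score` embeds both modalities and returns the cosine similarity
# between the (normalized) text and audio embeddings.
if __name__ == '__main__':
    clap_wrapper = CLAPWrapper(model_fp='CLAP_weights.pth', config_path='clap_config.yml', use_cuda=False)
    score = clap_wrapper.cal_clap_score('a dog barking in the distance', 'example.wav')
    print('CLAP score:', float(score))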
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/wav_evaluation/models/CLAPWrapper.py |
import os
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d
from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm
from pathlib import Path
import yaml
import numpy as np
from argparse import Namespace
LRELU_SLOPE = 0.1
def get_padding(kernel_size, dilation=1):
return int((kernel_size*dilation - dilation)/2)
def init_weights(m, mean=0.0, std=0.01):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(mean, std)
class ResBlock1(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)):
super(ResBlock1, self).__init__()
self.h = h
self.convs1 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2],
padding=get_padding(kernel_size, dilation[2])))
])
self.convs1.apply(init_weights)
self.convs2 = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1,
padding=get_padding(kernel_size, 1)))
])
self.convs2.apply(init_weights)
def forward(self, x):
for c1, c2 in zip(self.convs1, self.convs2):
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c1(xt)
xt = F.leaky_relu(xt, LRELU_SLOPE)
xt = c2(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs1:
remove_weight_norm(l)
for l in self.convs2:
remove_weight_norm(l)
class ResBlock2(torch.nn.Module):
def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)):
super(ResBlock2, self).__init__()
self.h = h
self.convs = nn.ModuleList([
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0],
padding=get_padding(kernel_size, dilation[0]))),
weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1],
padding=get_padding(kernel_size, dilation[1])))
])
self.convs.apply(init_weights)
def forward(self, x):
for c in self.convs:
xt = F.leaky_relu(x, LRELU_SLOPE)
xt = c(xt)
x = xt + x
return x
def remove_weight_norm(self):
for l in self.convs:
remove_weight_norm(l)
class Generator(torch.nn.Module):
def __init__(self, h):
super(Generator, self).__init__()
self.h = h
self.num_kernels = len(h.resblock_kernel_sizes)
self.num_upsamples = len(h.upsample_rates)
self.conv_pre = weight_norm(Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3))
resblock = ResBlock1 if h.resblock == '1' else ResBlock2
self.ups = nn.ModuleList()
for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)):
self.ups.append(weight_norm(
ConvTranspose1d(h.upsample_initial_channel//(2**i), h.upsample_initial_channel//(2**(i+1)),
k, u, padding=(k-u)//2)))
self.resblocks = nn.ModuleList()
for i in range(len(self.ups)):
ch = h.upsample_initial_channel//(2**(i+1))
for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)):
self.resblocks.append(resblock(h, ch, k, d))
self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3))
self.ups.apply(init_weights)
self.conv_post.apply(init_weights)
def forward(self, x):
x = self.conv_pre(x)
for i in range(self.num_upsamples):
x = F.leaky_relu(x, LRELU_SLOPE)
x = self.ups[i](x)
xs = None
for j in range(self.num_kernels):
if xs is None:
xs = self.resblocks[i*self.num_kernels+j](x)
else:
xs += self.resblocks[i*self.num_kernels+j](x)
x = xs / self.num_kernels
x = F.leaky_relu(x)
x = self.conv_post(x)
x = torch.tanh(x)
return x
def remove_weight_norm(self):
print('Removing weight norm...')
for l in self.ups:
remove_weight_norm(l)
for l in self.resblocks:
l.remove_weight_norm()
remove_weight_norm(self.conv_pre)
remove_weight_norm(self.conv_post)
class DiscriminatorP(torch.nn.Module):
def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False):
super(DiscriminatorP, self).__init__()
self.period = period
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
self.convs = nn.ModuleList([
norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))),
norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))),
])
self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0)))
def forward(self, x):
fmap = []
# 1d to 2d
b, c, t = x.shape
if t % self.period != 0: # pad first
n_pad = self.period - (t % self.period)
x = F.pad(x, (0, n_pad), "reflect")
t = t + n_pad
x = x.view(b, c, t // self.period, self.period)
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiPeriodDiscriminator(torch.nn.Module):
def __init__(self):
super(MultiPeriodDiscriminator, self).__init__()
self.discriminators = nn.ModuleList([
DiscriminatorP(2),
DiscriminatorP(3),
DiscriminatorP(5),
DiscriminatorP(7),
DiscriminatorP(11),
])
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
class DiscriminatorS(torch.nn.Module):
def __init__(self, use_spectral_norm=False):
super(DiscriminatorS, self).__init__()
norm_f = weight_norm if use_spectral_norm == False else spectral_norm
self.convs = nn.ModuleList([
norm_f(Conv1d(1, 128, 15, 1, padding=7)),
norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)),
norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)),
norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)),
norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)),
norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)),
norm_f(Conv1d(1024, 1024, 5, 1, padding=2)),
])
self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1))
def forward(self, x):
fmap = []
for l in self.convs:
x = l(x)
x = F.leaky_relu(x, LRELU_SLOPE)
fmap.append(x)
x = self.conv_post(x)
fmap.append(x)
x = torch.flatten(x, 1, -1)
return x, fmap
class MultiScaleDiscriminator(torch.nn.Module):
def __init__(self):
super(MultiScaleDiscriminator, self).__init__()
self.discriminators = nn.ModuleList([
DiscriminatorS(use_spectral_norm=True),
DiscriminatorS(),
DiscriminatorS(),
])
self.meanpools = nn.ModuleList([
AvgPool1d(4, 2, padding=2),
AvgPool1d(4, 2, padding=2)
])
def forward(self, y, y_hat):
y_d_rs = []
y_d_gs = []
fmap_rs = []
fmap_gs = []
for i, d in enumerate(self.discriminators):
if i != 0:
y = self.meanpools[i-1](y)
y_hat = self.meanpools[i-1](y_hat)
y_d_r, fmap_r = d(y)
y_d_g, fmap_g = d(y_hat)
y_d_rs.append(y_d_r)
fmap_rs.append(fmap_r)
y_d_gs.append(y_d_g)
fmap_gs.append(fmap_g)
return y_d_rs, y_d_gs, fmap_rs, fmap_gs
def feature_loss(fmap_r, fmap_g):
loss = 0
for dr, dg in zip(fmap_r, fmap_g):
for rl, gl in zip(dr, dg):
loss += torch.mean(torch.abs(rl - gl))
return loss*2
def discriminator_loss(disc_real_outputs, disc_generated_outputs):
loss = 0
r_losses = []
g_losses = []
for dr, dg in zip(disc_real_outputs, disc_generated_outputs):
r_loss = torch.mean((1-dr)**2)
g_loss = torch.mean(dg**2)
loss += (r_loss + g_loss)
r_losses.append(r_loss.item())
g_losses.append(g_loss.item())
return loss, r_losses, g_losses
def generator_loss(disc_outputs):
loss = 0
gen_losses = []
for dg in disc_outputs:
l = torch.mean((1-dg)**2)
gen_losses.append(l)
loss += l
return loss, gen_losses
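# Minimal sketch (not part of the original file) of how the LSGAN-style losses above are used:
# the discriminator is trained to output 1 on real and 0 on generated audio, the generator to make
# the discriminator output 1 on its samples, plus an L1 feature-matching term. The tensors here are
# random stand-ins for discriminator outputs / feature maps.
def _hifigan_loss_sketch():
    real_outs = [torch.rand(4, 10), torch.rand(4, 10)]
    fake_outs = [torch.rand(4, 10), torch.rand(4, 10)]
    d_loss, r_losses, g_losses = discriminator_loss(real_outs, fake_outs)
    g_loss, per_scale_losses = generator_loss(fake_outs)
    fm_loss = feature_loss([[torch.rand(4, 8)]], [[torch.rand(4, 8)]])
    return d_loss, g_loss, fm_loss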
class VocoderHifigan(object):
def __init__(self, ckpt_vocoder,device='cuda'):
with open(os.path.join(ckpt_vocoder,'args.yml'), 'r') as f:
vocoder_args = Namespace(**yaml.load(f, Loader=yaml.UnsafeLoader))
self.generator = Generator(vocoder_args)
netG_path = os.path.join(ckpt_vocoder,'best_netG.pt')
if os.path.exists(netG_path):
vocoder_sd = torch.load(netG_path, map_location='cpu')
self.generator.load_state_dict(vocoder_sd['generator'])
self.generator.eval()
self.device = device
self.generator.to(self.device)
def vocode(self, spec, global_step=None):
with torch.no_grad():
if isinstance(spec,np.ndarray):
spec = torch.from_numpy(spec).unsqueeze(0)
spec = spec.to(dtype=torch.float32,device=self.device)
return self.generator(spec).squeeze().cpu().numpy()
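# Illustrative usage sketch (not part of the original file): `ckpt_vocoder` is a placeholder
# directory assumed to contain `args.yml` and `best_netG.pt`; `vocode` maps an 80-bin mel
# spectrogram of shape (n_mels, frames) to a waveform numpy array.
def _vocoder_usage_sketch(ckpt_vocoder='vocoder_ckpt', device='cpu'):
    vocoder = VocoderHifigan(ckpt_vocoder, device=device)
    mel = np.random.randn(80, 100).astype(np.float32)  # dummy mel spectrogram
    return vocoder.vocode(mel)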
class VocoderHifigan_noload(object):
def __init__(self, vocoder_args,device='cuda'):
self.generator = Generator(vocoder_args)
self.generator.eval()
self.device = device
self.generator.to(self.device)
def vocode(self, spec, global_step=None):
with torch.no_grad():
if isinstance(spec,np.ndarray):
spec = torch.from_numpy(spec).unsqueeze(0)
spec = spec.to(dtype=torch.float32,device=self.device)
return self.generator(spec).squeeze().cpu().numpy() | EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/vocoder/hifigan/modules.py |
import numpy as np
class LambdaWarmUpCosineScheduler:
"""
note: use with a base_lr of 1.0
"""
def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0):
self.lr_warm_up_steps = warm_up_steps
self.lr_start = lr_start
self.lr_min = lr_min
self.lr_max = lr_max
self.lr_max_decay_steps = max_decay_steps
self.last_lr = 0.
self.verbosity_interval = verbosity_interval
def schedule(self, n, **kwargs):
if self.verbosity_interval > 0:
if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}")
if n < self.lr_warm_up_steps:
lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start
self.last_lr = lr
return lr
else:
t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps)
t = min(t, 1.0)
lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * (
1 + np.cos(t * np.pi))
self.last_lr = lr
return lr
def __call__(self, n, **kwargs):
return self.schedule(n,**kwargs)
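# Illustrative sketch (not part of the original file): with a base_lr of 1.0 the value returned by
# the schedule is the learning rate itself, so the object is wired in as the lr_lambda of a torch
# LambdaLR. The hyperparameter values below are placeholders.
def _cosine_schedule_sketch():
    import torch
    model = torch.nn.Linear(4, 4)
    opt = torch.optim.AdamW(model.parameters(), lr=1.0)  # base_lr of 1.0, as noted in the docstring
    schedule = LambdaWarmUpCosineScheduler(warm_up_steps=1000, lr_min=1e-6, lr_max=1e-4,
                                           lr_start=1e-8, max_decay_steps=10000)
    return torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda=schedule)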
class LambdaWarmUpCosineScheduler2:
"""
supports repeated iterations, configurable via lists
note: use with a base_lr of 1.0.
"""
def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0):
assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths)
self.lr_warm_up_steps = warm_up_steps
self.f_start = f_start
self.f_min = f_min
self.f_max = f_max
self.cycle_lengths = cycle_lengths
self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths))
self.last_f = 0.
self.verbosity_interval = verbosity_interval
def find_in_interval(self, n):
interval = 0
for cl in self.cum_cycles[1:]:
if n <= cl:
return interval
interval += 1
def schedule(self, n, **kwargs):
cycle = self.find_in_interval(n)
n = n - self.cum_cycles[cycle]
if self.verbosity_interval > 0:
if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
f"current cycle {cycle}")
if n < self.lr_warm_up_steps[cycle]:
f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
self.last_f = f
return f
else:
t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle])
t = min(t, 1.0)
f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * (
1 + np.cos(t * np.pi))
self.last_f = f
return f
def __call__(self, n, **kwargs):
return self.schedule(n, **kwargs)
class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2):
def schedule(self, n, **kwargs):
cycle = self.find_in_interval(n)
n = n - self.cum_cycles[cycle]
if self.verbosity_interval > 0:
if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, "
f"current cycle {cycle}")
if n < self.lr_warm_up_steps[cycle]:
f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle]
self.last_f = f
return f
else:
f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle])
self.last_f = f
return f
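# Illustrative sketch (not part of the original file): the cycle-based schedulers take per-cycle
# lists. The placeholder values below describe a single 10k-step cycle that warms up from f_start
# to f_max over 100 steps and then decays linearly toward f_min over the rest of the cycle.
def _linear_schedule_sketch():
    sched = LambdaLinearScheduler(warm_up_steps=[100], f_min=[0.1], f_max=[1.0],
                                  f_start=[1e-6], cycle_lengths=[10000])
    return [sched(n) for n in (0, 50, 100, 5000, 9999)]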
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/lr_scheduler.py |
import importlib
import torch
import numpy as np
from tqdm import tqdm
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
import hashlib
import requests
import os
URL_MAP = {
'vggishish_lpaps': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a/specvqgan_public/vggishish16.pt',
'vggishish_mean_std_melspec_10s_22050hz': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a/specvqgan_public/train_means_stds_melspec_10s_22050hz.txt',
'melception': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a/specvqgan_public/melception-21-05-10T09-28-40.pt',
}
CKPT_MAP = {
'vggishish_lpaps': 'vggishish16.pt',
'vggishish_mean_std_melspec_10s_22050hz': 'train_means_stds_melspec_10s_22050hz.txt',
'melception': 'melception-21-05-10T09-28-40.pt',
}
MD5_MAP = {
'vggishish_lpaps': '197040c524a07ccacf7715d7080a80bd',
'vggishish_mean_std_melspec_10s_22050hz': 'f449c6fd0e248936c16f6d22492bb625',
'melception': 'a71a41041e945b457c7d3d814bbcf72d',
}
def download(url, local_path, chunk_size=1024):
os.makedirs(os.path.split(local_path)[0], exist_ok=True)
with requests.get(url, stream=True) as r:
total_size = int(r.headers.get("content-length", 0))
with tqdm(total=total_size, unit="B", unit_scale=True) as pbar:
with open(local_path, "wb") as f:
for data in r.iter_content(chunk_size=chunk_size):
if data:
f.write(data)
pbar.update(chunk_size)
def md5_hash(path):
with open(path, "rb") as f:
content = f.read()
return hashlib.md5(content).hexdigest()
def log_txt_as_img(wh, xc, size=10):
# wh a tuple of (width, height)
# xc a list of captions to plot
b = len(xc)
txts = list()
for bi in range(b):
txt = Image.new("RGB", wh, color="white")
draw = ImageDraw.Draw(txt)
font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)
nc = int(40 * (wh[0] / 256))
lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))
try:
draw.text((0, 0), lines, fill="black", font=font)
except UnicodeEncodeError:
print("Cant encode string for logging. Skipping.")
txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
txts.append(txt)
txts = np.stack(txts)
txts = torch.tensor(txts)
return txts
def ismap(x):
if not isinstance(x, torch.Tensor):
return False
return (len(x.shape) == 4) and (x.shape[1] > 3)
def isimage(x):
if not isinstance(x,torch.Tensor):
return False
return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)
def exists(x):
return x is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def mean_flat(tensor):
"""
https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
Take the mean over all non-batch dimensions.
"""
return tensor.mean(dim=list(range(1, len(tensor.shape))))
def count_params(model, verbose=False):
total_params = sum(p.numel() for p in model.parameters())
if verbose:
print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.")
return total_params
def instantiate_from_config(config,reload=False):
if not "target" in config:
if config == '__is_first_stage__':
return None
elif config == "__is_unconditional__":
return None
raise KeyError("Expected key `target` to instantiate.")
return get_obj_from_str(config["target"],reload=reload)(**config.get("params", dict()))
def get_obj_from_str(string, reload=False):
module, cls = string.rsplit(".", 1)
if reload:
module_imp = importlib.import_module(module)
importlib.reload(module_imp)
return getattr(importlib.import_module(module, package=None), cls)
def get_ckpt_path(name, root, check=False):
assert name in URL_MAP
path = os.path.join(root, CKPT_MAP[name])
if not os.path.exists(path) or (check and not md5_hash(path) == MD5_MAP[name]):
print("Downloading {} model from {} to {}".format(name, URL_MAP[name], path))
download(URL_MAP[name], path)
md5 = md5_hash(path)
assert md5 == MD5_MAP[name], md5
return path
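# Illustrative sketch (not part of the original file): `instantiate_from_config` builds an object
# from a dict containing a dotted `target` path plus optional `params`, e.g. a plain torch layer:
def _instantiate_sketch():
    cfg = {"target": "torch.nn.Linear", "params": {"in_features": 16, "out_features": 4}}
    return instantiate_from_config(cfg)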
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/util.py |
"""
与autoencoder.py的区别在于,autoencoder.py计算loss时只有一个discriminator,而此处又多了个multiwindowDiscriminator,所以优化器
优化的参数改为:
opt_disc = torch.optim.Adam(list(self.loss.discriminator.parameters()) + list(self.loss.discriminator_multi.parameters()),
lr=lr, betas=(0.5, 0.9))
"""
import os
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
from contextlib import contextmanager
from packaging import version
import numpy as np
from ldm.modules.diffusionmodules.model import Encoder, Decoder
from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
from torch.optim.lr_scheduler import LambdaLR
from ldm.util import instantiate_from_config
class AutoencoderKL(pl.LightningModule):
def __init__(self,
ddconfig,
lossconfig,
embed_dim,
ckpt_path=None,
ignore_keys=[],
image_key="image",
colorize_nlabels=None,
monitor=None,
):
super().__init__()
self.image_key = image_key
self.encoder = Encoder(**ddconfig)
self.decoder = Decoder(**ddconfig)
self.loss = instantiate_from_config(lossconfig)
assert ddconfig["double_z"]
self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
self.embed_dim = embed_dim
if colorize_nlabels is not None:
assert type(colorize_nlabels)==int
self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
if monitor is not None:
self.monitor = monitor
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
def init_from_ckpt(self, path, ignore_keys=list()):
sd = torch.load(path, map_location="cpu")["state_dict"]
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
print("Deleting key {} from state_dict.".format(k))
del sd[k]
self.load_state_dict(sd, strict=False)
print(f"Restored from {path}")
def encode(self, x):
h = self.encoder(x)
moments = self.quant_conv(h)
posterior = DiagonalGaussianDistribution(moments)
return posterior
def decode(self, z):
z = self.post_quant_conv(z)
dec = self.decoder(z)
return dec
def forward(self, input, sample_posterior=True):
posterior = self.encode(input)
if sample_posterior:
z = posterior.sample()
else:
z = posterior.mode()
dec = self.decode(z)
return dec, posterior
def get_input(self, batch, k):
x = batch[k]
if len(x.shape) == 3:
x = x[..., None]
x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
return x
def training_step(self, batch, batch_idx, optimizer_idx):
inputs = self.get_input(batch, self.image_key)
reconstructions, posterior = self(inputs)
if optimizer_idx == 0:
# train encoder+decoder+logvar
aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train")
self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
return aeloss
if optimizer_idx == 1:
# train the discriminator
discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train")
self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
return discloss
def validation_step(self, batch, batch_idx):
inputs = self.get_input(batch, self.image_key)
reconstructions, posterior = self(inputs)
aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,
last_layer=self.get_last_layer(), split="val")
discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,
last_layer=self.get_last_layer(), split="val")
self.log("val/rec_loss", log_dict_ae["val/rec_loss"])
self.log_dict(log_dict_ae)
self.log_dict(log_dict_disc)
return self.log_dict
def test_step(self, batch, batch_idx):
inputs = self.get_input(batch, self.image_key)# inputs shape:(b,c,mel_len,T) or (b,c,h,w)
reconstructions, posterior = self(inputs)# reconstructions:(b,c,mel_len,T) or (b,c,h,w)
reconstructions = (reconstructions + 1)/2 # to mel scale
test_ckpt_path = os.path.basename(self.trainer.tested_ckpt_path)
savedir = os.path.join(self.trainer.log_dir,f'output_imgs_{test_ckpt_path}','fake_class')
if not os.path.exists(savedir):
os.makedirs(savedir)
file_names = batch['f_name']
# print(f"reconstructions.shape:{reconstructions.shape}",file_names)
        reconstructions = reconstructions.cpu().numpy().squeeze(1) # squeeze channel dim
for b in range(reconstructions.shape[0]):
vname_num_split_index = file_names[b].rfind('_')# file_names[b]:video_name+'_'+num
v_n,num = file_names[b][:vname_num_split_index],file_names[b][vname_num_split_index+1:]
save_img_path = os.path.join(savedir,f'{v_n}_sample_{num}.npy')
np.save(save_img_path,reconstructions[b])
return None
def configure_optimizers(self):
lr = self.learning_rate
opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
list(self.decoder.parameters())+
list(self.quant_conv.parameters())+
list(self.post_quant_conv.parameters()),
lr=lr, betas=(0.5, 0.9))
opt_disc = torch.optim.Adam(list(self.loss.discriminator.parameters()) + list(self.loss.discriminator_multi.parameters()),
lr=lr, betas=(0.5, 0.9))
return [opt_ae, opt_disc], []
def get_last_layer(self):
return self.decoder.conv_out.weight
@torch.no_grad()
def log_images(self, batch, only_inputs=False, **kwargs):
log = dict()
x = self.get_input(batch, self.image_key)
x = x.to(self.device)
if not only_inputs:
xrec, posterior = self(x)
if x.shape[1] > 3:
# colorize with random projection
assert xrec.shape[1] > 3
x = self.to_rgb(x)
xrec = self.to_rgb(xrec)
log["samples"] = self.decode(torch.randn_like(posterior.sample()))
log["reconstructions"] = xrec
log["inputs"] = x
return log
def to_rgb(self, x):
assert self.image_key == "segmentation"
if not hasattr(self, "colorize"):
self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
x = F.conv2d(x, weight=self.colorize)
x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
return x
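# Minimal sketch (not part of the original file) of the optimizer grouping described in the module
# docstring: one Adam instance covers both the patch discriminator and the extra multi-window
# discriminator. The two Linear layers below are stand-ins for the real discriminators.
def _dual_disc_optimizer_sketch(lr=4.5e-6):
    disc = torch.nn.Linear(8, 1)
    disc_multi = torch.nn.Linear(8, 1)
    return torch.optim.Adam(list(disc.parameters()) + list(disc_multi.parameters()),
                            lr=lr, betas=(0.5, 0.9))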
class IdentityFirstStage(torch.nn.Module):
def __init__(self, *args, vq_interface=False, **kwargs):
self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff
super().__init__()
def encode(self, x, *args, **kwargs):
return x
def decode(self, x, *args, **kwargs):
return x
def quantize(self, x, *args, **kwargs):
if self.vq_interface:
return x, None, [None, None, None]
return x
def forward(self, x, *args, **kwargs):
return x | EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/models/autoencoder_multi.py |
import os
import torch
import pytorch_lightning as pl
import torch.nn.functional as F
from contextlib import contextmanager
from packaging import version
import numpy as np
from ldm.modules.diffusionmodules.model import Encoder, Decoder
from ldm.modules.distributions.distributions import DiagonalGaussianDistribution
from torch.optim.lr_scheduler import LambdaLR
from ldm.util import instantiate_from_config
# The VQModel class below references VectorQuantizer and LitEma, which this file does not import;
# the two imports below follow the upstream latent-diffusion layout and are assumptions here.
from ldm.modules.ema import LitEma
from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
# from icecream import ic
class VQModel(pl.LightningModule):
def __init__(self,
ddconfig,
lossconfig,
n_embed,
embed_dim,
ckpt_path=None,
ignore_keys=[],
image_key="image",
colorize_nlabels=None,
monitor=None,
batch_resize_range=None,
scheduler_config=None,
lr_g_factor=1.0,
remap=None,
sane_index_shape=False, # tell vector quantizer to return indices as bhw
use_ema=False
):
super().__init__()
self.embed_dim = embed_dim
self.n_embed = n_embed
self.image_key = image_key
self.encoder = Encoder(**ddconfig)
self.decoder = Decoder(**ddconfig)
self.loss = instantiate_from_config(lossconfig)
self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25,
remap=remap,
sane_index_shape=sane_index_shape)
self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1)
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
if colorize_nlabels is not None:
assert type(colorize_nlabels)==int
self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
if monitor is not None:
self.monitor = monitor
self.batch_resize_range = batch_resize_range
if self.batch_resize_range is not None:
print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.")
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self)
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
self.scheduler_config = scheduler_config
self.lr_g_factor = lr_g_factor
@contextmanager
def ema_scope(self, context=None):
if self.use_ema:
self.model_ema.store(self.parameters())
self.model_ema.copy_to(self)
if context is not None:
print(f"{context}: Switched to EMA weights")
try:
yield None
finally:
if self.use_ema:
self.model_ema.restore(self.parameters())
if context is not None:
print(f"{context}: Restored training weights")
def init_from_ckpt(self, path, ignore_keys=list()):
sd = torch.load(path, map_location="cpu")["state_dict"]
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
print("Deleting key {} from state_dict.".format(k))
del sd[k]
missing, unexpected = self.load_state_dict(sd, strict=False)
print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
if len(missing) > 0:
print(f"Missing Keys: {missing}")
print(f"Unexpected Keys: {unexpected}")
def on_train_batch_end(self, *args, **kwargs):
if self.use_ema:
self.model_ema(self)
def encode(self, x):
h = self.encoder(x)
h = self.quant_conv(h)
quant, emb_loss, info = self.quantize(h)
return quant, emb_loss, info
def encode_to_prequant(self, x):
h = self.encoder(x)
h = self.quant_conv(h)
return h
def decode(self, quant):
quant = self.post_quant_conv(quant)
dec = self.decoder(quant)
return dec
def decode_code(self, code_b):
quant_b = self.quantize.embed_code(code_b)
dec = self.decode(quant_b)
return dec
def forward(self, input, return_pred_indices=False):
quant, diff, (_,_,ind) = self.encode(input)
dec = self.decode(quant)
if return_pred_indices:
return dec, diff, ind
return dec, diff
def get_input(self, batch, k):
x = batch[k]
if len(x.shape) == 3:
x = x[..., None]
x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
if self.batch_resize_range is not None:
lower_size = self.batch_resize_range[0]
upper_size = self.batch_resize_range[1]
if self.global_step <= 4:
# do the first few batches with max size to avoid later oom
new_resize = upper_size
else:
new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16))
if new_resize != x.shape[2]:
x = F.interpolate(x, size=new_resize, mode="bicubic")
x = x.detach()
return x
def training_step(self, batch, batch_idx, optimizer_idx):
# https://github.com/pytorch/pytorch/issues/37142
# try not to fool the heuristics
x = self.get_input(batch, self.image_key)
xrec, qloss, ind = self(x, return_pred_indices=True)
if optimizer_idx == 0:
# autoencode
aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train",
predicted_indices=ind)
self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True)
return aeloss
if optimizer_idx == 1:
# discriminator
discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train")
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True)
return discloss
def validation_step(self, batch, batch_idx):
log_dict = self._validation_step(batch, batch_idx)
with self.ema_scope():
log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema")
return log_dict
def _validation_step(self, batch, batch_idx, suffix=""):
x = self.get_input(batch, self.image_key)
xrec, qloss, ind = self(x, return_pred_indices=True)
aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0,
self.global_step,
last_layer=self.get_last_layer(),
split="val"+suffix,
predicted_indices=ind
)
discloss, log_dict_disc = self.loss(qloss, x, xrec, 1,
self.global_step,
last_layer=self.get_last_layer(),
split="val"+suffix,
predicted_indices=ind
)
rec_loss = log_dict_ae[f"val{suffix}/rec_loss"]
self.log(f"val{suffix}/rec_loss", rec_loss,
prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
self.log(f"val{suffix}/aeloss", aeloss,
prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True)
if version.parse(pl.__version__) >= version.parse('1.4.0'):
del log_dict_ae[f"val{suffix}/rec_loss"]
self.log_dict(log_dict_ae)
self.log_dict(log_dict_disc)
return self.log_dict
def test_step(self, batch, batch_idx):
x = self.get_input(batch, self.image_key)
xrec, qloss, ind = self(x, return_pred_indices=True)
reconstructions = (xrec + 1)/2 # to mel scale
test_ckpt_path = os.path.basename(self.trainer.tested_ckpt_path)
savedir = os.path.join(self.trainer.log_dir,f'output_imgs_{test_ckpt_path}','fake_class')
if not os.path.exists(savedir):
os.makedirs(savedir)
file_names = batch['f_name']
# print(f"reconstructions.shape:{reconstructions.shape}",file_names)
        reconstructions = reconstructions.cpu().numpy().squeeze(1) # squeeze channel dim
for b in range(reconstructions.shape[0]):
vname_num_split_index = file_names[b].rfind('_')# file_names[b]:video_name+'_'+num
v_n,num = file_names[b][:vname_num_split_index],file_names[b][vname_num_split_index+1:]
save_img_path = os.path.join(savedir,f'{v_n}_sample_{num}.npy')
np.save(save_img_path,reconstructions[b])
return None
def configure_optimizers(self):
lr_d = self.learning_rate
lr_g = self.lr_g_factor*self.learning_rate
print("lr_d", lr_d)
print("lr_g", lr_g)
opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
list(self.decoder.parameters())+
list(self.quantize.parameters())+
list(self.quant_conv.parameters())+
list(self.post_quant_conv.parameters()),
lr=lr_g, betas=(0.5, 0.9))
opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
lr=lr_d, betas=(0.5, 0.9))
if self.scheduler_config is not None:
scheduler = instantiate_from_config(self.scheduler_config)
print("Setting up LambdaLR scheduler...")
scheduler = [
{
'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule),
'interval': 'step',
'frequency': 1
},
{
'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule),
'interval': 'step',
'frequency': 1
},
]
return [opt_ae, opt_disc], scheduler
return [opt_ae, opt_disc], []
def get_last_layer(self):
return self.decoder.conv_out.weight
def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
log = dict()
x = self.get_input(batch, self.image_key)
x = x.to(self.device)
if only_inputs:
log["inputs"] = x
return log
xrec, _ = self(x)
if x.shape[1] > 3:
# colorize with random projection
assert xrec.shape[1] > 3
x = self.to_rgb(x)
xrec = self.to_rgb(xrec)
log["inputs"] = x
log["reconstructions"] = xrec
if plot_ema:
with self.ema_scope():
xrec_ema, _ = self(x)
if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema)
log["reconstructions_ema"] = xrec_ema
return log
def to_rgb(self, x):
assert self.image_key == "segmentation"
if not hasattr(self, "colorize"):
self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
x = F.conv2d(x, weight=self.colorize)
x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
return x
class VQModelInterface(VQModel):
def __init__(self, embed_dim, *args, **kwargs):
super().__init__(embed_dim=embed_dim, *args, **kwargs)
self.embed_dim = embed_dim
    def encode(self, x):  # unlike VQModel, which quantizes inside encode, VQModelInterface defers quantization to decode
h = self.encoder(x)
h = self.quant_conv(h)
return h
def decode(self, h, force_not_quantize=False):
# also go through quantization layer
if not force_not_quantize:
quant, emb_loss, info = self.quantize(h)
else:
quant = h
quant = self.post_quant_conv(quant)
dec = self.decoder(quant)
return dec
class AutoencoderKL(pl.LightningModule):
def __init__(self,
ddconfig,
lossconfig,
embed_dim,
ckpt_path=None,
ignore_keys=[],
image_key="image",
colorize_nlabels=None,
monitor=None,
):
super().__init__()
self.image_key = image_key
self.encoder = Encoder(**ddconfig)
self.decoder = Decoder(**ddconfig)
self.loss = instantiate_from_config(lossconfig)
assert ddconfig["double_z"]
self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1)
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
self.embed_dim = embed_dim
if colorize_nlabels is not None:
assert type(colorize_nlabels)==int
self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1))
if monitor is not None:
self.monitor = monitor
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)
# self.automatic_optimization = False # hjw for debug
def init_from_ckpt(self, path, ignore_keys=list()):
sd = torch.load(path, map_location="cpu")["state_dict"]
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
print("Deleting key {} from state_dict.".format(k))
del sd[k]
self.load_state_dict(sd, strict=False)
print(f"Restored from {path}")
def encode(self, x):
h = self.encoder(x)
moments = self.quant_conv(h)
posterior = DiagonalGaussianDistribution(moments)
return posterior
def decode(self, z):
z = self.post_quant_conv(z)
dec = self.decoder(z)
return dec
def forward(self, input, sample_posterior=True):
posterior = self.encode(input)
if sample_posterior:
z = posterior.sample()
else:
z = posterior.mode()
dec = self.decode(z)
return dec, posterior
def get_input(self, batch, k):
x = batch[k]
if len(x.shape) == 3:
x = x[..., None]
x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()
return x
def training_step(self, batch, batch_idx, optimizer_idx):
inputs = self.get_input(batch, self.image_key)
reconstructions, posterior = self(inputs)
if optimizer_idx == 0:
# train encoder+decoder+logvar
aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train")
self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)
return aeloss
if optimizer_idx == 1:
# train the discriminator
discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,
last_layer=self.get_last_layer(), split="train")
self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)
self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)
return discloss
def validation_step(self, batch, batch_idx):
# self.log_images(batch,only_inputs=False,save_dir='mel_result_ae13_26/fake_class')
return self.log_dict
def test_step(self, batch, batch_idx):
test_ckpt_path = os.path.basename(self.trainer.tested_ckpt_path)
savedir = os.path.join(self.trainer.log_dir,f'output_imgs_{test_ckpt_path}','fake_class')
os.makedirs(savedir,exist_ok=True)
inputs = self.get_input(batch, self.image_key)# inputs shape:(b,c,mel_len,T) or (b,c,h,w)
# ic(inputs.shape)
# inputs = inputs[...,:624]
# ic(inputs.shape)
xrec, posterior = self(inputs)# reconstructions:(b,c,mel_len,T) or (b,c,h,w)
file_names = batch['f_name']
# print(f"reconstructions.shape:{reconstructions.shape}",file_names)
for b in range(len(file_names)):
rcon = (xrec[b].squeeze().detach().cpu().numpy() + 1) / 2 # to mel scale,squeeze channel dim
vname_num_split_index = file_names[b].rfind('_')# file_names[b]:video_name+'_'+num
v_n,num = file_names[b][:vname_num_split_index],file_names[b][vname_num_split_index+1:]
save_img_path = os.path.join(savedir,f'{v_n}_sample_{num}.npy')
np.save(save_img_path,rcon)
return None
def configure_optimizers(self):
lr = self.learning_rate
opt_ae = torch.optim.Adam(list(self.encoder.parameters())+
list(self.decoder.parameters())+
list(self.quant_conv.parameters())+
list(self.post_quant_conv.parameters()),
lr=lr, betas=(0.5, 0.9))
opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),
lr=lr, betas=(0.5, 0.9))
return [opt_ae, opt_disc], []
def get_last_layer(self):
return self.decoder.conv_out.weight
@torch.no_grad()
    def log_images(self, batch, only_inputs=False, save_dir='mel_result_ae13_26_debug/fake_class', **kwargs):  # called from on_validation_batch_end in main.py
log = dict()
x = self.get_input(batch, self.image_key)
x = x.to(self.device)
if not only_inputs:
xrec, posterior = self(x)
if x.shape[1] > 3:
# colorize with random projection
assert xrec.shape[1] > 3
x = self.to_rgb(x)
xrec = self.to_rgb(xrec)
log["samples"] = self.decode(torch.randn_like(posterior.sample()))
log["reconstructions"] = xrec
log["inputs"] = x
return log
def to_rgb(self, x):
assert self.image_key == "segmentation"
if not hasattr(self, "colorize"):
self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x))
x = F.conv2d(x, weight=self.colorize)
x = 2.*(x-x.min())/(x.max()-x.min()) - 1.
return x
class IdentityFirstStage(torch.nn.Module):
def __init__(self, *args, vq_interface=False, **kwargs):
self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff
super().__init__()
def encode(self, x, *args, **kwargs):
return x
def decode(self, x, *args, **kwargs):
return x
def quantize(self, x, *args, **kwargs):
if self.vq_interface:
return x, None, [None, None, None]
return x
def forward(self, x, *args, **kwargs):
return x
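# Simplified sketch (not part of the original file) of what `AutoencoderKL.encode` plus
# `sample_posterior=True` amount to: the quant_conv output is split into mean and log-variance
# along the channel axis and a latent is drawn with the reparameterization trick (this mirrors
# DiagonalGaussianDistribution.sample()).
def _kl_posterior_sample_sketch(moments):
    mean, logvar = torch.chunk(moments, 2, dim=1)
    logvar = torch.clamp(logvar, -30.0, 20.0)
    std = torch.exp(0.5 * logvar)
    return mean + std * torch.randn_like(mean)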
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/models/autoencoder.py |
"""SAMPLING ONLY."""
import torch
import numpy as np
from tqdm import tqdm
from functools import partial
from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, \
extract_into_tensor
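# Illustrative sketch (not part of the original file) of the core DDIM update implemented in
# `p_sample_ddim` below: given the predicted noise e_t at the current step, reconstruct x_0,
# form the deterministic direction toward x_{t-1}, and add sigma_t-scaled noise (eta > 0).
# a_t, a_prev and sigma_t are broadcastable tensors, e.g. of shape (b, 1, 1, 1).
def _ddim_step_sketch(x_t, e_t, a_t, a_prev, sigma_t):
    pred_x0 = (x_t - (1. - a_t).sqrt() * e_t) / a_t.sqrt()
    dir_xt = (1. - a_prev - sigma_t ** 2).sqrt() * e_t
    return a_prev.sqrt() * pred_x0 + dir_xt + sigma_t * torch.randn_like(x_t)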
class DDIMSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
# if attr.device != torch.device("cuda"):
# attr = attr.to(torch.device("cuda"))
attr = attr.to(self.device)
setattr(self, name, attr)
def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
alphas_cumprod = self.model.alphas_cumprod
assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
self.register_buffer('betas', to_torch(self.model.betas))
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
# ddim sampling parameters
ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
ddim_timesteps=self.ddim_timesteps,
eta=ddim_eta,verbose=verbose)
self.register_buffer('ddim_sigmas', ddim_sigmas)
self.register_buffer('ddim_alphas', ddim_alphas)
self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
(1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
1 - self.alphas_cumprod / self.alphas_cumprod_prev))
self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
@torch.no_grad()
def sample(self,
S,
batch_size,
shape,
conditioning=None,
callback=None,
normals_sequence=None,
img_callback=None,
quantize_x0=False,
eta=0.,
mask=None,
x0=None,
temperature=1.,
noise_dropout=0.,
score_corrector=None,
corrector_kwargs=None,
verbose=True,
x_T=None,
log_every_t=100,
unconditional_guidance_scale=1.,
unconditional_conditioning=None,
# this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
**kwargs
):
if conditioning is not None:
if isinstance(conditioning, dict):
ctmp = conditioning[list(conditioning.keys())[0]]
while isinstance(ctmp, list): ctmp = ctmp[0]
cbs = ctmp.shape[0]
if cbs != batch_size:
print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
else:
if conditioning.shape[0] != batch_size:
print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
# sampling
C, H, W = shape
size = (batch_size, C, H, W)
# print(f'Data shape for DDIM sampling is {size}, eta {eta}')
samples, intermediates = self.ddim_sampling(conditioning, size,
callback=callback,
img_callback=img_callback,
quantize_denoised=quantize_x0,
mask=mask, x0=x0,
ddim_use_original_steps=False,
noise_dropout=noise_dropout,
temperature=temperature,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
x_T=x_T,
log_every_t=log_every_t,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
)
return samples, intermediates
@torch.no_grad()
def ddim_sampling(self, cond, shape,
x_T=None, ddim_use_original_steps=False,
callback=None, timesteps=None, quantize_denoised=False,
mask=None, x0=None, img_callback=None, log_every_t=100,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
unconditional_guidance_scale=1., unconditional_conditioning=None,):
device = self.model.betas.device
b = shape[0]
if x_T is None:
img = torch.randn(shape, device=device)
else:
img = x_T
if timesteps is None:
timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
elif timesteps is not None and not ddim_use_original_steps:
subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
timesteps = self.ddim_timesteps[:subset_end]
intermediates = {'x_inter': [img], 'pred_x0': [img]}
time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)
total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
# iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)
for i, step in enumerate(time_range):
index = total_steps - i - 1
ts = torch.full((b,), step, device=device, dtype=torch.long)
if mask is not None:
assert x0 is not None
img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?
img = img_orig * mask + (1. - mask) * img
outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
quantize_denoised=quantize_denoised, temperature=temperature,
noise_dropout=noise_dropout, score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning)
img, pred_x0 = outs
if callback: callback(i)
if img_callback: img_callback(pred_x0, i)
if index % log_every_t == 0 or index == total_steps - 1:
intermediates['x_inter'].append(img)
intermediates['pred_x0'].append(pred_x0)
return img, intermediates
@torch.no_grad()
def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
unconditional_guidance_scale=1., unconditional_conditioning=None):
b, *_, device = *x.shape, x.device
if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
e_t = self.model.apply_model(x, t, c)
else:
x_in = torch.cat([x] * 2)
t_in = torch.cat([t] * 2)
if isinstance(c, dict):
assert isinstance(unconditional_conditioning, dict)
c_in = dict()
for k in c:
if isinstance(c[k], list):
c_in[k] = [torch.cat([
unconditional_conditioning[k][i],
c[k][i]]) for i in range(len(c[k]))]
else:
c_in[k] = torch.cat([
unconditional_conditioning[k],
c[k]])
elif isinstance(c, list):
c_in = list()
assert isinstance(unconditional_conditioning, list)
for i in range(len(c)):
c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))
else:
c_in = torch.cat([unconditional_conditioning, c])# c/uc shape [b,seq_len=77,dim=1024],c_in shape [b*2,seq_len,dim]
e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
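            # classifier-free guidance: push the unconditional prediction toward the conditional
            # one, scaled by unconditional_guidance_scale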
e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
if score_corrector is not None:
assert self.model.parameterization == "eps"
e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
# select parameters corresponding to the currently considered timestep
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
# current prediction for x_0
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
if quantize_denoised:
pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
# direction pointing to x_t
dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
if noise_dropout > 0.:
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
return x_prev, pred_x0
@torch.no_grad()
def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):
# fast, but does not allow for exact reconstruction
# t serves as an index to gather the correct alphas
if use_original_steps:
sqrt_alphas_cumprod = self.sqrt_alphas_cumprod
sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod
else:
sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)
sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas
if noise is None:
noise = torch.randn_like(x0)
return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +
extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)
@torch.no_grad()
def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,
use_original_steps=False):
timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps
timesteps = timesteps[:t_start]
time_range = np.flip(timesteps)
total_steps = timesteps.shape[0]
# print(f"Running DDIM Sampling with {total_steps} timesteps")
# iterator = tqdm(time_range, desc='Decoding image', total=total_steps)
x_dec = x_latent
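        # iteratively apply the reverse DDIM updates for the first t_start timesteps, from noisy to clean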
for i, step in enumerate(time_range):
index = total_steps - i - 1
ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)
x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning)
return x_dec | EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/models/diffusion/ddim.py |
import os
import torch
import pytorch_lightning as pl
from omegaconf import OmegaConf
from torch.nn import functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from copy import deepcopy
from einops import rearrange
from glob import glob
from natsort import natsorted
from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel
from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config
__models__ = {
'class_label': EncoderUNetModel,
'segmentation': UNetModel
}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
class NoisyLatentImageClassifier(pl.LightningModule):
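    # Trains a classifier on diffusion-noised first-stage latents; such a classifier is
    # typically used to provide classifier guidance for the diffusion model.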
def __init__(self,
diffusion_path,
num_classes,
ckpt_path=None,
pool='attention',
label_key=None,
diffusion_ckpt_path=None,
scheduler_config=None,
weight_decay=1.e-2,
log_steps=10,
monitor='val/loss',
*args,
**kwargs):
super().__init__(*args, **kwargs)
self.num_classes = num_classes
# get latest config of diffusion model
diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1]
self.diffusion_config = OmegaConf.load(diffusion_config).model
self.diffusion_config.params.ckpt_path = diffusion_ckpt_path
self.load_diffusion()
self.monitor = monitor
self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1
self.log_time_interval = self.diffusion_model.num_timesteps // log_steps
self.log_steps = log_steps
self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \
else self.diffusion_model.cond_stage_key
assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params'
if self.label_key not in __models__:
raise NotImplementedError()
self.load_classifier(ckpt_path, pool)
self.scheduler_config = scheduler_config
self.use_scheduler = self.scheduler_config is not None
self.weight_decay = weight_decay
def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
sd = torch.load(path, map_location="cpu")
if "state_dict" in list(sd.keys()):
sd = sd["state_dict"]
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
print("Deleting key {} from state_dict.".format(k))
del sd[k]
missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
sd, strict=False)
print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
if len(missing) > 0:
print(f"Missing Keys: {missing}")
if len(unexpected) > 0:
print(f"Unexpected Keys: {unexpected}")
def load_diffusion(self):
model = instantiate_from_config(self.diffusion_config)
self.diffusion_model = model.eval()
self.diffusion_model.train = disabled_train
for param in self.diffusion_model.parameters():
param.requires_grad = False
def load_classifier(self, ckpt_path, pool):
model_config = deepcopy(self.diffusion_config.params.unet_config.params)
model_config.in_channels = self.diffusion_config.params.unet_config.params.out_channels
model_config.out_channels = self.num_classes
if self.label_key == 'class_label':
model_config.pool = pool
self.model = __models__[self.label_key](**model_config)
if ckpt_path is not None:
print('#####################################################################')
print(f'load from ckpt "{ckpt_path}"')
print('#####################################################################')
self.init_from_ckpt(ckpt_path)
@torch.no_grad()
def get_x_noisy(self, x, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x))
continuous_sqrt_alpha_cumprod = None
if self.diffusion_model.use_continuous_noise:
continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1)
# todo: make sure t+1 is correct here
return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise,
continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod)
def forward(self, x_noisy, t, *args, **kwargs):
return self.model(x_noisy, t)
@torch.no_grad()
def get_input(self, batch, k):
x = batch[k]
if len(x.shape) == 3:
x = x[..., None]
x = rearrange(x, 'b h w c -> b c h w')
x = x.to(memory_format=torch.contiguous_format).float()
return x
@torch.no_grad()
def get_conditioning(self, batch, k=None):
if k is None:
k = self.label_key
assert k is not None, 'Needs to provide label key'
targets = batch[k].to(self.device)
if self.label_key == 'segmentation':
targets = rearrange(targets, 'b h w c -> b c h w')
for down in range(self.numd):
h, w = targets.shape[-2:]
targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest')
# targets = rearrange(targets,'b c h w -> b h w c')
return targets
def compute_top_k(self, logits, labels, k, reduction="mean"):
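        # top-k accuracy: fraction of samples whose true label is among the k largest logits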
_, top_ks = torch.topk(logits, k, dim=1)
if reduction == "mean":
return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item()
elif reduction == "none":
return (top_ks == labels[:, None]).float().sum(dim=-1)
def on_train_epoch_start(self):
# save some memory
self.diffusion_model.model.to('cpu')
@torch.no_grad()
def write_logs(self, loss, logits, targets):
log_prefix = 'train' if self.training else 'val'
log = {}
log[f"{log_prefix}/loss"] = loss.mean()
log[f"{log_prefix}/acc@1"] = self.compute_top_k(
logits, targets, k=1, reduction="mean"
)
log[f"{log_prefix}/acc@5"] = self.compute_top_k(
logits, targets, k=5, reduction="mean"
)
self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True)
self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False)
self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True)
lr = self.optimizers().param_groups[0]['lr']
self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True)
def shared_step(self, batch, t=None):
x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key)
targets = self.get_conditioning(batch)
if targets.dim() == 4:
targets = targets.argmax(dim=1)
if t is None:
t = torch.randint(0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device).long()
else:
t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long()
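        # diffuse the clean latent to timestep t and classify it at that noise level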
x_noisy = self.get_x_noisy(x, t)
logits = self(x_noisy, t)
loss = F.cross_entropy(logits, targets, reduction='none')
self.write_logs(loss.detach(), logits.detach(), targets.detach())
loss = loss.mean()
return loss, logits, x_noisy, targets
def training_step(self, batch, batch_idx):
loss, *_ = self.shared_step(batch)
return loss
def reset_noise_accs(self):
self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in
range(0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t)}
def on_validation_start(self):
self.reset_noise_accs()
@torch.no_grad()
def validation_step(self, batch, batch_idx):
loss, *_ = self.shared_step(batch)
for t in self.noisy_acc:
_, logits, _, targets = self.shared_step(batch, t)
self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean'))
self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean'))
return loss
def configure_optimizers(self):
optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)
if self.use_scheduler:
scheduler = instantiate_from_config(self.scheduler_config)
print("Setting up LambdaLR scheduler...")
scheduler = [
{
'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule),
'interval': 'step',
'frequency': 1
}]
return [optimizer], scheduler
return optimizer
@torch.no_grad()
def log_images(self, batch, N=8, *args, **kwargs):
log = dict()
x = self.get_input(batch, self.diffusion_model.first_stage_key)
log['inputs'] = x
y = self.get_conditioning(batch)
if self.label_key == 'class_label':
y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
log['labels'] = y
if ismap(y):
log['labels'] = self.diffusion_model.to_rgb(y)
for step in range(self.log_steps):
current_time = step * self.log_time_interval
_, logits, x_noisy, _ = self.shared_step(batch, t=current_time)
log[f'inputs@t{current_time}'] = x_noisy
pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes)
pred = rearrange(pred, 'b h w c -> b c h w')
log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(pred)
for key in log:
log[key] = log[key][:N]
return log
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/models/diffusion/classifier.py |
"""
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
import os
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.ddpm import DDPM, disabled_train
from omegaconf import ListConfig
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
class LatentDiffusion_audio(DDPM):
"""main class"""
def __init__(self,
first_stage_config,
cond_stage_config,
num_timesteps_cond=None,
mel_dim=80,
mel_length=848,
cond_stage_key="image",
cond_stage_trainable=False,
concat_mode=True,
cond_stage_forward=None,
conditioning_key=None,
scale_factor=1.0,
scale_by_std=False,
*args, **kwargs):
self.num_timesteps_cond = default(num_timesteps_cond, 1)
self.scale_by_std = scale_by_std
assert self.num_timesteps_cond <= kwargs['timesteps']
# for backwards compatibility after implementation of DiffusionWrapper
if conditioning_key is None:
conditioning_key = 'concat' if concat_mode else 'crossattn'
if cond_stage_config == '__is_unconditional__':
conditioning_key = None
ckpt_path = kwargs.pop("ckpt_path", None)
ignore_keys = kwargs.pop("ignore_keys", [])
super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
self.concat_mode = concat_mode
self.mel_dim = mel_dim
self.mel_length = mel_length
self.cond_stage_trainable = cond_stage_trainable
self.cond_stage_key = cond_stage_key
try:
self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
        except Exception:
self.num_downs = 0
if not scale_by_std:
self.scale_factor = scale_factor
else:
self.register_buffer('scale_factor', torch.tensor(scale_factor))
self.instantiate_first_stage(first_stage_config)
self.instantiate_cond_stage(cond_stage_config)
self.cond_stage_forward = cond_stage_forward
self.clip_denoised = False
self.bbox_tokenizer = None
self.restarted_from_ckpt = False
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys)
self.restarted_from_ckpt = True
def make_cond_schedule(self, ):
self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
self.cond_ids[:self.num_timesteps_cond] = ids
@rank_zero_only
@torch.no_grad()
def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
# only for very first batch
if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
# set rescale weight to 1./std of encodings
print("### USING STD-RESCALING ###")
x = super().get_input(batch, self.first_stage_key)
x = x.to(self.device)
encoder_posterior = self.encode_first_stage(x)
z = self.get_first_stage_encoding(encoder_posterior).detach()
del self.scale_factor
self.register_buffer('scale_factor', 1. / z.flatten().std())
print(f"setting self.scale_factor to {self.scale_factor}")
print("### USING STD-RESCALING ###")
def register_schedule(self,
given_betas=None, beta_schedule="linear", timesteps=1000,
linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
self.shorten_cond_schedule = self.num_timesteps_cond > 1
if self.shorten_cond_schedule:
self.make_cond_schedule()
def instantiate_first_stage(self, config):
model = instantiate_from_config(config)
self.first_stage_model = model.eval()
self.first_stage_model.train = disabled_train
for param in self.first_stage_model.parameters():
param.requires_grad = False
def instantiate_cond_stage(self, config):
if not self.cond_stage_trainable:
if config == "__is_first_stage__":
print("Using first stage also as cond stage.")
self.cond_stage_model = self.first_stage_model
elif config == "__is_unconditional__":
print(f"Training {self.__class__.__name__} as an unconditional model.")
self.cond_stage_model = None
# self.be_unconditional = True
else:
model = instantiate_from_config(config)
self.cond_stage_model = model.eval()
self.cond_stage_model.train = disabled_train
for param in self.cond_stage_model.parameters():
param.requires_grad = False
else:
assert config != '__is_first_stage__'
assert config != '__is_unconditional__'
model = instantiate_from_config(config)
self.cond_stage_model = model
def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
denoise_row = []
for zd in tqdm(samples, desc=desc):
denoise_row.append(self.decode_first_stage(zd.to(self.device),
force_not_quantize=force_no_decoder_quantization))
n_imgs_per_row = len(denoise_row)
denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
return denoise_grid
def get_first_stage_encoding(self, encoder_posterior):
if isinstance(encoder_posterior, DiagonalGaussianDistribution):
z = encoder_posterior.sample()
elif isinstance(encoder_posterior, torch.Tensor):
z = encoder_posterior
else:
raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
return self.scale_factor * z
def get_learned_conditioning(self, c):
if self.cond_stage_forward is None:
if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
c = self.cond_stage_model.encode(c)
if isinstance(c, DiagonalGaussianDistribution):
c = c.mode()
else:
c = self.cond_stage_model(c)
else:
assert hasattr(self.cond_stage_model, self.cond_stage_forward)
c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
return c
@torch.no_grad()
def get_unconditional_conditioning(self, batch_size, null_label=None):
if null_label is not None:
xc = null_label
if isinstance(xc, ListConfig):
xc = list(xc)
if isinstance(xc, dict) or isinstance(xc, list):
c = self.get_learned_conditioning(xc)
else:
if hasattr(xc, "to"):
xc = xc.to(self.device)
c = self.get_learned_conditioning(xc)
else:
if self.cond_stage_key in ["class_label", "cls"]:
xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)
return self.get_learned_conditioning(xc)
else:
raise NotImplementedError("todo")
if isinstance(c, list): # in case the encoder gives us a list
for i in range(len(c)):
c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)
else:
c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)
return c
def meshgrid(self, h, w):
y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
arr = torch.cat([y, x], dim=-1)
return arr
def delta_border(self, h, w):
"""
:param h: height
:param w: width
:return: normalized distance to image border,
        with min distance = 0 at border and max dist = 0.5 at image center
"""
lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
arr = self.meshgrid(h, w) / lower_right_corner
dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
return edge_dist
def get_weighting(self, h, w, Ly, Lx, device):
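        # per-pixel weights that shrink towards the crop borders so that overlapping
        # crops blend smoothly when folded back together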
weighting = self.delta_border(h, w)
weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
self.split_input_params["clip_max_weight"], )
weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
if self.split_input_params["tie_braker"]:
L_weighting = self.delta_border(Ly, Lx)
L_weighting = torch.clip(L_weighting,
self.split_input_params["clip_min_tie_weight"],
self.split_input_params["clip_max_tie_weight"])
L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
weighting = weighting * L_weighting
return weighting
def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
"""
:param x: img of size (bs, c, h, w)
:return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
"""
bs, nc, h, w = x.shape
# number of crops in image
Ly = (h - kernel_size[0]) // stride[0] + 1
Lx = (w - kernel_size[1]) // stride[1] + 1
if uf == 1 and df == 1:
fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
unfold = torch.nn.Unfold(**fold_params)
fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
elif uf > 1 and df == 1:
fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
unfold = torch.nn.Unfold(**fold_params)
fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
dilation=1, padding=0,
stride=(stride[0] * uf, stride[1] * uf))
fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
elif df > 1 and uf == 1:
fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
unfold = torch.nn.Unfold(**fold_params)
fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
dilation=1, padding=0,
stride=(stride[0] // df, stride[1] // df))
fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
else:
raise NotImplementedError
return fold, unfold, normalization, weighting
@torch.no_grad()
def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
cond_key=None, return_original_cond=False, bs=None):
x = super().get_input(batch, k)
if bs is not None:
x = x[:bs]
x = x.to(self.device)
encoder_posterior = self.encode_first_stage(x)
z = self.get_first_stage_encoding(encoder_posterior).detach()
if self.model.conditioning_key is not None:
if cond_key is None:
cond_key = self.cond_stage_key
if cond_key != self.first_stage_key:
if cond_key in ['caption', 'coordinates_bbox']:
xc = batch[cond_key]
elif cond_key == 'class_label':
xc = batch
else:
xc = super().get_input(batch, cond_key).to(self.device)
else:
xc = x
if not self.cond_stage_trainable or force_c_encode:
if isinstance(xc, dict) or isinstance(xc, list):
# import pudb; pudb.set_trace()
c = self.get_learned_conditioning(xc)
else:
c = self.get_learned_conditioning(xc.to(self.device))
else:
c = xc
if bs is not None:
c = c[:bs]
# Testing #
if cond_key == 'masked_image':
mask = super().get_input(batch, "mask")
cc = torch.nn.functional.interpolate(mask, size=c.shape[-2:]) # [B, 1, 10, 106]
c = torch.cat((c, cc), dim=1) # [B, 5, 10, 106]
# Testing #
if self.use_positional_encodings:
pos_x, pos_y = self.compute_latent_shifts(batch)
ckey = __conditioning_keys__[self.model.conditioning_key]
c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
else:
c = None
xc = None
if self.use_positional_encodings:
pos_x, pos_y = self.compute_latent_shifts(batch)
c = {'pos_x': pos_x, 'pos_y': pos_y}
out = [z, c]
if return_first_stage_outputs:
xrec = self.decode_first_stage(z)
out.extend([x, xrec])
if return_original_cond:
out.append(xc)
return out
@torch.no_grad()
def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
if predict_cids:
if z.dim() == 4:
z = torch.argmax(z.exp(), dim=1).long()
z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
z = rearrange(z, 'b h w c -> b c h w').contiguous()
z = 1. / self.scale_factor * z
if hasattr(self, "split_input_params"):
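            # patch-wise decoding: split the latent into overlapping crops, decode each
            # crop separately and blend the results with the border weighting above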
if self.split_input_params["patch_distributed_vq"]:
ks = self.split_input_params["ks"] # eg. (128, 128)
stride = self.split_input_params["stride"] # eg. (64, 64)
uf = self.split_input_params["vqf"]
bs, nc, h, w = z.shape
if ks[0] > h or ks[1] > w:
ks = (min(ks[0], h), min(ks[1], w))
print("reducing Kernel")
if stride[0] > h or stride[1] > w:
stride = (min(stride[0], h), min(stride[1], w))
print("reducing stride")
fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
z = unfold(z) # (bn, nc * prod(**ks), L)
# 1. Reshape to img shape
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
# 2. apply model loop over last dim
if isinstance(self.first_stage_model, VQModelInterface):
output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
force_not_quantize=predict_cids or force_not_quantize)
for i in range(z.shape[-1])]
else:
output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
for i in range(z.shape[-1])]
o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)
o = o * weighting
# Reverse 1. reshape to img shape
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
# stitch crops together
decoded = fold(o)
decoded = decoded / normalization # norm is shape (1, 1, h, w)
return decoded
else:
if isinstance(self.first_stage_model, VQModelInterface):
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
else:
return self.first_stage_model.decode(z)
else:
if isinstance(self.first_stage_model, VQModelInterface):
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
else:
return self.first_stage_model.decode(z)
# same as above but without decorator
def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
if predict_cids:
if z.dim() == 4:
z = torch.argmax(z.exp(), dim=1).long()
z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
z = rearrange(z, 'b h w c -> b c h w').contiguous()
z = 1. / self.scale_factor * z
if hasattr(self, "split_input_params"):
if self.split_input_params["patch_distributed_vq"]:
ks = self.split_input_params["ks"] # eg. (128, 128)
stride = self.split_input_params["stride"] # eg. (64, 64)
uf = self.split_input_params["vqf"]
bs, nc, h, w = z.shape
if ks[0] > h or ks[1] > w:
ks = (min(ks[0], h), min(ks[1], w))
print("reducing Kernel")
if stride[0] > h or stride[1] > w:
stride = (min(stride[0], h), min(stride[1], w))
print("reducing stride")
fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
z = unfold(z) # (bn, nc * prod(**ks), L)
# 1. Reshape to img shape
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
# 2. apply model loop over last dim
if isinstance(self.first_stage_model, VQModelInterface):
output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
force_not_quantize=predict_cids or force_not_quantize)
for i in range(z.shape[-1])]
else:
output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
for i in range(z.shape[-1])]
o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)
o = o * weighting
# Reverse 1. reshape to img shape
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
# stitch crops together
decoded = fold(o)
decoded = decoded / normalization # norm is shape (1, 1, h, w)
return decoded
else:
if isinstance(self.first_stage_model, VQModelInterface):
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
else:
return self.first_stage_model.decode(z)
else:
if isinstance(self.first_stage_model, VQModelInterface):
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
else:
return self.first_stage_model.decode(z)
@torch.no_grad()
def encode_first_stage(self, x):
if hasattr(self, "split_input_params"):
if self.split_input_params["patch_distributed_vq"]:
ks = self.split_input_params["ks"] # eg. (128, 128)
stride = self.split_input_params["stride"] # eg. (64, 64)
df = self.split_input_params["vqf"]
self.split_input_params['original_image_size'] = x.shape[-2:]
bs, nc, h, w = x.shape
if ks[0] > h or ks[1] > w:
ks = (min(ks[0], h), min(ks[1], w))
print("reducing Kernel")
if stride[0] > h or stride[1] > w:
stride = (min(stride[0], h), min(stride[1], w))
print("reducing stride")
fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
z = unfold(x) # (bn, nc * prod(**ks), L)
# Reshape to img shape
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
for i in range(z.shape[-1])]
o = torch.stack(output_list, axis=-1)
o = o * weighting
# Reverse reshape to img shape
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
# stitch crops together
decoded = fold(o)
decoded = decoded / normalization
return decoded
else:
return self.first_stage_model.encode(x)
else:
return self.first_stage_model.encode(x)
def shared_step(self, batch, **kwargs):
x, c = self.get_input(batch, self.first_stage_key)
loss = self(x, c)
return loss
def test_step(self,batch,batch_idx):
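        # generate self.test_repeat samples per caption and save the decoded mel
        # spectrograms as .npy files; note that test_repeat / test_numsteps are expected
        # to be set on the instance (they are not arguments of __init__ in this class)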
cond = batch[self.cond_stage_key] * self.test_repeat
cond = self.get_learned_conditioning(cond) # c: string -> [B, T, Context_dim]
batch_size = len(cond)
enc_emb = self.sample(cond,batch_size,timesteps=self.test_numsteps)# shape = [batch_size,self.channels,self.mel_dim,self.mel_length]
xrec = self.decode_first_stage(enc_emb)
reconstructions = (xrec + 1)/2 # to mel scale
test_ckpt_path = os.path.basename(self.trainer.tested_ckpt_path)
savedir = os.path.join(self.trainer.log_dir,f'output_imgs_{test_ckpt_path}','fake_class')
if not os.path.exists(savedir):
os.makedirs(savedir)
file_names = batch['f_name']
nfiles = len(file_names)
        reconstructions = reconstructions.cpu().numpy().squeeze(1) # squeeze channel dim
for k in range(reconstructions.shape[0]):
b,repeat = k % nfiles, k // nfiles
vname_num_split_index = file_names[b].rfind('_')# file_names[b]:video_name+'_'+num
v_n,num = file_names[b][:vname_num_split_index],file_names[b][vname_num_split_index+1:]
            save_img_path = os.path.join(savedir,f'{v_n}_sample_{num}_{repeat}.npy')# the num_th caption, the repeat_th repetition
np.save(save_img_path,reconstructions[b])
return None
def forward(self, x, c, *args, **kwargs):
t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
if self.model.conditioning_key is not None:
assert c is not None
if self.cond_stage_trainable:
c = self.get_learned_conditioning(c) # c: string -> [B, T, Context_dim]
if self.shorten_cond_schedule: # TODO: drop this option
tc = self.cond_ids[t].to(self.device)
c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
return self.p_losses(x, c, t, *args, **kwargs)
def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset
def rescale_bbox(bbox):
x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
w = min(bbox[2] / crop_coordinates[2], 1 - x0)
h = min(bbox[3] / crop_coordinates[3], 1 - y0)
return x0, y0, w, h
return [rescale_bbox(b) for b in bboxes]
def apply_model(self, x_noisy, t, cond, return_ids=False):
if isinstance(cond, dict):
            # hybrid case, cond is expected to be a dict
pass
else:
if not isinstance(cond, list):
cond = [cond]
key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
cond = {key: cond}
if hasattr(self, "split_input_params"):
assert len(cond) == 1 # todo can only deal with one conditioning atm
assert not return_ids
ks = self.split_input_params["ks"] # eg. (128, 128)
stride = self.split_input_params["stride"] # eg. (64, 64)
h, w = x_noisy.shape[-2:]
fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)
z = unfold(x_noisy) # (bn, nc * prod(**ks), L)
# Reshape to img shape
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]
if self.cond_stage_key in ["image", "LR_image", "segmentation",
'bbox_img'] and self.model.conditioning_key: # todo check for completeness
c_key = next(iter(cond.keys())) # get key
c = next(iter(cond.values())) # get value
assert (len(c) == 1) # todo extend to list with more than one elem
c = c[0] # get element
c = unfold(c)
c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L )
cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
elif self.cond_stage_key == 'coordinates_bbox':
                assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'
# assuming padding of unfold is always 0 and its dilation is always 1
n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
full_img_h, full_img_w = self.split_input_params['original_image_size']
# as we are operating on latents, we need the factor from the original image size to the
# spatial latent size to properly rescale the crops for regenerating the bbox annotations
num_downs = self.first_stage_model.encoder.num_resolutions - 1
rescale_latent = 2 ** (num_downs)
                # get top-left positions of patches in the format expected by the bbox tokenizer, therefore we
                # need to rescale the top-left patch coordinates to be in between (0, 1)
tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
for patch_nr in range(z.shape[-1])]
# patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
patch_limits = [(x_tl, y_tl,
rescale_latent * ks[0] / full_img_w,
rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
# patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]
# tokenize crop coordinates for the bounding boxes of the respective patches
patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
for bbox in patch_limits] # list of length l with tensors of shape (1, 2)
print(patch_limits_tknzd[0].shape)
# cut tknzd crop position from conditioning
assert isinstance(cond, dict), 'cond must be dict to be fed into model'
cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
print(cut_cond.shape)
adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
print(adapted_cond.shape)
adapted_cond = self.get_learned_conditioning(adapted_cond)
print(adapted_cond.shape)
adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
print(adapted_cond.shape)
cond_list = [{'c_crossattn': [e]} for e in adapted_cond]
else:
cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient
# apply model by loop over crops
output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
assert not isinstance(output_list[0],
tuple) # todo cant deal with multiple model outputs check this never happens
o = torch.stack(output_list, axis=-1)
o = o * weighting
# Reverse reshape to img shape
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
# stitch crops together
x_recon = fold(o) / normalization
else:
x_recon = self.model(x_noisy, t, **cond)
if isinstance(x_recon, tuple) and not return_ids:
return x_recon[0]
else:
return x_recon
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
def _prior_bpd(self, x_start):
"""
Get the prior KL term for the variational lower-bound, measured in
bits-per-dim.
This term can't be optimized, as it only depends on the encoder.
:param x_start: the [N x C x ...] tensor of inputs.
:return: a batch of [N] KL values (in bits), one per batch element.
"""
batch_size = x_start.shape[0]
t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
return mean_flat(kl_prior) / np.log(2.0)
def p_losses(self, x_start, cond, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
model_output = self.apply_model(x_noisy, t, cond)
loss_dict = {}
prefix = 'train' if self.training else 'val'
if self.parameterization == "x0":
target = x_start
elif self.parameterization == "eps":
target = noise
else:
raise NotImplementedError()
loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
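        # reweight the simple loss by a (possibly learned) per-timestep log-variance;
        # the additive logvar_t term discourages the variance from growing without bound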
logvar_t = self.logvar[t].to(self.device)
loss = loss_simple / torch.exp(logvar_t) + logvar_t
# loss = loss_simple / torch.exp(self.logvar) + self.logvar
if self.learn_logvar:
loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
loss_dict.update({'logvar': self.logvar.data.mean()})
loss = self.l_simple_weight * loss.mean()
loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
loss += (self.original_elbo_weight * loss_vlb)
loss_dict.update({f'{prefix}/loss': loss})
return loss, loss_dict
def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
return_x0=False, score_corrector=None, corrector_kwargs=None):
t_in = t
model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
if score_corrector is not None:
assert self.parameterization == "eps"
model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
if return_codebook_ids:
model_out, logits = model_out
if self.parameterization == "eps":
x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
elif self.parameterization == "x0":
x_recon = model_out
else:
raise NotImplementedError()
if clip_denoised:
x_recon.clamp_(-1., 1.)
if quantize_denoised:
x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
if return_codebook_ids:
return model_mean, posterior_variance, posterior_log_variance, logits
elif return_x0:
return model_mean, posterior_variance, posterior_log_variance, x_recon
else:
return model_mean, posterior_variance, posterior_log_variance
@torch.no_grad()
def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
return_codebook_ids=False, quantize_denoised=False, return_x0=False,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
b, *_, device = *x.shape, x.device
outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
return_codebook_ids=return_codebook_ids,
quantize_denoised=quantize_denoised,
return_x0=return_x0,
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
if return_codebook_ids:
raise DeprecationWarning("Support dropped.")
model_mean, _, model_log_variance, logits = outputs
elif return_x0:
model_mean, _, model_log_variance, x0 = outputs
else:
model_mean, _, model_log_variance = outputs
noise = noise_like(x.shape, device, repeat_noise) * temperature
if noise_dropout > 0.:
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
# no noise when t == 0
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
if return_codebook_ids:
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
if return_x0:
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
else:
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
@torch.no_grad()
def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
log_every_t=None):
if not log_every_t:
log_every_t = self.log_every_t
timesteps = self.num_timesteps
if batch_size is not None:
b = batch_size if batch_size is not None else shape[0]
shape = [batch_size] + list(shape)
else:
b = batch_size = shape[0]
if x_T is None:
img = torch.randn(shape, device=self.device)
else:
img = x_T
intermediates = []
if cond is not None:
if isinstance(cond, dict):
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
else:
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
if start_T is not None:
timesteps = min(timesteps, start_T)
iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
total=timesteps) if verbose else reversed(
range(0, timesteps))
if type(temperature) == float:
temperature = [temperature] * timesteps
for i in iterator:
ts = torch.full((b,), i, device=self.device, dtype=torch.long)
if self.shorten_cond_schedule:
assert self.model.conditioning_key != 'hybrid'
tc = self.cond_ids[ts].to(cond.device)
cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
img, x0_partial = self.p_sample(img, cond, ts,
clip_denoised=self.clip_denoised,
quantize_denoised=quantize_denoised, return_x0=True,
temperature=temperature[i], noise_dropout=noise_dropout,
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
if mask is not None:
assert x0 is not None
img_orig = self.q_sample(x0, ts)
img = img_orig * mask + (1. - mask) * img
if i % log_every_t == 0 or i == timesteps - 1:
intermediates.append(x0_partial)
if callback: callback(i)
if img_callback: img_callback(img, i)
return img, intermediates
@torch.no_grad()
def p_sample_loop(self, cond, shape, return_intermediates=False,
x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
mask=None, x0=None, img_callback=None, start_T=None,
log_every_t=None):
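        # ancestral sampling loop over the diffusion timesteps; when mask/x0 are given,
        # the region where mask == 1 is clamped to a noised x0 and only the rest is generated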
if not log_every_t:
log_every_t = self.log_every_t
device = self.betas.device
b = shape[0]
if x_T is None:
img = torch.randn(shape, device=device)
else:
img = x_T
intermediates = [img]
if timesteps is None:
timesteps = self.num_timesteps
if start_T is not None:
timesteps = min(timesteps, start_T)
iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
range(0, timesteps))
if mask is not None:
assert x0 is not None
assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
for i in iterator:
ts = torch.full((b,), i, device=device, dtype=torch.long)
if self.shorten_cond_schedule:
assert self.model.conditioning_key != 'hybrid'
tc = self.cond_ids[ts].to(cond.device)
cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
img = self.p_sample(img, cond, ts,
clip_denoised=self.clip_denoised,
quantize_denoised=quantize_denoised)
if mask is not None:
img_orig = self.q_sample(x0, ts)
img = img_orig * mask + (1. - mask) * img
if i % log_every_t == 0 or i == timesteps - 1:
intermediates.append(img)
if callback: callback(i)
if img_callback: img_callback(img, i)
if return_intermediates:
return img, intermediates
return img
@torch.no_grad()
def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
verbose=True, timesteps=None, quantize_denoised=False,
mask=None, x0=None, shape=None,**kwargs):
if shape is None:
shape = (batch_size, self.channels, self.mel_dim, self.mel_length)
if cond is not None:
if isinstance(cond, dict):
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
else:
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
return self.p_sample_loop(cond,
shape,
return_intermediates=return_intermediates, x_T=x_T,
verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
mask=mask, x0=x0)
@torch.no_grad()
def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs):
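        # sample either with the fast DDIM sampler or with the full ancestral sampling loop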
if ddim:
ddim_sampler = DDIMSampler(self)
shape = (self.channels, self.mel_dim, self.mel_length)
samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size,
shape,cond,verbose=False,**kwargs)
else:
samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
return_intermediates=True,**kwargs)
return samples, intermediates
@torch.no_grad()
def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
plot_diffusion_rows=True, **kwargs):
use_ddim = ddim_steps is not None
log = dict()
z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
return_first_stage_outputs=True,
force_c_encode=True,
return_original_cond=True,
bs=N)
N = min(x.shape[0], N)
n_row = min(x.shape[0], n_row)
log["inputs"] = x
log["reconstruction"] = xrec
if self.model.conditioning_key is not None:
if hasattr(self.cond_stage_model, "decode") and self.cond_stage_key != "masked_image":
xc = self.cond_stage_model.decode(c)
log["conditioning"] = xc
elif self.cond_stage_key == "masked_image":
log["mask"] = c[:, -1, :, :][:, None, :, :]
xc = self.cond_stage_model.decode(c[:, :self.cond_stage_model.embed_dim, :, :])
log["conditioning"] = xc
elif self.cond_stage_key in ["caption"]:
xc = log_txt_as_img((256, 256), batch["caption"])
log["conditioning"] = xc
elif self.cond_stage_key == 'class_label':
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
log['conditioning'] = xc
elif isimage(xc):
log["conditioning"] = xc
if ismap(xc):
log["original_conditioning"] = self.to_rgb(xc)
if plot_diffusion_rows:
# get diffusion row
diffusion_row = list()
z_start = z[:n_row]
for t in range(self.num_timesteps):
if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
t = t.to(self.device).long()
noise = torch.randn_like(z_start)
z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
diffusion_row.append(self.decode_first_stage(z_noisy))
diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
log["diffusion_row"] = diffusion_grid
if sample:
# get denoise row
with self.ema_scope("Plotting"):
samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
ddim_steps=ddim_steps,eta=ddim_eta)
# samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
x_samples = self.decode_first_stage(samples)
log["samples"] = x_samples
if plot_denoise_rows:
denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
log["denoise_row"] = denoise_grid
if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
self.first_stage_model, IdentityFirstStage):
# also display when quantizing x0 while sampling
with self.ema_scope("Plotting Quantized Denoised"):
samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
ddim_steps=ddim_steps,eta=ddim_eta,
quantize_denoised=True)
# samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
# quantize_denoised=True)
x_samples = self.decode_first_stage(samples.to(self.device))
log["samples_x0_quantized"] = x_samples
if inpaint:
# make a simple center square
b, h, w = z.shape[0], z.shape[2], z.shape[3]
mask = torch.ones(N, h, w).to(self.device)
# zeros will be filled in
mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
mask = mask[:, None, ...]
with self.ema_scope("Plotting Inpaint"):
samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta,
ddim_steps=ddim_steps, x0=z[:N], mask=mask)
x_samples = self.decode_first_stage(samples.to(self.device))
log["samples_inpainting"] = x_samples
log["mask_inpainting"] = mask
# outpaint
mask = 1 - mask
with self.ema_scope("Plotting Outpaint"):
samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta,
ddim_steps=ddim_steps, x0=z[:N], mask=mask)
x_samples = self.decode_first_stage(samples.to(self.device))
log["samples_outpainting"] = x_samples
log["mask_outpainting"] = mask
if plot_progressive_rows:
with self.ema_scope("Plotting Progressives"):
img, progressives = self.progressive_denoising(c,
shape=(self.channels, self.mel_dim, self.mel_length),
batch_size=N)
prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
log["progressive_row"] = prog_row
if return_keys:
if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
return log
else:
return {key: log[key] for key in return_keys}
return log
def configure_optimizers(self):
lr = self.learning_rate
params = list(self.model.parameters())
if self.cond_stage_trainable:
print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
params = params + list(self.cond_stage_model.parameters())
if self.learn_logvar:
print('Diffusion model optimizing logvar')
params.append(self.logvar)
opt = torch.optim.AdamW(params, lr=lr)
if self.use_scheduler:
assert 'target' in self.scheduler_config
scheduler = instantiate_from_config(self.scheduler_config)
print("Setting up LambdaLR scheduler...")
scheduler = [
{
'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
'interval': 'step',
'frequency': 1
}]
return [opt], scheduler
return opt
@torch.no_grad()
def to_rgb(self, x):
x = x.float()
if not hasattr(self, "colorize"):
self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
x = nn.functional.conv2d(x, weight=self.colorize)
x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
return x
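# Illustrative text-to-audio sampling sketch (the caption, step count and config handling
# below are assumptions, not fixed by this file):
#   model = instantiate_from_config(cfg.model)              # cfg: the training yaml
#   model.eval()
#   c = model.get_learned_conditioning(["a dog barking"])
#   samples, _ = model.sample_log(cond=c, batch_size=1, ddim=True, ddim_steps=100, eta=1.0)
#   mel = (model.decode_first_stage(samples) + 1) / 2       # back to mel scale, cf. test_step
# The resulting mel spectrogram is turned into a waveform by a separate vocoder.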
class LatentFinetuneDiffusion(LatentDiffusion_audio):
"""
    Basis for different finetuning tasks, such as inpainting or depth2image
To disable finetuning mode, set finetune_keys to None
"""
def __init__(self,
concat_keys: tuple,
finetune_keys=("model.diffusion_model.input_blocks.0.0.weight",
"model_ema.diffusion_modelinput_blocks00weight"
),
keep_finetune_dims=4,
# if model was trained without concat mode before and we would like to keep these channels
c_concat_log_start=None, # to log reconstruction of c_concat codes
c_concat_log_end=None,
*args, **kwargs
):
ckpt_path = kwargs.pop("ckpt_path", None)
ignore_keys = kwargs.pop("ignore_keys", list())
super().__init__(*args, **kwargs)
self.finetune_keys = finetune_keys
self.concat_keys = concat_keys
self.keep_dims = keep_finetune_dims
self.c_concat_log_start = c_concat_log_start
self.c_concat_log_end = c_concat_log_end
if exists(self.finetune_keys): assert exists(ckpt_path), 'can only finetune from a given checkpoint'
if exists(ckpt_path):
self.init_from_ckpt(ckpt_path, ignore_keys)
def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
sd = torch.load(path, map_location="cpu")
if "state_dict" in list(sd.keys()):
sd = sd["state_dict"]
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
print("Deleting key {} from state_dict.".format(k))
del sd[k]
# make it explicit, finetune by including extra input channels
if exists(self.finetune_keys) and k in self.finetune_keys:
new_entry = None
for name, param in self.named_parameters():
if name in self.finetune_keys:
print(
f"modifying key '{name}' and keeping its original {self.keep_dims} (channels) dimensions only")
new_entry = torch.zeros_like(param) # zero init
assert exists(new_entry), 'did not find matching parameter to modify'
new_entry[:, :self.keep_dims, ...] = sd[k]
sd[k] = new_entry
missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(sd, strict=False)
print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
if len(missing) > 0:
print(f"Missing Keys: {missing}")
if len(unexpected) > 0:
print(f"Unexpected Keys: {unexpected}")
@torch.no_grad()
def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,
use_ema_scope=True,
**kwargs):
use_ddim = ddim_steps is not None
log = dict()
z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, bs=N, return_first_stage_outputs=True)
c_cat, c = c["c_concat"][0], c["c_crossattn"][0]
N = min(x.shape[0], N)
n_row = min(x.shape[0], n_row)
log["inputs"] = x
log["reconstruction"] = xrec
if self.model.conditioning_key is not None:
if hasattr(self.cond_stage_model, "decode"):
xc = self.cond_stage_model.decode(c)
log["conditioning"] = xc
elif self.cond_stage_key in ["caption"]:
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
log["conditioning"] = xc
elif self.cond_stage_key == 'class_label':
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
log['conditioning'] = xc
elif isimage(xc):
log["conditioning"] = xc
if ismap(xc):
log["original_conditioning"] = self.to_rgb(xc)
if not (self.c_concat_log_start is None and self.c_concat_log_end is None):
log["c_concat_decoded"] = self.decode_first_stage(c_cat[:, self.c_concat_log_start:self.c_concat_log_end])
if plot_diffusion_rows:
# get diffusion row
diffusion_row = list()
z_start = z[:n_row]
for t in range(self.num_timesteps):
if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
t = t.to(self.device).long()
noise = torch.randn_like(z_start)
z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
diffusion_row.append(self.decode_first_stage(z_noisy))
diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
log["diffusion_row"] = diffusion_grid
if sample:
# get denoise row
with self.ema_scope("Sampling"):
samples, z_denoise_row = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
batch_size=N, ddim=use_ddim,
ddim_steps=ddim_steps, eta=ddim_eta)
# samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
x_samples = self.decode_first_stage(samples)
log["samples"] = x_samples
if plot_denoise_rows:
denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
log["denoise_row"] = denoise_grid
if unconditional_guidance_scale > 1.0:
uc_cross = self.get_unconditional_conditioning(N, unconditional_guidance_label)
uc_cat = c_cat
uc_full = {"c_concat": [uc_cat], "c_crossattn": [uc_cross]}
with self.ema_scope("Sampling with classifier-free guidance"):
samples_cfg, _ = self.sample_log(cond={"c_concat": [c_cat], "c_crossattn": [c]},
batch_size=N, ddim=use_ddim,
ddim_steps=ddim_steps, eta=ddim_eta,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=uc_full,
)
x_samples_cfg = self.decode_first_stage(samples_cfg)
log[f"samples_cfg_scale_{unconditional_guidance_scale:.2f}"] = x_samples_cfg
return log
class LatentInpaintDiffusion(LatentFinetuneDiffusion):
"""
can either run as pure inpainting model (only concat mode) or with mixed conditionings,
e.g. mask as concat and text via cross-attn.
To disable finetuning mode, set finetune_keys to None
"""
def __init__(self,
concat_keys=("mask", "masked_image"),
masked_image_key="masked_image",
*args, **kwargs
):
super().__init__(concat_keys, *args, **kwargs)
self.masked_image_key = masked_image_key
assert self.masked_image_key in concat_keys
@torch.no_grad()
def get_input(self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False):
# note: restricted to non-trainable encoders currently
assert not self.cond_stage_trainable, 'trainable cond stages not yet supported for inpainting'
z, c, x, xrec, xc = super().get_input(batch, self.first_stage_key, return_first_stage_outputs=True,
force_c_encode=True, return_original_cond=True, bs=bs)
assert exists(self.concat_keys)
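        # build the concat conditioning: the mask is resized to the latent resolution,
        # the masked image is encoded with the first stage, and both are concatenated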
c_cat = list()
for ck in self.concat_keys:
if len(batch[ck].shape) == 3:
batch[ck] = batch[ck][..., None]
cc = rearrange(batch[ck], 'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
if bs is not None:
cc = cc[:bs]
cc = cc.to(self.device)
bchw = z.shape
if ck != self.masked_image_key:
cc = torch.nn.functional.interpolate(cc, size=bchw[-2:])
else:
cc = self.get_first_stage_encoding(self.encode_first_stage(cc))
c_cat.append(cc)
c_cat = torch.cat(c_cat, dim=1)
all_conds = {"c_concat": [c_cat], "c_crossattn": [c]}
if return_first_stage_outputs:
return z, all_conds, x, xrec, xc
return z, all_conds
@torch.no_grad()
def log_images(self, *args, **kwargs):
log = super(LatentInpaintDiffusion, self).log_images(*args, **kwargs)
log["masked_image"] = rearrange(args[0]["masked_image"],
'b h w c -> b c h w').to(memory_format=torch.contiguous_format).float()
return log
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/models/diffusion/ddpm_audio.py |
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/models/diffusion/__init__.py |
"""
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
import os
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.ddpm import DDPM, disabled_train
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
# add mel_dim and mel_length params to ensure correct shape
class LatentDiffusion_audioinpaint(DDPM):
"""main class"""
def __init__(self,
first_stage_config,
cond_stage_config,
num_timesteps_cond=None,
mel_dim=80,
mel_length=848,
cond_stage_key="image",
cond_stage_trainable=False,
concat_mode=True,
cond_stage_forward=None,
conditioning_key=None,
scale_factor=1.0,
scale_by_std=False,
test_repeat=1,
test_numsteps = None,
*args, **kwargs):
self.num_timesteps_cond = default(num_timesteps_cond, 1)
self.scale_by_std = scale_by_std
assert self.num_timesteps_cond <= kwargs['timesteps']
# for backwards compatibility after implementation of DiffusionWrapper
if conditioning_key is None:
conditioning_key = 'concat' if concat_mode else 'crossattn'
if cond_stage_config == '__is_unconditional__':
conditioning_key = None
ckpt_path = kwargs.pop("ckpt_path", None)
ignore_keys = kwargs.pop("ignore_keys", [])
super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
self.test_repeat = test_repeat
        if test_numsteps is None:
            self.test_numsteps = self.num_timesteps
        else:
            self.test_numsteps = test_numsteps
self.concat_mode = concat_mode
self.mel_dim = mel_dim
self.mel_length = mel_length
self.cond_stage_trainable = cond_stage_trainable
self.cond_stage_key = cond_stage_key
        try:
            self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
        except Exception:
            self.num_downs = 0
if not scale_by_std:
self.scale_factor = scale_factor
else:
self.register_buffer('scale_factor', torch.tensor(scale_factor))
self.instantiate_first_stage(first_stage_config)
self.instantiate_cond_stage(cond_stage_config)
self.cond_stage_forward = cond_stage_forward
self.clip_denoised = False
self.bbox_tokenizer = None
self.restarted_from_ckpt = False
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys)
self.restarted_from_ckpt = True
def make_cond_schedule(self, ):
self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
self.cond_ids[:self.num_timesteps_cond] = ids
@rank_zero_only
@torch.no_grad()
def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
# only for very first batch
if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
# set rescale weight to 1./std of encodings
print("### USING STD-RESCALING ###")
x = super().get_input(batch, self.first_stage_key)
x = x.to(self.device)
encoder_posterior = self.encode_first_stage(x)
z = self.get_first_stage_encoding(encoder_posterior).detach()
del self.scale_factor
self.register_buffer('scale_factor', 1. / z.flatten().std())
print(f"setting self.scale_factor to {self.scale_factor}")
print("### USING STD-RESCALING ###")
def register_schedule(self,
given_betas=None, beta_schedule="linear", timesteps=1000,
linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
self.shorten_cond_schedule = self.num_timesteps_cond > 1
if self.shorten_cond_schedule:
self.make_cond_schedule()
def instantiate_first_stage(self, config):
model = instantiate_from_config(config)
self.first_stage_model = model.eval()
self.first_stage_model.train = disabled_train
for param in self.first_stage_model.parameters():
param.requires_grad = False
def instantiate_cond_stage(self, config):
if not self.cond_stage_trainable:
if config == "__is_first_stage__":# for no_text inpainting task
print("Using first stage also as cond stage.")
self.cond_stage_model = self.first_stage_model
elif config == "__is_unconditional__":# for unconditional image generation such as human face、ImageNet
print(f"Training {self.__class__.__name__} as an unconditional model.")
self.cond_stage_model = None
# self.be_unconditional = True
else:
model = instantiate_from_config(config)
self.cond_stage_model = model.eval()
self.cond_stage_model.train = disabled_train
for param in self.cond_stage_model.parameters():
param.requires_grad = False
else:
assert config != '__is_first_stage__'
assert config != '__is_unconditional__'
model = instantiate_from_config(config)
self.cond_stage_model = model
def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
denoise_row = []
for zd in tqdm(samples, desc=desc):
denoise_row.append(self.decode_first_stage(zd.to(self.device),
force_not_quantize=force_no_decoder_quantization))
n_imgs_per_row = len(denoise_row)
denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
return denoise_grid
def get_first_stage_encoding(self, encoder_posterior):# encode_emb from autoencoder
if isinstance(encoder_posterior, DiagonalGaussianDistribution):
z = encoder_posterior.sample()
elif isinstance(encoder_posterior, torch.Tensor):
z = encoder_posterior
else:
raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
return self.scale_factor * z
def get_learned_conditioning(self, c):
if self.cond_stage_forward is None:
if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
c = self.cond_stage_model.encode(c)
if isinstance(c, DiagonalGaussianDistribution):
c = c.mode()
else:
c = self.cond_stage_model(c)
else:
assert hasattr(self.cond_stage_model, self.cond_stage_forward)
c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
return c
def meshgrid(self, h, w):
y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
arr = torch.cat([y, x], dim=-1)
return arr
def delta_border(self, h, w):
"""
:param h: height
:param w: width
        :return: normalized distance to the image border,
            with min distance = 0 at the border and max distance = 0.5 at the image center
"""
lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
arr = self.meshgrid(h, w) / lower_right_corner
dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
return edge_dist
def get_weighting(self, h, w, Ly, Lx, device):
weighting = self.delta_border(h, w)
weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
self.split_input_params["clip_max_weight"], )
weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
if self.split_input_params["tie_braker"]:
L_weighting = self.delta_border(Ly, Lx)
L_weighting = torch.clip(L_weighting,
self.split_input_params["clip_min_tie_weight"],
self.split_input_params["clip_max_tie_weight"])
L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
weighting = weighting * L_weighting
return weighting
def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
"""
:param x: img of size (bs, c, h, w)
:return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])
"""
bs, nc, h, w = x.shape
# number of crops in image
Ly = (h - kernel_size[0]) // stride[0] + 1
Lx = (w - kernel_size[1]) // stride[1] + 1
if uf == 1 and df == 1:
fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
unfold = torch.nn.Unfold(**fold_params)
fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
elif uf > 1 and df == 1:
fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
unfold = torch.nn.Unfold(**fold_params)
fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
dilation=1, padding=0,
stride=(stride[0] * uf, stride[1] * uf))
fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
elif df > 1 and uf == 1:
fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
unfold = torch.nn.Unfold(**fold_params)
fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
dilation=1, padding=0,
stride=(stride[0] // df, stride[1] // df))
fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
else:
raise NotImplementedError
return fold, unfold, normalization, weighting
@torch.no_grad()
def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
cond_key=None, return_original_cond=False, bs=None):
x = super().get_input(batch, k)
if bs is not None:
x = x[:bs]
x = x.to(self.device)
encoder_posterior = self.encode_first_stage(x)
z = self.get_first_stage_encoding(encoder_posterior).detach()
        if self.model.conditioning_key is not None:  # 'crossattn' for txt2img, 'hybrid' for txt_inpaint
if cond_key is None:
cond_key = self.cond_stage_key # 'caption' for txt_inpaint
if self.model.conditioning_key == 'hybrid':
xc = {}
assert cond_key == 'caption' # only txt_inpaint is implemented now
assert 'masked_image' in batch.keys()
assert 'mask' in batch.keys()
masked_image = super().get_input(batch,'masked_image')
mask = super().get_input(batch,'mask')
if bs is not None:
masked_image,mask = masked_image[:bs],mask[:bs]
masked_image,mask = masked_image.to(self.device),mask.to(self.device)
masked_image = self.get_first_stage_encoding(self.encode_first_stage(masked_image)).detach()
resized_mask = torch.nn.functional.interpolate(mask,size=masked_image.shape[-2:])
xc['c_concat'] = torch.cat((masked_image,resized_mask),dim = 1)
xc[cond_key] = batch[cond_key]
else:
if cond_key != self.first_stage_key:
if cond_key in ['caption', 'coordinates_bbox']:
xc = batch[cond_key]
elif cond_key == 'class_label':
xc = batch
else:
xc = super().get_input(batch, cond_key).to(self.device)
else:# cond_key == 'image'
xc = x
            if not self.cond_stage_trainable or force_c_encode:  # cond_stage_trainable is True for txt2img; force_c_encode=True when called from log_images
                if isinstance(xc, list):
                    # log_images calls sample_log right afterwards, so the processed conditioning c is needed up front
                    c = self.get_learned_conditioning(xc)
                elif isinstance(xc, dict):
                    c = {}
                    c['c_concat'] = xc['c_concat']
                    c['c_crossattn'] = self.get_learned_conditioning(xc[cond_key])
                else:
                    c = self.get_learned_conditioning(xc.to(self.device))
else:
c = xc
if bs is not None:
if isinstance(c,dict):
for k in c.keys():
c[k] = c[k][:bs]
else:
c = c[:bs]
if self.use_positional_encodings:
pos_x, pos_y = self.compute_latent_shifts(batch)
ckey = __conditioning_keys__[self.model.conditioning_key]
c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
else:
c = None
xc = None
if self.use_positional_encodings:
pos_x, pos_y = self.compute_latent_shifts(batch)
c = {'pos_x': pos_x, 'pos_y': pos_y}
out = [z, c]
if return_first_stage_outputs:
xrec = self.decode_first_stage(z)
out.extend([x, xrec])
if return_original_cond:
out.append(xc)
return out
@torch.no_grad()
def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
if predict_cids:
if z.dim() == 4:
z = torch.argmax(z.exp(), dim=1).long()
z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
z = rearrange(z, 'b h w c -> b c h w').contiguous()
z = 1. / self.scale_factor * z
if hasattr(self, "split_input_params"):
if self.split_input_params["patch_distributed_vq"]:
ks = self.split_input_params["ks"] # eg. (128, 128)
stride = self.split_input_params["stride"] # eg. (64, 64)
uf = self.split_input_params["vqf"]
bs, nc, h, w = z.shape
if ks[0] > h or ks[1] > w:
ks = (min(ks[0], h), min(ks[1], w))
print("reducing Kernel")
if stride[0] > h or stride[1] > w:
stride = (min(stride[0], h), min(stride[1], w))
print("reducing stride")
fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
z = unfold(z) # (bn, nc * prod(**ks), L)
# 1. Reshape to img shape
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
# 2. apply model loop over last dim
if isinstance(self.first_stage_model, VQModelInterface):
output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
force_not_quantize=predict_cids or force_not_quantize)
for i in range(z.shape[-1])]
else:
output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
for i in range(z.shape[-1])]
                o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
o = o * weighting
# Reverse 1. reshape to img shape
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
# stitch crops together
decoded = fold(o)
decoded = decoded / normalization # norm is shape (1, 1, h, w)
return decoded
else:
if isinstance(self.first_stage_model, VQModelInterface):
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
else:
return self.first_stage_model.decode(z)
else:
if isinstance(self.first_stage_model, VQModelInterface):
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
else:
return self.first_stage_model.decode(z)
# same as above but without decorator
def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
if predict_cids:
if z.dim() == 4:
z = torch.argmax(z.exp(), dim=1).long()
z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
z = rearrange(z, 'b h w c -> b c h w').contiguous()
z = 1. / self.scale_factor * z
if hasattr(self, "split_input_params"):
if self.split_input_params["patch_distributed_vq"]:
ks = self.split_input_params["ks"] # eg. (128, 128)
stride = self.split_input_params["stride"] # eg. (64, 64)
uf = self.split_input_params["vqf"]
bs, nc, h, w = z.shape
if ks[0] > h or ks[1] > w:
ks = (min(ks[0], h), min(ks[1], w))
print("reducing Kernel")
if stride[0] > h or stride[1] > w:
stride = (min(stride[0], h), min(stride[1], w))
print("reducing stride")
fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
z = unfold(z) # (bn, nc * prod(**ks), L)
# 1. Reshape to img shape
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
# 2. apply model loop over last dim
if isinstance(self.first_stage_model, VQModelInterface):
output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
force_not_quantize=predict_cids or force_not_quantize)
for i in range(z.shape[-1])]
else:
output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
for i in range(z.shape[-1])]
                o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
o = o * weighting
# Reverse 1. reshape to img shape
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
# stitch crops together
decoded = fold(o)
decoded = decoded / normalization # norm is shape (1, 1, h, w)
return decoded
else:
if isinstance(self.first_stage_model, VQModelInterface):
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
else:
return self.first_stage_model.decode(z)
else:
if isinstance(self.first_stage_model, VQModelInterface):
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
else:
return self.first_stage_model.decode(z)
@torch.no_grad()
def encode_first_stage(self, x):
if hasattr(self, "split_input_params"):
if self.split_input_params["patch_distributed_vq"]:
ks = self.split_input_params["ks"] # eg. (128, 128)
stride = self.split_input_params["stride"] # eg. (64, 64)
df = self.split_input_params["vqf"]
self.split_input_params['original_image_size'] = x.shape[-2:]
bs, nc, h, w = x.shape
if ks[0] > h or ks[1] > w:
ks = (min(ks[0], h), min(ks[1], w))
print("reducing Kernel")
if stride[0] > h or stride[1] > w:
stride = (min(stride[0], h), min(stride[1], w))
print("reducing stride")
fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
z = unfold(x) # (bn, nc * prod(**ks), L)
# Reshape to img shape
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
for i in range(z.shape[-1])]
o = torch.stack(output_list, axis=-1)
o = o * weighting
# Reverse reshape to img shape
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
# stitch crops together
decoded = fold(o)
decoded = decoded / normalization
return decoded
else:
return self.first_stage_model.encode(x)
else:
return self.first_stage_model.encode(x)
def shared_step(self, batch, **kwargs):
x, c = self.get_input(batch, self.first_stage_key)# get latent and condition
loss = self(x, c)
return loss
def test_step(self,batch,batch_idx):
# TODO make self.test_repeat work
cond = {}
cond[self.cond_stage_key] = batch[self.cond_stage_key]
cond[self.cond_stage_key] = self.get_learned_conditioning(cond[self.cond_stage_key]) # c: string -> [B, T, Context_dim]
cond['c_crossattn'] = cond.pop(self.cond_stage_key)
masked_image = super().get_input(batch,'masked_image')
mask = super().get_input(batch,'mask')
masked_image,mask = masked_image.to(self.device),mask.to(self.device)
masked_image = self.get_first_stage_encoding(self.encode_first_stage(masked_image)).detach()
resized_mask = torch.nn.functional.interpolate(mask,size=masked_image.shape[-2:])
cond['c_concat'] = torch.cat((masked_image,resized_mask),dim = 1)
batch_size = len(batch[self.cond_stage_key])
# shape = [batch_size,self.channels,self.mel_dim,self.mel_length]
enc_emb = self.sample(cond,batch_size,timesteps=self.test_numsteps)
xrec = self.decode_first_stage(enc_emb)
reconstructions = (xrec + 1)/2 # to mel scale
test_ckpt_path = os.path.basename(self.trainer.tested_ckpt_path)
savedir = os.path.join(self.trainer.log_dir,f'output_imgs_{test_ckpt_path}','fake_class')
if not os.path.exists(savedir):
os.makedirs(savedir)
file_names = batch['f_name']
nfiles = len(file_names)
        reconstructions = reconstructions.cpu().numpy().squeeze(1)  # squeeze the channel dim
for k in range(reconstructions.shape[0]):
b,repeat = k % nfiles, k // nfiles
vname_num_split_index = file_names[b].rfind('_')# file_names[b]:video_name+'_'+num
v_n,num = file_names[b][:vname_num_split_index],file_names[b][vname_num_split_index+1:]
            save_img_path = os.path.join(savedir, f'{v_n}_sample_{num}_{repeat}.npy')  # the num-th caption, the repeat-th repetition
np.save(save_img_path,reconstructions[b])
return None
def forward(self, x, c, *args, **kwargs):
t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
if self.model.conditioning_key is not None:
assert c is not None
if self.cond_stage_trainable:
if isinstance(c,dict):
c[self.cond_stage_key] = self.get_learned_conditioning(c[self.cond_stage_key])
c['c_crossattn'] = c.pop(self.cond_stage_key)
else:
c = self.get_learned_conditioning(c) # c: string -> [B, T, Context_dim]
if self.shorten_cond_schedule: # TODO: drop this option
tc = self.cond_ids[t].to(self.device)
c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
return self.p_losses(x, c, t, *args, **kwargs)
def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset
def rescale_bbox(bbox):
x0 = torch.clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
y0 = torch.clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
w = min(bbox[2] / crop_coordinates[2], 1 - x0)
h = min(bbox[3] / crop_coordinates[3], 1 - y0)
return x0, y0, w, h
return [rescale_bbox(b) for b in bboxes]
def apply_model(self, x_noisy, t, cond, return_ids=False):
        # wrap conditioning values in lists so they can be concatenated inside the diffusion wrapper
        if isinstance(cond, dict):
            # hybrid case: cond is expected to be a dict (txt2inpaint)
            cond_tmp = {}  # use cond_tmp to avoid in-place edits
for k,v in cond.items():
if not isinstance(v, list):
cond_tmp[k] = [cond[k]]
else:
cond_tmp[k] = cond[k]
cond = cond_tmp
else:
if not isinstance(cond, list):
cond = [cond]
key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
cond = {key: cond}
if hasattr(self, "split_input_params"):
assert len(cond) == 1 # todo can only deal with one conditioning atm
assert not return_ids
ks = self.split_input_params["ks"] # eg. (128, 128)
stride = self.split_input_params["stride"] # eg. (64, 64)
h, w = x_noisy.shape[-2:]
fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)
z = unfold(x_noisy) # (bn, nc * prod(**ks), L)
# Reshape to img shape
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]
if self.cond_stage_key in ["image", "LR_image", "segmentation",
'bbox_img'] and self.model.conditioning_key: # todo check for completeness
c_key = next(iter(cond.keys())) # get key
c = next(iter(cond.values())) # get value
assert (len(c) == 1) # todo extend to list with more than one elem
c = c[0] # get element
c = unfold(c)
c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L )
cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
elif self.cond_stage_key == 'coordinates_bbox':
                assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'
# assuming padding of unfold is always 0 and its dilation is always 1
n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
full_img_h, full_img_w = self.split_input_params['original_image_size']
# as we are operating on latents, we need the factor from the original image size to the
# spatial latent size to properly rescale the crops for regenerating the bbox annotations
num_downs = self.first_stage_model.encoder.num_resolutions - 1
rescale_latent = 2 ** (num_downs)
                # get the top-left positions of the patches, conforming to the bbox tokenizer; therefore we
                # need to rescale the top-left patch coordinates to lie in (0, 1)
tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
for patch_nr in range(z.shape[-1])]
# patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
patch_limits = [(x_tl, y_tl,
rescale_latent * ks[0] / full_img_w,
rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
# patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]
# tokenize crop coordinates for the bounding boxes of the respective patches
patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
for bbox in patch_limits] # list of length l with tensors of shape (1, 2)
print(patch_limits_tknzd[0].shape)
# cut tknzd crop position from conditioning
assert isinstance(cond, dict), 'cond must be dict to be fed into model'
cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
print(cut_cond.shape)
adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
print(adapted_cond.shape)
adapted_cond = self.get_learned_conditioning(adapted_cond)
print(adapted_cond.shape)
adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
print(adapted_cond.shape)
cond_list = [{'c_crossattn': [e]} for e in adapted_cond]
else:
cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient
# apply model by loop over crops
output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
assert not isinstance(output_list[0],
                                   tuple)  # todo: can't deal with multiple model outputs; check this never happens
o = torch.stack(output_list, axis=-1)
o = o * weighting
# Reverse reshape to img shape
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
# stitch crops together
x_recon = fold(o) / normalization
else:
# x_noisy is tensor with shape [b,c,mel_len,T]
# if condition is caption ,cond['c_crossattn'] is a list, each item shape is [1, 77, 1280]
x_recon = self.model(x_noisy, t, **cond)# tensor with shape [b,c,mel_len,T]
if isinstance(x_recon, tuple) and not return_ids:
return x_recon[0]
else:
return x_recon
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
def _prior_bpd(self, x_start):
"""
Get the prior KL term for the variational lower-bound, measured in
bits-per-dim.
This term can't be optimized, as it only depends on the encoder.
:param x_start: the [N x C x ...] tensor of inputs.
:return: a batch of [N] KL values (in bits), one per batch element.
"""
batch_size = x_start.shape[0]
t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
return mean_flat(kl_prior) / np.log(2.0)
def p_losses(self, x_start, cond, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
model_output = self.apply_model(x_noisy, t, cond)
loss_dict = {}
prefix = 'train' if self.training else 'val'
if self.parameterization == "x0":
target = x_start
elif self.parameterization == "eps":
target = noise
else:
raise NotImplementedError()
loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
logvar_t = self.logvar[t].to(self.device)
loss = loss_simple / torch.exp(logvar_t) + logvar_t
# loss = loss_simple / torch.exp(self.logvar) + self.logvar
if self.learn_logvar:
loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
loss_dict.update({'logvar': self.logvar.data.mean()})
loss = self.l_simple_weight * loss.mean()
loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
loss += (self.original_elbo_weight * loss_vlb)
loss_dict.update({f'{prefix}/loss': loss})
return loss, loss_dict
def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
return_x0=False, score_corrector=None, corrector_kwargs=None):
t_in = t
model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
if score_corrector is not None:
assert self.parameterization == "eps"
model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
if return_codebook_ids:
model_out, logits = model_out
if self.parameterization == "eps":
x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
elif self.parameterization == "x0":
x_recon = model_out
else:
raise NotImplementedError()
if clip_denoised:
x_recon.clamp_(-1., 1.)
if quantize_denoised:
x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
if return_codebook_ids:
return model_mean, posterior_variance, posterior_log_variance, logits
elif return_x0:
return model_mean, posterior_variance, posterior_log_variance, x_recon
else:
return model_mean, posterior_variance, posterior_log_variance
@torch.no_grad()
def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
return_codebook_ids=False, quantize_denoised=False, return_x0=False,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
b, *_, device = *x.shape, x.device
outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
return_codebook_ids=return_codebook_ids,
quantize_denoised=quantize_denoised,
return_x0=return_x0,
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
if return_codebook_ids:
raise DeprecationWarning("Support dropped.")
model_mean, _, model_log_variance, logits = outputs
elif return_x0:
model_mean, _, model_log_variance, x0 = outputs
else:
model_mean, _, model_log_variance = outputs
noise = noise_like(x.shape, device, repeat_noise) * temperature
if noise_dropout > 0.:
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
# no noise when t == 0
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
if return_codebook_ids:
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
if return_x0:
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
else:
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
@torch.no_grad()
def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
log_every_t=None):
if not log_every_t:
log_every_t = self.log_every_t
timesteps = self.num_timesteps
if batch_size is not None:
            b = batch_size
shape = [batch_size] + list(shape)
else:
b = batch_size = shape[0]
if x_T is None:
img = torch.randn(shape, device=self.device)
else:
img = x_T
intermediates = []
if cond is not None:
if isinstance(cond, dict):
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
else:
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
if start_T is not None:
timesteps = min(timesteps, start_T)
iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
total=timesteps) if verbose else reversed(
range(0, timesteps))
if type(temperature) == float:
temperature = [temperature] * timesteps
for i in iterator:
ts = torch.full((b,), i, device=self.device, dtype=torch.long)
if self.shorten_cond_schedule:
assert self.model.conditioning_key != 'hybrid'
tc = self.cond_ids[ts].to(cond.device)
cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
img, x0_partial = self.p_sample(img, cond, ts,
clip_denoised=self.clip_denoised,
quantize_denoised=quantize_denoised, return_x0=True,
temperature=temperature[i], noise_dropout=noise_dropout,
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
if mask is not None:
assert x0 is not None
img_orig = self.q_sample(x0, ts)
img = img_orig * mask + (1. - mask) * img
if i % log_every_t == 0 or i == timesteps - 1:
intermediates.append(x0_partial)
if callback: callback(i)
if img_callback: img_callback(img, i)
return img, intermediates
@torch.no_grad()
def p_sample_loop(self, cond, shape, return_intermediates=False,
x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
mask=None, x0=None, img_callback=None, start_T=None,
log_every_t=None):
if not log_every_t:
log_every_t = self.log_every_t
device = self.betas.device
b = shape[0]
if x_T is None:
img = torch.randn(shape, device=device)
else:
img = x_T
intermediates = [img]
if timesteps is None:
timesteps = self.num_timesteps
if start_T is not None:
timesteps = min(timesteps, start_T)
iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
range(0, timesteps))
if mask is not None:
assert x0 is not None
assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
for i in iterator:
ts = torch.full((b,), i, device=device, dtype=torch.long)
if self.shorten_cond_schedule:
assert self.model.conditioning_key != 'hybrid'
tc = self.cond_ids[ts].to(cond.device)
cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
img = self.p_sample(img, cond, ts,
clip_denoised=self.clip_denoised,
quantize_denoised=quantize_denoised)
if mask is not None:
img_orig = self.q_sample(x0, ts)
img = img_orig * mask + (1. - mask) * img
if i % log_every_t == 0 or i == timesteps - 1:
intermediates.append(img)
if callback: callback(i)
if img_callback: img_callback(img, i)
if return_intermediates:
return img, intermediates
return img
@torch.no_grad()
def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
verbose=True, timesteps=None, quantize_denoised=False,
mask=None, x0=None, shape=None,**kwargs):
if shape is None:
shape = (batch_size, self.channels, self.mel_dim, self.mel_length)
if cond is not None:
if isinstance(cond, dict):
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
else:
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
return self.p_sample_loop(cond,
shape,
return_intermediates=return_intermediates, x_T=x_T,
verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
mask=mask, x0=x0)
@torch.no_grad()
def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs):
if ddim:
ddim_sampler = DDIMSampler(self)
shape = (self.channels, self.mel_dim, self.mel_length)
samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size,
shape,cond,verbose=False,**kwargs)
else:
samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
return_intermediates=True,**kwargs)
return samples, intermediates
@torch.no_grad()
def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
plot_diffusion_rows=True, **kwargs):
use_ddim = ddim_steps is not None
log = dict()
z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
return_first_stage_outputs=True,
force_c_encode=True,
return_original_cond=True,
bs=N)
N = min(x.shape[0], N)
n_row = min(x.shape[0], n_row)
log["inputs"] = x # 原始输入图像
log["reconstruction"] = xrec # 重建得到的图像
if self.model.conditioning_key is not None:
if hasattr(self.cond_stage_model, "decode"):# when cond_stage is first_stage. (bert embedder doesnot have decode)
xc = self.cond_stage_model.decode(c)# decoded masked image
log["conditioning"] = xc # 重建后的图像
elif self.cond_stage_key in ["caption"]:
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
log["conditioning"] = xc # 含有文本的图像
if self.model.conditioning_key == 'hybrid':
log["decoded_maskedimg"] = self.first_stage_model.decode(c['c_concat'][:,:self.first_stage_model.embed_dim])# c_concat is the concat result of masked_img latent and resized mask. get latent here to decode
elif self.cond_stage_key == 'class_label':
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
                log['conditioning'] = xc  # image rendering of the class label
elif isimage(xc):
log["conditioning"] = xc
if ismap(xc):
log["original_conditioning"] = self.to_rgb(xc)
        if plot_diffusion_rows:  # log images from each step of the forward diffusion
# get diffusion row
diffusion_row = list()
z_start = z[:n_row]
for t in range(self.num_timesteps):
if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
t = t.to(self.device).long()
noise = torch.randn_like(z_start)
z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
diffusion_row.append(self.decode_first_stage(z_noisy))
diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
log["diffusion_row"] = diffusion_grid
        if sample:
# get denoise row
with self.ema_scope("Plotting"):
samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
ddim_steps=ddim_steps,eta=ddim_eta)
# samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
x_samples = self.decode_first_stage(samples)
log["samples"] = x_samples
if plot_denoise_rows:
denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
log["denoise_row"] = denoise_grid
if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
self.first_stage_model, IdentityFirstStage):
# also display when quantizing x0 while sampling
with self.ema_scope("Plotting Quantized Denoised"):
samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
ddim_steps=ddim_steps,eta=ddim_eta,
quantize_denoised=True)
# samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
# quantize_denoised=True)
x_samples = self.decode_first_stage(samples.to(self.device))
log["samples_x0_quantized"] = x_samples
if inpaint:
# make a simple center square
b, h, w = z.shape[0], z.shape[2], z.shape[3]
mask = torch.ones(N, h, w).to(self.device)
# zeros will be filled in
mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
mask = mask[:, None, ...]# N,1,H,W
with self.ema_scope("Plotting Inpaint"):
samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta,
ddim_steps=ddim_steps, x0=z[:N], mask=mask)
x_samples = self.decode_first_stage(samples.to(self.device))
log["samples_inpainting"] = x_samples
log["mask"] = mask
# outpaint
with self.ema_scope("Plotting Outpaint"):
samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta,
ddim_steps=ddim_steps, x0=z[:N], mask=mask)
x_samples = self.decode_first_stage(samples.to(self.device))
log["samples_outpainting"] = x_samples
if plot_progressive_rows:
with self.ema_scope("Plotting Progressives"):
img, progressives = self.progressive_denoising(c,
shape=(self.channels, self.mel_dim, self.mel_length),
batch_size=N)
prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
log["progressive_row"] = prog_row
if return_keys:
if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
return log
else:
return {key: log[key] for key in return_keys}
return log
def configure_optimizers(self):
lr = self.learning_rate
params = list(self.model.parameters())
if self.cond_stage_trainable:
print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
params = params + list(self.cond_stage_model.parameters())
if self.learn_logvar:
print('Diffusion model optimizing logvar')
params.append(self.logvar)
opt = torch.optim.AdamW(params, lr=lr)
if self.use_scheduler:
assert 'target' in self.scheduler_config
scheduler = instantiate_from_config(self.scheduler_config)
print("Setting up LambdaLR scheduler...")
scheduler = [
{
'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
'interval': 'step',
'frequency': 1
}]
return [opt], scheduler
return opt
@torch.no_grad()
def to_rgb(self, x):
x = x.float()
if not hasattr(self, "colorize"):
self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
x = nn.functional.conv2d(x, weight=self.colorize)
x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
return x
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/models/diffusion/ddpm_audio_inpaint.py |
"""SAMPLING ONLY."""
import torch
import numpy as np
from tqdm import tqdm
from functools import partial
from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
class PLMSSampler(object):
def __init__(self, model, schedule="linear", **kwargs):
super().__init__()
self.model = model
self.ddpm_num_timesteps = model.num_timesteps
self.schedule = schedule
def register_buffer(self, name, attr):
if type(attr) == torch.Tensor:
if attr.device != torch.device("cuda"):
attr = attr.to(torch.device("cuda"))
setattr(self, name, attr)
def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True):
if ddim_eta != 0:
raise ValueError('ddim_eta must be 0 for PLMS')
self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
alphas_cumprod = self.model.alphas_cumprod
assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
self.register_buffer('betas', to_torch(self.model.betas))
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))
# ddim sampling parameters
ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),
ddim_timesteps=self.ddim_timesteps,
eta=ddim_eta,verbose=verbose)
self.register_buffer('ddim_sigmas', ddim_sigmas)
self.register_buffer('ddim_alphas', ddim_alphas)
self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)
self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))
sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(
(1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (
1 - self.alphas_cumprod / self.alphas_cumprod_prev))
self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)
@torch.no_grad()
def sample(self,
S,
batch_size,
shape,
conditioning=None,
callback=None,
normals_sequence=None,
img_callback=None,
quantize_x0=False,
eta=0.,
mask=None,
x0=None,
temperature=1.,
noise_dropout=0.,
score_corrector=None,
corrector_kwargs=None,
verbose=True,
x_T=None,
log_every_t=100,
unconditional_guidance_scale=1.,
unconditional_conditioning=None,
# this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...
**kwargs
):
if conditioning is not None:
if isinstance(conditioning, dict):
cbs = conditioning[list(conditioning.keys())[0]].shape[0]
if cbs != batch_size:
print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}")
else:
if conditioning.shape[0] != batch_size:
print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}")
self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)
# sampling
C, H, W = shape
size = (batch_size, C, H, W)
print(f'Data shape for PLMS sampling is {size}')
samples, intermediates = self.plms_sampling(conditioning, size,
callback=callback,
img_callback=img_callback,
quantize_denoised=quantize_x0,
mask=mask, x0=x0,
ddim_use_original_steps=False,
noise_dropout=noise_dropout,
temperature=temperature,
score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
x_T=x_T,
log_every_t=log_every_t,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
)
return samples, intermediates
@torch.no_grad()
def plms_sampling(self, cond, shape,
x_T=None, ddim_use_original_steps=False,
callback=None, timesteps=None, quantize_denoised=False,
mask=None, x0=None, img_callback=None, log_every_t=100,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
unconditional_guidance_scale=1., unconditional_conditioning=None,):
device = self.model.betas.device
b = shape[0]
if x_T is None:
img = torch.randn(shape, device=device)
else:
img = x_T
if timesteps is None:
timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps
elif timesteps is not None and not ddim_use_original_steps:
subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1
timesteps = self.ddim_timesteps[:subset_end]
intermediates = {'x_inter': [img], 'pred_x0': [img]}
time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps)
total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]
print(f"Running PLMS Sampling with {total_steps} timesteps")
iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)
old_eps = []
for i, step in enumerate(iterator):
index = total_steps - i - 1
ts = torch.full((b,), step, device=device, dtype=torch.long)
ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long)
if mask is not None:
assert x0 is not None
img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?
img = img_orig * mask + (1. - mask) * img
outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,
quantize_denoised=quantize_denoised, temperature=temperature,
noise_dropout=noise_dropout, score_corrector=score_corrector,
corrector_kwargs=corrector_kwargs,
unconditional_guidance_scale=unconditional_guidance_scale,
unconditional_conditioning=unconditional_conditioning,
old_eps=old_eps, t_next=ts_next)
img, pred_x0, e_t = outs
old_eps.append(e_t)
if len(old_eps) >= 4:
old_eps.pop(0)
if callback: callback(i)
if img_callback: img_callback(pred_x0, i)
if index % log_every_t == 0 or index == total_steps - 1:
intermediates['x_inter'].append(img)
intermediates['pred_x0'].append(pred_x0)
return img, intermediates
@torch.no_grad()
def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,
unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None):
b, *_, device = *x.shape, x.device
def get_model_output(x, t):
if unconditional_conditioning is None or unconditional_guidance_scale == 1.:
e_t = self.model.apply_model(x, t, c)
else:
x_in = torch.cat([x] * 2)
t_in = torch.cat([t] * 2)
c_in = torch.cat([unconditional_conditioning, c])
e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)
e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)
if score_corrector is not None:
assert self.model.parameterization == "eps"
e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)
return e_t
alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas
alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev
sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas
sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas
def get_x_prev_and_pred_x0(e_t, index):
# select parameters corresponding to the currently considered timestep
a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)
a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)
sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)
sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)
# current prediction for x_0
pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()
if quantize_denoised:
pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)
# direction pointing to x_t
dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t
noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
if noise_dropout > 0.:
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
return x_prev, pred_x0
e_t = get_model_output(x, t)
if len(old_eps) == 0:
# Pseudo Improved Euler (2nd order)
x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)
e_t_next = get_model_output(x_prev, t_next)
e_t_prime = (e_t + e_t_next) / 2
elif len(old_eps) == 1:
# 2nd order Pseudo Linear Multistep (Adams-Bashforth)
e_t_prime = (3 * e_t - old_eps[-1]) / 2
elif len(old_eps) == 2:
            # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
elif len(old_eps) >= 3:
            # 4th order Pseudo Linear Multistep (Adams-Bashforth)
e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
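        # Editor's note: the branches above apply the standard explicit Adams-Bashforth
        # multistep coefficients to the eps predictions, bootstrapped with a Heun-style
        # (improved Euler) step on the first iteration and lower-order multistep formulas
        # until enough previous eps values have been collected.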
x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)
return x_prev, pred_x0, e_t
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/models/diffusion/plms.py |
"""
wild mixture of
https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py
https://github.com/CompVis/taming-transformers
-- merci
"""
import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
__conditioning_keys__ = {'concat': 'c_concat',
'crossattn': 'c_crossattn',
'adm': 'y'}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
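# Editor's note on usage (see e.g. instantiate_first_stage in ddpm_audio_inpaint.py above):
#   model = instantiate_from_config(config)
#   self.first_stage_model = model.eval()
#   self.first_stage_model.train = disabled_train   # .train(True) becomes a no-op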
def uniform_on_device(r1, r2, shape, device):
return (r1 - r2) * torch.rand(*shape, device=device) + r2
class DDPM(pl.LightningModule):
# classic DDPM with Gaussian diffusion, in image space
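    # Editor's note: the forward (noising) process implemented by q_sample below is
    #   x_t = sqrt(alphas_cumprod[t]) * x_0 + sqrt(1 - alphas_cumprod[t]) * noise,
    # and p_sample / p_sample_loop invert it step by step from the model's eps or x0 prediction.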
def __init__(self,
unet_config,
timesteps=1000,
beta_schedule="linear",
loss_type="l2",
ckpt_path=None,
ignore_keys=[],
load_only_unet=False,
monitor="val/loss",
use_ema=True,
first_stage_key="image",
image_size=256,
channels=3,
log_every_t=100,
clip_denoised=True,
linear_start=1e-4,
linear_end=2e-2,
cosine_s=8e-3,
given_betas=None,
original_elbo_weight=0.,
v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
l_simple_weight=1.,
conditioning_key=None,
parameterization="eps", # all config files uses "eps"
scheduler_config=None,
use_positional_encodings=False,
learn_logvar=False,
logvar_init=0.,
):
super().__init__()
assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"'
self.parameterization = parameterization
print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode")
self.cond_stage_model = None
self.clip_denoised = clip_denoised
self.log_every_t = log_every_t
self.first_stage_key = first_stage_key
self.image_size = image_size # try conv?
self.channels = channels
self.use_positional_encodings = use_positional_encodings
self.model = DiffusionWrapper(unet_config, conditioning_key)
count_params(self.model, verbose=True)
self.use_ema = use_ema
if self.use_ema:
self.model_ema = LitEma(self.model)
print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.")
self.use_scheduler = scheduler_config is not None
if self.use_scheduler:
self.scheduler_config = scheduler_config
self.v_posterior = v_posterior
self.original_elbo_weight = original_elbo_weight
self.l_simple_weight = l_simple_weight
if monitor is not None:
self.monitor = monitor
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)
self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps,
linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s)
self.loss_type = loss_type
self.learn_logvar = learn_logvar
self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,))
if self.learn_logvar:
self.logvar = nn.Parameter(self.logvar, requires_grad=True)
def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000,
linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
if exists(given_betas):
betas = given_betas
else:
betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end,
cosine_s=cosine_s)
alphas = 1. - betas
alphas_cumprod = np.cumprod(alphas, axis=0)
alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
timesteps, = betas.shape
self.num_timesteps = int(timesteps)
self.linear_start = linear_start
self.linear_end = linear_end
assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
to_torch = partial(torch.tensor, dtype=torch.float32)
self.register_buffer('betas', to_torch(betas))
self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
# calculations for diffusion q(x_t | x_{t-1}) and others
self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod)))
self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod)))
self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1)))
# calculations for posterior q(x_{t-1} | x_t, x_0)
posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / (
1. - alphas_cumprod) + self.v_posterior * betas
# above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)
self.register_buffer('posterior_variance', to_torch(posterior_variance))
# below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain
self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20))))
self.register_buffer('posterior_mean_coef1', to_torch(
betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod)))
self.register_buffer('posterior_mean_coef2', to_torch(
(1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod)))
if self.parameterization == "eps":
lvlb_weights = self.betas ** 2 / (
2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod))
elif self.parameterization == "x0":
lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. * 1 - torch.Tensor(alphas_cumprod))
else:
raise NotImplementedError("mu not supported")
# TODO how to choose this term
lvlb_weights[0] = lvlb_weights[1]
self.register_buffer('lvlb_weights', lvlb_weights, persistent=False)
assert not torch.isnan(self.lvlb_weights).all()
@contextmanager
def ema_scope(self, context=None):
if self.use_ema:
self.model_ema.store(self.model.parameters())
self.model_ema.copy_to(self.model)
if context is not None:
print(f"{context}: Switched to EMA weights")
try:
yield None
finally:
if self.use_ema:
self.model_ema.restore(self.model.parameters())
if context is not None:
print(f"{context}: Restored training weights")
def init_from_ckpt(self, path, ignore_keys=list(), only_model=False):
sd = torch.load(path, map_location="cpu")
if "state_dict" in list(sd.keys()):
sd = sd["state_dict"]
keys = list(sd.keys())
for k in keys:
for ik in ignore_keys:
if k.startswith(ik):
print("Deleting key {} from state_dict.".format(k))
del sd[k]
missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict(
sd, strict=False)
print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys")
if len(missing) > 0:
print(f"Missing Keys: {missing}")
if len(unexpected) > 0:
print(f"Unexpected Keys: {unexpected}")
def q_mean_variance(self, x_start, t):
"""
Get the distribution q(x_t | x_0).
:param x_start: the [N x C x ...] tensor of noiseless inputs.
:param t: the number of diffusion steps (minus 1). Here, 0 means one step.
:return: A tuple (mean, variance, log_variance), all of x_start's shape.
"""
mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start)
variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape)
log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape)
return mean, variance, log_variance
def predict_start_from_noise(self, x_t, t, noise):
return (
extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t -
extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise
)
def q_posterior(self, x_start, x_t, t):
posterior_mean = (
extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start +
extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t
)
posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape)
posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape)
return posterior_mean, posterior_variance, posterior_log_variance_clipped
def p_mean_variance(self, x, t, clip_denoised: bool):
model_out = self.model(x, t)
if self.parameterization == "eps":
x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
elif self.parameterization == "x0":
x_recon = model_out
if clip_denoised:
x_recon.clamp_(-1., 1.)
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
return model_mean, posterior_variance, posterior_log_variance
@torch.no_grad()
def p_sample(self, x, t, clip_denoised=True, repeat_noise=False):
b, *_, device = *x.shape, x.device
model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised)
noise = noise_like(x.shape, device, repeat_noise)
# no noise when t == 0
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
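        # nonzero_mask zeroes the noise term when t == 0, so the final denoising step returns the posterior mean;
        # (0.5 * model_log_variance).exp() is the posterior standard deviation.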
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
@torch.no_grad()
def p_sample_loop(self, shape, return_intermediates=False):
device = self.betas.device
b = shape[0]
img = torch.randn(shape, device=device)
intermediates = [img]
for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps):
img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long),
clip_denoised=self.clip_denoised)
if i % self.log_every_t == 0 or i == self.num_timesteps - 1:
intermediates.append(img)
if return_intermediates:
return img, intermediates
return img
@torch.no_grad()
def sample(self, batch_size=16, return_intermediates=False):
image_size = self.image_size
channels = self.channels
return self.p_sample_loop((batch_size, channels, image_size, image_size),
return_intermediates=return_intermediates)
def q_sample(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise)
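    # q_sample draws from the closed-form forward process q(x_t | x_0) = N(sqrt(alpha_bar_t) * x_0, (1 - alpha_bar_t) * I),
    # i.e. it jumps from clean data to noise level t in a single step.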
def get_loss(self, pred, target, mean=True):
if self.loss_type == 'l1':
loss = (target - pred).abs()
if mean:
loss = loss.mean()
elif self.loss_type == 'l2':
if mean:
loss = torch.nn.functional.mse_loss(target, pred)
else:
loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
else:
raise NotImplementedError("unknown loss type '{loss_type}'")
return loss
def p_losses(self, x_start, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
model_out = self.model(x_noisy, t)
loss_dict = {}
if self.parameterization == "eps":
target = noise
elif self.parameterization == "x0":
target = x_start
else:
raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")
loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])
log_prefix = 'train' if self.training else 'val'
loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()})
loss_simple = loss.mean() * self.l_simple_weight
loss_vlb = (self.lvlb_weights[t] * loss).mean()
loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb})
loss = loss_simple + self.original_elbo_weight * loss_vlb
loss_dict.update({f'{log_prefix}/loss': loss})
return loss, loss_dict
def forward(self, x, *args, **kwargs):
# b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size
# assert h == img_size and w == img_size, f'height and width of image must be {img_size}'
t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
return self.p_losses(x, t, *args, **kwargs)
def get_input(self, batch, k):
x = batch[k]
if len(x.shape) == 3:
x = x[..., None]
x = rearrange(x, 'b h w c -> b c h w')
x = x.to(memory_format=torch.contiguous_format).float()
return x
def shared_step(self, batch):
x = self.get_input(batch, self.first_stage_key)
loss, loss_dict = self(x)
return loss, loss_dict
def training_step(self, batch, batch_idx):
loss, loss_dict = self.shared_step(batch)
self.log_dict(loss_dict, prog_bar=True,
logger=True, on_step=True, on_epoch=True)
self.log("global_step", self.global_step,
prog_bar=True, logger=True, on_step=True, on_epoch=False)
if self.use_scheduler:
lr = self.optimizers().param_groups[0]['lr']
self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False)
return loss
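    # Training flow (Lightning drives training_step): get_input pulls `first_stage_key` from the batch,
    # forward() samples one uniform timestep per example, and p_losses compares the model output against
    # the injected noise (eps-parameterization) or the clean input (x0-parameterization).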
@torch.no_grad()
def validation_step(self, batch, batch_idx):
_, loss_dict_no_ema = self.shared_step(batch)
with self.ema_scope():
_, loss_dict_ema = self.shared_step(batch)
loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema}
self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True)
def on_train_batch_end(self, *args, **kwargs):
if self.use_ema:
self.model_ema(self.model)
def _get_rows_from_list(self, samples):
n_imgs_per_row = len(samples)
denoise_grid = rearrange(samples, 'n b c h w -> b n c h w')
denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
return denoise_grid
@torch.no_grad()
def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs):
log = dict()
x = self.get_input(batch, self.first_stage_key)
N = min(x.shape[0], N)
n_row = min(x.shape[0], n_row)
x = x.to(self.device)[:N]
log["inputs"] = x
# get diffusion row
diffusion_row = list()
x_start = x[:n_row]
for t in range(self.num_timesteps):
if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
t = t.to(self.device).long()
noise = torch.randn_like(x_start)
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
diffusion_row.append(x_noisy)
log["diffusion_row"] = self._get_rows_from_list(diffusion_row)
if sample:
# get denoise row
with self.ema_scope("Plotting"):
samples, denoise_row = self.sample(batch_size=N, return_intermediates=True)
log["samples"] = samples
log["denoise_row"] = self._get_rows_from_list(denoise_row)
if return_keys:
if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
return log
else:
return {key: log[key] for key in return_keys}
return log
def configure_optimizers(self):
lr = self.learning_rate
params = list(self.model.parameters())
if self.learn_logvar:
params = params + [self.logvar]
opt = torch.optim.AdamW(params, lr=lr)
return opt
class LatentDiffusion(DDPM):
"""main class"""
def __init__(self,
first_stage_config,
cond_stage_config,
num_timesteps_cond=None,
cond_stage_key="image",# 'caption' for txt2image, 'masked_image' for inpainting
cond_stage_trainable=False,
concat_mode=True,# true for inpainting
cond_stage_forward=None,
conditioning_key=None, # 'crossattn' for txt2image, None for inpainting
scale_factor=1.0,
scale_by_std=False,
*args, **kwargs):
self.num_timesteps_cond = default(num_timesteps_cond, 1)
self.scale_by_std = scale_by_std
assert self.num_timesteps_cond <= kwargs['timesteps']
# for backwards compatibility after implementation of DiffusionWrapper
if conditioning_key is None:
conditioning_key = 'concat' if concat_mode else 'crossattn'
if cond_stage_config == '__is_unconditional__':
conditioning_key = None
ckpt_path = kwargs.pop("ckpt_path", None)
ignore_keys = kwargs.pop("ignore_keys", [])
super().__init__(conditioning_key=conditioning_key, *args, **kwargs)
self.concat_mode = concat_mode
self.cond_stage_trainable = cond_stage_trainable
self.cond_stage_key = cond_stage_key
try:
self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
        except Exception:
self.num_downs = 0
if not scale_by_std:
self.scale_factor = scale_factor
else:
self.register_buffer('scale_factor', torch.tensor(scale_factor))
self.instantiate_first_stage(first_stage_config)
self.instantiate_cond_stage(cond_stage_config)
self.cond_stage_forward = cond_stage_forward
self.clip_denoised = False
self.bbox_tokenizer = None
self.restarted_from_ckpt = False
if ckpt_path is not None:
self.init_from_ckpt(ckpt_path, ignore_keys)
self.restarted_from_ckpt = True
def make_cond_schedule(self, ):
self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)
ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()
self.cond_ids[:self.num_timesteps_cond] = ids
@rank_zero_only
@torch.no_grad()
def on_train_batch_start(self, batch, batch_idx, dataloader_idx):
# only for very first batch
if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:
assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'
# set rescale weight to 1./std of encodings
print("### USING STD-RESCALING ###")
x = super().get_input(batch, self.first_stage_key)
x = x.to(self.device)
encoder_posterior = self.encode_first_stage(x)
z = self.get_first_stage_encoding(encoder_posterior).detach()
del self.scale_factor
self.register_buffer('scale_factor', 1. / z.flatten().std())
print(f"setting self.scale_factor to {self.scale_factor}")
print("### USING STD-RESCALING ###")
def register_schedule(self,
given_betas=None, beta_schedule="linear", timesteps=1000,
linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)
self.shorten_cond_schedule = self.num_timesteps_cond > 1
if self.shorten_cond_schedule:
self.make_cond_schedule()
def instantiate_first_stage(self, config):
model = instantiate_from_config(config)
self.first_stage_model = model.eval()
self.first_stage_model.train = disabled_train
for param in self.first_stage_model.parameters():
param.requires_grad = False
def instantiate_cond_stage(self, config):
if not self.cond_stage_trainable:
if config == "__is_first_stage__":# inpaint
print("Using first stage also as cond stage.")
self.cond_stage_model = self.first_stage_model
elif config == "__is_unconditional__":
print(f"Training {self.__class__.__name__} as an unconditional model.")
self.cond_stage_model = None
# self.be_unconditional = True
else:
model = instantiate_from_config(config)
self.cond_stage_model = model.eval()
self.cond_stage_model.train = disabled_train
for param in self.cond_stage_model.parameters():
param.requires_grad = False
else:
assert config != '__is_first_stage__'
assert config != '__is_unconditional__'
model = instantiate_from_config(config)
self.cond_stage_model = model
def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):
denoise_row = []
for zd in tqdm(samples, desc=desc):
denoise_row.append(self.decode_first_stage(zd.to(self.device),
force_not_quantize=force_no_decoder_quantization))
n_imgs_per_row = len(denoise_row)
denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W
denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')
denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')
denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)
return denoise_grid
def get_first_stage_encoding(self, encoder_posterior):
if isinstance(encoder_posterior, DiagonalGaussianDistribution):
z = encoder_posterior.sample()
elif isinstance(encoder_posterior, torch.Tensor):
z = encoder_posterior
else:
raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented")
return self.scale_factor * z
def get_learned_conditioning(self, c):
if self.cond_stage_forward is None:
if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):
c = self.cond_stage_model.encode(c)
if isinstance(c, DiagonalGaussianDistribution):
c = c.mode()
else:
c = self.cond_stage_model(c)
else:
assert hasattr(self.cond_stage_model, self.cond_stage_forward)
c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)
return c
def meshgrid(self, h, w):
y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)
x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)
arr = torch.cat([y, x], dim=-1)
return arr
def delta_border(self, h, w):
"""
:param h: height
:param w: width
:return: normalized distance to image border,
         with min distance = 0 at border and max dist = 0.5 at image center
"""
lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)
arr = self.meshgrid(h, w) / lower_right_corner
dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]
dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]
edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]
return edge_dist
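    # Worked example (illustrative): delta_border(3, 3) returns
    #   [[0.0, 0.0, 0.0],
    #    [0.0, 0.5, 0.0],
    #    [0.0, 0.0, 0.0]]
    # i.e. zero on the border and 0.5 at the centre, which get_weighting below clips and rescales.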
def get_weighting(self, h, w, Ly, Lx, device):
weighting = self.delta_border(h, w)
weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"],
self.split_input_params["clip_max_weight"], )
weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)
if self.split_input_params["tie_braker"]:
L_weighting = self.delta_border(Ly, Lx)
L_weighting = torch.clip(L_weighting,
self.split_input_params["clip_min_tie_weight"],
self.split_input_params["clip_max_tie_weight"])
L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)
weighting = weighting * L_weighting
return weighting
def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code
"""
:param x: img of size (bs, c, h, w)
        :return: fold, unfold, normalization, weighting for extracting (and re-assembling) n = Ly * Lx crops of size (bs, c, kernel_size[0], kernel_size[1])
"""
bs, nc, h, w = x.shape
# number of crops in image
Ly = (h - kernel_size[0]) // stride[0] + 1
Lx = (w - kernel_size[1]) // stride[1] + 1
if uf == 1 and df == 1:
fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
unfold = torch.nn.Unfold(**fold_params)
fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)
weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)
normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap
weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))
elif uf > 1 and df == 1:
fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
unfold = torch.nn.Unfold(**fold_params)
fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),
dilation=1, padding=0,
stride=(stride[0] * uf, stride[1] * uf))
fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)
weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)
normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap
weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))
elif df > 1 and uf == 1:
fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)
unfold = torch.nn.Unfold(**fold_params)
fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),
dilation=1, padding=0,
stride=(stride[0] // df, stride[1] // df))
fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)
weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)
normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap
weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))
else:
raise NotImplementedError
return fold, unfold, normalization, weighting
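    # Sketch of how the returned operators are used in encode/decode_first_stage below:
    #   z = unfold(x) yields L = Ly * Lx overlapping crops, each crop is processed independently,
    #   and fold(o * weighting) / fold(weighting) blends them back together, with `weighting`
    #   (small near crop borders) suppressing visible seams between neighbouring patches.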
@torch.no_grad()
def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,
cond_key=None, return_original_cond=False, bs=None):
x = super().get_input(batch, k)
if bs is not None:
x = x[:bs]
x = x.to(self.device)
encoder_posterior = self.encode_first_stage(x)
z = self.get_first_stage_encoding(encoder_posterior).detach()
if self.model.conditioning_key is not None:
if cond_key is None:
cond_key = self.cond_stage_key
            if cond_key != self.first_stage_key:  # cond_key is not image; for inpainting it's masked_image
if cond_key in ['caption', 'coordinates_bbox']:
xc = batch[cond_key]
elif cond_key == 'class_label':
xc = batch
else:
xc = super().get_input(batch, cond_key).to(self.device)
else:
xc = x
if not self.cond_stage_trainable or force_c_encode:
if isinstance(xc, dict) or isinstance(xc, list):
# import pudb; pudb.set_trace()
c = self.get_learned_conditioning(xc)
else:
c = self.get_learned_conditioning(xc.to(self.device))
else:
c = xc
if bs is not None:
c = c[:bs]
if self.use_positional_encodings:
pos_x, pos_y = self.compute_latent_shifts(batch)
ckey = __conditioning_keys__[self.model.conditioning_key]
c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}
else:
c = None
xc = None
if self.use_positional_encodings:
pos_x, pos_y = self.compute_latent_shifts(batch)
c = {'pos_x': pos_x, 'pos_y': pos_y}
out = [z, c]
if return_first_stage_outputs:
xrec = self.decode_first_stage(z)
out.extend([x, xrec])
if return_original_cond:
out.append(xc)
return out
@torch.no_grad()
def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
if predict_cids:
if z.dim() == 4:
z = torch.argmax(z.exp(), dim=1).long()
z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
z = rearrange(z, 'b h w c -> b c h w').contiguous()
z = 1. / self.scale_factor * z
if hasattr(self, "split_input_params"):
if self.split_input_params["patch_distributed_vq"]:
ks = self.split_input_params["ks"] # eg. (128, 128)
stride = self.split_input_params["stride"] # eg. (64, 64)
uf = self.split_input_params["vqf"]
bs, nc, h, w = z.shape
if ks[0] > h or ks[1] > w:
ks = (min(ks[0], h), min(ks[1], w))
print("reducing Kernel")
if stride[0] > h or stride[1] > w:
stride = (min(stride[0], h), min(stride[1], w))
print("reducing stride")
fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
z = unfold(z) # (bn, nc * prod(**ks), L)
# 1. Reshape to img shape
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
# 2. apply model loop over last dim
if isinstance(self.first_stage_model, VQModelInterface):
output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
force_not_quantize=predict_cids or force_not_quantize)
for i in range(z.shape[-1])]
else:
output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
for i in range(z.shape[-1])]
                o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
o = o * weighting
# Reverse 1. reshape to img shape
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
# stitch crops together
decoded = fold(o)
decoded = decoded / normalization # norm is shape (1, 1, h, w)
return decoded
else:
if isinstance(self.first_stage_model, VQModelInterface):
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
else:
return self.first_stage_model.decode(z)
else:
if isinstance(self.first_stage_model, VQModelInterface):
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
else:
return self.first_stage_model.decode(z)
# same as above but without decorator
def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):
if predict_cids:
if z.dim() == 4:
z = torch.argmax(z.exp(), dim=1).long()
z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)
z = rearrange(z, 'b h w c -> b c h w').contiguous()
z = 1. / self.scale_factor * z
if hasattr(self, "split_input_params"):
if self.split_input_params["patch_distributed_vq"]:
ks = self.split_input_params["ks"] # eg. (128, 128)
stride = self.split_input_params["stride"] # eg. (64, 64)
uf = self.split_input_params["vqf"]
bs, nc, h, w = z.shape
if ks[0] > h or ks[1] > w:
ks = (min(ks[0], h), min(ks[1], w))
print("reducing Kernel")
if stride[0] > h or stride[1] > w:
stride = (min(stride[0], h), min(stride[1], w))
print("reducing stride")
fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf)
z = unfold(z) # (bn, nc * prod(**ks), L)
# 1. Reshape to img shape
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
# 2. apply model loop over last dim
if isinstance(self.first_stage_model, VQModelInterface):
output_list = [self.first_stage_model.decode(z[:, :, :, :, i],
force_not_quantize=predict_cids or force_not_quantize)
for i in range(z.shape[-1])]
else:
output_list = [self.first_stage_model.decode(z[:, :, :, :, i])
for i in range(z.shape[-1])]
                o = torch.stack(output_list, axis=-1)  # (bn, nc, ks[0], ks[1], L)
o = o * weighting
# Reverse 1. reshape to img shape
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
# stitch crops together
decoded = fold(o)
decoded = decoded / normalization # norm is shape (1, 1, h, w)
return decoded
else:
if isinstance(self.first_stage_model, VQModelInterface):
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
else:
return self.first_stage_model.decode(z)
else:
if isinstance(self.first_stage_model, VQModelInterface):
return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize)
else:
return self.first_stage_model.decode(z)
@torch.no_grad()
def encode_first_stage(self, x):
if hasattr(self, "split_input_params"):
if self.split_input_params["patch_distributed_vq"]:
ks = self.split_input_params["ks"] # eg. (128, 128)
stride = self.split_input_params["stride"] # eg. (64, 64)
df = self.split_input_params["vqf"]
self.split_input_params['original_image_size'] = x.shape[-2:]
bs, nc, h, w = x.shape
if ks[0] > h or ks[1] > w:
ks = (min(ks[0], h), min(ks[1], w))
print("reducing Kernel")
if stride[0] > h or stride[1] > w:
stride = (min(stride[0], h), min(stride[1], w))
print("reducing stride")
fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df)
z = unfold(x) # (bn, nc * prod(**ks), L)
# Reshape to img shape
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
output_list = [self.first_stage_model.encode(z[:, :, :, :, i])
for i in range(z.shape[-1])]
o = torch.stack(output_list, axis=-1)
o = o * weighting
# Reverse reshape to img shape
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
# stitch crops together
decoded = fold(o)
decoded = decoded / normalization
return decoded
else:
return self.first_stage_model.encode(x)
else:
return self.first_stage_model.encode(x)
def shared_step(self, batch, **kwargs):
x, c = self.get_input(batch, self.first_stage_key)
loss = self(x, c)
return loss
def forward(self, x, c, *args, **kwargs):
t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
if self.model.conditioning_key is not None:
assert c is not None
if self.cond_stage_trainable:# true when use text
c = self.get_learned_conditioning(c) # c: string list -> [B, T, Context_dim]
if self.shorten_cond_schedule: # TODO: drop this option
tc = self.cond_ids[t].to(self.device)
c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
return self.p_losses(x, c, t, *args, **kwargs)
def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset
def rescale_bbox(bbox):
x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
w = min(bbox[2] / crop_coordinates[2], 1 - x0)
h = min(bbox[3] / crop_coordinates[3], 1 - y0)
return x0, y0, w, h
return [rescale_bbox(b) for b in bboxes]
def apply_model(self, x_noisy, t, cond, return_ids=False):
if isinstance(cond, dict):
            # hybrid case, cond is expected to be a dict
pass
else:
if not isinstance(cond, list):
cond = [cond]
key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'
cond = {key: cond}
if hasattr(self, "split_input_params"):
assert len(cond) == 1 # todo can only deal with one conditioning atm
assert not return_ids
ks = self.split_input_params["ks"] # eg. (128, 128)
stride = self.split_input_params["stride"] # eg. (64, 64)
h, w = x_noisy.shape[-2:]
fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride)
z = unfold(x_noisy) # (bn, nc * prod(**ks), L)
# Reshape to img shape
z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L )
z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]
if self.cond_stage_key in ["image", "LR_image", "segmentation",
'bbox_img'] and self.model.conditioning_key: # todo check for completeness
c_key = next(iter(cond.keys())) # get key
c = next(iter(cond.values())) # get value
assert (len(c) == 1) # todo extend to list with more than one elem
c = c[0] # get element
c = unfold(c)
c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L )
cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]
elif self.cond_stage_key == 'coordinates_bbox':
                assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'
# assuming padding of unfold is always 0 and its dilation is always 1
n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
full_img_h, full_img_w = self.split_input_params['original_image_size']
# as we are operating on latents, we need the factor from the original image size to the
# spatial latent size to properly rescale the crops for regenerating the bbox annotations
num_downs = self.first_stage_model.encoder.num_resolutions - 1
rescale_latent = 2 ** (num_downs)
                # get top-left positions of patches in the format expected by the bbox tokenizer; therefore we
                # need to rescale the tl patch coordinates to be in between (0,1)
tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
for patch_nr in range(z.shape[-1])]
# patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)
patch_limits = [(x_tl, y_tl,
rescale_latent * ks[0] / full_img_w,
rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates]
# patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]
# tokenize crop coordinates for the bounding boxes of the respective patches
patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device)
for bbox in patch_limits] # list of length l with tensors of shape (1, 2)
print(patch_limits_tknzd[0].shape)
# cut tknzd crop position from conditioning
assert isinstance(cond, dict), 'cond must be dict to be fed into model'
cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device)
print(cut_cond.shape)
adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd])
adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n')
print(adapted_cond.shape)
adapted_cond = self.get_learned_conditioning(adapted_cond)
print(adapted_cond.shape)
adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1])
print(adapted_cond.shape)
cond_list = [{'c_crossattn': [e]} for e in adapted_cond]
else:
cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient
# apply model by loop over crops
output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])]
            assert not isinstance(output_list[0],
                                  tuple)  # todo can't deal with multiple model outputs; check this never happens
o = torch.stack(output_list, axis=-1)
o = o * weighting
# Reverse reshape to img shape
o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)
# stitch crops together
x_recon = fold(o) / normalization
else:
x_recon = self.model(x_noisy, t, **cond)
if isinstance(x_recon, tuple) and not return_ids:
return x_recon[0]
else:
return x_recon
def _predict_eps_from_xstart(self, x_t, t, pred_xstart):
return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \
extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
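    # Algebraic inverse of predict_start_from_noise: eps = (sqrt(1 / alpha_bar_t) * x_t - x_0) / sqrt(1 / alpha_bar_t - 1).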
def _prior_bpd(self, x_start):
"""
Get the prior KL term for the variational lower-bound, measured in
bits-per-dim.
This term can't be optimized, as it only depends on the encoder.
:param x_start: the [N x C x ...] tensor of inputs.
:return: a batch of [N] KL values (in bits), one per batch element.
"""
batch_size = x_start.shape[0]
t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
return mean_flat(kl_prior) / np.log(2.0)
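    # kl_prior measures how far q(x_T | x_0) is from the standard-normal prior; dividing by log(2) converts nats to bits.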
def p_losses(self, x_start, cond, t, noise=None):
noise = default(noise, lambda: torch.randn_like(x_start))
x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
model_output = self.apply_model(x_noisy, t, cond)
loss_dict = {}
prefix = 'train' if self.training else 'val'
if self.parameterization == "x0":
target = x_start
elif self.parameterization == "eps":
target = noise
else:
raise NotImplementedError()
loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])
loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})
logvar_t = self.logvar[t].to(self.device)
loss = loss_simple / torch.exp(logvar_t) + logvar_t
# loss = loss_simple / torch.exp(self.logvar) + self.logvar
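        # Heteroscedastic weighting with an (optionally learned) per-timestep log-variance: minimising
        # loss_simple / exp(logvar) + logvar with respect to logvar has its optimum at logvar = log(loss_simple).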
if self.learn_logvar:
loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})
loss_dict.update({'logvar': self.logvar.data.mean()})
loss = self.l_simple_weight * loss.mean()
loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))
loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()
loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})
loss += (self.original_elbo_weight * loss_vlb)
loss_dict.update({f'{prefix}/loss': loss})
return loss, loss_dict
def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,
return_x0=False, score_corrector=None, corrector_kwargs=None):
t_in = t
model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)
if score_corrector is not None:
assert self.parameterization == "eps"
model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)
if return_codebook_ids:
model_out, logits = model_out
if self.parameterization == "eps":
x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)
elif self.parameterization == "x0":
x_recon = model_out
else:
raise NotImplementedError()
if clip_denoised:
x_recon.clamp_(-1., 1.)
if quantize_denoised:
x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)
if return_codebook_ids:
return model_mean, posterior_variance, posterior_log_variance, logits
elif return_x0:
return model_mean, posterior_variance, posterior_log_variance, x_recon
else:
return model_mean, posterior_variance, posterior_log_variance
@torch.no_grad()
def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,
return_codebook_ids=False, quantize_denoised=False, return_x0=False,
temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):
b, *_, device = *x.shape, x.device
outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,
return_codebook_ids=return_codebook_ids,
quantize_denoised=quantize_denoised,
return_x0=return_x0,
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
if return_codebook_ids:
raise DeprecationWarning("Support dropped.")
model_mean, _, model_log_variance, logits = outputs
elif return_x0:
model_mean, _, model_log_variance, x0 = outputs
else:
model_mean, _, model_log_variance = outputs
noise = noise_like(x.shape, device, repeat_noise) * temperature
if noise_dropout > 0.:
noise = torch.nn.functional.dropout(noise, p=noise_dropout)
# no noise when t == 0
nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))
if return_codebook_ids:
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)
if return_x0:
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0
else:
return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise
@torch.no_grad()
def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,
img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,
score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,
log_every_t=None):
if not log_every_t:
log_every_t = self.log_every_t
timesteps = self.num_timesteps
if batch_size is not None:
b = batch_size if batch_size is not None else shape[0]
shape = [batch_size] + list(shape)
else:
b = batch_size = shape[0]
if x_T is None:
img = torch.randn(shape, device=self.device)
else:
img = x_T
intermediates = []
if cond is not None:
if isinstance(cond, dict):
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
else:
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
if start_T is not None:
timesteps = min(timesteps, start_T)
iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',
total=timesteps) if verbose else reversed(
range(0, timesteps))
if type(temperature) == float:
temperature = [temperature] * timesteps
for i in iterator:
ts = torch.full((b,), i, device=self.device, dtype=torch.long)
if self.shorten_cond_schedule:
assert self.model.conditioning_key != 'hybrid'
tc = self.cond_ids[ts].to(cond.device)
cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
img, x0_partial = self.p_sample(img, cond, ts,
clip_denoised=self.clip_denoised,
quantize_denoised=quantize_denoised, return_x0=True,
temperature=temperature[i], noise_dropout=noise_dropout,
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)
if mask is not None:
assert x0 is not None
img_orig = self.q_sample(x0, ts)
img = img_orig * mask + (1. - mask) * img
if i % log_every_t == 0 or i == timesteps - 1:
intermediates.append(x0_partial)
if callback: callback(i)
if img_callback: img_callback(img, i)
return img, intermediates
@torch.no_grad()
def p_sample_loop(self, cond, shape, return_intermediates=False,
x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,
mask=None, x0=None, img_callback=None, start_T=None,
log_every_t=None):
if not log_every_t:
log_every_t = self.log_every_t
device = self.betas.device
b = shape[0]
if x_T is None:
img = torch.randn(shape, device=device)
else:
img = x_T
intermediates = [img]
if timesteps is None:
timesteps = self.num_timesteps
if start_T is not None:
timesteps = min(timesteps, start_T)
iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(
range(0, timesteps))
if mask is not None:
assert x0 is not None
assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match
for i in iterator:
ts = torch.full((b,), i, device=device, dtype=torch.long)
if self.shorten_cond_schedule:
assert self.model.conditioning_key != 'hybrid'
tc = self.cond_ids[ts].to(cond.device)
cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))
img = self.p_sample(img, cond, ts,
clip_denoised=self.clip_denoised,
quantize_denoised=quantize_denoised)
if mask is not None:
img_orig = self.q_sample(x0, ts)
img = img_orig * mask + (1. - mask) * img
if i % log_every_t == 0 or i == timesteps - 1:
intermediates.append(img)
if callback: callback(i)
if img_callback: img_callback(img, i)
if return_intermediates:
return img, intermediates
return img
@torch.no_grad()
def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,
verbose=True, timesteps=None, quantize_denoised=False,
mask=None, x0=None, shape=None,**kwargs):
if shape is None:
shape = (batch_size, self.channels, self.image_size, self.image_size)
if cond is not None:
if isinstance(cond, dict):
cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else
list(map(lambda x: x[:batch_size], cond[key])) for key in cond}
else:
cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]
return self.p_sample_loop(cond,
shape,
return_intermediates=return_intermediates, x_T=x_T,
verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,
mask=mask, x0=x0)
@torch.no_grad()
def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs):
if ddim:
ddim_sampler = DDIMSampler(self)
shape = (self.channels, self.image_size, self.image_size)
samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size,
shape,cond,verbose=False,**kwargs)
else:
samples, intermediates = self.sample(cond=cond, batch_size=batch_size,
return_intermediates=True,**kwargs)
return samples, intermediates
@torch.no_grad()
def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None,
quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,
plot_diffusion_rows=True, **kwargs):
use_ddim = ddim_steps is not None
log = dict()
z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,
return_first_stage_outputs=True,
force_c_encode=True,
return_original_cond=True,
bs=N)
N = min(x.shape[0], N)
n_row = min(x.shape[0], n_row)
log["inputs"] = x
log["reconstruction"] = xrec
if self.model.conditioning_key is not None:
if hasattr(self.cond_stage_model, "decode"):
xc = self.cond_stage_model.decode(c)
log["conditioning"] = xc
elif self.cond_stage_key in ["caption"]:
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
log["conditioning"] = xc
elif self.cond_stage_key == 'class_label':
xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"])
log['conditioning'] = xc
elif isimage(xc):
log["conditioning"] = xc
if ismap(xc):
log["original_conditioning"] = self.to_rgb(xc)
if plot_diffusion_rows:
# get diffusion row
diffusion_row = list()
z_start = z[:n_row]
for t in range(self.num_timesteps):
if t % self.log_every_t == 0 or t == self.num_timesteps - 1:
t = repeat(torch.tensor([t]), '1 -> b', b=n_row)
t = t.to(self.device).long()
noise = torch.randn_like(z_start)
z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)
diffusion_row.append(self.decode_first_stage(z_noisy))
diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W
diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')
diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')
diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])
log["diffusion_row"] = diffusion_grid
if sample:
# get denoise row
with self.ema_scope("Plotting"):
samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
ddim_steps=ddim_steps,eta=ddim_eta)
# samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)
x_samples = self.decode_first_stage(samples)
log["samples"] = x_samples
if plot_denoise_rows:
denoise_grid = self._get_denoise_row_from_list(z_denoise_row)
log["denoise_row"] = denoise_grid
if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(
self.first_stage_model, IdentityFirstStage):
# also display when quantizing x0 while sampling
with self.ema_scope("Plotting Quantized Denoised"):
samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim,
ddim_steps=ddim_steps,eta=ddim_eta,
quantize_denoised=True)
# samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,
# quantize_denoised=True)
x_samples = self.decode_first_stage(samples.to(self.device))
log["samples_x0_quantized"] = x_samples
if inpaint:
# make a simple center square
b, h, w = z.shape[0], z.shape[2], z.shape[3]
mask = torch.ones(N, h, w).to(self.device)
# zeros will be filled in
mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.
mask = mask[:, None, ...]
with self.ema_scope("Plotting Inpaint"):
samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta,
ddim_steps=ddim_steps, x0=z[:N], mask=mask)
x_samples = self.decode_first_stage(samples.to(self.device))
log["samples_inpainting"] = x_samples
log["mask"] = mask
# outpaint
with self.ema_scope("Plotting Outpaint"):
samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta,
ddim_steps=ddim_steps, x0=z[:N], mask=mask)
x_samples = self.decode_first_stage(samples.to(self.device))
log["samples_outpainting"] = x_samples
if plot_progressive_rows:
with self.ema_scope("Plotting Progressives"):
img, progressives = self.progressive_denoising(c,
shape=(self.channels, self.image_size, self.image_size),
batch_size=N)
prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation")
log["progressive_row"] = prog_row
if return_keys:
if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:
return log
else:
return {key: log[key] for key in return_keys}
return log
def configure_optimizers(self):
lr = self.learning_rate
params = list(self.model.parameters())
if self.cond_stage_trainable:
print(f"{self.__class__.__name__}: Also optimizing conditioner params!")
params = params + list(self.cond_stage_model.parameters())
if self.learn_logvar:
print('Diffusion model optimizing logvar')
params.append(self.logvar)
opt = torch.optim.AdamW(params, lr=lr)
if self.use_scheduler:
assert 'target' in self.scheduler_config
scheduler = instantiate_from_config(self.scheduler_config)
print("Setting up LambdaLR scheduler...")
scheduler = [
{
'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),
'interval': 'step',
'frequency': 1
}]
return [opt], scheduler
return opt
@torch.no_grad()
def to_rgb(self, x):
x = x.float()
if not hasattr(self, "colorize"):
self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)
x = nn.functional.conv2d(x, weight=self.colorize)
x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.
return x
class DiffusionWrapper(pl.LightningModule):
def __init__(self, diff_model_config, conditioning_key):
super().__init__()
self.diffusion_model = instantiate_from_config(diff_model_config)
self.conditioning_key = conditioning_key # 'crossattn' for txt2image, concat for inpainting
assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm']
def forward(self, x, t, c_concat: list = None, c_crossattn: list = None):
"""param x: tensor with shape:[B,C,mel_len,T]"""
if self.conditioning_key is None:
out = self.diffusion_model(x, t)
elif self.conditioning_key == 'concat':
xc = torch.cat([x] + c_concat, dim=1)# channel dim,x shape (b,3,64,64) c_concat shape(b,4,64,64)
out = self.diffusion_model(xc, t)
elif self.conditioning_key == 'crossattn':
cc = torch.cat(c_crossattn, 1)# [b,seq_len,dim]
out = self.diffusion_model(x, t, context=cc)
elif self.conditioning_key == 'hybrid':# not implemented in the LatentDiffusion
xc = torch.cat([x] + c_concat, dim=1)
cc = torch.cat(c_crossattn, 1)
out = self.diffusion_model(xc, t, context=cc)
elif self.conditioning_key == 'adm':
cc = c_crossattn[0]
out = self.diffusion_model(x, t, y=cc)
else:
raise NotImplementedError()
return out
class Layout2ImgDiffusion(LatentDiffusion):
# TODO: move all layout-specific hacks to this class
def __init__(self, cond_stage_key, *args, **kwargs):
assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"'
super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs)
def log_images(self, batch, N=8, *args, **kwargs):
logs = super().log_images(batch=batch, N=N, *args, **kwargs)
key = 'train' if self.training else 'validation'
dset = self.trainer.datamodule.datasets[key]
mapper = dset.conditional_builders[self.cond_stage_key]
bbox_imgs = []
map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno))
for tknzd_bbox in batch[self.cond_stage_key][:N]:
bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256))
bbox_imgs.append(bboximg)
cond_img = torch.stack(bbox_imgs, dim=0)
logs['bbox_image'] = cond_img
return logs
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/models/diffusion/ddpm.py |
from inspect import isfunction
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from ldm.modules.diffusionmodules.util import checkpoint
def exists(val):
return val is not None
def uniq(arr):
    return {el: True for el in arr}.keys()
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def max_neg_value(t):
return -torch.finfo(t.dtype).max
def init_(tensor):
dim = tensor.shape[-1]
std = 1 / math.sqrt(dim)
tensor.uniform_(-std, std)
return tensor
# feedforward
class GEGLU(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
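    # GEGLU (from "GLU Variants Improve Transformer"): the projection produces 2 * dim_out features that are split
    # into a value and a gate, and the gate is passed through GELU before the element-wise product.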
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim=-1)
return x * F.gelu(gate)
class FeedForward(nn.Module):
def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
project_in = nn.Sequential(
nn.Linear(dim, inner_dim),
nn.GELU()
) if not glu else GEGLU(dim, inner_dim)
self.net = nn.Sequential(
project_in,
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out)
)
def forward(self, x):
return self.net(x)
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
def Normalize(in_channels):
return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True)
class LinearAttention(nn.Module):
def __init__(self, dim, heads=4, dim_head=32):
super().__init__()
self.heads = heads
hidden_dim = dim_head * heads
self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Conv2d(hidden_dim, dim, 1)
def forward(self, x):
b, c, h, w = x.shape
qkv = self.to_qkv(x)
q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3)
k = k.softmax(dim=-1)
context = torch.einsum('bhdn,bhen->bhde', k, v)
out = torch.einsum('bhde,bhdn->bhen', context, q)
out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w)
return self.to_out(out)
class SpatialSelfAttention(nn.Module):
def __init__(self, in_channels):
super().__init__()
self.in_channels = in_channels
self.norm = Normalize(in_channels)
self.q = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.k = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.v = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
self.proj_out = torch.nn.Conv2d(in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0)
def forward(self, x):
h_ = x
h_ = self.norm(h_)
q = self.q(h_)
k = self.k(h_)
v = self.v(h_)
# compute attention
b,c,h,w = q.shape
q = rearrange(q, 'b c h w -> b (h w) c')
k = rearrange(k, 'b c h w -> b c (h w)')
w_ = torch.einsum('bij,bjk->bik', q, k)
w_ = w_ * (int(c)**(-0.5))
w_ = torch.nn.functional.softmax(w_, dim=2)
# attend to values
v = rearrange(v, 'b c h w -> b c (h w)')
w_ = rearrange(w_, 'b i j -> b j i')
h_ = torch.einsum('bij,bjk->bik', v, w_)
h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
h_ = self.proj_out(h_)
return x+h_
class CrossAttention(nn.Module):
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.):  # if context_dim is set, this is cross-attention rather than self-attention
super().__init__()
inner_dim = dim_head * heads # inner_dim == SpatialTransformer.model_channels
context_dim = default(context_dim, query_dim)
self.scale = dim_head ** -0.5
self.heads = heads
self.to_q = nn.Linear(query_dim, inner_dim, bias=False)
self.to_k = nn.Linear(context_dim, inner_dim, bias=False)
self.to_v = nn.Linear(context_dim, inner_dim, bias=False)
self.to_out = nn.Sequential(
nn.Linear(inner_dim, query_dim),
nn.Dropout(dropout)
)
def forward(self, x, context=None, mask=None):# x:(b,h*w,c), context:(b,seq_len,context_dim)
h = self.heads
q = self.to_q(x)# q:(b,h*w,inner_dim)
context = default(context, x)
k = self.to_k(context)# (b,seq_len,inner_dim)
v = self.to_v(context)# (b,seq_len,inner_dim)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))# n is seq_len for k and v
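        # Illustrative shapes: with x (2, 4096, 320), context (2, 77, 768), heads=8 and dim_head=40 (inner_dim=320),
        # q/k/v become (16, 4096, 40) / (16, 77, 40) / (16, 77, 40), so sim below is (16, 4096, 77).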
sim = einsum('b i d, b j d -> b i j', q, k) * self.scale # (b*head,h*w,seq_len)
if exists(mask):# false
mask = rearrange(mask, 'b ... -> b (...)')
max_neg_value = -torch.finfo(sim.dtype).max
mask = repeat(mask, 'b j -> (b h) () j', h=h)
sim.masked_fill_(~mask, max_neg_value)
# attention, what we cannot get enough of
attn = sim.softmax(dim=-1)
out = einsum('b i j, b j d -> b i d', attn, v)# (b*head,h*w,inner_dim/head)
out = rearrange(out, '(b h) n d -> b n (h d)', h=h)# (b,h*w,inner_dim)
return self.to_out(out)
class BasicTransformerBlock(nn.Module):
def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True):
super().__init__()
self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout) # is a self-attention
self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff)
self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim,
heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none
self.norm1 = nn.LayerNorm(dim)
self.norm2 = nn.LayerNorm(dim)
self.norm3 = nn.LayerNorm(dim)
self.checkpoint = checkpoint
def forward(self, x, context=None):
return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint)
def _forward(self, x, context=None):
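        # Pre-norm residual layout: self-attention, then cross-attention over `context` (e.g. text token embeddings),
        # then a gated feed-forward, each applied as x = f(norm(x)) + x.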
x = self.attn1(self.norm1(x)) + x
x = self.attn2(self.norm2(x), context=context) + x
x = self.ff(self.norm3(x)) + x
return x
class SpatialTransformer(nn.Module):
"""
Transformer block for image-like data.
First, project the input (aka embedding)
and reshape to b, t, d.
    Then apply standard transformer blocks.
    Finally, reshape back to image shape.
"""
def __init__(self, in_channels, n_heads, d_head,
depth=1, dropout=0., context_dim=None):
super().__init__()
self.in_channels = in_channels
inner_dim = n_heads * d_head
self.norm = Normalize(in_channels)
self.proj_in = nn.Conv2d(in_channels,
inner_dim,
kernel_size=1,
stride=1,
padding=0)
self.transformer_blocks = nn.ModuleList(
[BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim)
for d in range(depth)]
)
self.proj_out = zero_module(nn.Conv2d(inner_dim,
in_channels,
kernel_size=1,
stride=1,
padding=0))
def forward(self, x, context=None):
# note: if no context is given, cross-attention defaults to self-attention
b, c, h, w = x.shape # such as [2,320,10,106]
x_in = x
x = self.norm(x)# group norm
x = self.proj_in(x)# no shape change
x = rearrange(x, 'b c h w -> b (h w) c')
for block in self.transformer_blocks:
x = block(x, context=context)# context shape [b,seq_len=77,context_dim]
x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w)
x = self.proj_out(x)
return x + x_in | EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/attention.py |
"""shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers"""
import torch
from torch import nn, einsum
import torch.nn.functional as F
from functools import partial
from inspect import isfunction
from collections import namedtuple
from einops import rearrange, repeat, reduce
# constants
DEFAULT_DIM_HEAD = 64
Intermediates = namedtuple('Intermediates', [
'pre_softmax_attn',
'post_softmax_attn'
])
LayerIntermediates = namedtuple('LayerIntermediates', [
'hiddens',
'attn_intermediates'
])
class AbsolutePositionalEmbedding(nn.Module):
def __init__(self, dim, max_seq_len):
super().__init__()
self.emb = nn.Embedding(max_seq_len, dim)
self.init_()
def init_(self):
nn.init.normal_(self.emb.weight, std=0.02)
def forward(self, x):
n = torch.arange(x.shape[1], device=x.device)
return self.emb(n)[None, :, :]
class FixedPositionalEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1. / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
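    # Standard sinusoidal features: for position t and frequency index i, forward() returns
    # [sin(t * inv_freq_i), cos(t * inv_freq_i)] concatenated on the last axis, with inv_freq_i = 10000**(-2i/dim).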
def forward(self, x, seq_dim=1, offset=0):
t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset
sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq)
emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1)
return emb[None, :, :]
# helpers
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if isfunction(d) else d
def always(val):
def inner(*args, **kwargs):
return val
return inner
def not_equals(val):
def inner(x):
return x != val
return inner
def equals(val):
def inner(x):
return x == val
return inner
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
# keyword argument helpers
def pick_and_pop(keys, d):
values = list(map(lambda key: d.pop(key), keys))
return dict(zip(keys, values))
def group_dict_by_key(cond, d):
return_val = [dict(), dict()]
for key in d.keys():
match = bool(cond(key))
ind = int(not match)
return_val[ind][key] = d[key]
return (*return_val,)
def string_begins_with(prefix, str):
return str.startswith(prefix)
def group_by_key_prefix(prefix, d):
return group_dict_by_key(partial(string_begins_with, prefix), d)
def groupby_prefix_and_trim(prefix, d):
kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)
kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))
return kwargs_without_prefix, kwargs
# classes
class Scale(nn.Module):
def __init__(self, value, fn):
super().__init__()
self.value = value
self.fn = fn
def forward(self, x, **kwargs):
x, *rest = self.fn(x, **kwargs)
return (x * self.value, *rest)
class Rezero(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
self.g = nn.Parameter(torch.zeros(1))
def forward(self, x, **kwargs):
x, *rest = self.fn(x, **kwargs)
return (x * self.g, *rest)
class ScaleNorm(nn.Module):
def __init__(self, dim, eps=1e-5):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(1))
def forward(self, x):
norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
return x / norm.clamp(min=self.eps) * self.g
class RMSNorm(nn.Module):
def __init__(self, dim, eps=1e-8):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
return x / norm.clamp(min=self.eps) * self.g
class Residual(nn.Module):
def forward(self, x, residual):
return x + residual
class GRUGating(nn.Module):
def __init__(self, dim):
super().__init__()
self.gru = nn.GRUCell(dim, dim)
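    # Gated residual (cf. the GTrXL-style GRU gating): the residual stream acts as the GRU hidden state and the
    # block output as its input, so the cell learns how much new information to mix in instead of plain addition.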
def forward(self, x, residual):
gated_output = self.gru(
rearrange(x, 'b n d -> (b n) d'),
rearrange(residual, 'b n d -> (b n) d')
)
return gated_output.reshape_as(x)
# feedforward
class GEGLU(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.proj = nn.Linear(dim_in, dim_out * 2)
def forward(self, x):
x, gate = self.proj(x).chunk(2, dim=-1)
return x * F.gelu(gate)
class FeedForward(nn.Module):
def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.):
super().__init__()
inner_dim = int(dim * mult)
dim_out = default(dim_out, dim)
project_in = nn.Sequential(
nn.Linear(dim, inner_dim),
nn.GELU()
) if not glu else GEGLU(dim, inner_dim)
self.net = nn.Sequential(
project_in,
nn.Dropout(dropout),
nn.Linear(inner_dim, dim_out)
)
def forward(self, x):
return self.net(x)
# attention.
class Attention(nn.Module):
def __init__(
self,
dim,
dim_head=DEFAULT_DIM_HEAD,
heads=8,
causal=False,
mask=None,
talking_heads=False,
sparse_topk=None,
use_entmax15=False,
num_mem_kv=0,
dropout=0.,
on_attn=False
):
super().__init__()
if use_entmax15:
raise NotImplementedError("Check out entmax activation instead of softmax activation!")
self.scale = dim_head ** -0.5
self.heads = heads
self.causal = causal
self.mask = mask
inner_dim = dim_head * heads
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_k = nn.Linear(dim, inner_dim, bias=False)
self.to_v = nn.Linear(dim, inner_dim, bias=False)
self.dropout = nn.Dropout(dropout)
# talking heads
self.talking_heads = talking_heads
if talking_heads:
self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads))
self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads))
# explicit topk sparse attention
self.sparse_topk = sparse_topk
# entmax
#self.attn_fn = entmax15 if use_entmax15 else F.softmax
self.attn_fn = F.softmax
# add memory key / values
self.num_mem_kv = num_mem_kv
if num_mem_kv > 0:
self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head))
# attention on attention
self.attn_on_attn = on_attn
self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim)
def forward(
self,
x,
context=None,
mask=None,
context_mask=None,
rel_pos=None,
sinusoidal_emb=None,
prev_attn=None,
mem=None
):
b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device
kv_input = default(context, x)
q_input = x
k_input = kv_input
v_input = kv_input
if exists(mem):
k_input = torch.cat((mem, k_input), dim=-2)
v_input = torch.cat((mem, v_input), dim=-2)
if exists(sinusoidal_emb):
# in shortformer, the query would start at a position offset depending on the past cached memory
offset = k_input.shape[-2] - q_input.shape[-2]
q_input = q_input + sinusoidal_emb(q_input, offset=offset)
k_input = k_input + sinusoidal_emb(k_input)
q = self.to_q(q_input)
k = self.to_k(k_input)
v = self.to_v(v_input)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v))
input_mask = None
if any(map(exists, (mask, context_mask))):
q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool())
k_mask = q_mask if not exists(context) else context_mask
k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool())
q_mask = rearrange(q_mask, 'b i -> b () i ()')
k_mask = rearrange(k_mask, 'b j -> b () () j')
input_mask = q_mask * k_mask
if self.num_mem_kv > 0:
mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v))
k = torch.cat((mem_k, k), dim=-2)
v = torch.cat((mem_v, v), dim=-2)
if exists(input_mask):
input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True)
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
mask_value = max_neg_value(dots)
if exists(prev_attn):
dots = dots + prev_attn
pre_softmax_attn = dots
if talking_heads:
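            # talking heads: mix attention logits across heads with a learned head-to-head projection before the softmax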
dots = einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous()
if exists(rel_pos):
dots = rel_pos(dots)
if exists(input_mask):
dots.masked_fill_(~input_mask, mask_value)
del input_mask
if self.causal:
i, j = dots.shape[-2:]
r = torch.arange(i, device=device)
mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j')
mask = F.pad(mask, (j - i, 0), value=False)
dots.masked_fill_(mask, mask_value)
del mask
if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]:
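            # explicit sparse attention: keep only the top-k logits per query and mask everything else to the large negative value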
top, _ = dots.topk(self.sparse_topk, dim=-1)
vk = top[..., -1].unsqueeze(-1).expand_as(dots)
mask = dots < vk
dots.masked_fill_(mask, mask_value)
del mask
attn = self.attn_fn(dots, dim=-1)
post_softmax_attn = attn
attn = self.dropout(attn)
if talking_heads:
attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous()
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
intermediates = Intermediates(
pre_softmax_attn=pre_softmax_attn,
post_softmax_attn=post_softmax_attn
)
return self.to_out(out), intermediates
class AttentionLayers(nn.Module):
def __init__(
self,
dim,
depth,
heads=8,
causal=False,
cross_attend=False,
only_cross=False,
use_scalenorm=False,
use_rmsnorm=False,
use_rezero=False,
rel_pos_num_buckets=32,
rel_pos_max_distance=128,
position_infused_attn=False,
custom_layers=None,
sandwich_coef=None,
par_ratio=None,
residual_attn=False,
cross_residual_attn=False,
macaron=False,
pre_norm=True,
gate_residual=False,
**kwargs
):
super().__init__()
ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs)
attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs)
dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD)
self.dim = dim
self.depth = depth
self.layers = nn.ModuleList([])
self.has_pos_emb = position_infused_attn
self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None
self.rotary_pos_emb = always(None)
assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance'
self.rel_pos = None
self.pre_norm = pre_norm
self.residual_attn = residual_attn
self.cross_residual_attn = cross_residual_attn
norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm
norm_class = RMSNorm if use_rmsnorm else norm_class
norm_fn = partial(norm_class, dim)
norm_fn = nn.Identity if use_rezero else norm_fn
branch_fn = Rezero if use_rezero else None
if cross_attend and not only_cross:
default_block = ('a', 'c', 'f')
elif cross_attend and only_cross:
default_block = ('c', 'f')
else:
default_block = ('a', 'f')
if macaron:
default_block = ('f',) + default_block
if exists(custom_layers):
layer_types = custom_layers
elif exists(par_ratio):
par_depth = depth * len(default_block)
assert 1 < par_ratio <= par_depth, 'par ratio out of range'
default_block = tuple(filter(not_equals('f'), default_block))
par_attn = par_depth // par_ratio
depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper
par_width = (depth_cut + depth_cut // par_attn) // par_attn
assert len(default_block) <= par_width, 'default block is too large for par_ratio'
par_block = default_block + ('f',) * (par_width - len(default_block))
par_head = par_block * par_attn
layer_types = par_head + ('f',) * (par_depth - len(par_head))
elif exists(sandwich_coef):
            assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be positive and at most the depth'
layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef
else:
layer_types = default_block * depth
self.layer_types = layer_types
self.num_attn_layers = len(list(filter(equals('a'), layer_types)))
for layer_type in self.layer_types:
if layer_type == 'a':
layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs)
elif layer_type == 'c':
layer = Attention(dim, heads=heads, **attn_kwargs)
elif layer_type == 'f':
layer = FeedForward(dim, **ff_kwargs)
layer = layer if not macaron else Scale(0.5, layer)
else:
raise Exception(f'invalid layer type {layer_type}')
if isinstance(layer, Attention) and exists(branch_fn):
layer = branch_fn(layer)
if gate_residual:
residual_fn = GRUGating(dim)
else:
residual_fn = Residual()
self.layers.append(nn.ModuleList([
norm_fn(),
layer,
residual_fn
]))
def forward(
self,
x,
context=None,
mask=None,
context_mask=None,
mems=None,
return_hiddens=False
):
hiddens = []
intermediates = []
prev_attn = None
prev_cross_attn = None
mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers
for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)):
is_last = ind == (len(self.layers) - 1)
if layer_type == 'a':
hiddens.append(x)
layer_mem = mems.pop(0)
residual = x
if self.pre_norm:
x = norm(x)
if layer_type == 'a':
out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos,
prev_attn=prev_attn, mem=layer_mem)
elif layer_type == 'c':
out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn)
elif layer_type == 'f':
out = block(x)
x = residual_fn(out, residual)
if layer_type in ('a', 'c'):
intermediates.append(inter)
if layer_type == 'a' and self.residual_attn:
prev_attn = inter.pre_softmax_attn
elif layer_type == 'c' and self.cross_residual_attn:
prev_cross_attn = inter.pre_softmax_attn
if not self.pre_norm and not is_last:
x = norm(x)
if return_hiddens:
intermediates = LayerIntermediates(
hiddens=hiddens,
attn_intermediates=intermediates
)
return x, intermediates
return x
class Encoder(AttentionLayers):
def __init__(self, **kwargs):
assert 'causal' not in kwargs, 'cannot set causality on encoder'
super().__init__(causal=False, **kwargs)
class TransformerWrapper(nn.Module):
def __init__(
self,
*,
num_tokens,
max_seq_len,
attn_layers,
emb_dim=None,
max_mem_len=0.,
emb_dropout=0.,
num_memory_tokens=None,
tie_embedding=False,
use_pos_emb=True
):
super().__init__()
assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder'
dim = attn_layers.dim
emb_dim = default(emb_dim, dim)
self.max_seq_len = max_seq_len
self.max_mem_len = max_mem_len
self.num_tokens = num_tokens
self.token_emb = nn.Embedding(num_tokens, emb_dim)
self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if (
use_pos_emb and not attn_layers.has_pos_emb) else always(0)
self.emb_dropout = nn.Dropout(emb_dropout)
self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity()
self.attn_layers = attn_layers
self.norm = nn.LayerNorm(dim)
self.init_()
self.to_logits = nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t()
# memory tokens (like [cls]) from Memory Transformers paper
num_memory_tokens = default(num_memory_tokens, 0)
self.num_memory_tokens = num_memory_tokens
if num_memory_tokens > 0:
self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
# let funnel encoder know number of memory tokens, if specified
if hasattr(attn_layers, 'num_memory_tokens'):
attn_layers.num_memory_tokens = num_memory_tokens
def init_(self):
nn.init.normal_(self.token_emb.weight, std=0.02)
def forward(
self,
x,
return_embeddings=False,
mask=None,
return_mems=False,
return_attn=False,
mems=None,
**kwargs
):
b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens
x = self.token_emb(x)
x += self.pos_emb(x)
x = self.emb_dropout(x)
x = self.project_emb(x)
if num_mem > 0:
mem = repeat(self.memory_tokens, 'n d -> b n d', b=b)
x = torch.cat((mem, x), dim=1)
# auto-handle masking after appending memory tokens
if exists(mask):
mask = F.pad(mask, (num_mem, 0), value=True)
x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs)
x = self.norm(x)
mem, x = x[:, :num_mem], x[:, num_mem:]
out = self.to_logits(x) if not return_embeddings else x
if return_mems:
hiddens = intermediates.hiddens
new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens
new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems))
return out, new_mems
if return_attn:
attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates))
return out, attn_maps
return out
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/x_transformer.py |
import torch
from torch import nn
class LitEma(nn.Module):
def __init__(self, model, decay=0.9999, use_num_upates=True):
super().__init__()
if decay < 0.0 or decay > 1.0:
raise ValueError('Decay must be between 0 and 1')
self.m_name2s_name = {}
self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))
self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates
else torch.tensor(-1,dtype=torch.int))
for name, p in model.named_parameters():
if p.requires_grad:
                # remove '.' from the name since it is not allowed in buffer names
                s_name = name.replace('.', '')
self.m_name2s_name.update({name:s_name})
self.register_buffer(s_name,p.clone().detach().data)
self.collected_params = []
    def forward(self, model):
decay = self.decay
if self.num_updates >= 0:
self.num_updates += 1
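            # warm-up: use min(self.decay, (1 + n) / (10 + n)) so the effective decay starts small and reaches self.decay as updates accumulate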
decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))
one_minus_decay = 1.0 - decay
with torch.no_grad():
m_param = dict(model.named_parameters())
shadow_params = dict(self.named_buffers())
for key in m_param:
if m_param[key].requires_grad:
sname = self.m_name2s_name[key]
shadow_params[sname] = shadow_params[sname].type_as(m_param[key])
shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))
else:
assert not key in self.m_name2s_name
def copy_to(self, model):
m_param = dict(model.named_parameters())
shadow_params = dict(self.named_buffers())
for key in m_param:
if m_param[key].requires_grad:
m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)
else:
assert not key in self.m_name2s_name
def store(self, parameters):
"""
Save the current parameters for restoring later.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
temporarily stored.
"""
self.collected_params = [param.clone() for param in parameters]
def restore(self, parameters):
"""
Restore the parameters stored with the `store` method.
Useful to validate the model with EMA parameters without affecting the
original optimization process. Store the parameters before the
`copy_to` method. After validation (or model saving), use this to
restore the former parameters.
Args:
parameters: Iterable of `torch.nn.Parameter`; the parameters to be
updated with the stored parameters.
"""
for c_param, param in zip(self.collected_params, parameters):
param.data.copy_(c_param.data)
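# A minimal usage sketch (variable names are illustrative; `model` is the nn.Module being trained):
#   ema = LitEma(model)
#   ema(model)                       # call after each optimizer step to update the shadow weights
#   ema.store(model.parameters())    # stash the current training weights
#   ema.copy_to(model)               # evaluate or save checkpoints with the EMA weights
#   ema.restore(model.parameters())  # put the training weights back afterwards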
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/ema.py |
import functools
import torch
import torch.nn as nn
class ActNorm(nn.Module):
def __init__(self, num_features, logdet=False, affine=True,
allow_reverse_init=False):
assert affine
super().__init__()
self.logdet = logdet
self.loc = nn.Parameter(torch.zeros(1, num_features, 1, 1))
self.scale = nn.Parameter(torch.ones(1, num_features, 1, 1))
self.allow_reverse_init = allow_reverse_init
self.register_buffer('initialized', torch.tensor(0, dtype=torch.uint8))
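    # Data-dependent initialization: on the first training batch, loc and scale are set from the
    # batch's per-channel mean and std, so the normalized output starts with roughly zero mean and unit variance.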
def initialize(self, input):
with torch.no_grad():
flatten = input.permute(1, 0, 2, 3).contiguous().view(input.shape[1], -1)
mean = (
flatten.mean(1)
.unsqueeze(1)
.unsqueeze(2)
.unsqueeze(3)
.permute(1, 0, 2, 3)
)
std = (
flatten.std(1)
.unsqueeze(1)
.unsqueeze(2)
.unsqueeze(3)
.permute(1, 0, 2, 3)
)
self.loc.data.copy_(-mean)
self.scale.data.copy_(1 / (std + 1e-6))
def forward(self, input, reverse=False):
if reverse:
return self.reverse(input)
if len(input.shape) == 2:
input = input[:, :, None, None]
squeeze = True
else:
squeeze = False
_, _, height, width = input.shape
if self.training and self.initialized.item() == 0:
self.initialize(input)
self.initialized.fill_(1)
h = self.scale * (input + self.loc)
if squeeze:
h = h.squeeze(-1).squeeze(-1)
if self.logdet:
log_abs = torch.log(torch.abs(self.scale))
logdet = height * width * torch.sum(log_abs)
logdet = logdet * torch.ones(input.shape[0]).to(input)
return h, logdet
return h
def reverse(self, output):
if self.training and self.initialized.item() == 0:
if not self.allow_reverse_init:
raise RuntimeError(
"Initializing ActNorm in reverse direction is "
"disabled by default. Use allow_reverse_init=True to enable."
)
else:
self.initialize(output)
self.initialized.fill_(1)
if len(output.shape) == 2:
output = output[:, :, None, None]
squeeze = True
else:
squeeze = False
h = output / self.scale - self.loc
if squeeze:
h = h.squeeze(-1).squeeze(-1)
return h
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.normal_(m.weight.data, 0.0, 0.02)
elif classname.find('BatchNorm') != -1:
nn.init.normal_(m.weight.data, 1.0, 0.02)
nn.init.constant_(m.bias.data, 0)
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator as in Pix2Pix
--> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
"""
def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if not use_actnorm:
norm_layer = nn.BatchNorm2d
else:
norm_layer = ActNorm
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func != nn.BatchNorm2d
else:
use_bias = norm_layer != nn.BatchNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
# output 1 channel prediction map
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
self.main = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.main(input)
class NLayerDiscriminator1dFeats(NLayerDiscriminator):
"""Defines a PatchGAN discriminator as in Pix2Pix
--> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
"""
def __init__(self, input_nc=3, ndf=64, n_layers=3, use_actnorm=False):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input feats
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super().__init__(input_nc=input_nc, ndf=64, n_layers=n_layers, use_actnorm=use_actnorm)
if not use_actnorm:
norm_layer = nn.BatchNorm1d
else:
norm_layer = ActNorm
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm has affine parameters
use_bias = norm_layer.func != nn.BatchNorm1d
else:
use_bias = norm_layer != nn.BatchNorm1d
kw = 4
padw = 1
sequence = [nn.Conv1d(input_nc, input_nc//2, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = input_nc//2
nf_mult_prev = 1
for n in range(1, n_layers): # gradually decrease the number of filters
nf_mult_prev = nf_mult
nf_mult = max(nf_mult_prev // (2 ** n), 8)
sequence += [
nn.Conv1d(nf_mult_prev, nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = max(nf_mult_prev // (2 ** n), 8)
sequence += [
nn.Conv1d(nf_mult_prev, nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = max(nf_mult_prev // (2 ** n), 8)
sequence += [
nn.Conv1d(nf_mult_prev, nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(nf_mult),
nn.LeakyReLU(0.2, True)
]
# output 1 channel prediction map
sequence += [nn.Conv1d(nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
self.main = nn.Sequential(*sequence)
class NLayerDiscriminator1dSpecs(NLayerDiscriminator):
"""Defines a PatchGAN discriminator as in Pix2Pix
--> see https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix/blob/master/models/networks.py
"""
def __init__(self, input_nc=80, ndf=64, n_layers=3, use_actnorm=False):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input specs
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super().__init__(input_nc=input_nc, ndf=64, n_layers=n_layers, use_actnorm=use_actnorm)
if not use_actnorm:
norm_layer = nn.BatchNorm1d
else:
norm_layer = ActNorm
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm has affine parameters
use_bias = norm_layer.func != nn.BatchNorm1d
else:
use_bias = norm_layer != nn.BatchNorm1d
kw = 4
padw = 1
sequence = [nn.Conv1d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually decrease the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv1d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv1d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
# output 1 channel prediction map
sequence += [nn.Conv1d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
self.main = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
# (B, C, L)
input = input.squeeze(1)
input = self.main(input)
return input
if __name__ == '__main__':
import torch
## FEATURES
disc_in_channels = 2048
disc_num_layers = 2
use_actnorm = False
disc_ndf = 64
discriminator = NLayerDiscriminator1dFeats(input_nc=disc_in_channels, n_layers=disc_num_layers,
use_actnorm=use_actnorm, ndf=disc_ndf).apply(weights_init)
inputs = torch.rand((6, 2048, 212))
outputs = discriminator(inputs)
print(outputs.shape)
## AUDIO
disc_in_channels = 1
disc_num_layers = 3
use_actnorm = False
disc_ndf = 64
discriminator = NLayerDiscriminator(input_nc=disc_in_channels, n_layers=disc_num_layers,
use_actnorm=use_actnorm, ndf=disc_ndf).apply(weights_init)
inputs = torch.rand((6, 1, 80, 848))
outputs = discriminator(inputs)
print(outputs.shape)
## IMAGE
disc_in_channels = 3
disc_num_layers = 3
use_actnorm = False
disc_ndf = 64
discriminator = NLayerDiscriminator(input_nc=disc_in_channels, n_layers=disc_num_layers,
use_actnorm=use_actnorm, ndf=disc_ndf).apply(weights_init)
inputs = torch.rand((6, 3, 256, 256))
outputs = discriminator(inputs)
print(outputs.shape)
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/discriminator/model.py |
import numpy as np
import torch
import torch.nn as nn
class Discriminator2DFactory(nn.Module):
def __init__(self, time_length, freq_length=80, kernel=(3, 3), c_in=1, hidden_size=128,
norm_type='bn', reduction='sum'):
super(Discriminator2DFactory, self).__init__()
padding = (kernel[0] // 2, kernel[1] // 2)
def discriminator_block(in_filters, out_filters, first=False):
"""
Input: (B, in, 2H, 2W)
Output:(B, out, H, W)
"""
conv = nn.Conv2d(in_filters, out_filters, kernel, (2, 2), padding)
if norm_type == 'sn':
conv = nn.utils.spectral_norm(conv)
block = [
conv, # padding = kernel//2
nn.LeakyReLU(0.2, inplace=True),
nn.Dropout2d(0.25)
]
if norm_type == 'bn' and not first:
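                # note: the second positional argument of nn.BatchNorm2d is eps, so this sets eps=0.8 (kept as in the original code)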
block.append(nn.BatchNorm2d(out_filters, 0.8))
if norm_type == 'in' and not first:
block.append(nn.InstanceNorm2d(out_filters, affine=True))
block = nn.Sequential(*block)
return block
self.model = nn.ModuleList([
discriminator_block(c_in, hidden_size, first=True),
discriminator_block(hidden_size, hidden_size),
discriminator_block(hidden_size, hidden_size),
])
self.reduction = reduction
ds_size = (time_length // 2 ** 3, (freq_length + 7) // 2 ** 3)
if reduction != 'none':
# The height and width of downsampled image
self.adv_layer = nn.Linear(hidden_size * ds_size[0] * ds_size[1], 1)
else:
self.adv_layer = nn.Linear(hidden_size * ds_size[1], 1)
def forward(self, x):
"""
:param x: [B, C, T, n_bins]
:return: validity: [B, 1], h: List of hiddens
"""
h = []
for l in self.model:
x = l(x)
h.append(x)
if self.reduction != 'none':
x = x.view(x.shape[0], -1)
validity = self.adv_layer(x) # [B, 1]
else:
B, _, T_, _ = x.shape
x = x.transpose(1, 2).reshape(B, T_, -1)
validity = self.adv_layer(x)[:, :, 0] # [B, T]
return validity, h
class MultiWindowDiscriminator(nn.Module):
def __init__(self, time_lengths, cond_size=0, freq_length=80, kernel=(3, 3),
c_in=1, hidden_size=128, norm_type='bn', reduction='sum'):
super(MultiWindowDiscriminator, self).__init__()
self.win_lengths = time_lengths
self.reduction = reduction
self.conv_layers = nn.ModuleList()
if cond_size > 0:
self.cond_proj_layers = nn.ModuleList()
self.mel_proj_layers = nn.ModuleList()
for time_length in time_lengths:
conv_layer = [
Discriminator2DFactory(
time_length, freq_length, kernel, c_in=c_in, hidden_size=hidden_size,
norm_type=norm_type, reduction=reduction)
]
self.conv_layers += conv_layer
if cond_size > 0:
self.cond_proj_layers.append(nn.Linear(cond_size, freq_length))
self.mel_proj_layers.append(nn.Linear(freq_length, freq_length))
def forward(self, x, x_len, cond=None, start_frames_wins=None):
'''
Args:
x (tensor): input mel, (B, c_in, T, n_bins).
            x_len (tensor): length of each mel, (B,).
Returns:
tensor : (B).
'''
validity = []
if start_frames_wins is None:
start_frames_wins = [None] * len(self.conv_layers)
h = []
for i, start_frames in zip(range(len(self.conv_layers)), start_frames_wins):
x_clip, c_clip, start_frames = self.clip(
x, cond, x_len, self.win_lengths[i], start_frames) # (B, win_length, C)
start_frames_wins[i] = start_frames
if x_clip is None:
continue
if cond is not None:
x_clip = self.mel_proj_layers[i](x_clip) # (B, 1, win_length, C)
c_clip = self.cond_proj_layers[i](c_clip)[:, None] # (B, 1, win_length, C)
x_clip = x_clip + c_clip
x_clip, h_ = self.conv_layers[i](x_clip)
h += h_
validity.append(x_clip)
if len(validity) != len(self.conv_layers):
return None, start_frames_wins, h
if self.reduction == 'sum':
validity = sum(validity) # [B]
elif self.reduction == 'stack':
validity = torch.stack(validity, -1) # [B, W_L]
elif self.reduction == 'none':
validity = torch.cat(validity, -1) # [B, W_sum]
return validity, start_frames_wins, h
def clip(self, x, cond, x_len, win_length, start_frames=None):
        '''Randomly clip x to win_length.
Args:
x (tensor) : (B, c_in, T, n_bins).
cond (tensor) : (B, T, H).
x_len (tensor) : (B,).
win_length (int): target clip length
Returns:
(tensor) : (B, c_in, win_length, n_bins).
'''
T_start = 0
T_end = x_len.max() - win_length
if T_end < 0:
return None, None, start_frames
T_end = T_end.item()
if start_frames is None:
start_frame = np.random.randint(low=T_start, high=T_end + 1)
start_frames = [start_frame] * x.size(0)
else:
start_frame = start_frames[0]
x_batch = x[:, :, start_frame: start_frame + win_length]
c_batch = cond[:, start_frame: start_frame + win_length] if cond is not None else None
return x_batch, c_batch, start_frames
class Discriminator(nn.Module):
def __init__(self, time_lengths=[32, 64, 128], freq_length=80, cond_size=0, kernel=(3, 3), c_in=1,
hidden_size=128, norm_type='bn', reduction='sum', uncond_disc=True):
super(Discriminator, self).__init__()
self.time_lengths = time_lengths
self.cond_size = cond_size
self.reduction = reduction
self.uncond_disc = uncond_disc
if uncond_disc:
self.discriminator = MultiWindowDiscriminator(
freq_length=freq_length,
time_lengths=time_lengths,
kernel=kernel,
c_in=c_in, hidden_size=hidden_size, norm_type=norm_type,
reduction=reduction
)
if cond_size > 0:
self.cond_disc = MultiWindowDiscriminator(
freq_length=freq_length,
time_lengths=time_lengths,
cond_size=cond_size,
kernel=kernel,
c_in=c_in, hidden_size=hidden_size, norm_type=norm_type,
reduction=reduction
)
def forward(self, x, cond=None, start_frames_wins=None):
"""
:param x: [B, T, 80]
:param cond: [B, T, cond_size]
:param return_y_only:
:return:
"""
if len(x.shape) == 3:
x = x[:, None, :, :]
x_len = x.sum([1, -1]).ne(0).int().sum([-1])
ret = {'y_c': None, 'y': None}
if self.uncond_disc:
ret['y'], start_frames_wins, ret['h'] = self.discriminator(
x, x_len, start_frames_wins=start_frames_wins)
if self.cond_size > 0 and cond is not None:
ret['y_c'], start_frames_wins, ret['h_c'] = self.cond_disc(
x, x_len, cond, start_frames_wins=start_frames_wins)
ret['start_frames_wins'] = start_frames_wins
return ret | EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/discriminator/multi_window_disc.py |
"""
Based on https://github.com/CompVis/taming-transformers/blob/52720829/taming/modules/losses/lpips.py
Adapted for spectrograms by Vladimir Iashin (v-iashin)
"""
from collections import namedtuple
import numpy as np
import torch
import torch.nn as nn
import sys
sys.path.insert(0, '.') # nopep8
from ldm.modules.losses_audio.vggishish.model import VGGishish
from ldm.util import get_ckpt_path
class LPAPS(nn.Module):
# Learned perceptual metric
def __init__(self, use_dropout=True):
super().__init__()
self.scaling_layer = ScalingLayer()
self.chns = [64, 128, 256, 512, 512] # vggish16 features
self.net = vggishish16(pretrained=True, requires_grad=False)
self.lin0 = NetLinLayer(self.chns[0], use_dropout=use_dropout)
self.lin1 = NetLinLayer(self.chns[1], use_dropout=use_dropout)
self.lin2 = NetLinLayer(self.chns[2], use_dropout=use_dropout)
self.lin3 = NetLinLayer(self.chns[3], use_dropout=use_dropout)
self.lin4 = NetLinLayer(self.chns[4], use_dropout=use_dropout)
self.load_from_pretrained()
for param in self.parameters():
param.requires_grad = False
def load_from_pretrained(self, name="vggishish_lpaps"):
ckpt = get_ckpt_path(name, "ldm/modules/autoencoder/lpaps")
self.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
print("loaded pretrained LPAPS loss from {}".format(ckpt))
@classmethod
def from_pretrained(cls, name="vggishish_lpaps"):
if name != "vggishish_lpaps":
raise NotImplementedError
model = cls()
ckpt = get_ckpt_path(name)
model.load_state_dict(torch.load(ckpt, map_location=torch.device("cpu")), strict=False)
return model
def forward(self, input, target):
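        # LPIPS-style distance: compare unit-normalized VGGishish features layer by layer, weight the squared differences with 1x1 convs, average spatially and sum over layers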
in0_input, in1_input = (self.scaling_layer(input), self.scaling_layer(target))
outs0, outs1 = self.net(in0_input), self.net(in1_input)
feats0, feats1, diffs = {}, {}, {}
lins = [self.lin0, self.lin1, self.lin2, self.lin3, self.lin4]
for kk in range(len(self.chns)):
feats0[kk], feats1[kk] = normalize_tensor(outs0[kk]), normalize_tensor(outs1[kk])
diffs[kk] = (feats0[kk] - feats1[kk]) ** 2
res = [spatial_average(lins[kk].model(diffs[kk]), keepdim=True) for kk in range(len(self.chns))]
val = res[0]
for l in range(1, len(self.chns)):
val += res[l]
return val
class ScalingLayer(nn.Module):
def __init__(self):
super(ScalingLayer, self).__init__()
        # we are going to use get_ckpt_path to download the stats as well
stat_path = get_ckpt_path('vggishish_mean_std_melspec_10s_22050hz', 'ldm/modules/autoencoder/lpaps')
# if for images we normalize on the channel dim, in spectrogram we will norm on frequency dimension
means, stds = np.loadtxt(stat_path, dtype=np.float32).T
# the normalization in means and stds are given for [0, 1], but specvqgan expects [-1, 1]:
means = 2 * means - 1
stds = 2 * stds
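        # (rescaling x to x' = 2x - 1 maps [0, 1] to [-1, 1], so mean' = 2 * mean - 1 and std' = 2 * std)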
# input is expected to be (B, 1, F, T)
self.register_buffer('shift', torch.from_numpy(means)[None, None, :, None])
self.register_buffer('scale', torch.from_numpy(stds)[None, None, :, None])
def forward(self, inp):
return (inp - self.shift) / self.scale
class NetLinLayer(nn.Module):
""" A single linear layer which does a 1x1 conv """
def __init__(self, chn_in, chn_out=1, use_dropout=False):
super(NetLinLayer, self).__init__()
layers = [nn.Dropout(), ] if (use_dropout) else []
layers += [nn.Conv2d(chn_in, chn_out, 1, stride=1, padding=0, bias=False), ]
self.model = nn.Sequential(*layers)
class vggishish16(torch.nn.Module):
def __init__(self, requires_grad=False, pretrained=True):
super().__init__()
vgg_pretrained_features = self.vggishish16(pretrained=pretrained).features
self.slice1 = torch.nn.Sequential()
self.slice2 = torch.nn.Sequential()
self.slice3 = torch.nn.Sequential()
self.slice4 = torch.nn.Sequential()
self.slice5 = torch.nn.Sequential()
self.N_slices = 5
for x in range(4):
self.slice1.add_module(str(x), vgg_pretrained_features[x])
for x in range(4, 9):
self.slice2.add_module(str(x), vgg_pretrained_features[x])
for x in range(9, 16):
self.slice3.add_module(str(x), vgg_pretrained_features[x])
for x in range(16, 23):
self.slice4.add_module(str(x), vgg_pretrained_features[x])
for x in range(23, 30):
self.slice5.add_module(str(x), vgg_pretrained_features[x])
if not requires_grad:
for param in self.parameters():
param.requires_grad = False
def forward(self, X):
h = self.slice1(X)
h_relu1_2 = h
h = self.slice2(h)
h_relu2_2 = h
h = self.slice3(h)
h_relu3_3 = h
h = self.slice4(h)
h_relu4_3 = h
h = self.slice5(h)
h_relu5_3 = h
vgg_outputs = namedtuple("VggOutputs", ['relu1_2', 'relu2_2', 'relu3_3', 'relu4_3', 'relu5_3'])
out = vgg_outputs(h_relu1_2, h_relu2_2, h_relu3_3, h_relu4_3, h_relu5_3)
return out
def vggishish16(self, pretrained: bool = True) -> VGGishish:
# loading vggishish pretrained on vggsound
num_classes_vggsound = 309
conv_layers = [64, 64, 'MP', 128, 128, 'MP', 256, 256, 256, 'MP', 512, 512, 512, 'MP', 512, 512, 512]
model = VGGishish(conv_layers, use_bn=False, num_classes=num_classes_vggsound)
if pretrained:
ckpt_path = get_ckpt_path('vggishish_lpaps', "ldm/modules/autoencoder/lpaps")
ckpt = torch.load(ckpt_path, map_location=torch.device("cpu"))
model.load_state_dict(ckpt, strict=False)
return model
def normalize_tensor(x, eps=1e-10):
norm_factor = torch.sqrt(torch.sum(x**2, dim=1, keepdim=True))
return x / (norm_factor+eps)
def spatial_average(x, keepdim=True):
return x.mean([2, 3], keepdim=keepdim)
if __name__ == '__main__':
inputs = torch.rand((16, 1, 80, 848))
reconstructions = torch.rand((16, 1, 80, 848))
lpips = LPAPS().eval()
loss_p = lpips(inputs.contiguous(), reconstructions.contiguous())
# (16, 1, 1, 1)
print(loss_p.shape)
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/losses_audio/lpaps.py |
from ldm.modules.losses_audio.vqperceptual import DummyLoss
# relative imports pain
import os
import sys
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'vggishish')
sys.path.append(path)
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/losses_audio/__init__.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
from ldm.util import exists
sys.path.insert(0, '.') # nopep8
from ldm.modules.discriminator.model import (NLayerDiscriminator, NLayerDiscriminator1dFeats,
NLayerDiscriminator1dSpecs,
weights_init)
from ldm.modules.losses_audio.lpaps import LPAPS
from ldm.modules.losses.vqperceptual import l1, l2, measure_perplexity, hinge_d_loss, vanilla_d_loss, adopt_weight
class DummyLoss(nn.Module):
def __init__(self):
super().__init__()
class VQLPAPSWithDiscriminator(nn.Module):
def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0,
disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
disc_ndf=64, disc_loss="hinge", n_classes=None, pixel_loss="l1"):
super().__init__()
assert disc_loss in ["hinge", "vanilla"]
self.codebook_weight = codebook_weight
self.pixel_weight = pixelloss_weight
self.perceptual_loss = LPAPS().eval()
self.perceptual_weight = perceptual_weight
if pixel_loss == "l1":
self.pixel_loss = l1
else:
self.pixel_loss = l2
self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
n_layers=disc_num_layers,
use_actnorm=use_actnorm,
ndf=disc_ndf
).apply(weights_init)
self.discriminator_iter_start = disc_start
if disc_loss == "hinge":
self.disc_loss = hinge_d_loss
elif disc_loss == "vanilla":
self.disc_loss = vanilla_d_loss
else:
raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
print(f"VQLPAPSWithDiscriminator running with {disc_loss} loss.")
self.disc_factor = disc_factor
self.discriminator_weight = disc_weight
self.disc_conditional = disc_conditional
self.n_classes = n_classes
def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
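        # Balance the adversarial term against the reconstruction term by the ratio of their gradient
        # norms w.r.t. the last decoder layer (as in VQGAN), clamped and scaled by discriminator_weight.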
if last_layer is not None:
nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
else:
nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
d_weight = d_weight * self.discriminator_weight
return d_weight
def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx,
global_step, last_layer=None, cond=None, split="train", predicted_indices=None):
if not exists(codebook_loss):
codebook_loss = torch.tensor([0.]).to(inputs.device)
rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
if self.perceptual_weight > 0:
p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
rec_loss = rec_loss + self.perceptual_weight * p_loss
else:
p_loss = torch.tensor([0.0])
nll_loss = rec_loss
# nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
nll_loss = torch.mean(nll_loss)
# now the GAN part
if optimizer_idx == 0:
# generator update
if cond is None:
assert not self.disc_conditional
logits_fake = self.discriminator(reconstructions.contiguous())
else:
assert self.disc_conditional
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
g_loss = -torch.mean(logits_fake)
try:
d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
except RuntimeError:
assert not self.training
d_weight = torch.tensor(0.0)
disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean()
log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
"{}/quant_loss".format(split): codebook_loss.detach().mean(),
"{}/nll_loss".format(split): nll_loss.detach().mean(),
"{}/rec_loss".format(split): rec_loss.detach().mean(),
"{}/p_loss".format(split): p_loss.detach().mean(),
"{}/d_weight".format(split): d_weight.detach(),
"{}/disc_factor".format(split): torch.tensor(disc_factor),
"{}/g_loss".format(split): g_loss.detach().mean(),
}
# if predicted_indices is not None:
# assert self.n_classes is not None
# with torch.no_grad():
# perplexity, cluster_usage = measure_perplexity(predicted_indices, self.n_classes)
# log[f"{split}/perplexity"] = perplexity
# log[f"{split}/cluster_usage"] = cluster_usage
return loss, log
if optimizer_idx == 1:
# second pass for discriminator update
if cond is None:
logits_real = self.discriminator(inputs.contiguous().detach())
logits_fake = self.discriminator(reconstructions.contiguous().detach())
else:
logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
"{}/logits_real".format(split): logits_real.detach().mean(),
"{}/logits_fake".format(split): logits_fake.detach().mean()
}
return d_loss, log
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/losses_audio/vqperceptual.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.insert(0, '.') # nopep8
from ldm.modules.losses_audio.vqperceptual import *
class LPAPSWithDiscriminator(nn.Module):
def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0,
disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
disc_loss="hinge"):
super().__init__()
assert disc_loss in ["hinge", "vanilla"]
self.kl_weight = kl_weight
self.pixel_weight = pixelloss_weight
        self.perceptual_loss = LPAPS().eval()  # LPIPS is meant for natural images, while LPAPS is its counterpart for mel-spectrograms
self.perceptual_weight = perceptual_weight
# output log variance
self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init)
self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
n_layers=disc_num_layers,
use_actnorm=use_actnorm,
).apply(weights_init)
self.discriminator_iter_start = disc_start
if disc_loss == "hinge":
self.disc_loss = hinge_d_loss
elif disc_loss == "vanilla":
self.disc_loss = vanilla_d_loss
else:
raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
print(f"LPAPSWithDiscriminator running with {disc_loss} loss.")
self.disc_factor = disc_factor
self.discriminator_weight = disc_weight
self.disc_conditional = disc_conditional
def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
if last_layer is not None:
nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
else:
nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
d_weight = d_weight * self.discriminator_weight
return d_weight
def forward(self, inputs, reconstructions, posteriors, optimizer_idx,
global_step, last_layer=None, cond=None, split="train", weights=None):
rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
if self.perceptual_weight > 0:
p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
# print(f"p_loss {p_loss}")
rec_loss = rec_loss + self.perceptual_weight * p_loss
else:
p_loss = torch.tensor([0.0])
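        # reconstruction NLL with a single learned log-variance: rec_loss / exp(logvar) + logvar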
nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
weighted_nll_loss = nll_loss
if weights is not None:
weighted_nll_loss = weights*nll_loss
weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
kl_loss = posteriors.kl()
kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
# now the GAN part
if optimizer_idx == 0:
# generator update
if cond is None:
assert not self.disc_conditional
logits_fake = self.discriminator(reconstructions.contiguous())
else:
assert self.disc_conditional
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
g_loss = -torch.mean(logits_fake)
try:
d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
except RuntimeError:
assert not self.training
d_weight = torch.tensor(0.0)
disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss
log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
"{}/logvar".format(split): self.logvar.detach(),
"{}/kl_loss".format(split): kl_loss.detach().mean(),
"{}/nll_loss".format(split): nll_loss.detach().mean(),
"{}/rec_loss".format(split): rec_loss.detach().mean(),
"{}/d_weight".format(split): d_weight.detach(),
"{}/disc_factor".format(split): torch.tensor(disc_factor),
"{}/g_loss".format(split): g_loss.detach().mean(),
}
return loss, log
if optimizer_idx == 1:
# second pass for discriminator update
if cond is None:
logits_real = self.discriminator(inputs.contiguous().detach())
logits_fake = self.discriminator(reconstructions.contiguous().detach())
else:
logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
"{}/logits_real".format(split): logits_real.detach().mean(),
"{}/logits_fake".format(split): logits_fake.detach().mean()
}
return d_loss, log
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/losses_audio/contperceptual.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import sys
sys.path.insert(0, '.') # nopep8
from ldm.modules.losses_audio.vqperceptual import *
from ldm.modules.discriminator.multi_window_disc import Discriminator
class LPAPSWithDiscriminator(nn.Module):  # same as contperceptual.py but with an additional MultiWindowDiscriminator
def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0,
disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0,
perceptual_weight=1.0, use_actnorm=False, disc_conditional=False,
disc_loss="hinge"):
super().__init__()
assert disc_loss in ["hinge", "vanilla"]
self.kl_weight = kl_weight
self.pixel_weight = pixelloss_weight
self.perceptual_loss = LPAPS().eval()
self.perceptual_weight = perceptual_weight
# output log variance
self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init)
self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels,
n_layers=disc_num_layers,
use_actnorm=use_actnorm,
).apply(weights_init)
self.discriminator_iter_start = disc_start
if disc_loss == "hinge":
self.disc_loss = hinge_d_loss
elif disc_loss == "vanilla":
self.disc_loss = vanilla_d_loss
else:
raise ValueError(f"Unknown GAN loss '{disc_loss}'.")
print(f"LPAPSWithDiscriminator running with {disc_loss} loss.")
self.disc_factor = disc_factor
self.discriminator_weight = disc_weight
self.disc_conditional = disc_conditional
disc_win_num = 3
mel_disc_hidden_size = 128
self.discriminator_multi = Discriminator(time_lengths=[32, 64, 128][:disc_win_num],
freq_length=80, hidden_size=mel_disc_hidden_size, kernel=(3, 3),
cond_size=0, norm_type="in", reduction="stack")
def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None):
if last_layer is not None:
nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0]
else:
nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0]
g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0]
d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4)
d_weight = torch.clamp(d_weight, 0.0, 1e4).detach()
d_weight = d_weight * self.discriminator_weight
return d_weight
def forward(self, inputs, reconstructions, posteriors, optimizer_idx,
global_step, last_layer=None, cond=None, split="train", weights=None):
rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous())
if self.perceptual_weight > 0:
p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous())
rec_loss = rec_loss + self.perceptual_weight * p_loss
else:
p_loss = torch.tensor([0.0])
nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar
weighted_nll_loss = nll_loss
if weights is not None:
weighted_nll_loss = weights*nll_loss
weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0]
nll_loss = torch.sum(nll_loss) / nll_loss.shape[0]
kl_loss = posteriors.kl()
kl_loss = torch.sum(kl_loss) / kl_loss.shape[0]
# now the GAN part
if optimizer_idx == 0:
# generator update
if cond is None:
assert not self.disc_conditional
logits_fake = self.discriminator(reconstructions.contiguous())
else:
assert self.disc_conditional
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1))
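            # the multi-window discriminator expects (B, T, n_mels): drop the channel dim and swap the freq/time axes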
logits_fake_multi = self.discriminator_multi(reconstructions.contiguous().squeeze(1).transpose(1, 2))
g_loss = -torch.mean(logits_fake)
g_loss_multi = -torch.mean(logits_fake_multi['y'])
try:
d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer)
d_weight_multi = self.calculate_adaptive_weight(nll_loss, g_loss_multi, last_layer=last_layer)
except RuntimeError:
assert not self.training
d_weight = d_weight_multi = torch.tensor(0.0)
disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss + d_weight_multi * disc_factor * g_loss_multi
log = {"{}/total_loss".format(split): loss.clone().detach().mean(),
"{}/logvar".format(split): self.logvar.detach(),
"{}/kl_loss".format(split): kl_loss.detach().mean(),
"{}/nll_loss".format(split): nll_loss.detach().mean(),
"{}/rec_loss".format(split): rec_loss.detach().mean(),
"{}/d_weight".format(split): d_weight.detach(),
"{}/disc_factor".format(split): torch.tensor(disc_factor),
"{}/g_loss".format(split): g_loss.detach().mean(),
"{}/g_loss_multi".format(split): g_loss_multi.detach().mean(),
}
return loss, log
if optimizer_idx == 1:
# second pass for discriminator update
if cond is None:
logits_real = self.discriminator(inputs.contiguous().detach())
logits_fake = self.discriminator(reconstructions.contiguous().detach())
else:
logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1))
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1))
logits_real_multi = self.discriminator_multi(inputs.contiguous().detach().squeeze(1).transpose(1, 2))
logits_fake_multi = self.discriminator_multi(reconstructions.contiguous().detach().squeeze(1).transpose(1, 2))
disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start)
d_loss = disc_factor * self.disc_loss(logits_real, logits_fake)
d_loss_multi = disc_factor * self.disc_loss(logits_real_multi['y'], logits_fake_multi['y'])
log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(),
"{}/disc_loss_multi".format(split): d_loss_multi.clone().detach().mean(),
"{}/logits_real".format(split): logits_real.detach().mean(),
"{}/logits_fake".format(split): logits_fake.detach().mean()
}
return d_loss+d_loss_multi, log
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/losses_audio/contperceptual_dis.py |
import logging
import numpy as np
import scipy
import torch
from sklearn.metrics import average_precision_score, roc_auc_score
logger = logging.getLogger(f'main.{__name__}')
def metrics(targets, outputs, topk=(1, 5)):
"""
Adapted from https://github.com/hche11/VGGSound/blob/master/utils.py
Calculate statistics including mAP, AUC, and d-prime.
Args:
output: 2d tensors, (dataset_size, classes_num) - before softmax
target: 1d tensors, (dataset_size, )
topk: tuple
Returns:
metric_dict: a dict of metrics
"""
metrics_dict = dict()
num_cls = outputs.shape[-1]
# accuracy@k
_, preds = torch.topk(outputs, k=max(topk), dim=1)
correct_for_maxtopk = preds == targets.view(-1, 1).expand_as(preds)
for k in topk:
metrics_dict[f'accuracy_{k}'] = float(correct_for_maxtopk[:, :k].sum() / correct_for_maxtopk.shape[0])
# avg precision, average roc_auc, and dprime
targets = torch.nn.functional.one_hot(targets, num_classes=num_cls)
# ids of the predicted classes (same as softmax)
targets_pred = torch.softmax(outputs, dim=1)
targets = targets.numpy()
targets_pred = targets_pred.numpy()
# one-vs-rest
avg_p = [average_precision_score(targets[:, c], targets_pred[:, c], average=None) for c in range(num_cls)]
try:
roc_aucs = [roc_auc_score(targets[:, c], targets_pred[:, c], average=None) for c in range(num_cls)]
except ValueError:
logger.warning('Weird... Some classes never occured in targets. Do not trust the metrics.')
roc_aucs = np.array([0.5])
avg_p = np.array([0])
metrics_dict['mAP'] = np.mean(avg_p)
metrics_dict['mROCAUC'] = np.mean(roc_aucs)
# Percent point function (ppf) (inverse of cdf — percentiles).
metrics_dict['dprime'] = scipy.stats.norm().ppf(metrics_dict['mROCAUC']) * np.sqrt(2)
return metrics_dict
if __name__ == '__main__':
targets = torch.tensor([3, 3, 1, 2, 1, 0])
outputs = torch.tensor([
[1.2, 1.3, 1.1, 1.5],
[1.3, 1.4, 1.0, 1.1],
[1.5, 1.1, 1.4, 1.3],
[1.0, 1.2, 1.4, 1.5],
[1.2, 1.3, 1.1, 1.1],
[1.2, 1.1, 1.1, 1.1],
]).float()
metrics_dict = metrics(targets, outputs, topk=(1, 3))
print(metrics_dict)
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/losses_audio/vggishish/metrics.py |
import logging
import os
from pathlib import Path
import albumentations
import numpy as np
import torch
from tqdm import tqdm
logger = logging.getLogger(f'main.{__name__}')
class StandardNormalizeAudio(object):
'''
Frequency-wise normalization
'''
def __init__(self, specs_dir, train_ids_path='./data/vggsound_train.txt', cache_path='./data/'):
self.specs_dir = specs_dir
self.train_ids_path = train_ids_path
        # make the stats filename match the specs dir name
self.cache_path = os.path.join(cache_path, f'train_means_stds_{Path(specs_dir).stem}.txt')
logger.info('Assuming that the input stats are calculated using preprocessed spectrograms (log)')
self.train_stats = self.calculate_or_load_stats()
def __call__(self, item):
        # just to generalize the input handling; useful for FID / IS evaluation and for training other stuff
if isinstance(item, dict):
if 'input' in item:
input_key = 'input'
elif 'image' in item:
input_key = 'image'
else:
raise NotImplementedError
item[input_key] = (item[input_key] - self.train_stats['means']) / self.train_stats['stds']
elif isinstance(item, torch.Tensor):
# broadcasts np.ndarray (80, 1) to (1, 80, 1) because item is torch.Tensor (B, 80, T)
item = (item - self.train_stats['means']) / self.train_stats['stds']
else:
raise NotImplementedError
return item
def calculate_or_load_stats(self):
try:
# (F, 2)
train_stats = np.loadtxt(self.cache_path)
means, stds = train_stats.T
logger.info('Trying to load train stats for Standard Normalization of inputs')
except OSError:
logger.info('Could not find the precalculated stats for Standard Normalization. Calculating...')
train_vid_ids = open(self.train_ids_path)
specs_paths = [os.path.join(self.specs_dir, f'{i.rstrip()}_mel.npy') for i in train_vid_ids]
means = [None] * len(specs_paths)
stds = [None] * len(specs_paths)
for i, path in enumerate(tqdm(specs_paths)):
spec = np.load(path)
means[i] = spec.mean(axis=1)
stds[i] = spec.std(axis=1)
# (F) <- (num_files, F)
means = np.array(means).mean(axis=0)
stds = np.array(stds).mean(axis=0)
# saving in two columns
np.savetxt(self.cache_path, np.vstack([means, stds]).T, fmt='%0.8f')
means = means.reshape(-1, 1)
stds = stds.reshape(-1, 1)
return {'means': means, 'stds': stds}
class ToTensor(object):
def __call__(self, item):
item['input'] = torch.from_numpy(item['input']).float()
# if 'target' in item:
item['target'] = torch.tensor(item['target'])
return item
class Crop(object):
def __init__(self, cropped_shape=None, random_crop=False):
self.cropped_shape = cropped_shape
if cropped_shape is not None:
mel_num, spec_len = cropped_shape
if random_crop:
self.cropper = albumentations.RandomCrop
else:
self.cropper = albumentations.CenterCrop
self.preprocessor = albumentations.Compose([self.cropper(mel_num, spec_len)])
else:
self.preprocessor = lambda **kwargs: kwargs
def __call__(self, item):
item['input'] = self.preprocessor(image=item['input'])['image']
return item
if __name__ == '__main__':
cropper = Crop([80, 848])
item = {'input': torch.rand([80, 860])}
outputs = cropper(item)
print(outputs['input'].shape)
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/losses_audio/vggishish/transforms.py |
import os
from torch.utils.data import DataLoader
import torchvision
from tqdm import tqdm
from dataset import VGGSound
import torch
import torch.nn as nn
from metrics import metrics
from omegaconf import OmegaConf
from model import VGGishish
from transforms import Crop, StandardNormalizeAudio, ToTensor
if __name__ == '__main__':
cfg_cli = OmegaConf.from_cli()
print(cfg_cli.config)
cfg_yml = OmegaConf.load(cfg_cli.config)
# the latter arguments are prioritized
cfg = OmegaConf.merge(cfg_yml, cfg_cli)
OmegaConf.set_readonly(cfg, True)
print(OmegaConf.to_yaml(cfg))
# logger = LoggerWithTBoard(cfg)
transforms = [
StandardNormalizeAudio(cfg.mels_path),
ToTensor(),
]
if cfg.cropped_size not in [None, 'None', 'none']:
transforms.append(Crop(cfg.cropped_size))
transforms = torchvision.transforms.transforms.Compose(transforms)
datasets = {
'test': VGGSound('test', cfg.mels_path, transforms),
}
loaders = {
'test': DataLoader(datasets['test'], batch_size=cfg.batch_size,
num_workers=cfg.num_workers, pin_memory=True)
}
device = torch.device(cfg.device if torch.cuda.is_available() else 'cpu')
model = VGGishish(cfg.conv_layers, cfg.use_bn, num_classes=len(datasets['test'].target2label))
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=cfg.learning_rate)
criterion = nn.CrossEntropyLoss()
# loading the best model
folder_name = os.path.split(cfg.config)[0].split('/')[-1]
print(folder_name)
ckpt = torch.load(f'./logs/{folder_name}/vggishish-{folder_name}.pt', map_location='cpu')
model.load_state_dict(ckpt['model'])
    print(f'The model was trained for {ckpt["epoch"]} epochs. Loss: {ckpt["loss"]:.4f}')
# Testing the model
model.eval()
running_loss = 0
preds_from_each_batch = []
targets_from_each_batch = []
for i, batch in enumerate(tqdm(loaders['test'])):
inputs = batch['input'].to(device)
targets = batch['target'].to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
with torch.set_grad_enabled(False):
outputs = model(inputs)
loss = criterion(outputs, targets)
# loss
running_loss += loss.item()
# for metrics calculation later on
preds_from_each_batch += [outputs.detach().cpu()]
targets_from_each_batch += [targets.cpu()]
# logging metrics
preds_from_each_batch = torch.cat(preds_from_each_batch)
targets_from_each_batch = torch.cat(targets_from_each_batch)
test_metrics_dict = metrics(targets_from_each_batch, preds_from_each_batch)
test_metrics_dict['avg_loss'] = running_loss / len(loaders['test'])
test_metrics_dict['param_num'] = sum(p.numel() for p in model.parameters() if p.requires_grad)
# TODO: I have no idea why tboard doesn't keep metrics (hparams) in a tensorboard when
# I run this experiment from cli: `python main.py config=./configs/vggish.yaml`
# while when I run it in vscode debugger the metrics are present in the tboard (weird)
print(test_metrics_dict)
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/losses_audio/vggishish/predict.py |
import logging
import os
import time
from shutil import copytree, ignore_patterns
import torch
from omegaconf import OmegaConf
from torch.utils.tensorboard import SummaryWriter, summary
class LoggerWithTBoard(SummaryWriter):
def __init__(self, cfg):
# current time stamp and experiment log directory
self.start_time = time.strftime('%y-%m-%dT%H-%M-%S', time.localtime())
self.logdir = os.path.join(cfg.logdir, self.start_time)
# init tboard
super().__init__(self.logdir)
# backup the cfg
OmegaConf.save(cfg, os.path.join(self.log_dir, 'cfg.yaml'))
# backup the code state
if cfg.log_code_state:
dest_dir = os.path.join(self.logdir, 'code')
copytree(os.getcwd(), dest_dir, ignore=ignore_patterns(*cfg.patterns_to_ignore))
# init logger which handles printing and logging mostly same things to the log file
self.print_logger = logging.getLogger('main')
self.print_logger.setLevel(logging.INFO)
msgfmt = '[%(levelname)s] %(asctime)s - %(name)s \n %(message)s'
datefmt = '%d %b %Y %H:%M:%S'
formatter = logging.Formatter(msgfmt, datefmt)
# stdout
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
sh.setFormatter(formatter)
self.print_logger.addHandler(sh)
# log file
fh = logging.FileHandler(os.path.join(self.log_dir, 'log.txt'))
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
self.print_logger.addHandler(fh)
self.print_logger.info(f'Saving logs and checkpoints @ {self.logdir}')
def log_param_num(self, model):
param_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
self.print_logger.info(f'The number of parameters: {param_num/1e+6:.3f} mil')
self.add_scalar('num_params', param_num, 0)
return param_num
def log_iter_loss(self, loss, iter, phase):
self.add_scalar(f'{phase}/loss_iter', loss, iter)
def log_epoch_loss(self, loss, epoch, phase):
self.add_scalar(f'{phase}/loss', loss, epoch)
self.print_logger.info(f'{phase} ({epoch}): loss {loss:.3f};')
def log_epoch_metrics(self, metrics_dict, epoch, phase):
for metric, val in metrics_dict.items():
self.add_scalar(f'{phase}/{metric}', val, epoch)
metrics_dict = {k: round(v, 4) for k, v in metrics_dict.items()}
self.print_logger.info(f'{phase} ({epoch}) metrics: {metrics_dict};')
def log_test_metrics(self, metrics_dict, hparams_dict, best_epoch):
allowed_types = (int, float, str, bool, torch.Tensor)
hparams_dict = {k: v for k, v in hparams_dict.items() if isinstance(v, allowed_types)}
metrics_dict = {f'test/{k}': round(v, 4) for k, v in metrics_dict.items()}
exp, ssi, sei = summary.hparams(hparams_dict, metrics_dict)
self.file_writer.add_summary(exp)
self.file_writer.add_summary(ssi)
self.file_writer.add_summary(sei)
for k, v in metrics_dict.items():
self.add_scalar(k, v, best_epoch)
self.print_logger.info(f'test ({best_epoch}) metrics: {metrics_dict};')
def log_best_model(self, model, loss, epoch, optimizer, metrics_dict):
model_name = model.__class__.__name__
self.best_model_path = os.path.join(self.logdir, f'{model_name}-{self.start_time}.pt')
checkpoint = {
'loss': loss,
'metrics': metrics_dict,
'epoch': epoch,
'optimizer': optimizer.state_dict(),
'model': model.state_dict(),
}
torch.save(checkpoint, self.best_model_path)
self.print_logger.info(f'Saved model in {self.best_model_path}')
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/losses_audio/vggishish/logger.py |
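LoggerWithTBoard subclasses SummaryWriter, so a training loop can drive it directly. A minimal usage sketch, assuming logger.py is importable; logdir, log_code_state and patterns_to_ignore are the only config keys the constructor actually reads, the remaining values here are illustrative:

import torch
from omegaconf import OmegaConf
from logger import LoggerWithTBoard

cfg = OmegaConf.create({
    'logdir': './logs',
    'log_code_state': False,   # skip the code-tree backup for this sketch
    'patterns_to_ignore': ['logs', '.git', '__pycache__'],
})

logger = LoggerWithTBoard(cfg)
model = torch.nn.Linear(10, 2)
logger.log_param_num(model)

for epoch in range(2):
    fake_loss = 1.0 / (epoch + 1)
    logger.log_iter_loss(fake_loss, iter=epoch * 100, phase='train')
    logger.log_epoch_loss(fake_loss, epoch, 'train')
    logger.log_epoch_metrics({'accuracy_1': 0.5 + 0.1 * epoch}, epoch, 'valid')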
import torch
import torch.nn as nn
class VGGishish(nn.Module):
def __init__(self, conv_layers, use_bn, num_classes):
'''
Mostly from
https://pytorch.org/vision/0.8/_modules/torchvision/models/vgg.html
'''
super().__init__()
layers = []
in_channels = 1
# a list of channels with 'MP' (maxpool) from config
for v in conv_layers:
if v == 'MP':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1, stride=1)
if use_bn:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
self.features = nn.Sequential(*layers)
self.avgpool = nn.AdaptiveAvgPool2d((5, 10))
self.flatten = nn.Flatten()
self.classifier = nn.Sequential(
nn.Linear(512 * 5 * 10, 4096),
nn.ReLU(True),
nn.Linear(4096, 4096),
nn.ReLU(True),
nn.Linear(4096, num_classes)
)
# weight init
self.reset_parameters()
def forward(self, x):
# add a channel dim for conv2d: (B, 1, F, T) <- (B, F, T)
x = x.unsqueeze(1)
# backbone: (B, 512, 5, 53) <- (B, 1, 80, 848) for the default conv config with four 'MP' layers
x = self.features(x)
# adaptive avg pooling fixes the time dim: (B, 512, 5, 10) <- (B, 512, 5, 53), in case no MP is used at the end of the VGG config
x = self.avgpool(x)
# flatten
x = self.flatten(x)
# classify
x = self.classifier(x)
return x
def reset_parameters(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
if __name__ == '__main__':
num_classes = 309
inputs = torch.rand(3, 80, 848)
conv_layers = [64, 64, 'MP', 128, 128, 'MP', 256, 256, 256, 'MP', 512, 512, 512, 'MP', 512, 512, 512]
# conv_layers = [64, 'MP', 128, 'MP', 256, 256, 'MP', 512, 512, 'MP']
model = VGGishish(conv_layers, use_bn=False, num_classes=num_classes)
outputs = model(inputs)
print(outputs.shape)
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/losses_audio/vggishish/model.py |
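The classifier head above expects 512 * 5 * 10 features: the four 'MP' entries shrink the 80 mel bins to 80 / 2^4 = 5, the last conv block has 512 channels, and AdaptiveAvgPool2d((5, 10)) pins the time axis to 10. A quick shape and parameter-count check, assuming model.py is importable as its own __main__ block suggests:

import torch
from model import VGGishish

conv_layers = [64, 64, 'MP', 128, 128, 'MP', 256, 256, 256, 'MP', 512, 512, 512, 'MP', 512, 512, 512]
model = VGGishish(conv_layers, use_bn=False, num_classes=309)

x = torch.rand(2, 80, 848).unsqueeze(1)   # (B, 1, F, T), as forward() does internally
feats = model.features(x)
print(feats.shape)                        # torch.Size([2, 512, 5, 53]); 80 // 2**4 = 5, 848 // 2**4 = 53
pooled = model.avgpool(feats)
print(pooled.shape)                       # torch.Size([2, 512, 5, 10]) -> flattened to 512 * 5 * 10
param_num = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'{param_num / 1e6:.1f} mil parameters')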
import collections
import csv
import logging
import os
import random
from glob import glob
from pathlib import Path
import numpy as np
import torch
import torchvision
logger = logging.getLogger(f'main.{__name__}')
class VGGSound(torch.utils.data.Dataset):
def __init__(self, split, specs_dir, transforms=None, splits_path='./data', meta_path='./data/vggsound.csv'):
super().__init__()
self.split = split
self.specs_dir = specs_dir
self.transforms = transforms
self.splits_path = splits_path
self.meta_path = meta_path
vggsound_meta = list(csv.reader(open(meta_path), quotechar='"'))
unique_classes = sorted(list(set(row[2] for row in vggsound_meta)))
self.label2target = {label: target for target, label in enumerate(unique_classes)}
self.target2label = {target: label for label, target in self.label2target.items()}
self.video2target = {row[0]: self.label2target[row[2]] for row in vggsound_meta}
split_clip_ids_path = os.path.join(splits_path, f'vggsound_{split}.txt')
if not os.path.exists(split_clip_ids_path):
self.make_split_files()
clip_ids_with_timestamp = open(split_clip_ids_path).read().splitlines()
clip_paths = [os.path.join(specs_dir, v + '_mel.npy') for v in clip_ids_with_timestamp]
self.dataset = clip_paths
# self.dataset = clip_paths[:10000] # overfit one batch
# 'zyTX_1BXKDE_16000_26000'[:11] -> 'zyTX_1BXKDE'
vid_classes = [self.video2target[Path(path).stem[:11]] for path in self.dataset]
class2count = collections.Counter(vid_classes)
self.class_counts = torch.tensor([class2count[cls] for cls in range(len(class2count))])
# self.sample_weights = [len(self.dataset) / class2count[self.video2target[Path(path).stem[:11]]] for path in self.dataset]
def __getitem__(self, idx):
item = {}
spec_path = self.dataset[idx]
# 'zyTX_1BXKDE_16000_26000' -> 'zyTX_1BXKDE'
video_name = Path(spec_path).stem[:11]
item['input'] = np.load(spec_path)
item['input_path'] = spec_path
# if self.split in ['train', 'valid']:
item['target'] = self.video2target[video_name]
item['label'] = self.target2label[item['target']]
if self.transforms is not None:
item = self.transforms(item)
return item
def __len__(self):
return len(self.dataset)
def make_split_files(self):
random.seed(1337)
logger.info(f'The split files do not exist @ {self.splits_path}. Calculating the new ones.')
# The downloaded videos (some went missing on YouTube and are no longer available)
available_vid_paths = sorted(glob(os.path.join(self.specs_dir, '*_mel.npy')))
logger.info(f'The number of clips available after download: {len(available_vid_paths)}')
# original (full) train and test sets
vggsound_meta = list(csv.reader(open(self.meta_path), quotechar='"'))
train_vids = {row[0] for row in vggsound_meta if row[3] == 'train'}
test_vids = {row[0] for row in vggsound_meta if row[3] == 'test'}
logger.info(f'The number of videos in vggsound train set: {len(train_vids)}')
logger.info(f'The number of videos in vggsound test set: {len(test_vids)}')
# class counts in test set. We would like to have the same distribution in valid
unique_classes = sorted(list(set(row[2] for row in vggsound_meta)))
label2target = {label: target for target, label in enumerate(unique_classes)}
video2target = {row[0]: label2target[row[2]] for row in vggsound_meta}
test_vid_classes = [video2target[vid] for vid in test_vids]
test_target2count = collections.Counter(test_vid_classes)
# now given the counts from test set, sample the same count for validation and the rest leave in train
train_vids_wo_valid, valid_vids = set(), set()
for target, label in enumerate(label2target.keys()):
class_train_vids = [vid for vid in train_vids if video2target[vid] == target]
random.shuffle(class_train_vids)
count = test_target2count[target]
valid_vids.update(class_train_vids[:count])
train_vids_wo_valid.update(class_train_vids[count:])
# make file with a list of available test videos (each video should contain timestamps as well)
train_i = valid_i = test_i = 0
with open(os.path.join(self.splits_path, 'vggsound_train.txt'), 'w') as train_file, \
open(os.path.join(self.splits_path, 'vggsound_valid.txt'), 'w') as valid_file, \
open(os.path.join(self.splits_path, 'vggsound_test.txt'), 'w') as test_file:
for path in available_vid_paths:
path = path.replace('_mel.npy', '')
vid_name = Path(path).name
# 'zyTX_1BXKDE_16000_26000'[:11] -> 'zyTX_1BXKDE'
if vid_name[:11] in train_vids_wo_valid:
train_file.write(vid_name + '\n')
train_i += 1
elif vid_name[:11] in valid_vids:
valid_file.write(vid_name + '\n')
valid_i += 1
elif vid_name[:11] in test_vids:
test_file.write(vid_name + '\n')
test_i += 1
else:
raise Exception(f'Clip {vid_name} is neither in train, valid nor test. Strange.')
logger.info(f'Put {train_i} clips to the train set and saved it to ./data/vggsound_train.txt')
logger.info(f'Put {valid_i} clips to the valid set and saved it to ./data/vggsound_valid.txt')
logger.info(f'Put {test_i} clips to the test set and saved it to ./data/vggsound_test.txt')
if __name__ == '__main__':
from transforms import Crop, StandardNormalizeAudio, ToTensor
specs_path = '/home/nvme/data/vggsound/features/melspec_10s_22050hz/'
transforms = torchvision.transforms.transforms.Compose([
StandardNormalizeAudio(specs_path),
ToTensor(),
Crop([80, 848]),
])
datasets = {
'train': VGGSound('train', specs_path, transforms),
'valid': VGGSound('valid', specs_path, transforms),
'test': VGGSound('test', specs_path, transforms),
}
print(datasets['train'][0])
print(datasets['valid'][0])
print(datasets['test'][0])
print(datasets['train'].class_counts)
print(datasets['valid'].class_counts)
print(datasets['test'].class_counts)
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/losses_audio/vggishish/dataset.py |
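The dataset resolves labels purely by filename convention: the first 11 characters of a clip's stem are the YouTube id, which vggsound.csv maps to a class. A small self-contained sketch of that lookup, using a made-up two-row csv in memory instead of the real ./data/vggsound.csv (the ids, labels and start times here are invented):

import csv
import io
from pathlib import Path

# stand-in for ./data/vggsound.csv rows: youtube_id, start, label, split
fake_csv = 'zyTX_1BXKDE,16,"dog barking",train\nAbCdEfGhIjK,4,"playing piano",test\n'
vggsound_meta = list(csv.reader(io.StringIO(fake_csv), quotechar='"'))

unique_classes = sorted({row[2] for row in vggsound_meta})
label2target = {label: target for target, label in enumerate(unique_classes)}
video2target = {row[0]: label2target[row[2]] for row in vggsound_meta}

spec_path = '/features/melspec_10s_22050hz/zyTX_1BXKDE_16000_26000_mel.npy'
video_id = Path(spec_path).stem[:11]       # 'zyTX_1BXKDE_16000_26000_mel' -> 'zyTX_1BXKDE'
print(video_id, video2target[video_id])    # zyTX_1BXKDE 0 ('dog barking')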
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class WeightedCrossEntropy(nn.CrossEntropyLoss):
def __init__(self, weights, **pytorch_ce_loss_args) -> None:
super().__init__(reduction='none', **pytorch_ce_loss_args)
self.weights = weights
def __call__(self, outputs, targets, to_weight=True):
loss = super().__call__(outputs, targets)
if to_weight:
return (loss * self.weights[targets]).sum() / self.weights[targets].sum()
else:
return loss.mean()
if __name__ == '__main__':
x = torch.randn(10, 5)
target = torch.randint(0, 5, (10,))
weights = torch.tensor([1., 2., 3., 4., 5.])
# criterion_weighted = nn.CrossEntropyLoss(weight=weights)
# loss_weighted = criterion_weighted(x, target)
# criterion_weighted_manual = nn.CrossEntropyLoss(reduction='none')
# loss_weighted_manual = criterion_weighted_manual(x, target)
# print(loss_weighted, loss_weighted_manual.mean())
# loss_weighted_manual = (loss_weighted_manual * weights[target]).sum() / weights[target].sum()
# print(loss_weighted, loss_weighted_manual)
# print(torch.allclose(loss_weighted, loss_weighted_manual))
pytorch_weighted = nn.CrossEntropyLoss(weight=weights)
pytorch_unweighted = nn.CrossEntropyLoss()
custom = WeightedCrossEntropy(weights)
assert torch.allclose(pytorch_weighted(x, target), custom(x, target, to_weight=True))
assert torch.allclose(pytorch_unweighted(x, target), custom(x, target, to_weight=False))
print(custom(x, target, to_weight=True), custom(x, target, to_weight=False))
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/losses_audio/vggishish/loss.py |
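In the two training scripts further down, the weights handed to WeightedCrossEntropy are simply the inverse class counts of the training split (the cfg.cls_weights_in_loss=True branch), so rare classes contribute more to the training loss while validation/test use the plain mean. A short sketch of that wiring with made-up counts, assuming loss.py is importable:

import torch
from loss import WeightedCrossEntropy

# pretend class counts as produced by VGGSound.class_counts (5 classes)
class_counts = torch.tensor([1000., 500., 100., 50., 10.])
weights = 1 / class_counts                 # same as the cfg.cls_weights_in_loss=True branch

criterion = WeightedCrossEntropy(weights)
logits = torch.randn(8, 5)
targets = torch.randint(0, 5, (8,))

print(criterion(logits, targets, to_weight=True))    # weighted mean, used in the train phase
print(criterion(logits, targets, to_weight=False))   # plain mean, used for valid/test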
from loss import WeightedCrossEntropy
import random
import numpy as np
import torch
import torchvision
from omegaconf import OmegaConf
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
from dataset import VGGSound
from transforms import Crop, StandardNormalizeAudio, ToTensor
from logger import LoggerWithTBoard
from metrics import metrics
from model import VGGishish
if __name__ == "__main__":
cfg_cli = OmegaConf.from_cli()
cfg_yml = OmegaConf.load(cfg_cli.config)
# the latter arguments are prioritized
cfg = OmegaConf.merge(cfg_yml, cfg_cli)
OmegaConf.set_readonly(cfg, True)
print(OmegaConf.to_yaml(cfg))
logger = LoggerWithTBoard(cfg)
random.seed(cfg.seed)
np.random.seed(cfg.seed)
torch.manual_seed(cfg.seed)
torch.cuda.manual_seed_all(cfg.seed)
# makes iterations faster (in this case 30%) if your inputs are of a fixed size
# https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936/3
torch.backends.cudnn.benchmark = True
transforms = [
StandardNormalizeAudio(cfg.mels_path),
]
if cfg.cropped_size not in [None, 'None', 'none']:
logger.print_logger.info(f'Using cropping {cfg.cropped_size}')
transforms.append(Crop(cfg.cropped_size))
transforms.append(ToTensor())
transforms = torchvision.transforms.transforms.Compose(transforms)
datasets = {
'train': VGGSound('train', cfg.mels_path, transforms),
'valid': VGGSound('valid', cfg.mels_path, transforms),
'test': VGGSound('test', cfg.mels_path, transforms),
}
loaders = {
'train': DataLoader(datasets['train'], batch_size=cfg.batch_size, shuffle=True, drop_last=True,
num_workers=cfg.num_workers, pin_memory=True),
'valid': DataLoader(datasets['valid'], batch_size=cfg.batch_size,
num_workers=cfg.num_workers, pin_memory=True),
'test': DataLoader(datasets['test'], batch_size=cfg.batch_size,
num_workers=cfg.num_workers, pin_memory=True),
}
device = torch.device(cfg.device if torch.cuda.is_available() else 'cpu')
model = VGGishish(cfg.conv_layers, cfg.use_bn, num_classes=len(datasets['train'].target2label))
model = model.to(device)
param_num = logger.log_param_num(model)
if cfg.optimizer == 'adam':
optimizer = torch.optim.Adam(
model.parameters(), lr=cfg.learning_rate, betas=cfg.betas, weight_decay=cfg.weight_decay)
elif cfg.optimizer == 'sgd':
optimizer = torch.optim.SGD(
model.parameters(), lr=cfg.learning_rate, momentum=cfg.momentum, weight_decay=cfg.weight_decay)
else:
raise NotImplementedError
if cfg.cls_weights_in_loss:
weights = 1 / datasets['train'].class_counts
else:
weights = torch.ones(len(datasets['train'].target2label))
criterion = WeightedCrossEntropy(weights.to(device))
# loop over the train and validation multiple times (typical PT boilerplate)
no_change_epochs = 0
best_valid_loss = float('inf')
early_stop_triggered = False
for epoch in range(cfg.num_epochs):
for phase in ['train', 'valid']:
if phase == 'train':
model.train()
else:
model.eval()
running_loss = 0
preds_from_each_batch = []
targets_from_each_batch = []
prog_bar = tqdm(loaders[phase], f'{phase} ({epoch})', ncols=0)
for i, batch in enumerate(prog_bar):
inputs = batch['input'].to(device)
targets = batch['target'].to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
loss = criterion(outputs, targets, to_weight=phase == 'train')
if phase == 'train':
loss.backward()
optimizer.step()
# loss
running_loss += loss.item()
# for metrics calculation later on
preds_from_each_batch += [outputs.detach().cpu()]
targets_from_each_batch += [targets.cpu()]
# iter logging
if i % 50 == 0:
logger.log_iter_loss(loss.item(), epoch*len(loaders[phase])+i, phase)
# tracks loss in the tqdm progress bar
prog_bar.set_postfix(loss=loss.item())
# logging loss
epoch_loss = running_loss / len(loaders[phase])
logger.log_epoch_loss(epoch_loss, epoch, phase)
# logging metrics
preds_from_each_batch = torch.cat(preds_from_each_batch)
targets_from_each_batch = torch.cat(targets_from_each_batch)
metrics_dict = metrics(targets_from_each_batch, preds_from_each_batch)
logger.log_epoch_metrics(metrics_dict, epoch, phase)
# Early stopping
if phase == 'valid':
if epoch_loss < best_valid_loss:
no_change_epochs = 0
best_valid_loss = epoch_loss
logger.log_best_model(model, epoch_loss, epoch, optimizer, metrics_dict)
else:
no_change_epochs += 1
logger.print_logger.info(
f"Valid loss hasn't changed for {no_change_epochs} epoch(s) (patience: {cfg.patience})"
)
if no_change_epochs >= cfg.patience:
early_stop_triggered = True
if early_stop_triggered:
logger.print_logger.info(f'Training is early stopped @ {epoch}')
break
logger.print_logger.info('Finished Training')
# loading the best model
ckpt = torch.load(logger.best_model_path)
model.load_state_dict(ckpt['model'])
logger.print_logger.info(f'Loading the best model from {logger.best_model_path}')
logger.print_logger.info(f'The model was trained for {ckpt["epoch"]} epochs. Loss: {ckpt["loss"]:.4f}')
# Testing the model
model.eval()
running_loss = 0
preds_from_each_batch = []
targets_from_each_batch = []
for i, batch in enumerate(loaders['test']):
inputs = batch['input'].to(device)
targets = batch['target'].to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
with torch.set_grad_enabled(False):
outputs = model(inputs)
loss = criterion(outputs, targets, to_weight=False)
# loss
running_loss += loss.item()
# for metrics calculation later on
preds_from_each_batch += [outputs.detach().cpu()]
targets_from_each_batch += [targets.cpu()]
# logging metrics
preds_from_each_batch = torch.cat(preds_from_each_batch)
targets_from_each_batch = torch.cat(targets_from_each_batch)
test_metrics_dict = metrics(targets_from_each_batch, preds_from_each_batch)
test_metrics_dict['avg_loss'] = running_loss / len(loaders['test'])
test_metrics_dict['param_num'] = param_num
# TODO: for some reason tensorboard doesn't keep the metrics (hparams) when this script is
# run from the cli: `python train_vggishish.py config=./configs/vggish.yaml`,
# while the same run under the vscode debugger logs them just fine
logger.log_test_metrics(test_metrics_dict, dict(cfg), ckpt['epoch'])
logger.print_logger.info('Finished the experiment')
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/losses_audio/vggishish/train_vggishish.py |
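The early-stopping bookkeeping in the loop above (no_change_epochs, best_valid_loss, cfg.patience) is easy to lose in the surrounding boilerplate. Isolated into a toy loop over a hard-coded list of validation losses, the same logic reads:

patience = 3
no_change_epochs = 0
best_valid_loss = float('inf')

for epoch, valid_loss in enumerate([1.0, 0.8, 0.79, 0.81, 0.82, 0.85, 0.9]):
    if valid_loss < best_valid_loss:
        no_change_epochs = 0
        best_valid_loss = valid_loss       # the real script also calls logger.log_best_model(...)
    else:
        no_change_epochs += 1
        print(f"Valid loss hasn't changed for {no_change_epochs} epoch(s) (patience: {patience})")
        if no_change_epochs >= patience:
            print(f'Training is early stopped @ {epoch}')
            break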
import random
import numpy as np
import torch
import torchvision
from omegaconf import OmegaConf
from torch.utils.data.dataloader import DataLoader
from torchvision.models.inception import BasicConv2d, Inception3
from tqdm import tqdm
from dataset import VGGSound
from logger import LoggerWithTBoard
from loss import WeightedCrossEntropy
from metrics import metrics
from transforms import Crop, StandardNormalizeAudio, ToTensor
# TODO: refactor ./evaluation/feature_extractors/melception.py to handle this class as well.
# So far couldn't do it because of the difference in outputs
class Melception(Inception3):
def __init__(self, num_classes, **kwargs):
# inception = Melception(num_classes=309)
super().__init__(num_classes=num_classes, **kwargs)
# the same as https://github.com/pytorch/vision/blob/5339e63148/torchvision/models/inception.py#L95
# but for 1-channel input instead of RGB.
self.Conv2d_1a_3x3 = BasicConv2d(1, 32, kernel_size=3, stride=2)
# also, since the 'height' of the mel spec is only 80 (vs 299 in RGB), we remove all max pools from Inception
self.maxpool1 = torch.nn.Identity()
self.maxpool2 = torch.nn.Identity()
def forward(self, x):
x = x.unsqueeze(1)
return super().forward(x)
def train_inception_scorer(cfg):
logger = LoggerWithTBoard(cfg)
random.seed(cfg.seed)
np.random.seed(cfg.seed)
torch.manual_seed(cfg.seed)
torch.cuda.manual_seed_all(cfg.seed)
# makes iterations faster (in this case 30%) if your inputs are of a fixed size
# https://discuss.pytorch.org/t/what-does-torch-backends-cudnn-benchmark-do/5936/3
torch.backends.cudnn.benchmark = True
meta_path = './data/vggsound.csv'
train_ids_path = './data/vggsound_train.txt'
cache_path = './data/'
splits_path = cache_path
transforms = [
StandardNormalizeAudio(cfg.mels_path, train_ids_path, cache_path),
]
if cfg.cropped_size not in [None, 'None', 'none']:
logger.print_logger.info(f'Using cropping {cfg.cropped_size}')
transforms.append(Crop(cfg.cropped_size))
transforms.append(ToTensor())
transforms = torchvision.transforms.transforms.Compose(transforms)
datasets = {
'train': VGGSound('train', cfg.mels_path, transforms, splits_path, meta_path),
'valid': VGGSound('valid', cfg.mels_path, transforms, splits_path, meta_path),
'test': VGGSound('test', cfg.mels_path, transforms, splits_path, meta_path),
}
loaders = {
'train': DataLoader(datasets['train'], batch_size=cfg.batch_size, shuffle=True, drop_last=True,
num_workers=cfg.num_workers, pin_memory=True),
'valid': DataLoader(datasets['valid'], batch_size=cfg.batch_size,
num_workers=cfg.num_workers, pin_memory=True),
'test': DataLoader(datasets['test'], batch_size=cfg.batch_size,
num_workers=cfg.num_workers, pin_memory=True),
}
device = torch.device(cfg.device if torch.cuda.is_available() else 'cpu')
model = Melception(num_classes=len(datasets['train'].target2label))
model = model.to(device)
param_num = logger.log_param_num(model)
if cfg.optimizer == 'adam':
optimizer = torch.optim.Adam(
model.parameters(), lr=cfg.learning_rate, betas=cfg.betas, weight_decay=cfg.weight_decay)
elif cfg.optimizer == 'sgd':
optimizer = torch.optim.SGD(
model.parameters(), lr=cfg.learning_rate, momentum=cfg.momentum, weight_decay=cfg.weight_decay)
else:
raise NotImplementedError
if cfg.cls_weights_in_loss:
weights = 1 / datasets['train'].class_counts
else:
weights = torch.ones(len(datasets['train'].target2label))
criterion = WeightedCrossEntropy(weights.to(device))
# loop over the train and validation multiple times (typical PT boilerplate)
no_change_epochs = 0
best_valid_loss = float('inf')
early_stop_triggered = False
for epoch in range(cfg.num_epochs):
for phase in ['train', 'valid']:
if phase == 'train':
model.train()
else:
model.eval()
running_loss = 0
preds_from_each_batch = []
targets_from_each_batch = []
prog_bar = tqdm(loaders[phase], f'{phase} ({epoch})', ncols=0)
for i, batch in enumerate(prog_bar):
inputs = batch['input'].to(device)
targets = batch['target'].to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
with torch.set_grad_enabled(phase == 'train'):
# inception v3
if phase == 'train':
outputs, aux_outputs = model(inputs)
loss1 = criterion(outputs, targets, to_weight=True)
loss2 = criterion(aux_outputs, targets, to_weight=True)
# combine the main and the auxiliary Inception losses (0.4 is the usual aux weight)
loss = loss1 + 0.4 * loss2
else:
outputs = model(inputs)
loss = criterion(outputs, targets, to_weight=False)
if phase == 'train':
loss.backward()
optimizer.step()
# loss
running_loss += loss.item()
# for metrics calculation later on
preds_from_each_batch += [outputs.detach().cpu()]
targets_from_each_batch += [targets.cpu()]
# iter logging
if i % 50 == 0:
logger.log_iter_loss(loss.item(), epoch*len(loaders[phase])+i, phase)
# tracks loss in the tqdm progress bar
prog_bar.set_postfix(loss=loss.item())
# logging loss
epoch_loss = running_loss / len(loaders[phase])
logger.log_epoch_loss(epoch_loss, epoch, phase)
# logging metrics
preds_from_each_batch = torch.cat(preds_from_each_batch)
targets_from_each_batch = torch.cat(targets_from_each_batch)
metrics_dict = metrics(targets_from_each_batch, preds_from_each_batch)
logger.log_epoch_metrics(metrics_dict, epoch, phase)
# Early stopping
if phase == 'valid':
if epoch_loss < best_valid_loss:
no_change_epochs = 0
best_valid_loss = epoch_loss
logger.log_best_model(model, epoch_loss, epoch, optimizer, metrics_dict)
else:
no_change_epochs += 1
logger.print_logger.info(
f"Valid loss hasn't changed for {no_change_epochs} epoch(s) (patience: {cfg.patience})"
)
if no_change_epochs >= cfg.patience:
early_stop_triggered = True
if early_stop_triggered:
logger.print_logger.info(f'Training is early stopped @ {epoch}')
break
logger.print_logger.info('Finished Training')
# loading the best model
ckpt = torch.load(logger.best_model_path)
model.load_state_dict(ckpt['model'])
logger.print_logger.info(f'Loading the best model from {logger.best_model_path}')
logger.print_logger.info(f'The model was trained for {ckpt["epoch"]} epochs. Loss: {ckpt["loss"]:.4f}')
# Testing the model
model.eval()
running_loss = 0
preds_from_each_batch = []
targets_from_each_batch = []
for i, batch in enumerate(loaders['test']):
inputs = batch['input'].to(device)
targets = batch['target'].to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
with torch.set_grad_enabled(False):
outputs = model(inputs)
loss = criterion(outputs, targets, to_weight=False)
# loss
running_loss += loss.item()
# for metrics calculation later on
preds_from_each_batch += [outputs.detach().cpu()]
targets_from_each_batch += [targets.cpu()]
# logging metrics
preds_from_each_batch = torch.cat(preds_from_each_batch)
targets_from_each_batch = torch.cat(targets_from_each_batch)
test_metrics_dict = metrics(targets_from_each_batch, preds_from_each_batch)
test_metrics_dict['avg_loss'] = running_loss / len(loaders['test'])
test_metrics_dict['param_num'] = param_num
# TODO: for some reason tensorboard doesn't keep the metrics (hparams) when this script is
# run from the cli: `python train_melception.py config=./configs/vggish.yaml`,
# while the same run under the vscode debugger logs them just fine
logger.log_test_metrics(test_metrics_dict, dict(cfg), ckpt['epoch'])
logger.print_logger.info('Finished the experiment')
if __name__ == '__main__':
# input = torch.rand(16, 1, 80, 848)
# output, aux = inception(input)
# print(output.shape, aux.shape)
# Expected input size: (3, 299, 299) in RGB -> (1, 80, 848) in Mel Spec
# train_inception_scorer()
cfg_cli = OmegaConf.from_cli()
cfg_yml = OmegaConf.load(cfg_cli.config)
# the latter arguments are prioritized
cfg = OmegaConf.merge(cfg_yml, cfg_cli)
OmegaConf.set_readonly(cfg, True)
print(OmegaConf.to_yaml(cfg))
train_inception_scorer(cfg)
| EXA-1-master | exa/models/AudioGPT/text_to_audio/MakeAnAudio/ldm/modules/losses_audio/vggishish/train_melception.py |
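Melception inherits torchvision's Inception3, which in training mode returns both the main and the auxiliary logits; that is why the loop above unpacks two outputs for the train phase and a single tensor for valid/test. A quick shape check of that asymmetry, assuming train_melception.py and its sibling modules (dataset, logger, loss, metrics, transforms) are importable from the same directory; the 80 x 848 input works because the stem max pools were replaced with Identity:

import torch
from train_melception import Melception

model = Melception(num_classes=309)
mels = torch.rand(2, 80, 848)             # a batch of mel spectrograms (B, F, T)

model.train()
outputs, aux_outputs = model(mels)        # InceptionOutputs unpacks into (logits, aux_logits)
print(outputs.shape, aux_outputs.shape)   # torch.Size([2, 309]) torch.Size([2, 309])

model.eval()
with torch.no_grad():
    outputs = model(mels)                 # eval mode returns only the main logits
print(outputs.shape)                      # torch.Size([2, 309])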