python_code (stringlengths 0–4.04M) | repo_name (stringlengths 7–58) | file_path (stringlengths 5–147)
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
assert __name__ == "__main__", "Training script should not be imported!"
#%%
import numpy as np
from pathlib import Path
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to
import model
from model import make_fns, eval_fns
from model import Model
from baseline2_models import load_baseline2
from datagen import SceneToSeqData
from baseline3_models import SceneToSeqTeller
# %%
# scenes_and_scripts_dev = codraw_data.get_scenes_and_scripts('dev')
# transcribe = Transcriber(
# 'baseline3_train.py' if INTERACTIVE else __file__,
# scenes_and_scripts=scenes_and_scripts_dev[::110],
# scenes_description="scenes_and_scripts_dev[::110]")
# %%
models_baseline2 = load_baseline2()
# %%
drawer_lstmaddonly_a = models_baseline2['drawer_lstmaddonly_a']
drawer_lstmaddonly_b = models_baseline2['drawer_lstmaddonly_b']
# %%
data_scene2seq_a = SceneToSeqData('a')
data_scene2seq_b = SceneToSeqData('b')
# %%
def train_teller(split, teller_pair, num_epochs=50, limit=100):
splits_pair = split + 'a', split + 'b'
if split == 'a':
teller = teller_pair[0]
elif split == 'b':
teller = teller_pair[1]
else:
assert False
optimizer = torch.optim.Adam(teller.parameters())
print('perplexity-dev', model.calc_perplexity(teller))
print('perplexity-a', model.calc_perplexity(teller, 'a'))
print('avg-loss-dev', teller.calc_split_loss())
print('avg-loss-a', teller.calc_split_loss('a'))
for epoch in range(num_epochs):
teller.train()
for num, ex in enumerate(teller.datagen.get_examples_batch()):
optimizer.zero_grad()
loss = teller(ex)
loss.backward()
optimizer.step()
print(f'Done epoch {epoch} loss {float(loss)}')
if epoch % 5 == 0:
del ex, loss # clean up memory
print('perplexity-dev', model.calc_perplexity(teller))
print('perplexity-a', model.calc_perplexity(teller, 'a'))
print('avg-loss-dev', teller.calc_split_loss())
print('avg-loss-a', teller.calc_split_loss('a'))
for splits in splits_pair:
sims = eval_fns(make_fns(splits, teller_pair, (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=limit)
print(splits, sims.mean())
# %%
teller_scene2seq_a = SceneToSeqTeller(data_scene2seq_a, prediction_loss_scale=0)
teller_scene2seq_b = SceneToSeqTeller(data_scene2seq_b, prediction_loss_scale=0)
train_teller('a', (teller_scene2seq_a, teller_scene2seq_b))
train_teller('b', (teller_scene2seq_a, teller_scene2seq_b))
# %% scene2seq with intermediate supervision for all clipart ids
teller_scene2seq_aux_a = SceneToSeqTeller(data_scene2seq_a)
teller_scene2seq_aux_b = SceneToSeqTeller(data_scene2seq_b)
train_teller('a', (teller_scene2seq_aux_a, teller_scene2seq_aux_b))
train_teller('b', (teller_scene2seq_aux_a, teller_scene2seq_aux_b))
# %% scene2seq with intermediate supervision only for present cliparts
teller_scene2seq_aux2_a = SceneToSeqTeller(data_scene2seq_a, predict_for_full_library=False, prediction_loss_scale=6.)
teller_scene2seq_aux2_b = SceneToSeqTeller(data_scene2seq_b, predict_for_full_library=False, prediction_loss_scale=6.)
train_teller('a', (teller_scene2seq_aux2_a, teller_scene2seq_aux2_b), num_epochs=40)
train_teller('b', (teller_scene2seq_aux2_a, teller_scene2seq_aux2_b), num_epochs=40)
# %%
scene2seq_specs = dict(
teller_scene2seq_a = teller_scene2seq_a.spec,
teller_scene2seq_b = teller_scene2seq_b.spec,
teller_scene2seq_aux_a = teller_scene2seq_aux_a.spec,
teller_scene2seq_aux_b = teller_scene2seq_aux_b.spec,
teller_scene2seq_aux2_a = teller_scene2seq_aux2_a.spec,
teller_scene2seq_aux2_b = teller_scene2seq_aux2_b.spec,
)
# %%
print()
print()
print("Saving models")
torch.save(scene2seq_specs, Path('models/scene2seq.pt'))
# %%
print()
print("Final evaluation on full dev set (scene2seq)")
for splits in ('aa', 'ab', 'ba', 'bb'):
sims = eval_fns(make_fns(splits, (teller_scene2seq_a, teller_scene2seq_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=None)
print(splits, sims.mean())
print("Final evaluation on full dev set (scene2seq_aux)")
for splits in ('aa', 'ab', 'ba', 'bb'):
sims = eval_fns(make_fns(splits, (teller_scene2seq_aux_a, teller_scene2seq_aux_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=None)
print(splits, sims.mean())
print("Final evaluation on full dev set (scene2seq_aux2)")
for splits in ('aa', 'ab', 'ba', 'bb'):
sims = eval_fns(make_fns(splits, (teller_scene2seq_aux2_a, teller_scene2seq_aux2_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=None)
print(splits, sims.mean())
| codraw-models-master | baseline3_train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to, response_partial
import model
from model import make_fns, eval_fns
from saved_models import load_models, make_pairs
# %%
def print_human(limit=None, split='dev'):
human_sims = np.array([
scene_similarity(human_scene, true_scene)
for true_scene, human_scene in codraw_data.get_truth_and_human_scenes(split)[:limit]
])
print(f"Human scene similarity [{split}]: mean={human_sims.mean():.2f} std={human_sims.std():.2f} median={np.median(human_sims):.2f}")
# %%
def print_pairwise(tellers, drawers, teller_splits='ab', drawer_splits='ab', limit=None, split='dev'):
print(f"Teller \t Drawer \t Scene similarity [{split}]")
for splits_group in [('ab', 'ba'), ('aa', 'bb')]:
for teller_name, teller_pair in tellers:
for drawer_name, drawer_pair in drawers:
for splits in splits_group:
if splits[0] not in teller_splits or splits[1] not in drawer_splits:
continue
sims = eval_fns(make_fns(splits, teller_pair, drawer_pair), limit=limit, split=split)
teller_caption = f"{teller_name}_{splits[0]}"
drawer_caption = f"{drawer_name}_{splits[1]}"
print(f"{teller_caption:17s}\t {drawer_caption:17s}\t {sims.mean():.2f}")
print()
# %%
def print_script(drawers, drawer_splits='ab', limit=None, split='dev'):
print("Drawer evaluations against script")
print(f"Drawer \t Scene similarity [{split}]")
for drawer_name, drawer_pair in drawers:
for drawer_split in drawer_splits:
sims = eval_fns(make_fns(drawer_split, model.scripted_tell, drawer_pair), limit=limit, split=split)
drawer_caption = f"{drawer_name}_{drawer_split}"
print(f"{drawer_caption:17s}\t {sims.mean():.2f}")
# %%
component_evaluator = model.ComponentEvaluator.get()
# %%
def print_components_pairwise(tellers, drawers, teller_splits='ab', drawer_splits='ab', limit=None, split='dev'):
print(f"Component evaluations [{split}]")
print("Teller \t Drawer \t Dir \t Expr(human)\t Pose(human)\t Depth \t xy (sq.)\t x-only \t y-only")
for splits_group in [('ab', 'ba'), ('aa', 'bb')]:
for teller_name, teller_pair in tellers:
for drawer_name, drawer_pair in drawers:
for splits in splits_group:
if splits[0] not in teller_splits or splits[1] not in drawer_splits:
continue
components = component_evaluator.eval_fns(make_fns(splits, teller_pair, drawer_pair), limit=limit, split=split)
teller_caption = f"{teller_name}_{splits[0]}"
drawer_caption = f"{drawer_name}_{splits[1]}"
print(f"{teller_caption:17s}\t {drawer_caption:17s}\t", "\t".join(f"{num: .6f}" for num in components))
print()
def print_components_script(drawers, drawer_splits='ab', limit=None, split='dev'):
print(f"Drawer evaluations against script [{split}]")
print("Drawer \t Dir \t Expr(human)\t Pose(human)\t Depth \t xy (sq.)\t x-only \t y-only")
for drawer_name, drawer_pair in drawers:
for drawer_split in drawer_splits:
components = component_evaluator.eval_fns(make_fns(drawer_split, model.scripted_tell, drawer_pair), limit=limit, split=split)
drawer_caption = f"{drawer_name}_{drawer_split}"
print(f"{drawer_caption:17s}\t", "\t".join(f"{num: .6f}" for num in components))
# %%
def print_eval(
tellers=None, drawers=None,
teller_splits='ab', drawer_splits='ab',
limit=None,
split='dev',
do_all=False,
do_human=False,
do_pairwise=False,
do_script=False,
do_components_pairwise=False,
do_components_script=False,
):
if do_all:
do_human = True
do_pairwise = True
do_script = True
do_components_pairwise = True
do_components_script = True
print()
if do_human:
print_human(limit=limit, split=split)
print()
print()
if do_pairwise:
print_pairwise(tellers, drawers, teller_splits=teller_splits, drawer_splits=drawer_splits, limit=limit, split=split)
print()
print()
if do_script:
print_script(drawers, drawer_splits=drawer_splits, limit=limit, split=split)
print()
print()
if do_components_pairwise:
print_components_pairwise(tellers, drawers, teller_splits=teller_splits, drawer_splits=drawer_splits, limit=limit, split=split)
print()
print()
if do_components_script:
print_components_script(drawers, drawer_splits=drawer_splits, limit=limit, split=split)
print()
print()
# %%
if __name__ == '__main__':
models = load_models()
# %%
if __name__ == '__main__':
tellers = make_pairs(models,
'teller_nn',
# 'teller_pragmaticnn',
'teller_scene2seq',
'teller_scene2seq_aux2',
'teller_rl',
)
drawers_for_script = make_pairs(models,
'drawer_nn',
# 'drawer_bowcanvas2bce',
'drawer_lstmaddonly',
)
drawers_for_pairwise = make_pairs(models,
'drawer_lstmaddonly',
)
limit=None
split='test'
print_eval(limit=limit, split=split, do_human=True)
print_eval(tellers, drawers_for_pairwise, teller_splits='a', drawer_splits='b', limit=limit, split=split, do_pairwise=True)
print_eval(tellers, drawers_for_script, teller_splits='a', drawer_splits='b', limit=limit, split=split, do_script=True)
# %%
# %%
# %%
# %%
# %%
# %%
| codraw-models-master | eval_automatic.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to, response_partial
from baseline1_models import load_baseline1
from baseline2_models import load_baseline2
from baseline3_models import load_baseline3
import model
from model import make_fns, eval_fns
# %%
component_evaluator = model.ComponentEvaluator.get()
# %%
models_baseline1 = load_baseline1()
models_baseline2 = load_baseline2()
models_baseline3 = load_baseline3()
# %%
tellers = [
# ('teller_nn', (models_baseline1['teller_nn_a'], models_baseline1['teller_nn_b'])),
# ('teller_c2seq', (models_baseline1['teller_c2seq_a'], models_baseline1['teller_c2seq_b'])),
# ('teller_pragmaticnn', (models_baseline2['teller_pragmaticnn_a'], models_baseline2['teller_pragmaticnn_b'])),
('teller_scene2seq', (models_baseline3['teller_scene2seq_a'], models_baseline3['teller_scene2seq_b'])),
('teller_scene2seq_aux', (models_baseline3['teller_scene2seq_aux_a'], models_baseline3['teller_scene2seq_aux_b'])),
('teller_scene2seq_aux2', (models_baseline3['teller_scene2seq_aux2_a'], models_baseline3['teller_scene2seq_aux2_b'])),
]
drawers = [
# ('drawer_nn', (models_baseline1['drawer_nn_a'], models_baseline1['drawer_nn_b'])),
# ('drawer_sim', (models_baseline1['drawer_sim_a'], models_baseline1['drawer_sim_b'])),
# ('drawer_bow2c', (models_baseline1['drawer_bow2c_a'], models_baseline1['drawer_bow2c_b'])),
# ('drawer_bow2bce', (models_baseline1['drawer_bow2bce_a'], models_baseline1['drawer_bow2bce_b'])),
# ('drawer_bowcanvas2bce', (models_baseline1['drawer_bowcanvas2bce_a'], models_baseline1['drawer_bowcanvas2bce_b'])),
('drawer_lstmaddonly', (models_baseline2['drawer_lstmaddonly_a'], models_baseline2['drawer_lstmaddonly_b'])),
]
# %%
print()
human_sims = np.array([
scene_similarity(human_scene, true_scene)
for true_scene, human_scene in codraw_data.get_truth_and_human_scenes('dev')
])
print(f"Human scene similarity: mean={human_sims.mean():.6f} std={human_sims.std():.6f} median={np.median(human_sims):.6f}")
# %%
print()
print()
# %%
limit = None
print("Teller \t Drawer \t Scene similarity")
for splits_group in [('ab', 'ba'), ('aa', 'bb')]:
for teller_name, teller_pair in tellers:
for drawer_name, drawer_pair in drawers:
for splits in splits_group:
sims = eval_fns(make_fns(splits, teller_pair, drawer_pair), limit=limit)
teller_caption = f"{teller_name}_{splits[0]}"
drawer_caption = f"{drawer_name}_{splits[1]}"
print(f"{teller_caption:17s}\t {drawer_caption:17s}\t", sims.mean())
print()
# %%
print()
print()
# %%
limit = None
print("Drawer evaluations against script")
print("Drawer \t Scene similarity")
for drawer_name, drawer_pair in drawers:
for split in ('a', 'b'):
sims = eval_fns(make_fns(split, model.scripted_tell, drawer_pair), limit=limit)
drawer_caption = f"{drawer_name}_{split}"
print(f"{drawer_caption:17s}\t", sims.mean())
# %%
print()
print()
# %%
limit = None
print("Teller \t Drawer \t Dir \t Expr(human)\t Pose(human)\t Depth \t xy (sq.)\t x-only \t y-only")
for splits_group in [('ab', 'ba'), ('aa', 'bb')]:
for teller_name, teller_pair in tellers:
for drawer_name, drawer_pair in drawers:
for splits in splits_group:
components = component_evaluator.eval_fns(make_fns(splits, teller_pair, drawer_pair), limit=limit)
teller_caption = f"{teller_name}_{splits[0]}"
drawer_caption = f"{drawer_name}_{splits[1]}"
print(f"{teller_caption:17s}\t {drawer_caption:17s}\t", "\t".join(f"{num: .6f}" for num in components))
print()
# %%
print()
print()
# %%
limit = None
print("Drawer evaluations against script")
print("Drawer \t Dir \t Expr(human)\t Pose(human)\t Depth \t xy (sq.)\t x-only \t y-only")
for drawer_name, drawer_pair in drawers:
for split in ('a', 'b'):
components = component_evaluator.eval_fns(make_fns(split, model.scripted_tell, drawer_pair), limit=limit)
drawer_caption = f"{drawer_name}_{split}"
print(f"{drawer_caption:17s}\t", "\t".join(f"{num: .6f}" for num in components))
| codraw-models-master | baseline3_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Abstract Scene (abs) utilities copied from the original CoDraw codebase
"""
import math
import torch
from torch.autograd import Variable
import math
class AbsUtil:
"""AbsUtil ported from AbsUtil.js"""
# Various variables setting up the appearance of the interface
CANVAS_WIDTH = 500
CANVAS_HEIGHT = 400
NOT_USED = -10000
numClipArts = 58
numTypes = 8
numProps = 6
numClasses = [58,35,3,2,1,1]
Null = 0
def __init__(self, str):
# Each object type has its own prefix; the ordering of the object types affects the
# order in which they are rendered. That is, the "t" type (toys) will be rendered on top
# of the "hb0" (boy) category, assuming they have the same depth.
self.prefix = ['s','p','hb0','hb1','a','c','e','t']
# Total number of clipart for each type
self.typeTotalCt = [8,10,35,35,6,10,7,15]
# Total number of clipart to be randomly selected for each type
# The sum should equal numClipart
self.typeCt = [3,4,5,5,2,3,2,4]
self.str = str
self.obj = self.preprocess(str)
# Preprocess given CSV into 7Val format, which is
# 1. clipartIdx integer [0-57]
# ~~2. clipartType integer [0-7]~~
# 3. clipartSubType integer [0-34]
# 4. depth integer [0-2]
# 5. flip integer [0-1]
# 6. x-position float [1-500]
# 7. y-position float [1-400]
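# Illustrative example (values are made up, not from the dataset): a scene
# string containing a single rendered clipart would preprocess to something like
#   [[23, 0, 1, 0, 152, 260]]
# i.e. one [clipartIdx, clipartSubType, depth, flip, x, y] row per clipart;
# cliparts whose x or y equals NOT_USED are skipped entirely.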
def preprocess(self, str, verbose=False):
idx = 1
val = []
if not str or len(str) < 1:
return None
results = str.split(',')
numClipArts = int(results[0])
for i in range(numClipArts):
v = list()
idx = idx + 1 # png filename
idx = idx + 1 # clip art local index
_clipArtObjectIdx = int(results[idx]); idx = idx + 1
_clipArtTypeIdx = int(results[idx]); idx = idx + 1
# This code was originally used to read the dataset from Python
_clipArtX = int(round(float(results[idx]))); idx = idx + 1
_clipArtY = int(round(float(results[idx]))); idx = idx + 1
# The javascript code, however, used parseInt instead. This has
# slightly different rounding behavior, which can be recreated by
# using the following Python code instead:
# _clipArtX = float(results[idx]); idx = idx + 1
# _clipArtY = float(results[idx]); idx = idx + 1
# _clipArtX = int(math.floor(_clipArtX)) if _clipArtX >= 0 else -int(math.floor(-_clipArtX))
# _clipArtY = int(math.floor(_clipArtY)) if _clipArtY >= 0 else -int(math.floor(-_clipArtY))
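# Illustrative comparison (added for clarity): for a raw coordinate of -2.7,
#   int(round(-2.7)) == -3   (Python rounding used above)
#   parseInt(-2.7)   == -2   (JavaScript truncates toward zero, which the
#                             commented-out floor-based recreation reproduces)
# For positive values the two can also differ, e.g. round(3.7) == 4 vs
# parseInt(3.7) == 3.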
_clipArtZ = int(results[idx]); idx = idx + 1
_clipArtFlip = int(results[idx]); idx = idx + 1
if not verbose and (_clipArtX==AbsUtil.NOT_USED or _clipArtY==AbsUtil.NOT_USED):
continue
v.append(self.getClipArtIdx(_clipArtObjectIdx, _clipArtTypeIdx))
# v.append(_clipArtTypeIdx); # remove this redundant feature
v.append(_clipArtObjectIdx if (_clipArtTypeIdx==2 or _clipArtTypeIdx==3) else 0)
v.append(_clipArtZ)
v.append(_clipArtFlip)
v.append(_clipArtX)
v.append(_clipArtY)
val.append(v)
return val
def asTensor(self):
if self.obj is None:
return None
# notice that position (x & y) is rounded as LongTensor
t = torch.LongTensor(AbsUtil.numClipArts, 6).fill_(AbsUtil.Null)
# clipartIdx & clipartSubType are starting with 1
t[:,:2].add_(-1)
for v in self.obj:
clipartIdx = v[0]
t[clipartIdx].copy_(torch.LongTensor(v))
t[:,:2].add_(1)
return t
def __repr__(self):
return self.obj.__repr__()
def getClipArtIdx(self, clipArtObjectIdx, clipArtTypeIdx):
typeTotalPos = [0,8,18,19,20,26,36,43]
offset = 0 if (clipArtTypeIdx==2 or clipArtTypeIdx==3) else clipArtObjectIdx
return typeTotalPos[clipArtTypeIdx] + offset
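# Worked examples (added for clarity; indices are hypothetical): the global
# index is the type's starting offset plus the within-type index, except for
# the two human types (2 and 3), whose pose/expression variants all collapse
# into a single global slot and are distinguished by the subtype instead.
#   getClipArtIdx(4, 0)  -> 0 + 4  == 4
#   getClipArtIdx(11, 2) -> 18 + 0 == 18   # any variant of human type 2
#   getClipArtIdx(2, 7)  -> 43 + 2 == 45   # last type starts at 43; 43 + 15 == 58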
# Static methods #############################################################
# Sample clipart from idx(abs_d - abs_b)>0
# @param abs_b Tensor(bx58x6)
# @param abs_d Tensor(bx58x6)
# @output Tensor(bx6)
# @output Tensor(bx58)
@staticmethod
def sample_abs_c(abs_b, abs_d):
# using Tensors directly
abs_b = abs_b.data
abs_d = abs_d.data
# bx58
abs_c_mask = (abs_d - abs_b).abs().sum(2)!=0 # updated cliparts
# bx58x6
mask = abs_c_mask.unsqueeze(2).expand_as(abs_d)
# collapsed x 6
abs_c = abs_d[mask.byte()].view(-1, abs_d.size(-1))
return abs_c, abs_c_mask
# Get abs_c mask, if `r_mask` is given, masked over it.
# @param abs_b (long, bx58x6): latest drawn scene before prev teller's message
# @param abs_d (long, bx58x6): latest drawn scene before next teller's message
# @param r_mask (byte, optional, b)
# #output c_mask (byte, b): batch mask whether drawn scene is changed or not
@staticmethod
def get_c_mask(abs_b, abs_d, r_mask=None):
if isinstance(r_mask, Variable):
r_mask = r_mask.data
_, abs_c_mask = AbsUtil.sample_abs_c(abs_b, abs_d) # _, bx58
c_mask = abs_c_mask.sum(1).byte()>0
if r_mask is not None:
c_mask = c_mask.mul(r_mask)
return c_mask
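# Minimal usage sketch (hypothetical tensors, not part of the original module):
#   abs_b = torch.zeros(2, 58, 6, dtype=torch.long)
#   abs_d = abs_b.clone()
#   abs_d[0, 3, 4] = 250                              # move clipart 3 in example 0
#   abs_c, abs_c_mask = AbsUtil.sample_abs_c(abs_b, abs_d)
#   # abs_c has shape (1, 6): only the changed clipart row is kept
#   c_mask = AbsUtil.get_c_mask(abs_b, abs_d)
#   # c_mask is [1, 0]: only the first example's scene changed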
| codraw-models-master | abs_util_orig.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, respond_to, response_partial
from datagen import NearestNeighborData, MessageSimilarityData, BOWtoClipartData, ClipartToSeqData, BOWplusCanvasToMultiData
from model import Model, select_clipart_to_tell, drawer_observe_canvas, make_fns, eval_fns
from model import scripted_tell, scripted_tell_before_peek, scripted_tell_after_peek, draw_nothing
from baseline1_models import load_baseline1
# %%
models = load_baseline1()
# %%
tellers = [
('teller_nn', (models['teller_nn_a'], models['teller_nn_b'])),
('teller_c2seq', (models['teller_c2seq_a'], models['teller_c2seq_b'])),
]
drawers = [
('drawer_nn', (models['drawer_nn_a'], models['drawer_nn_b'])),
('drawer_sim', (models['drawer_sim_a'], models['drawer_sim_b'])),
('drawer_bow2c', (models['drawer_bow2c_a'], models['drawer_bow2c_b'])),
('drawer_bow2bce', (models['drawer_bow2bce_a'], models['drawer_bow2bce_b'])),
('drawer_bowcanvas2bce', (models['drawer_bowcanvas2bce_a'], models['drawer_bowcanvas2bce_b'])),
]
# %%
limit = None
print("Drawer evaluations against script")
for drawer_name, drawer_pair in drawers:
for split in ('a', 'b'):
sims = eval_fns(make_fns(split, scripted_tell, drawer_pair), limit=limit)
print(f"{drawer_name}_{split}", sims.mean())
# %%
limit = None
print("Drawer evaluations against script before peek")
for drawer_name, drawer_pair in drawers:
for split in ('a', 'b'):
sims = eval_fns(make_fns(split, scripted_tell_before_peek, drawer_pair), limit=limit)
print(f"{drawer_name}_{split}", sims.mean())
# %%
limit = None
print("Drawer evaluations against script after peek")
sims = eval_fns(make_fns('', scripted_tell_after_peek, draw_nothing), limit=limit)
print("draw_nothing", sims.mean())
for drawer_name, drawer_pair in drawers:
for split in ('a', 'b'):
sims = eval_fns(make_fns(split, scripted_tell_after_peek, drawer_pair), limit=limit)
print(f"{drawer_name}_{split}", sims.mean())
# %%
limit = None
print("Teller/Drawer pair evaluations")
for teller_name, teller_pair in tellers:
for drawer_name, drawer_pair in drawers:
for splits in ('aa', 'ab', 'ba', 'bb'):
sims = eval_fns(make_fns(splits, teller_pair, drawer_pair), limit=limit)
print(f"{teller_name}_{splits[0]} {drawer_name}_{splits[1]}", sims.mean())
| codraw-models-master | baseline1_eval.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
try:
get_ipython()
INTERACTIVE=True
except:
INTERACTIVE=False
def try_magic(*args, **kwargs):
if not INTERACTIVE:
return
return get_ipython().magic(*args, **kwargs)
def try_cd(loc):
if not INTERACTIVE:
return
return get_ipython().magic(f'%cd {loc}')
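# Minimal usage sketch (hypothetical magic shown only for illustration): these
# helpers let the same file run both as a notebook cell and as a plain script.
#   from interactivity import INTERACTIVE, try_magic, try_cd
#   try_magic('%matplotlib inline')   # silently skipped outside IPython
#   try_cd('~/dev/drawmodel/nkcodraw')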
| codraw-models-master | interactivity.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Scene-level nearest-neighbor teller
"""
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
import numpy as np
from pathlib import Path
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to
import model
from model import make_fns, eval_fns
from model import Model
from baseline2_models import load_baseline2
# %%
scenes_and_scripts_dev = codraw_data.get_scenes_and_scripts('dev')
transcribe = Transcriber(
'exp28_scenenn.py' if INTERACTIVE else __file__,
scenes_and_scripts=scenes_and_scripts_dev[::110],
scenes_description="scenes_and_scripts_dev[::110]")
# %%
models_baseline2 = load_baseline2()
# %%
drawer_lstmaddonly_a = models_baseline2['drawer_lstmaddonly_a']
drawer_lstmaddonly_b = models_baseline2['drawer_lstmaddonly_b']
# %%
from datagen import Datagen
class SceneNearestNeighborData(Datagen):
def init_full(self):
self.build_dicts()
def init_from_spec(self):
self.build_dicts()
def build_dicts(self):
self.scene_to_msgs = {}
# calculate events
events = codraw_data.get_contextual_place_many(self.split)
scene = None
msgs = None
it = iter(events)
for event in it:
if isinstance(event, codraw_data.ObserveTruth):
if scene is not None and msgs is not None:
self.scene_to_msgs[tuple(scene)] = msgs
scene = event.scene
msgs = []
elif isinstance(event, codraw_data.TellGroup):
msgs.append(event.msg)
if scene is not None and msgs is not None:
self.scene_to_msgs[tuple(scene)] = msgs
# %%
class SceneNearestNeighborTeller(Model):
datagen_cls = SceneNearestNeighborData
def prepare(self, episode):
scene = episode.get_last(codraw_data.ObserveTruth).scene
best_similarity = -1
best_msgs = []
best_scene_tuple = None
for cand_scene_tuple in self.datagen.scene_to_msgs:
cand_sim = scene_similarity(cand_scene_tuple, scene)
if cand_sim > best_similarity:
best_similarity = cand_sim
best_msgs = self.datagen.scene_to_msgs[cand_scene_tuple]
best_scene_tuple = cand_scene_tuple
# display(AbstractScene(scene))
# display(AbstractScene(best_scene_tuple))
# display(best_similarity)
episode.to_tell = best_msgs[::] # make a copy!
@respond_to(codraw_data.ObserveTruth)
@respond_to(codraw_data.ReplyGroup)
def tell(self, episode):
if not hasattr(episode, 'to_tell'):
self.prepare(episode)
if episode.to_tell:
msg = episode.to_tell.pop(0)
episode.append(codraw_data.TellGroup(msg))
def get_action_fns(self):
return [self.tell]
# %%
data_scenenn_a = SceneNearestNeighborData('a')
data_scenenn_b = SceneNearestNeighborData('b')
# %%
teller_scenenn_a = SceneNearestNeighborTeller(data_scenenn_a)
teller_scenenn_b = SceneNearestNeighborTeller(data_scenenn_b)
# %%
# Episode.run(codraw_data.get_scenes('dev')[0], make_fns('aa', (teller_scenenn_a, teller_scenenn_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b))).display()
# %%
# %%
# %%
print()
print()
print("Final evaluation on full dev set")
# %%
for splits in ('aa', 'ab', 'ba', 'bb'):
sims = eval_fns(make_fns(splits, (teller_scenenn_a, teller_scenenn_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=None)
print(splits, sims.mean())
# aa 1.3095491909624886
# ab 1.3115692170881366
# nohier aa 2.229799264350204
# nohier ab 2.255167911899865
# %%
for splits in ('ba', 'bb'):
sims = eval_fns(make_fns(splits, (teller_scenenn_a, teller_scenenn_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=None)
print(splits, sims.mean())
# %%
transcribe("exp28_scenenn",
aa=make_fns('aa', (teller_scenenn_a, teller_scenenn_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b)),
ab=make_fns('ab', (teller_scenenn_a, teller_scenenn_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b)),
)
# %%
# hieraddonlyseq = dict(
# drawer_hieraddonlyseq_a = drawer_hieraddonlyseq_a.spec,
# drawer_hieraddonlyseq_b = drawer_hieraddonlyseq_b.spec,
# )
#%%
# torch.save(hieraddonlyseq, Path('models/hieraddonlyseq.pt'))
# %%
# %%
# %%
# %%
| codraw-models-master | exp28_scenenn.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#%%
import numpy as np
from pathlib import Path
import editdistance
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available, torch_load
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, respond_to, response_partial
from datagen import NearestNeighborData, MessageSimilarityData, BOWtoClipartData, ClipartToSeqData, BOWplusCanvasToMultiData
from model import Model, select_clipart_to_tell, drawer_observe_canvas, make_fns, eval_fns, scripted_tell
# %%
class NearestNeighborTeller(Model):
datagen_cls = NearestNeighborData
@respond_to(codraw_data.SelectClipart)
def tell(self, episode):
clipart = episode.get_last(codraw_data.SelectClipart).clipart
best_similarity = -1
best_msg = ""
for cand_clipart in self.datagen.clipart_to_msg:
cand_sim = clipart_similarity(cand_clipart, clipart)
if cand_sim > best_similarity:
best_similarity = cand_sim
best_msg = self.datagen.clipart_to_msg[cand_clipart]
episode.append(codraw_data.TellGroup(best_msg))
def get_action_fns(self):
return [select_clipart_to_tell, self.tell]
#%%
class CharNeighborDrawer(Model):
datagen_cls = NearestNeighborData
@respond_to(codraw_data.TellGroup)
def draw(self, episode):
msg = episode.get_last(codraw_data.TellGroup).msg
best_distance = float('inf')
best_clipart = None
for cand_msg in self.datagen.msg_to_clipart:
cand_dist = editdistance.eval(cand_msg, msg)
if cand_dist < best_distance:
best_distance = cand_dist
best_clipart = self.datagen.msg_to_clipart[cand_msg]
episode.append(codraw_data.DrawClipart(best_clipart))
episode.append(codraw_data.ReplyGroup("ok"))
def get_action_fns(self):
return [self.draw]
#%%
class BOWNeighborDrawer(Model, torch.nn.Module):
datagen_cls = MessageSimilarityData
def init_full(self, d_embeddings=512):
self.d_embeddings = d_embeddings
self.word_embs = torch.nn.EmbeddingBag(len(self.datagen.vocabulary_dict), d_embeddings)
self.msg_vecs = []
self.msg_vecs_cliparts = []
self.null_clipart = None
def post_init_from_spec(self):
self.prepare_for_inference()
def get_spec(self):
return dict(d_embeddings=self.d_embeddings)
def forward(self, example_batch):
bow_feats = self.word_embs(example_batch['words'], example_batch['offsets']).reshape(-1,21,self.d_embeddings)
# assert np.isfinite(bow_feats.data.numpy()).all()
bow_feats_src = bow_feats[:,0,:]
bow_feats_tgt = bow_feats[:,1:,:]
similarity_scores = torch.bmm(bow_feats_tgt, bow_feats_src[:,:,None])[:,:,0]
loss = F.cross_entropy(similarity_scores, torch.zeros(similarity_scores.shape[0], dtype=torch.long, device=cuda_if_available))
return loss
def vec_for_msg(self, msg):
if msg == "":
return None
words = [self.datagen.vocabulary_dict.get(word, None) for word in msg.split()]
words = [word for word in words if word is not None]
if not words:
return None
return self.word_embs(torch.tensor([words], dtype=torch.long, device=self.word_embs.weight.device))[0,:].cpu().detach().numpy()
def prepare_for_inference(self):
self.msg_vecs = []
self.msg_vecs_cliparts = []
# sorting is important for deterministic serialization
for msg in sorted(self.datagen.msg_to_clipart.keys()):
clipart = self.datagen.msg_to_clipart[msg]
vec = self.vec_for_msg(msg)
if vec is not None:
self.msg_vecs.append(vec)
self.msg_vecs_cliparts.append(clipart)
else:
self.null_clipart = clipart
if self.null_clipart is None:
self.null_clipart = self.msg_vecs_cliparts[0]
self.msg_vecs = np.array(self.msg_vecs).T
self.eval()
@respond_to(codraw_data.TellGroup)
def draw(self, episode):
msg = episode.get_last(codraw_data.TellGroup).msg
vec = self.vec_for_msg(msg)
if vec is not None:
best_clipart = self.msg_vecs_cliparts[np.argmax(vec @ self.msg_vecs)]
else:
best_clipart = self.null_clipart
episode.append(codraw_data.DrawClipart(best_clipart))
episode.append(codraw_data.ReplyGroup("ok"))
def get_action_fns(self):
return [self.draw]
#%%
class BOWtoClipartDrawer(Model, torch.nn.Module):
datagen_cls = BOWtoClipartData
NUM_INDEX = 58
NUM_SUBTYPES = 35
NUM_DEPTH = 3
NUM_FLIP = 2
NUM_CATEGORICAL = 35 + 3 + 2
NUM_NUMERICAL = 2 # x, y
NUM_ALL = NUM_CATEGORICAL + NUM_NUMERICAL
def init_full(self, d_embeddings=512, d_hidden=1024):
self.d_embeddings = d_embeddings
self.d_hidden = d_hidden
self.word_embs = torch.nn.EmbeddingBag(len(self.datagen.vocabulary_dict), d_embeddings)
# Sigmoid is used to prevent drawing cliparts far off the canvas
self.sigmoid_coeff = 2.
# Scaling coefficient so that the sigmoid doesn't always saturate
self.vals_coeff = 1. / 5.
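# Rough numbers (added for illustration): a raw coordinate score v is mapped
# to 2 * sigmoid(v / 5), e.g. v = 0 -> 1.0 and v = +-5 -> ~1.46 / ~0.54, so
# predictions stay inside (0, 2) instead of drifting far off-canvas, while the
# 1/5 factor keeps typical scores out of the sigmoid's saturated tails.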
d_out = self.NUM_INDEX * (self.NUM_ALL + 1)
self.lang_to_clipart = nn.Sequential(
nn.Linear(d_embeddings, d_hidden),
nn.Dropout(0.4),
nn.ReLU(),
nn.Linear(d_hidden, d_out),
)
self.to(cuda_if_available)
def get_spec(self):
return dict(d_embeddings=self.d_embeddings, d_hidden=self.d_hidden)
def forward(self, example_batch):
bow_feats = self.word_embs(example_batch['msg_idxs'], example_batch['offsets'])
clipart_scores = self.lang_to_clipart(bow_feats).reshape(-1, self.NUM_INDEX, (self.NUM_ALL + 1))
correct_index = example_batch['clipart_index']
logits_index = clipart_scores[:,:,0]
correct_scores = clipart_scores[torch.arange(correct_index.shape[0], dtype=torch.long, device=cuda_if_available), correct_index][:,1:]
(logits_subtype, logits_depth, logits_flip, vals_numerical) = torch.split(correct_scores, [self.NUM_SUBTYPES, self.NUM_DEPTH, self.NUM_FLIP, self.NUM_NUMERICAL], dim=1)
vals_numerical = self.sigmoid_coeff * F.sigmoid(self.vals_coeff * vals_numerical)
correct_categorical = example_batch['clipart_categorical']
correct_numerical = example_batch['clipart_numerical']
loss = ( F.cross_entropy(logits_index, correct_index)
+ F.cross_entropy(logits_subtype, correct_categorical[:,0])
+ F.cross_entropy(logits_depth, correct_categorical[:,1])
+ F.cross_entropy(logits_flip, correct_categorical[:,2])
+ F.mse_loss(vals_numerical, correct_numerical)
)
return loss
@respond_to(codraw_data.TellGroup)
def draw(self, episode):
msg = episode.get_last(codraw_data.TellGroup).msg
words = [self.datagen.vocabulary_dict.get(word, None) for word in msg.split()]
words = [word for word in words if word is not None]
if not words:
# XXX(nikita): this is using DrawGroup, while normally DrawClipart is used
episode.append(codraw_data.DrawGroup([]))
episode.append(codraw_data.ReplyGroup("ok"))
return
msg_idxs = torch.tensor(words).to(cuda_if_available)
bow_feats = self.word_embs(msg_idxs[None,:])
clipart_scores = self.lang_to_clipart(bow_feats).reshape(-1, self.NUM_INDEX, (self.NUM_ALL + 1))[0,:,:]
best_idx = int(clipart_scores[:,0].argmax())
(logits_subtype, logits_depth, logits_flip, vals_numerical) = torch.split(clipart_scores[best_idx,1:], [self.NUM_SUBTYPES, self.NUM_DEPTH, self.NUM_FLIP, self.NUM_NUMERICAL])
vals_numerical = self.sigmoid_coeff * F.sigmoid(self.vals_coeff * vals_numerical)
nx, ny = vals_numerical.cpu().detach().numpy()
clipart = Clipart(best_idx, int(logits_subtype.argmax()), int(logits_depth.argmax()), int(logits_flip.argmax()), normed_x=nx, normed_y=ny)
episode.append(codraw_data.DrawClipart(clipart))
episode.append(codraw_data.ReplyGroup("ok"))
def get_action_fns(self):
return [self.draw]
#%%
class ClipartToSeqTeller(Model, torch.nn.Module):
datagen_cls = ClipartToSeqData
def init_full(self, d_word_emb=256, d_clipart_binary=256, d_clipart_numerical=256, d_clipart_hidden=1024, d_hidden=1024):
self._args = dict(
d_word_emb=d_word_emb,
d_clipart_binary=d_clipart_binary,
d_clipart_numerical=d_clipart_numerical,
d_clipart_hidden=d_clipart_hidden,
d_hidden=d_hidden)
self.word_embs = nn.Embedding(len(self.datagen.vocabulary_dict), d_word_emb)
self.binary_feature_embs = nn.Linear(self.datagen.NUM_BINARY, d_clipart_binary, bias=False)
self.numerical_transform = nn.Sequential(
nn.Linear(self.datagen.NUM_NUMERICAL, d_clipart_numerical),
nn.ReLU(),
)
self.clipart_transform = nn.Sequential(
nn.Linear(d_clipart_numerical + d_clipart_binary, d_clipart_hidden),
nn.ReLU(),
nn.Linear(d_clipart_hidden, d_hidden),
)
self.lstm = nn.LSTM(d_word_emb, d_hidden, num_layers=2)
self.word_project = nn.Linear(d_hidden, len(self.datagen.vocabulary_dict))
self.to(cuda_if_available)
def get_spec(self):
return self._args
def forward(self, example_batch):
binary_feats = self.binary_feature_embs(example_batch['clipart_binary'])
numerical_feats = self.numerical_transform(example_batch['clipart_numerical'])
clipart_feats = self.clipart_transform(torch.cat([binary_feats, numerical_feats], -1))
msg_embedded = nn.utils.rnn.PackedSequence(self.word_embs(example_batch['msg_in'].data), example_batch['msg_in'].batch_sizes)
initial_state = torch.stack([clipart_feats] * self.lstm.num_layers)
lstm_out, _ = self.lstm(msg_embedded, (initial_state, initial_state))
word_logits = self.word_project(lstm_out.data)
per_word_losses = nn.utils.rnn.PackedSequence(F.cross_entropy(word_logits, example_batch['msg_out'].data, reduce=False), example_batch['msg_out'].batch_sizes)
per_example_losses = nn.utils.rnn.pad_packed_sequence(per_word_losses)[0].sum(-1)
loss = per_example_losses.mean()
return loss
@respond_to(codraw_data.SelectClipart)
def tell(self, episode):
clipart = episode.get_last(codraw_data.SelectClipart).clipart
x = clipart.normed_x
y = clipart.normed_y
clipart_numerical = torch.tensor([x, y], dtype=torch.float)
clipart_binary = torch.zeros(self.datagen.NUM_BINARY)
for val, offset in zip([clipart.idx, clipart.subtype, clipart.depth, clipart.flip], self.datagen.BINARY_OFFSETS):
clipart_binary[val + offset] = 1.
binary_feats = self.binary_feature_embs(clipart_binary[None,:].to(cuda_if_available))
numerical_feats = self.numerical_transform(clipart_numerical[None,:].to(cuda_if_available))
clipart_feats = self.clipart_transform(torch.cat([binary_feats, numerical_feats], -1))
token_idxs = [self.datagen.vocabulary_dict['<S>']]
# lstm_state = (F.tanh(clipart_feats[None,:,:]), clipart_feats[None,:,:])
lstm_state = torch.stack([clipart_feats] * self.lstm.num_layers)
lstm_state = (lstm_state, lstm_state)
for _ in range(200):
token_emb = self.word_embs(torch.tensor(token_idxs[-1], dtype=torch.long).to(cuda_if_available))[None,None,:]
lstm_out, lstm_state = self.lstm(token_emb, lstm_state)
next_token = int(self.word_project(lstm_out[0,0,:]).argmax())
token_idxs.append(next_token)
if next_token == self.datagen.vocabulary_dict['</S>']:
break
msg = " ".join([self.datagen.vocabulary[i] for i in token_idxs[1:-1]])
episode.append(codraw_data.TellGroup(msg))
def get_action_fns(self):
return [select_clipart_to_tell, self.tell]
#%%
class BOWtoMultiBCEDrawer(Model, torch.nn.Module):
datagen_cls = BOWplusCanvasToMultiData
def init_full(self, d_embeddings=512, d_hidden=1024):
self._args = dict(
d_embeddings=d_embeddings,
d_hidden=d_hidden,
)
self.d_embeddings = d_embeddings
self.word_embs = torch.nn.EmbeddingBag(len(self.datagen.vocabulary_dict), d_embeddings)
# Sigmoid is used to prevent drawing cliparts far off the canvas
self.sigmoid_coeff = 2.
# Scaling coefficient so that the sigmoid doesn't always saturate
self.vals_coeff = 1. / 5.
dg = self.datagen
d_out = dg.NUM_INDEX * (dg.NUM_ALL + 1)
self.lang_to_clipart = nn.Sequential(
nn.Linear(d_embeddings, d_hidden),
# nn.Dropout(0.4),
nn.ReLU(),
nn.Linear(d_hidden, d_out),
)
self.to(cuda_if_available)
def get_spec(self):
return self._args
def forward(self, example_batch):
dg = self.datagen
bow_feats = self.word_embs(example_batch['msg_idxs'], example_batch['offsets'])
assert np.isfinite(bow_feats.cpu().detach().numpy()).all()
clipart_scores = self.lang_to_clipart(bow_feats).view(-1, dg.NUM_INDEX, dg.NUM_ALL + 1)
clipart_idx_scores = clipart_scores[:,:,0]
idx_losses = F.binary_cross_entropy_with_logits(clipart_idx_scores, example_batch['clipart_chosen_mask'].to(torch.float), reduce=False)
# idx_losses = torch.where(example_batch['clipart_chosen_mask'], 3. * idx_losses, idx_losses)
per_example_idx_loss = idx_losses.sum(1)
flat_scores = clipart_scores[:,:,1:].view((-1, dg.NUM_ALL))
(logits_subtype, logits_depth, logits_flip, vals_numerical) = torch.split(flat_scores, [dg.NUM_SUBTYPES, dg.NUM_DEPTH, dg.NUM_FLIP, dg.NUM_NUMERICAL], dim=1)
vals_numerical = self.sigmoid_coeff * F.sigmoid(self.vals_coeff * vals_numerical)
correct_categorical = example_batch['clipart_categorical']
correct_numerical = example_batch['clipart_numerical']
subtype_losses = F.cross_entropy(logits_subtype, correct_categorical[:,:,0].view((-1,)), reduce=False).view_as(correct_categorical[:,:,0])
depth_losses = F.cross_entropy(logits_depth, correct_categorical[:,:,1].view((-1,)), reduce=False).view_as(correct_categorical[:,:,1])
flip_losses = F.cross_entropy(logits_flip, correct_categorical[:,:,2].view((-1,)), reduce=False).view_as(correct_categorical[:,:,2])
vals_losses = F.mse_loss(vals_numerical, correct_numerical.view((-1, dg.NUM_NUMERICAL)), reduce=False).view_as(correct_numerical).sum(-1)
all_losses = torch.stack([subtype_losses, depth_losses, flip_losses, vals_losses], -1).sum(-1)
per_example_loss = torch.where(example_batch['clipart_chosen_mask'], all_losses, all_losses.new_zeros(1)).sum(-1)
loss = per_example_idx_loss.mean() + per_example_loss.mean()
return loss
@respond_to(codraw_data.TellGroup)
def draw(self, episode):
dg = self.datagen
msg = episode.get_last(codraw_data.TellGroup).msg
# assert msg != ""
words = [self.datagen.vocabulary_dict.get(word, None) for word in msg.split()]
words = [word for word in words if word is not None]
if not words:
episode.append(codraw_data.DrawGroup([]))
episode.append(codraw_data.ReplyGroup("ok"))
return
msg_idxs = torch.tensor(words).to(cuda_if_available)
bow_feats = self.word_embs(msg_idxs[None,:])
assert np.isfinite(bow_feats.cpu().detach().numpy()).all()
clipart_scores = self.lang_to_clipart(bow_feats).view(-1, dg.NUM_INDEX, (dg.NUM_ALL + 1))
flat_scores = clipart_scores[:,:,1:].view((-1, dg.NUM_ALL))
(logits_subtype, logits_depth, logits_flip, vals_numerical) = torch.split(flat_scores, [dg.NUM_SUBTYPES, dg.NUM_DEPTH, dg.NUM_FLIP, dg.NUM_NUMERICAL], dim=1)
vals_numerical = self.sigmoid_coeff * F.sigmoid(self.vals_coeff * vals_numerical)
vals_numerical = vals_numerical.cpu().detach().numpy()
clipart_idx_scores = clipart_scores[0,:,0].cpu().detach().numpy()
cliparts = []
for idx in np.where(clipart_idx_scores > 0)[0]:
nx, ny = vals_numerical[idx,:]
clipart = Clipart(idx, int(logits_subtype[idx,:].argmax()), int(logits_depth[idx,:].argmax()), int(logits_flip[idx,:].argmax()), normed_x=nx, normed_y=ny)
cliparts.append(clipart)
episode.append(codraw_data.DrawGroup(cliparts))
episode.append(codraw_data.ReplyGroup("ok"))
def get_action_fns(self):
return [self.draw]
# %%
class BOWplusCanvasDrawer(Model, torch.nn.Module):
datagen_cls = BOWplusCanvasToMultiData
def init_full(self, d_embeddings=512, d_hidden=512):
self._args = dict(
d_embeddings=d_embeddings,
d_hidden=d_hidden,
)
self.d_embeddings = d_embeddings
self.word_embs = torch.nn.EmbeddingBag(len(self.datagen.vocabulary_dict), d_embeddings)
# Helps overcome class imbalance (most cliparts are not drawn most of
# the time)
self.positive_scaling_coeff = 3.
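# (Illustration, not from the original comment: with 58 candidate cliparts and
# only a few drawn per message, the index BCE loss is dominated by "not drawn"
# targets; torch.where in forward() multiplies the loss of the drawn entries
# by this factor so they are not washed out.)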
# Sigmoid is used to prevent drawing cliparts far off the canvas
self.sigmoid_coeff = 2.
# Scaling coefficient so that the sigmoid doesn't always saturate
self.vals_coeff = 1. / 5.
dg = self.datagen
self.lang_to_hidden = nn.Linear(d_embeddings, d_hidden)
self.canvas_binary_to_hidden = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(dg.NUM_BINARY, d_hidden, bias=False),
)
self.canvas_numerical_to_hidden = nn.Sequential(
nn.Linear(dg.NUM_INDEX * dg.NUM_NUMERICAL, d_hidden, bias=False),
)
d_out = dg.NUM_INDEX * (dg.NUM_ALL + 1)
self.hidden_to_clipart = nn.Sequential(
nn.Dropout(0.4),
nn.ReLU(),
nn.Linear(d_hidden, d_out),
)
self.to(cuda_if_available)
def forward(self, example_batch):
dg = self.datagen
bow_feats = self.word_embs(example_batch['msg_idxs'], example_batch['offsets'])
assert np.isfinite(bow_feats.cpu().detach().numpy()).all()
hidden_feats = (
self.lang_to_hidden(bow_feats)
+ self.canvas_binary_to_hidden(example_batch['canvas_binary'].float())
+ self.canvas_numerical_to_hidden(example_batch['canvas_numerical'])
)
clipart_scores = self.hidden_to_clipart(hidden_feats).view(-1, dg.NUM_INDEX, dg.NUM_ALL + 1)
clipart_idx_scores = clipart_scores[:,:,0]
idx_losses = F.binary_cross_entropy_with_logits(clipart_idx_scores, example_batch['clipart_chosen_mask'].to(torch.float), reduce=False)
idx_losses = torch.where(example_batch['clipart_chosen_mask'], self.positive_scaling_coeff * idx_losses, idx_losses)
per_example_idx_loss = idx_losses.sum(1)
flat_scores = clipart_scores[:,:,1:].view((-1, dg.NUM_ALL))
(logits_subtype, logits_depth, logits_flip, vals_numerical) = torch.split(flat_scores, [dg.NUM_SUBTYPES, dg.NUM_DEPTH, dg.NUM_FLIP, dg.NUM_NUMERICAL], dim=1)
vals_numerical = self.sigmoid_coeff * F.sigmoid(self.vals_coeff * vals_numerical)
correct_categorical = example_batch['clipart_categorical']
correct_numerical = example_batch['clipart_numerical']
subtype_losses = F.cross_entropy(logits_subtype, correct_categorical[:,:,0].view((-1,)), reduce=False).view_as(correct_categorical[:,:,0])
depth_losses = F.cross_entropy(logits_depth, correct_categorical[:,:,1].view((-1,)), reduce=False).view_as(correct_categorical[:,:,1])
flip_losses = F.cross_entropy(logits_flip, correct_categorical[:,:,2].view((-1,)), reduce=False).view_as(correct_categorical[:,:,2])
vals_losses = F.mse_loss(vals_numerical, correct_numerical.view((-1, dg.NUM_NUMERICAL)), reduce=False).view_as(correct_numerical).sum(-1)
all_losses = torch.stack([subtype_losses, depth_losses, flip_losses, vals_losses], -1).sum(-1)
per_example_loss = torch.where(example_batch['clipart_chosen_mask'], all_losses, all_losses.new_zeros(1)).sum(-1)
loss = per_example_idx_loss.mean() + per_example_loss.mean()
return loss
@respond_to(codraw_data.ObserveCanvas)
def draw(self, episode):
dg = self.datagen
msg = episode.get_last(codraw_data.TellGroup).msg
# assert msg != ""
words = [self.datagen.vocabulary_dict.get(word, None) for word in msg.split()]
words = [word for word in words if word is not None]
if not words:
episode.append(codraw_data.DrawGroup([]))
episode.append(codraw_data.ReplyGroup("ok"))
return
msg_idxs = torch.tensor(words).to(cuda_if_available)
canvas_context = episode.get_last(codraw_data.ObserveCanvas).scene
canvas_binary = np.zeros((dg.NUM_INDEX, 1 + dg.NUM_DEPTH + dg.NUM_FLIP), dtype=bool)
canvas_pose = np.zeros((2, dg.NUM_SUBTYPES), dtype=bool)
canvas_numerical = np.zeros((dg.NUM_INDEX, dg.NUM_NUMERICAL))
for clipart in canvas_context:
if clipart.idx in Clipart.HUMAN_IDXS:
canvas_pose[clipart.human_idx, clipart.subtype] = True
canvas_binary[clipart.idx, 0] = True
canvas_binary[clipart.idx, 1 + clipart.depth] = True
canvas_binary[clipart.idx, 1 + dg.NUM_DEPTH + clipart.flip] = True
canvas_numerical[clipart.idx, 0] = clipart.normed_x
canvas_numerical[clipart.idx, 1] = clipart.normed_y
canvas_binary = np.concatenate([canvas_binary.reshape((-1,)), canvas_pose.reshape((-1,))])
canvas_numerical = canvas_numerical.reshape((-1,))
canvas_binary = torch.tensor(canvas_binary.astype(np.uint8), dtype=torch.uint8)[None,:].to(cuda_if_available)
canvas_numerical = torch.tensor(canvas_numerical, dtype=torch.float)[None,:].to(cuda_if_available)
bow_feats = self.word_embs(msg_idxs[None,:])
assert np.isfinite(bow_feats.cpu().detach().numpy()).all()
hidden_feats = (
self.lang_to_hidden(bow_feats)
+ self.canvas_binary_to_hidden(canvas_binary.float())
+ self.canvas_numerical_to_hidden(canvas_numerical)
)
clipart_scores = self.hidden_to_clipart(hidden_feats).view(-1, dg.NUM_INDEX, (dg.NUM_ALL + 1))
flat_scores = clipart_scores[:,:,1:].view((-1, dg.NUM_ALL))
(logits_subtype, logits_depth, logits_flip, vals_numerical) = torch.split(flat_scores, [dg.NUM_SUBTYPES, dg.NUM_DEPTH, dg.NUM_FLIP, dg.NUM_NUMERICAL], dim=1)
vals_numerical = self.sigmoid_coeff * F.sigmoid(self.vals_coeff * vals_numerical)
vals_numerical = vals_numerical.cpu().detach().numpy()
clipart_idx_scores = clipart_scores[0,:,0].cpu().detach().numpy()
cliparts = []
prior_idxs = set([c.idx for c in canvas_context])
for idx in np.where(clipart_idx_scores > 0)[0]:
if idx in prior_idxs: # XXX: break ties in favor of earlier actions
continue
nx, ny = vals_numerical[idx,:]
clipart = Clipart(idx, int(logits_subtype[idx,:].argmax()), int(logits_depth[idx,:].argmax()), int(logits_flip[idx,:].argmax()), normed_x=nx, normed_y=ny)
cliparts.append(clipart)
episode.append(codraw_data.DrawGroup(cliparts))
episode.append(codraw_data.ReplyGroup("ok"))
def get_action_fns(self):
return [drawer_observe_canvas, self.draw]
#%%
def load_baseline1():
baseline1_specs = torch_load(Path('models/baseline1_may31.pt'))
models = {}
for k, spec in baseline1_specs.items():
print(k)
models[k] = globals()[spec['class']](spec=spec)
return models
| codraw-models-master | baseline1_models.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from interactivity import INTERACTIVE, try_magic, try_cd
try_cd('~/dev/drawmodel/nkcodraw')
#%%
assert __name__ == "__main__", "Training script should not be imported!"
#%%
import numpy as np
from pathlib import Path
import torch
import torch.cuda
import torch.nn as nn
import torch.nn.functional as F
from nkfb_util import logsumexp, cuda_if_available, torch_load
from attention import AttentionSeqToMasked
import codraw_data
from codraw_data import AbstractScene, Clipart
import abs_render
from abs_metric import scene_similarity, clipart_similarity
from episode import Episode, Transcriber, respond_to
from model import make_fns, eval_fns
from model import Model
from baseline2_models import load_baseline2
from baseline3_models import load_baseline3
from baseline4_models import RLSceneToSeqTeller, collect_episodes
# %%
models_baseline2 = load_baseline2()
models_baseline3 = load_baseline3()
# %%
drawer_lstmaddonly_a, drawer_lstmaddonly_b = models_baseline2['drawer_lstmaddonly_a'], models_baseline2['drawer_lstmaddonly_b']
teller_scene2seq_aux2_a, teller_scene2seq_aux2_b = models_baseline3['teller_scene2seq_aux2_a'], models_baseline3['teller_scene2seq_aux2_b']
# %%
def train_teller(split, teller_pair, scenes,
utterance_penalty=0.1,
gamma=0.999,
uninformative_penalty=0.3,
batch_size=16,
num_batches=12500,
eval_every=2000,
lr=0.00007,
limit=100,
base_name="scene2seq_rl",
):
print("Training hyperparameters:")
for param in ['utterance_penalty',
'gamma',
'uninformative_penalty',
'batch_size',
'num_batches',
'lr',
'limit',
]:
print(param, '=', locals()[param])
drawer_pair = drawer_lstmaddonly_a, drawer_lstmaddonly_b
splits_pair = split + 'a', split + 'b'
if split == 'a':
teller = teller_pair[0]
elif split == 'b':
teller = teller_pair[1]
else:
assert False
teller.disable_dropout()
fns = make_fns(split + split, teller_pair, drawer_pair)
optimizer = torch.optim.Adam(teller.parameters(), lr=lr)
def validate():
for inference_method in ['greedy', 'sample']:
teller.inference_method = inference_method
for splits in splits_pair:
sims = eval_fns(make_fns(splits, teller_pair, drawer_pair), limit=limit)
print(splits, f'[{inference_method}]', sims.mean())
validate()
teller.inference_method = 'sample'
for batch_num in range(num_batches):
optimizer.zero_grad()
teller.eval()
episodes, ex = collect_episodes(
fns,
teller.datagen,
scenes=scenes,
batch_size=batch_size,
utterance_penalty=utterance_penalty,
gamma=gamma,
uninformative_penalty=uninformative_penalty,
)
teller.train()
loss = teller.calc_rl_loss(ex)
loss.backward()
# grad_norm = nn.utils.clip_grad_norm_(teller.parameters(), float('inf'))
# XXX(nikita): clip gradients in an attempt to stabilize. Need to see if
# there's an underlying bug, though.
grad_norm = nn.utils.clip_grad_norm_(teller.parameters(), 1.5)
optimizer.step()
mean_reward = float(ex['brw_rewards'].sum().item() / ex['b_scene_mask'].shape[0])
mean_len = np.mean([
len([event for event in episode if isinstance(event, codraw_data.TellGroup)])
for episode in episodes])
sims = np.array([episode.scene_similarity() for episode in episodes])
mean_sim = sims.mean()
std_sim = sims.std()
print(f'batch {batch_num} mean-reward {mean_reward} loss {float(loss)} grad {float(grad_norm)} mean-len {mean_len} mean-sim {mean_sim} std-sim {std_sim}')
if batch_num % 5 == 0:
for event in episodes[-1]:
if isinstance(event, codraw_data.TellGroup):
print(' >', event.msg)
if batch_num % 50 == 0:
del episodes, ex, loss # clean up memory
validate()
if batch_num > 0 and batch_num % eval_every == 0:
teller.eval()
print("Printing representative sampled dialogs")
teller.inference_method = 'sample'
episodes, ex = collect_episodes(fns, teller.datagen, scenes=scenes[:1], batch_size=5)
for episode in episodes:
for event in episode:
if isinstance(event, codraw_data.TellGroup):
print(' >', event.msg)
print('similarity', episode.scene_similarity())
print('-----')
print("Evaluating on the full dev set")
for inference_method in ['greedy', 'sample']:
teller.inference_method = inference_method
for splits in splits_pair:
sims = eval_fns(make_fns(splits, (teller_rl_a, teller_rl_b), (drawer_lstmaddonly_a, drawer_lstmaddonly_b)), limit=None)
print(splits, f'[{inference_method}]', sims.mean())
if base_name is not None:
print("Serializing teller to disk")
torch.save(teller.spec, Path(f'rl_models/{base_name}_{split}_{batch_num}.pt'))
# %%
# Change this to train a different teller
TELLER_SPLIT = 'a'
# TELLER_SPLIT = 'b'
# Reduce entropy: the uncertainty in the pre-trained model isn't ideal for
# starting RL. It may be possible to adjust label smoothing in the pre-training,
# but for now just reweigh the linear layer prior to the softmax
SOFTMAX_RESCALE = 3.
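# Worked example of the effect (added for illustration): scaling the
# pre-softmax logits by 3 sharpens the distribution. For two tokens with
# logits [1.0, 0.0], softmax gives ~[0.73, 0.27]; after rescaling to
# [3.0, 0.0] it gives ~[0.95, 0.05], so sampled rollouts have lower entropy.
# Because the bias is rescaled too, the greedy (argmax) prediction is unchanged.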
# %%
teller_rl_a, teller_rl_b = None, None
if TELLER_SPLIT == 'a':
teller_rl_a = RLSceneToSeqTeller(spec=teller_scene2seq_aux2_a.spec)
teller_rl_a.word_project.weight.data *= SOFTMAX_RESCALE
teller_rl_a.word_project.bias.data *= SOFTMAX_RESCALE
else:
teller_rl_b = RLSceneToSeqTeller(spec=teller_scene2seq_aux2_b.spec)
teller_rl_b.word_project.weight.data *= SOFTMAX_RESCALE
teller_rl_b.word_project.bias.data *= SOFTMAX_RESCALE
# %%
print(f"Info: training on partition {TELLER_SPLIT}")
scenes = np.asarray(codraw_data.get_scenes(TELLER_SPLIT))
train_teller(
TELLER_SPLIT,
(teller_rl_a, teller_rl_b),
scenes,
utterance_penalty=0.0,
gamma=0.995,
uninformative_penalty=0.3,
batch_size=16,
num_batches=60000,
eval_every=2000,
lr=0.00003,
limit=100,
base_name="b5_utt0_lr3_clip15",
)
| codraw-models-master | baseline4_train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import ast
from itertools import chain
import logging
import math
import os
import sys
import json
import hashlib
import editdistance
from argparse import Namespace
import numpy as np
import torch
from fairseq import checkpoint_utils, options, tasks, utils, distributed_utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.models import FairseqLanguageModel
from omegaconf import DictConfig
from pathlib import Path
import hydra
from hydra.core.config_store import ConfigStore
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
GenerationConfig,
FairseqDataclass,
)
from dataclasses import dataclass, field, is_dataclass
from typing import Any, Dict, List, Optional, Tuple, Union
from omegaconf import OmegaConf
logging.root.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
config_path = Path(__file__).resolve().parent / "conf"
@dataclass
class OverrideConfig(FairseqDataclass):
noise_wav: Optional[str] = field(default=None, metadata={'help': 'noise wav file'})
noise_prob: float = field(default=0, metadata={'help': 'noise probability'})
noise_snr: float = field(default=0, metadata={'help': 'noise SNR in audio'})
modalities: List[str] = field(default_factory=lambda: [""], metadata={'help': 'which modality to use'})
data: Optional[str] = field(default=None, metadata={'help': 'path to test data directory'})
label_dir: Optional[str] = field(default=None, metadata={'help': 'path to test label directory'})
@dataclass
class InferConfig(FairseqDataclass):
task: Any = None
generation: GenerationConfig = GenerationConfig()
common: CommonConfig = CommonConfig()
common_eval: CommonEvalConfig = CommonEvalConfig()
checkpoint: CheckpointConfig = CheckpointConfig()
distributed_training: DistributedTrainingConfig = DistributedTrainingConfig()
dataset: DatasetConfig = DatasetConfig()
override: OverrideConfig = OverrideConfig()
is_ax: bool = field(
default=False,
metadata={
"help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume"
},
)
def main(cfg: DictConfig):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
assert cfg.common_eval.path is not None, "--path required for recognition!"
assert (
not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam
), "--sampling requires --nbest to be equal to --beam"
if cfg.common_eval.results_path is not None:
os.makedirs(cfg.common_eval.results_path, exist_ok=True)
output_path = os.path.join(cfg.common_eval.results_path, "decode.log")
with open(output_path, "w", buffering=1, encoding="utf-8") as h:
return _main(cfg, h)
return _main(cfg, sys.stdout)
def get_symbols_to_strip_from_output(generator):
if hasattr(generator, "symbols_to_strip_from_output"):
return generator.symbols_to_strip_from_output
else:
return {generator.eos, generator.pad}
def _main(cfg, output_file):
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=output_file,
)
logger = logging.getLogger("hybrid.speech_recognize")
if output_file is not sys.stdout: # also print to stdout
logger.addHandler(logging.StreamHandler(sys.stdout))
utils.import_user_module(cfg.common)
models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task([cfg.common_eval.path])
models = [model.eval().cuda() for model in models]
saved_cfg.task.modalities = cfg.override.modalities
task = tasks.setup_task(saved_cfg.task)
task.build_tokenizer(saved_cfg.tokenizer)
task.build_bpe(saved_cfg.bpe)
logger.info(cfg)
# Fix seed for stochastic decoding
if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
use_cuda = torch.cuda.is_available()
# Set dictionary
dictionary = task.target_dictionary
# loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config
task.cfg.noise_prob = cfg.override.noise_prob
task.cfg.noise_snr = cfg.override.noise_snr
task.cfg.noise_wav = cfg.override.noise_wav
if cfg.override.data is not None:
task.cfg.data = cfg.override.data
if cfg.override.label_dir is not None:
task.cfg.label_dir = cfg.override.label_dir
task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task)
lms = [None]
# Optimize ensemble for generation
for model in chain(models, lms):
if model is None:
continue
if cfg.common.fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(cfg.dataset.gen_subset),
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
task.max_positions(), *[m.max_positions() for m in models]
),
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
seed=cfg.common.seed,
num_shards=cfg.distributed_training.distributed_world_size,
shard_id=cfg.distributed_training.distributed_rank,
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
# Initialize generator
if cfg.generation.match_source_len:
logger.warning(
"The option match_source_len is not applicable to speech recognition. Ignoring it."
)
gen_timer = StopwatchMeter()
extra_gen_cls_kwargs = {
"lm_model": lms[0],
"lm_weight": cfg.generation.lm_weight,
}
cfg.generation.score_reference = False #
save_attention_plot = cfg.generation.print_alignment is not None
cfg.generation.print_alignment = None #
generator = task.build_generator(
models, cfg.generation, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
def decode_fn(x):
symbols_ignore = get_symbols_to_strip_from_output(generator)
symbols_ignore.add(dictionary.pad())
if hasattr(task.datasets[cfg.dataset.gen_subset].label_processors[0], 'decode'):
return task.datasets[cfg.dataset.gen_subset].label_processors[0].decode(x, symbols_ignore)
chars = dictionary.string(x, extra_symbols_to_ignore=symbols_ignore)
words = " ".join("".join(chars.split()).replace('|', ' ').split())
return words
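    # Worked example of decode_fn above, assuming a character-level dictionary in which '|'
    # marks word boundaries (the BPE decode branch is taken instead when the label processor
    # provides one):
    #   dictionary.string(...)          -> "T H E | C A T"
    #   "".join(chars.split())          -> "THE|CAT"
    #   .replace('|', ' ').split()      -> ["THE", "CAT"]
    #   " ".join(...)                   -> "THE CAT"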
num_sentences = 0
has_target = True
wps_meter = TimeMeter()
result_dict = {'utt_id': [], 'ref': [], 'hypo': []}
for sample in progress:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if "net_input" not in sample:
continue
prefix_tokens = None
if cfg.generation.prefix_size > 0:
prefix_tokens = sample["target"][:, : cfg.generation.prefix_size]
constraints = None
if "constraints" in sample:
constraints = sample["constraints"]
gen_timer.start()
hypos = task.inference_step(
generator,
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
)
num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i in range(len(sample["id"])):
result_dict['utt_id'].append(sample['utt_id'][i])
ref_sent = decode_fn(sample['target'][i].int().cpu())
result_dict['ref'].append(ref_sent)
best_hypo = hypos[i][0]['tokens'].int().cpu()
hypo_str = decode_fn(best_hypo)
result_dict['hypo'].append(hypo_str)
logger.info(f"\nREF:{ref_sent}\nHYP:{hypo_str}\n")
wps_meter.update(num_generated_tokens)
progress.log({"wps": round(wps_meter.avg)})
num_sentences += sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
logger.info("NOTE: hypothesis and token scores are output in base 2")
logger.info("Recognized {:,} utterances ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)".format(
num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1. / gen_timer.avg))
yaml_str = OmegaConf.to_yaml(cfg.generation)
fid = int(hashlib.md5(yaml_str.encode("utf-8")).hexdigest(), 16)
fid = fid % 1000000
result_fn = f"{cfg.common_eval.results_path}/hypo-{fid}.json"
json.dump(result_dict, open(result_fn, 'w'), indent=4)
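    # Word error rate below is 100 * (sum of word-level edit distances) / (total reference
    # words). Worked example: hypo "the cat sat" vs ref "the cat sat down" gives
    # editdistance.eval(...) = 1 and len(ref) = 4, so WER = 100 * 1 / 4 = 25.0.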
n_err, n_total = 0, 0
assert len(result_dict['hypo']) == len(result_dict['ref'])
for hypo, ref in zip(result_dict['hypo'], result_dict['ref']):
hypo, ref = hypo.strip().split(), ref.strip().split()
n_err += editdistance.eval(hypo, ref)
n_total += len(ref)
wer = 100 * n_err / n_total
wer_fn = f"{cfg.common_eval.results_path}/wer.{fid}"
with open(wer_fn, "w") as fo:
fo.write(f"WER: {wer}\n")
fo.write(f"err / num_ref_words = {n_err} / {n_total}\n\n")
fo.write(f"{yaml_str}")
logger.info(f"WER: {wer}%")
return
@hydra.main(config_path=config_path, config_name="infer")
def hydra_main(cfg: InferConfig) -> Union[float, Tuple[float, Optional[float]]]:
container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True)
cfg = OmegaConf.create(container)
OmegaConf.set_struct(cfg, True)
if cfg.common.reset_logging:
        reset_logging()  # NOTE: assumes a reset_logging() helper is in scope; none is defined or imported in this file
wer = float("inf")
try:
if cfg.common.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, main)
else:
distributed_utils.call_main(cfg, main)
except BaseException as e: # pylint: disable=broad-except
if not cfg.common.suppress_crashes:
raise
else:
logger.error("Crashed! %s", str(e))
return
def cli_main() -> None:
try:
from hydra._internal.utils import (
get_args,
) # pylint: disable=import-outside-toplevel
cfg_name = get_args().config_name or "infer"
except ImportError:
logger.warning("Failed to get config name from hydra args")
cfg_name = "infer"
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=InferConfig)
for k in InferConfig.__dataclass_fields__:
if is_dataclass(InferConfig.__dataclass_fields__[k].type):
v = InferConfig.__dataclass_fields__[k].default
cs.store(name=k, node=v)
hydra_main() # pylint: disable=no-value-for-parameter
if __name__ == "__main__":
cli_main()
| av_hubert-main | avhubert/infer_s2s.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from argparse import Namespace
import contextlib
import copy
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass, field
from omegaconf import MISSING, II, open_dict
from typing import Any, Optional
from fairseq import checkpoint_utils, tasks, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.tasks import FairseqTask
from fairseq.models import (
BaseFairseqModel,
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
)
# from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES
from fairseq.modules import (
LayerNorm,
PositionalEmbedding,
TransformerDecoderLayer,
)
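# NOTE: `Linear` is referenced by `project_in_dim` below but is neither imported nor
# defined in this file. A minimal sketch is added here, assuming the usual fairseq
# convention of a Xavier-initialized nn.Linear; replace it if the project provides its
# own helper elsewhere.
def Linear(in_features, out_features, bias=True):
    m = nn.Linear(in_features, out_features, bias)
    nn.init.xavier_uniform_(m.weight)
    if bias:
        nn.init.constant_(m.bias, 0.0)
    return m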
class TransformerDecoder(FairseqIncrementalDecoder):
"""
    Transformer decoder consisting of *cfg.decoder_layers* layers. Each layer
    is a :class:`TransformerDecoderLayer`.
    Args:
        cfg: decoder configuration providing the decoder_* fields used below
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
cfg,
dictionary,
embed_tokens,
no_encoder_attn=False,
):
super().__init__(dictionary)
self.dropout = cfg.decoder_dropout
self.share_input_output_embed = cfg.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = cfg.decoder_embed_dim
self.output_embed_dim = cfg.decoder_embed_dim
self.layerdrop = cfg.decoder_layerdrop
padding_idx = embed_tokens.padding_idx
self.max_target_positions = cfg.max_target_positions
self.embed_tokens = embed_tokens
# self.embed_scale = math.sqrt(embed_dim) # todo: try with input_embed_dim
self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
cfg.max_target_positions,
embed_dim,
padding_idx,
learned=cfg.decoder_learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
# TODO: update this when transformer gets converted to dataclass configs
transformer_cfg = copy.deepcopy(cfg)
# with open_dict(transformer_cfg):
transformer_cfg.dropout = transformer_cfg.decoder_dropout
transformer_cfg.attention_dropout = (
transformer_cfg.decoder_attention_dropout
)
transformer_cfg.activation_dropout = (
transformer_cfg.decoder_activation_dropout
)
self.layers = nn.ModuleList([])
self.layers.extend(
[
TransformerDecoderLayer(transformer_cfg, no_encoder_attn)
for _ in range(transformer_cfg.decoder_layers)
]
)
if not self.share_input_output_embed:
self.embed_out = nn.Parameter(
torch.Tensor(len(dictionary), self.output_embed_dim)
)
nn.init.normal_(self.embed_out, mean=0, std=self.output_embed_dim ** -0.5)
if transformer_cfg.decoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
def forward(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (Tensor, optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
prev_output_tokens = prev_output_tokens.long()
x, extra = self.extract_features(
prev_output_tokens, encoder_out, incremental_state
)
x = self.output_layer(x)
return x, extra
def extract_features(
self, prev_output_tokens, encoder_out=None, incremental_state=None, **unused
):
"""
Similar to *forward* but only return features.
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
# embed positions
positions = (
self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
attn = None
inner_states = [x]
# decoder layers
for layer in self.layers:
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, attn, _ = layer(
x,
encoder_out["encoder_out"] if encoder_out is not None else None,
encoder_out["padding_mask"] if encoder_out is not None else None,
incremental_state,
self_attn_mask=self.buffered_future_mask(x)
if incremental_state is None
else None,
)
inner_states.append(x)
if self.layer_norm:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x, {"attn": attn, "inner_states": inner_states}
def output_layer(self, features, **kwargs):
"""Project features to the vocabulary size."""
# project back to size of vocabulary
emb_mat = self.embed_tokens.weight if self.share_input_output_embed else self.embed_out
return torch.matmul(features, emb_mat.transpose(0, 1))
# if self.share_input_output_embed:
# return F.linear(features, self.embed_tokens.weight)
# else:
# return F.linear(features, self.embed_out)
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
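    # The causal mask built below is -inf strictly above the diagonal and 0 elsewhere, e.g.
    # for dim == 3:
    #   [[0., -inf, -inf],
    #    [0.,   0., -inf],
    #    [0.,   0.,   0.]]
    # Adding it to the attention scores prevents each position from attending to future tokens.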
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
if (
not hasattr(self, "_future_mask")
or self._future_mask is None
or self._future_mask.device != tensor.device
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(tensor.new(dim, dim)), 1
)
return self._future_mask[:dim, :dim]
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
| av_hubert-main | avhubert/decoder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os, glob
import sys
from typing import Dict, List, Optional, Tuple
import numpy as np
from dataclasses import dataclass, field
from fairseq import metrics, search
from fairseq.data import Dictionary, encoders
from fairseq.dataclass.configs import FairseqDataclass
from fairseq.tasks import register_task
from fairseq.tasks.fairseq_task import FairseqTask
from omegaconf import MISSING, II
import numpy as np
from argparse import Namespace
DBG=True if len(sys.argv) == 1 else False
if DBG:
from hubert_dataset import AVHubertDataset
from sequence_generator import SequenceGenerator
else:
from .hubert_dataset import AVHubertDataset
from .sequence_generator import SequenceGenerator
logger = logging.getLogger(__name__)
class LabelEncoder(object):
def __init__(self, dictionary: Dictionary) -> None:
self.dictionary = dictionary
def __call__(self, label: str) -> List[str]:
return self.dictionary.encode_line(
label, append_eos=False, add_if_not_exist=False,
)
class LabelEncoderS2SToken(object):
def __init__(self, dictionary: Dictionary, bpe_tokenizer) -> None:
self.bpe_tokenizer = bpe_tokenizer
self.dictionary = dictionary
def __call__(self, label: str) -> List[str]:
label = self.bpe_tokenizer.encode(label.lower())
return self.dictionary.encode_line(
label, append_eos=True, add_if_not_exist=False,
).long()
def decode(self, tok, symbols_ignore=None):
tok = self.dictionary.string(tok, extra_symbols_to_ignore=symbols_ignore)
if self.bpe_tokenizer:
tok = self.bpe_tokenizer.decode(tok)
return tok
@dataclass
class AVHubertPretrainingConfig(FairseqDataclass):
data: str = field(
default=MISSING, metadata={"help": "path to data directory"}
)
labels: List[str] = field(
default_factory=lambda: ["ltr"],
metadata={
"help": (
"extension of the label files to load, frame-level labels for"
" pre-training, and sequence-level label for fine-tuning"
)
},
)
label_dir: Optional[str] = field(
default=None,
metadata={
"help": "if set, looks for labels in this directory instead",
},
)
label_rate: int = field(
default=-1,
metadata={"help": "label frame rate. -1 for sequence label"},
)
sample_rate: int = field(
default=16_000,
metadata={
"help": "target sample rate. audio files will be up/down "
"sampled to this rate"
},
)
normalize: bool = field(
default=False,
metadata={
"help": "if set, normalizes input to have 0 mean and unit variance"
},
)
enable_padding: bool = field(
default=False,
metadata={"help": "pad shorter samples instead of cropping"},
)
max_sample_size: Optional[int] = field(
default=None,
metadata={"help": "max sample size to keep in training"},
)
min_sample_size: Optional[int] = field(
default=None,
metadata={"help": "min sample size to keep in training"},
)
max_trim_sample_size: Optional[int] = field(
default=II("task.max_sample_size"),
metadata={"help": "max sample size to trim to for batching"},
)
single_target: Optional[bool] = field(
default=False,
metadata={
"help": "if set, AddTargetDatasets outputs same keys "
"as AddTargetDataset"
},
)
random_crop: Optional[bool] = field(
default=True,
metadata={"help": "always crop from the beginning if false"},
)
pad_audio: Optional[bool] = field(
default=False,
metadata={"help": "pad audio to the longest one in the batch if true"},
)
pdb: Optional[bool] = field(
default=False,
metadata={"help": "pdb"},
)
stack_order_audio: int = field(
default=1,
metadata={"help": "concatenate n consecutive audio frames for one step"},
)
skip_verify: Optional[bool] = field(
default=False,
metadata={"help": "skip verifying label-audio alignment"},
)
image_aug: bool = field(default=False, metadata={'help': 'image data augmentation'})
image_crop_size: int = field(
default=88, metadata={"help": "image ROI size"})
image_mean: float = field(
default=0.421, metadata={"help": "image mean"})
image_std: float = field(
default=0.165, metadata={"help": "image std"})
modalities: Optional[List[str]] = field(default_factory=lambda: ["audio", "video"], metadata={'help': 'modalities to load'})
is_s2s: bool=field(default=False, metadata={'help': 'seq2seq fine-tuning only'})
tokenizer_bpe_name: Optional[str] = field(default=None, metadata={'help': 'tokenizer model name'})
tokenizer_bpe_model: Optional[str] = field(default=None, metadata={'help': 'tokenizer model path'})
noise_wav: Optional[str] = field(default=None, metadata={'help': 'manifest of noise wav files (one wav file path per line)'})
noise_prob: float = field(default=0, metadata={'help': 'noise probability'})
noise_snr: Optional[str] = field(default='0', metadata={'help': 'noise SNR in audio'})
noise_num: int = field(default=1, metadata={'help': 'number of noise wav files to mix'})
fine_tuning: bool = field(default=False, metadata={"help": "set to true if fine-tuning AV-Hubert"})
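# Hypothetical YAML fragment for this task (field names mirror the dataclass above; all
# values are placeholders, not a verified recipe):
#
#   task:
#     _name: av_hubert_pretraining
#     data: /path/to/tsv_dir
#     label_dir: /path/to/label_dir
#     labels: ["km"]
#     label_rate: 25
#     modalities: ["audio", "video"]
#     image_crop_size: 88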
@register_task("av_hubert_pretraining", dataclass=AVHubertPretrainingConfig)
class AVHubertPretrainingTask(FairseqTask):
cfg: AVHubertPretrainingConfig
def __init__(
self,
cfg: AVHubertPretrainingConfig,
) -> None:
super().__init__(cfg)
logger.info(f"current directory is {os.getcwd()}")
logger.info(f"AVHubertPretrainingTask Config {cfg}")
self.fine_tuning = cfg.fine_tuning
if cfg.fine_tuning:
self.state.add_factory("target_dictionary", self.load_dictionaries)
if cfg.is_s2s:
self.state.add_factory("s2s_tokenizer", self.load_tokenizer)
else:
self.state.add_factory("dictionaries", self.load_dictionaries)
self.blank_symbol = "<s>"
@property
def source_dictionary(self) -> Optional[Dictionary]:
return None # self._source_dictionary
@property
def target_dictionary(self) -> Optional[Dictionary]:
return self.state.target_dictionary # self._target_dictionary
@property
def dictionaries(self) -> List[Dictionary]:
return self.state.dictionaries
def load_dictionaries(self):
label_dir = self.cfg.data if self.cfg.label_dir is None else self.cfg.label_dir
dictionaries = [
Dictionary.load(f"{label_dir}/dict.{label}.txt")
for label in self.cfg.labels
]
return dictionaries[0] if self.cfg.fine_tuning else dictionaries
def load_tokenizer(self):
bpe_args = Namespace(**{'bpe': self.cfg.tokenizer_bpe_name, f"{self.cfg.tokenizer_bpe_name}_model": self.cfg.tokenizer_bpe_model})
bpe_tokenizer = encoders.build_bpe(bpe_args)
return bpe_tokenizer
@property
def s2s_tokenizer(self):
return self.state.s2s_tokenizer
@classmethod
def setup_task(
cls, cfg: AVHubertPretrainingConfig, **kwargs
) -> "AVHubertPretrainingTask":
if cfg.pdb:
import pdb
pdb.set_trace()
return cls(cfg)
def get_label_dir(self) -> str:
if self.cfg.label_dir is None:
return self.cfg.data
return self.cfg.label_dir
def load_dataset(self, split: str, **kwargs) -> None:
manifest = f"{self.cfg.data}/{split}.tsv"
dictionaries = [self.target_dictionary] if self.fine_tuning else self.dictionaries
pad_list = [dictionary.pad() for dictionary in dictionaries]
eos_list = [dictionary.eos() for dictionary in dictionaries]
if not self.cfg.is_s2s:
procs = [LabelEncoder(dictionary) for dictionary in dictionaries]
else:
logger.info(f"Using tokenizer")
bpe_tokenizer = self.s2s_tokenizer
procs = [LabelEncoderS2SToken(dictionary, bpe_tokenizer) for dictionary in dictionaries]
paths = [
f"{self.get_label_dir()}/{split}.{l}" for l in self.cfg.labels
]
image_aug = self.cfg.image_aug if split == 'train' else False
        noise_fn = f"{self.cfg.noise_wav}/{split}.tsv" if self.cfg.noise_wav is not None else None
        noise_snr = eval(self.cfg.noise_snr)  # noise_snr is given as a Python literal string, e.g. '0'
noise_num = self.cfg.noise_num #
self.datasets[split] = AVHubertDataset(
manifest,
sample_rate=self.cfg.sample_rate,
label_paths=paths,
label_rates=self.cfg.label_rate,
pad_list=pad_list,
eos_list=eos_list,
label_processors=procs,
max_keep_sample_size=self.cfg.max_sample_size,
min_keep_sample_size=self.cfg.min_sample_size,
max_sample_size=self.cfg.max_trim_sample_size,
pad_audio=self.cfg.pad_audio,
normalize=self.cfg.normalize,
store_labels=False,
random_crop=self.cfg.random_crop,
single_target=self.cfg.single_target,
stack_order_audio=self.cfg.stack_order_audio,
skip_verify=self.cfg.skip_verify,
image_mean=self.cfg.image_mean,
image_std=self.cfg.image_std,
image_crop_size=self.cfg.image_crop_size,
image_aug=image_aug,
modalities=self.cfg.modalities,
is_s2s=self.cfg.is_s2s,
noise_fn=noise_fn,
noise_prob=self.cfg.noise_prob,
noise_snr=noise_snr,
noise_num=noise_num
)
def max_positions(self) -> Tuple[int, int]:
return (sys.maxsize, sys.maxsize)
def filter_indices_by_size(
self, indices: np.array, *args, **kwargs
) -> np.array:
return indices
def build_generator(
self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None, prefix_allowed_tokens_fn=None,
):
"""
Build a :class:`~fairseq.SequenceGenerator` instance for this
task.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
args (fairseq.dataclass.configs.GenerationConfig):
configuration object (dataclass) for generation
extra_gen_cls_kwargs (Dict[str, Any]): extra options to pass
through to SequenceGenerator
prefix_allowed_tokens_fn (Callable[[int, torch.Tensor], List[int]]):
If provided, this function constrains the beam search to
allowed tokens only at each step. The provided function
should take 2 arguments: the batch ID (`batch_id: int`)
and a unidimensional tensor of token ids (`inputs_ids:
torch.Tensor`). It has to return a `List[int]` with the
allowed tokens for the next generation step conditioned
on the previously generated tokens (`inputs_ids`) and
the batch ID (`batch_id`). This argument is useful for
constrained generation conditioned on the prefix, as
described in "Autoregressive Entity Retrieval"
(https://arxiv.org/abs/2010.00904) and
https://github.com/facebookresearch/GENRE.
"""
if getattr(args, "score_reference", False):
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(
self.target_dictionary,
compute_alignment=getattr(args, "print_alignment", False),
)
# Choose search strategy. Defaults to Beam Search.
sampling = getattr(args, "sampling", False)
sampling_topk = getattr(args, "sampling_topk", -1)
sampling_topp = getattr(args, "sampling_topp", -1.0)
diverse_beam_groups = getattr(args, "diverse_beam_groups", -1)
diverse_beam_strength = getattr(args, "diverse_beam_strength", 0.5)
match_source_len = getattr(args, "match_source_len", False)
diversity_rate = getattr(args, "diversity_rate", -1)
constrained = getattr(args, "constraints", False)
if prefix_allowed_tokens_fn is None:
prefix_allowed_tokens_fn = getattr(args, "prefix_allowed_tokens_fn", None)
if (
sum(
int(cond)
for cond in [
sampling,
diverse_beam_groups > 0,
match_source_len,
diversity_rate > 0,
]
)
> 1
):
raise ValueError("Provided Search parameters are mutually exclusive.")
assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling"
assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling"
if sampling:
search_strategy = search.Sampling(
self.target_dictionary, sampling_topk, sampling_topp
)
elif diverse_beam_groups > 0:
search_strategy = search.DiverseBeamSearch(
self.target_dictionary, diverse_beam_groups, diverse_beam_strength
)
elif match_source_len:
# this is useful for tagging applications where the output
# length should match the input length, so we hardcode the
# length constraints for simplicity
search_strategy = search.LengthConstrainedBeamSearch(
self.target_dictionary,
min_len_a=1,
min_len_b=0,
max_len_a=1,
max_len_b=0,
)
elif diversity_rate > -1:
search_strategy = search.DiverseSiblingsSearch(
self.target_dictionary, diversity_rate
)
elif constrained:
search_strategy = search.LexicallyConstrainedBeamSearch(
self.target_dictionary, args.constraints
)
elif prefix_allowed_tokens_fn:
search_strategy = search.PrefixConstrainedBeamSearch(
self.target_dictionary, prefix_allowed_tokens_fn
)
else:
search_strategy = search.BeamSearch(self.target_dictionary)
extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}
if seq_gen_cls is None:
if getattr(args, "print_alignment", False):
seq_gen_cls = SequenceGeneratorWithAlignment
extra_gen_cls_kwargs["print_alignment"] = args.print_alignment
else:
seq_gen_cls = SequenceGenerator
return seq_gen_cls(
models,
self.target_dictionary,
beam_size=getattr(args, "beam", 5),
max_len_a=getattr(args, "max_len_a", 0),
max_len_b=getattr(args, "max_len_b", 200),
min_len=getattr(args, "min_len", 1),
normalize_scores=(not getattr(args, "unnormalized", False)),
len_penalty=getattr(args, "lenpen", 1),
unk_penalty=getattr(args, "unkpen", 0),
temperature=getattr(args, "temperature", 1.0),
match_source_len=getattr(args, "match_source_len", False),
no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
search_strategy=search_strategy,
**extra_gen_cls_kwargs,
)
| av_hubert-main | avhubert/hubert_pretraining.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .hubert import * # noqa
from .hubert_asr import * # noqa
from .hubert_dataset import *
from .hubert_pretraining import *
from .hubert_criterion import *
| av_hubert-main | avhubert/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, List, Optional
import sys
import torch
import torch.nn as nn
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
from torch import Tensor
from fairseq.ngram_repeat_block import NGramRepeatBlock
class SequenceGenerator(nn.Module):
def __init__(
self,
models,
tgt_dict,
beam_size=1,
max_len_a=0,
max_len_b=200,
max_len=0,
min_len=1,
normalize_scores=True,
len_penalty=1.0,
unk_penalty=0.0,
temperature=1.0,
match_source_len=False,
no_repeat_ngram_size=0,
search_strategy=None,
eos=None,
symbols_to_strip_from_output=None,
lm_model=None,
lm_weight=1.0,
):
"""Generates translations of a given source sentence.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models,
currently support fairseq.models.TransformerModel for scripting
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
max_len (int, optional): the maximum length of the generated output
(not including end-of-sentence)
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
match_source_len (bool, optional): outputs should match the source
length (default: False)
"""
super().__init__()
if isinstance(models, EnsembleModel):
self.model = models
else:
self.model = EnsembleModel(models)
self.tgt_dict = tgt_dict
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos() if eos is None else eos
self.symbols_to_strip_from_output = (
symbols_to_strip_from_output.union({self.eos})
if symbols_to_strip_from_output is not None
else {self.eos}
)
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.max_len = max_len or self.model.max_decoder_positions()
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.temperature = temperature
self.match_source_len = match_source_len
if no_repeat_ngram_size > 0:
self.repeat_ngram_blocker = NGramRepeatBlock(no_repeat_ngram_size)
else:
self.repeat_ngram_blocker = None
assert temperature > 0, "--temperature must be greater than 0"
self.search = (
search.BeamSearch(tgt_dict) if search_strategy is None else search_strategy
)
# We only need to set src_lengths in LengthConstrainedBeamSearch.
# As a module attribute, setting it would break in multithread
# settings when the model is shared.
self.should_set_src_lengths = (
hasattr(self.search, "needs_src_lengths") and self.search.needs_src_lengths
)
self.model.eval()
self.lm_model = lm_model
self.lm_weight = lm_weight
if self.lm_model is not None:
self.lm_model.eval()
def cuda(self):
self.model.cuda()
return self
@torch.no_grad()
def forward(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
"""Generate a batch of translations.
Args:
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, prefix_tokens, bos_token=bos_token)
# TODO(myleott): unused, deprecate after pytorch-translate migration
def generate_batched_itr(self, data_itr, beam_size=None, cuda=False, timer=None):
"""Iterate over a batched dataset and yield individual translations.
Args:
cuda (bool, optional): use GPU for generation
timer (StopwatchMeter, optional): time generations
"""
for sample in data_itr:
s = utils.move_to_cuda(sample) if cuda else sample
if "net_input" not in s:
continue
input = s["net_input"]
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in input.items() if k != "prev_output_tokens"
}
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(encoder_input)
if timer is not None:
timer.stop(sum(len(h[0]["tokens"]) for h in hypos))
for i, id in enumerate(s["id"].data):
# remove padding
src = utils.strip_pad(input["src_tokens"].data[i, :], self.pad)
ref = (
utils.strip_pad(s["target"].data[i, :], self.pad)
if s["target"] is not None
else None
)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(self, models, sample: Dict[str, Dict[str, Tensor]], **kwargs) -> List[List[Dict[str, Tensor]]]:
"""Generate translations. Match the api of other fairseq generators.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
constraints (torch.LongTensor, optional): force decoder to include
the list of constraints
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
return self._generate(sample, **kwargs)
def _generate(
self,
sample: Dict[str, Dict[str, Tensor]],
prefix_tokens: Optional[Tensor] = None,
constraints: Optional[Tensor] = None,
bos_token: Optional[int] = None,
):
incremental_states = torch.jit.annotate(
List[Dict[str, Dict[str, Optional[Tensor]]]],
[
torch.jit.annotate(Dict[str, Dict[str, Optional[Tensor]]], {})
for i in range(self.model.models_size)
],
)
net_input = sample["net_input"]
if "src_tokens" in net_input:
src_tokens = net_input["src_tokens"]
# length of the source text being the character length except EndOfSentence and pad
src_lengths = (
(src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
)
elif "source" in net_input:
src_tokens = net_input["source"]
src_lengths = (
net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
if net_input["padding_mask"] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
elif "features" in net_input:
src_tokens = net_input["features"]
src_lengths = (
net_input["padding_mask"].size(-1) - net_input["padding_mask"].sum(-1)
if net_input["padding_mask"] is not None
else torch.tensor(src_tokens.size(-1)).to(src_tokens)
)
else:
raise Exception("expected src_tokens or source in net input. input keys: " + str(net_input.keys()))
# bsz: total number of sentences in beam
# Note that src_tokens may have more than 2 dimensions (i.e. audio features)
if src_tokens['audio'] is not None:
bsz, src_len = src_tokens['audio'].size()[:2]
src_device = src_tokens['audio'].device
else:
bsz, src_len = net_input['padding_mask'].size()
src_device = src_tokens['video'].device
beam_size = self.beam_size
if constraints is not None and not self.search.supports_constraints:
raise NotImplementedError(
"Target-side constraints were provided, but search method doesn't support them"
)
# Initialize constraints, when active
self.search.init_constraints(constraints, beam_size)
max_len: int = -1
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
self.max_len - 1,
)
assert (
self.min_len <= max_len
), "min_len cannot be larger than max_len, please adjust these!"
# compute the encoder output for each beam
encoder_outs = self.model.forward_encoder(net_input)
# placeholder of indices for bsz * beam_size to hold tokens and accumulative scores
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_device).long()
encoder_outs = self.model.reorder_encoder_out(encoder_outs, new_order)
# ensure encoder_outs is a List.
assert encoder_outs is not None
# initialize buffers
scores = (
torch.zeros(bsz * beam_size, max_len + 1).to(src_device).float()
) # +1 for eos; pad is never chosen for scoring
tokens = (
torch.zeros(bsz * beam_size, max_len + 2)
.to(src_device)
.long()
.fill_(self.pad)
) # +2 for eos and pad
tokens[:, 0] = self.eos if bos_token is None else bos_token
attn: Optional[Tensor] = None
# A list that indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then cands_to_ignore would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
cands_to_ignore = (
torch.zeros(bsz, beam_size).to(src_device).eq(-1)
) # forward and backward-compatible False mask
# list of completed sentences
finalized = torch.jit.annotate(
List[List[Dict[str, Tensor]]],
[torch.jit.annotate(List[Dict[str, Tensor]], []) for i in range(bsz)],
        )  # contains lists of dictionaries of information about the hypotheses being finalized at each step
# a boolean array indicating if the sentence at the index is finished or not
finished = [False for i in range(bsz)]
num_remaining_sent = bsz # number of sentences remaining
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (
(torch.arange(0, bsz) * beam_size)
.unsqueeze(1)
.type_as(tokens)
.to(src_device)
)
cand_offsets = torch.arange(0, cand_size).type_as(tokens).to(src_device)
reorder_state: Optional[Tensor] = None
batch_idxs: Optional[Tensor] = None
original_batch_idxs: Optional[Tensor] = None
if "id" in sample and isinstance(sample["id"], Tensor):
original_batch_idxs = sample["id"]
else:
original_batch_idxs = torch.arange(0, bsz).type_as(tokens)
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(
batch_idxs
)
reorder_state.view(-1, beam_size).add_(
corr.unsqueeze(-1) * beam_size
)
original_batch_idxs = original_batch_idxs[batch_idxs]
self.model.reorder_incremental_state(incremental_states, reorder_state)
encoder_outs = self.model.reorder_encoder_out(
encoder_outs, reorder_state
)
lprobs, avg_attn_scores = self.model.forward_decoder(
tokens[:, : step + 1],
encoder_outs,
incremental_states,
self.temperature,
)
if self.lm_model is not None:
lm_out = self.lm_model(tokens[:, : step + 1])
probs = self.lm_model.get_normalized_probs(
lm_out, log_probs=True, sample=None
)
probs = probs[:, -1, :] * self.lm_weight
lprobs += probs
lprobs[lprobs != lprobs] = torch.tensor(-math.inf).to(lprobs)
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
# handle max length constraint
if step >= max_len:
lprobs[:, : self.eos] = -math.inf
lprobs[:, self.eos + 1 :] = -math.inf
# handle prefix tokens (possibly with different lengths)
if (
prefix_tokens is not None
and step < prefix_tokens.size(1)
and step < max_len
):
lprobs, tokens, scores = self._prefix_tokens(
step, lprobs, scores, tokens, prefix_tokens, beam_size
)
elif step < self.min_len:
# minimum length constraint (does not apply if using prefix_tokens)
lprobs[:, self.eos] = -math.inf
# Record attention scores, only support avg_attn_scores is a Tensor
if avg_attn_scores is not None:
if attn is None:
attn = torch.empty(
bsz * beam_size, avg_attn_scores.size(1), max_len + 2
).to(scores)
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
eos_bbsz_idx = torch.empty(0).to(
tokens
) # indices of hypothesis ending with eos (finished sentences)
eos_scores = torch.empty(0).to(
scores
) # scores of hypothesis ending with eos (finished sentences)
if self.should_set_src_lengths:
self.search.set_src_lengths(src_lengths)
if self.repeat_ngram_blocker is not None:
lprobs = self.repeat_ngram_blocker(tokens, lprobs, bsz, beam_size, step)
# Shape: (batch, cand_size)
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
tokens[:, : step + 1],
original_batch_idxs,
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos
# Shape of eos_mask: (batch size, beam size)
eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
eos_mask[:, :beam_size][cands_to_ignore] = torch.tensor(0).to(eos_mask)
# only consider eos when it's among the top beam_size indices
# Now we know what beam item(s) to finish
            # Shape: a 1d tensor of flattened (batch * beam) indices of hypotheses ending in eos
eos_bbsz_idx = torch.masked_select(
cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents: List[int] = []
if eos_bbsz_idx.numel() > 0:
eos_scores = torch.masked_select(
cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size]
)
finalized_sents = self.finalize_hypos(
step,
eos_bbsz_idx,
eos_scores,
tokens,
scores,
finalized,
finished,
beam_size,
attn,
src_lengths,
max_len,
)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
if self.search.stop_on_max_len and step >= max_len:
break
            assert step < max_len, f"{step} < {max_len}"
# Remove finalized sentences (ones for which {beam_size}
# finished hypotheses have been generated) from the batch.
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = torch.ones(
bsz, dtype=torch.bool, device=cand_indices.device
)
batch_mask[finalized_sents] = False
# TODO replace `nonzero(as_tuple=False)` after TorchScript supports it
batch_idxs = torch.arange(
bsz, device=cand_indices.device
).masked_select(batch_mask)
# Choose the subset of the hypothesized constraints that will continue
self.search.prune_sentences(batch_idxs)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
cands_to_ignore = cands_to_ignore[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(
new_bsz * beam_size, attn.size(1), -1
)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos hypos
# and values < cand_size indicate candidate active hypos.
# After, the min values per row are the top candidate active hypos
# Rewrite the operator since the element wise or is not supported in torchscript.
eos_mask[:, :beam_size] = ~((~cands_to_ignore) & (~eos_mask[:, :beam_size]))
active_mask = torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[: eos_mask.size(1)],
)
# get the top beam_size active hypotheses, which are just
# the hypos with the smallest values in active_mask.
# {active_hypos} indicates which {beam_size} hypotheses
# from the list of {2 * beam_size} candidates were
# selected. Shapes: (batch size, beam size)
new_cands_to_ignore, active_hypos = torch.topk(
active_mask, k=beam_size, dim=1, largest=False
)
# update cands_to_ignore to ignore any finalized hypos.
cands_to_ignore = new_cands_to_ignore.ge(cand_size)[:, :beam_size]
# Make sure there is at least one active item for each sentence in the batch.
assert (~cands_to_ignore).any(dim=1).all()
# update cands_to_ignore to ignore any finalized hypos
# {active_bbsz_idx} denotes which beam number is continued for each new hypothesis (a beam
# can be selected more than once).
active_bbsz_idx = torch.gather(cand_bbsz_idx, dim=1, index=active_hypos)
active_scores = torch.gather(cand_scores, dim=1, index=active_hypos)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
# Set the tokens for each beam (can select the same row more than once)
tokens[:, : step + 1] = torch.index_select(
tokens[:, : step + 1], dim=0, index=active_bbsz_idx
)
# Select the next token for each of them
tokens.view(bsz, beam_size, -1)[:, :, step + 1] = torch.gather(
cand_indices, dim=1, index=active_hypos
)
if step > 0:
scores[:, :step] = torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx
)
scores.view(bsz, beam_size, -1)[:, :, step] = torch.gather(
cand_scores, dim=1, index=active_hypos
)
# Update constraints based on which candidates were selected for the next beam
self.search.update_constraints(active_hypos)
# copy attention for active hypotheses
if attn is not None:
attn[:, :, : step + 2] = torch.index_select(
attn[:, :, : step + 2], dim=0, index=active_bbsz_idx
)
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
scores = torch.tensor(
[float(elem["score"].item()) for elem in finalized[sent]]
)
_, sorted_scores_indices = torch.sort(scores, descending=True)
finalized[sent] = [finalized[sent][ssi] for ssi in sorted_scores_indices]
finalized[sent] = torch.jit.annotate(
List[Dict[str, Tensor]], finalized[sent]
)
return finalized
def _prefix_tokens(
self, step: int, lprobs, scores, tokens, prefix_tokens, beam_size: int
):
"""Handle prefix tokens"""
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = torch.tensor(-math.inf).to(lprobs)
lprobs[prefix_mask] = lprobs[prefix_mask].scatter(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[
:, 0, 1 : step + 1
]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
# copy tokens, scores and lprobs from the first beam to all beams
tokens = self.replicate_first_beam(tokens, eos_mask_batch_dim, beam_size)
scores = self.replicate_first_beam(scores, eos_mask_batch_dim, beam_size)
lprobs = self.replicate_first_beam(lprobs, eos_mask_batch_dim, beam_size)
return lprobs, tokens, scores
def replicate_first_beam(self, tensor, mask, beam_size: int):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
def finalize_hypos(
self,
step: int,
bbsz_idx,
eos_scores,
tokens,
scores,
finalized: List[List[Dict[str, Tensor]]],
finished: List[bool],
beam_size: int,
attn: Optional[Tensor],
src_lengths,
max_len: int,
):
"""Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.
A sentence is finalized when {beam_size} finished items have been collected for it.
Returns number of sentences (not beam items) being finalized.
These will be removed from the batch and not processed further.
Args:
            bbsz_idx (Tensor): flat indices (sentence * beam_size + beam) of the beam items that just produced EOS
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors.
# tokens is (batch * beam, max_len). So the index_select
# gets the newly EOS rows, then selects cols 1..{step + 2}
tokens_clone = tokens.index_select(0, bbsz_idx)[
:, 1 : step + 2
] # skip the first index, which is EOS
tokens_clone[:, step] = self.eos
attn_clone = (
attn.index_select(0, bbsz_idx)[:, :, 1 : step + 2]
if attn is not None
else None
)
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, : step + 1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
# cum_unfin records which sentences in the batch are finished.
# It helps match indexing between (a) the original sentences
# in the batch and (b) the current, possibly-reduced set of
# sentences.
cum_unfin: List[int] = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
# The keys here are of the form "{sent}_{unfin_idx}", where
# "unfin_idx" is the index in the current (possibly reduced)
# list of sentences, and "sent" is the index in the original,
# unreduced batch
# set() is not supported in script export
sents_seen: Dict[str, Optional[Tensor]] = {}
# For every finished beam item
for i in range(bbsz_idx.size()[0]):
idx = bbsz_idx[i]
score = eos_scores[i]
# sentence index in the current (possibly reduced) batch
unfin_idx = idx // beam_size
# sentence index in the original (unreduced) batch
sent = unfin_idx + cum_unfin[unfin_idx]
# Cannot create dict for key type '(int, int)' in torchscript.
# The workaround is to cast int to string
seen = str(sent.item()) + "_" + str(unfin_idx.item())
if seen not in sents_seen:
sents_seen[seen] = None
if self.match_source_len and step > src_lengths[unfin_idx]:
score = torch.tensor(-math.inf).to(score)
# An input sentence (among those in a batch) is finished when
# beam_size hypotheses have been collected for it
if len(finalized[sent]) < beam_size:
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = torch.empty(0)
finalized[sent].append(
{
"tokens": tokens_clone[i],
"score": score,
"attention": hypo_attn, # src_len x tgt_len
"alignment": torch.empty(0),
"positional_scores": pos_scores[i],
}
)
newly_finished: List[int] = []
for seen in sents_seen.keys():
# check termination conditions for this sentence
sent: int = int(float(seen.split("_")[0]))
unfin_idx: int = int(float(seen.split("_")[1]))
if not finished[sent] and self.is_finished(
step, unfin_idx, max_len, len(finalized[sent]), beam_size
):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
def is_finished(
self,
step: int,
unfin_idx: int,
max_len: int,
finalized_sent_len: int,
beam_size: int,
):
"""
Check whether decoding for a sentence is finished, which
occurs when the list of finalized sentences has reached the
beam size, or when we reach the maximum length.
"""
assert finalized_sent_len <= beam_size
if finalized_sent_len == beam_size or step == max_len:
return True
return False
class EnsembleModel(nn.Module):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
self.models_size = len(models)
# method '__len__' is not supported in ModuleList for torch script
self.single_model = models[0]
self.models = nn.ModuleList(models)
self.has_incremental: bool = False
if all(
hasattr(m, "decoder") and isinstance(m.decoder, FairseqIncrementalDecoder)
for m in models
):
self.has_incremental = True
def forward(self):
pass
def has_encoder(self):
return hasattr(self.single_model, "encoder")
def has_incremental_states(self):
return self.has_incremental
def max_decoder_positions(self):
return min([m.max_decoder_positions() for m in self.models if hasattr(m, "max_decoder_positions")] + [sys.maxsize])
@torch.jit.export
def forward_encoder(self, net_input: Dict[str, Tensor]):
if not self.has_encoder():
return None
return [model.encoder.forward_torchscript(net_input) for model in self.models]
@torch.jit.export
def forward_decoder(
self,
tokens,
encoder_outs: List[Dict[str, List[Tensor]]],
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
temperature: float = 1.0,
):
log_probs = []
avg_attn: Optional[Tensor] = None
encoder_out: Optional[Dict[str, List[Tensor]]] = None
for i, model in enumerate(self.models):
if self.has_encoder():
encoder_out = encoder_outs[i]
# decode each model
if self.has_incremental_states():
decoder_out = model.decoder.forward(
tokens,
encoder_out=encoder_out,
incremental_state=incremental_states[i],
)
else:
if hasattr(model, "decoder"):
decoder_out = model.decoder.forward(tokens, encoder_out=encoder_out)
else:
decoder_out = model.forward(tokens)
attn: Optional[Tensor] = None
decoder_len = len(decoder_out)
if decoder_len > 1 and decoder_out[1] is not None:
if isinstance(decoder_out[1], Tensor):
attn = decoder_out[1]
else:
attn_holder = decoder_out[1]["attn"]
if isinstance(attn_holder, Tensor):
attn = attn_holder
elif attn_holder is not None:
attn = attn_holder[0]
if attn is not None:
attn = attn[:, -1, :]
decoder_out_tuple = (
decoder_out[0][:, -1:, :].div_(temperature),
None if decoder_len <= 1 else decoder_out[1],
)
probs = model.get_normalized_probs(
decoder_out_tuple, log_probs=True, sample=None
)
probs = probs[:, -1, :]
if self.models_size == 1:
return probs, attn
log_probs.append(probs)
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
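        # Ensemble average in probability space, computed in log space for stability:
        #   avg_probs = log( (1/N) * sum_i exp(log_probs_i) )
        #             = logsumexp_i(log_probs_i) - log(N)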
avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(
self.models_size
)
if avg_attn is not None:
avg_attn.div_(self.models_size)
return avg_probs, avg_attn
@torch.jit.export
def reorder_encoder_out(
self, encoder_outs: Optional[List[Dict[str, List[Tensor]]]], new_order
):
"""
Reorder encoder output according to *new_order*.
Args:
encoder_out: output from the ``forward()`` method
new_order (LongTensor): desired order
Returns:
*encoder_out* rearranged according to *new_order*
"""
new_outs: List[Dict[str, List[Tensor]]] = []
if not self.has_encoder():
return new_outs
for i, model in enumerate(self.models):
assert encoder_outs is not None
new_outs.append(
model.encoder.reorder_encoder_out(encoder_outs[i], new_order)
)
return new_outs
@torch.jit.export
def reorder_incremental_state(
self,
incremental_states: List[Dict[str, Dict[str, Optional[Tensor]]]],
new_order,
):
if not self.has_incremental_states():
return
for i, model in enumerate(self.models):
model.decoder.reorder_incremental_state_scripting(
incremental_states[i], new_order
)
class SequenceGeneratorWithAlignment(SequenceGenerator):
def __init__(
self, models, tgt_dict, left_pad_target=False, print_alignment="hard", **kwargs
):
"""Generates translations of a given source sentence.
Produces alignments following "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
left_pad_target (bool, optional): Whether or not the
hypothesis should be left padded or not when they are
teacher forced for generating alignments.
"""
super().__init__(EnsembleModelWithAlignment(models), tgt_dict, **kwargs)
self.left_pad_target = left_pad_target
if print_alignment == "hard":
self.extract_alignment = utils.extract_hard_alignment
elif print_alignment == "soft":
self.extract_alignment = utils.extract_soft_alignment
@torch.no_grad()
def generate(self, models, sample, **kwargs):
finalized = super()._generate(sample, **kwargs)
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
beam_size = self.beam_size
(
src_tokens,
src_lengths,
prev_output_tokens,
tgt_tokens,
) = self._prepare_batch_for_alignment(sample, finalized)
if any(getattr(m, "full_context_alignment", False) for m in self.model.models):
attn = self.model.forward_align(src_tokens, src_lengths, prev_output_tokens)
else:
attn = [
finalized[i // beam_size][i % beam_size]["attention"].transpose(1, 0)
for i in range(bsz * beam_size)
]
if src_tokens.device != "cpu":
src_tokens = src_tokens.to("cpu")
tgt_tokens = tgt_tokens.to("cpu")
attn = [i.to("cpu") for i in attn]
# Process the attn matrix to extract hard alignments.
for i in range(bsz * beam_size):
alignment = self.extract_alignment(
attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos
)
finalized[i // beam_size][i % beam_size]["alignment"] = alignment
return finalized
def _prepare_batch_for_alignment(self, sample, hypothesis):
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.shape[0]
src_tokens = (
src_tokens[:, None, :]
.expand(-1, self.beam_size, -1)
.contiguous()
.view(bsz * self.beam_size, -1)
)
src_lengths = sample["net_input"]["src_lengths"]
src_lengths = (
src_lengths[:, None]
.expand(-1, self.beam_size)
.contiguous()
.view(bsz * self.beam_size)
)
prev_output_tokens = data_utils.collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=True,
)
tgt_tokens = data_utils.collate_tokens(
[beam["tokens"] for example in hypothesis for beam in example],
self.pad,
self.eos,
self.left_pad_target,
move_eos_to_beginning=False,
)
return src_tokens, src_lengths, prev_output_tokens, tgt_tokens
class EnsembleModelWithAlignment(EnsembleModel):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__(models)
def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
avg_attn = None
for model in self.models:
decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
attn = decoder_out[1]["attn"][0]
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(self.models) > 1:
avg_attn.div_(len(self.models))
return avg_attn
| av_hubert-main | avhubert/sequence_generator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import re
from dataclasses import dataclass, field
from typing import List, Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
@dataclass
class AVHubertCriterionConfig(FairseqDataclass):
pred_masked_weight: float = field(
default=1.0,
metadata={"help": "weight for predictive loss for masked frames"},
)
pred_nomask_weight: float = field(
default=0.0,
metadata={"help": "weight for predictive loss for unmasked frames"},
)
loss_weights: Optional[List[float]] = field(
default=None,
metadata={"help": "weights for additional loss terms (not first one)"},
)
log_keys: List[str] = field(
default_factory=lambda: [],
metadata={"help": "output keys to log"},
)
@register_criterion("av_hubert", dataclass=AVHubertCriterionConfig)
class AVHubertCriterion(FairseqCriterion):
def __init__(self, task, pred_masked_weight, pred_nomask_weight, loss_weights=None, log_keys=None):
super().__init__(task)
self.pred_masked_weight = pred_masked_weight
self.pred_nomask_weight = pred_nomask_weight
self.loss_weights = loss_weights
self.log_keys = [] if log_keys is None else log_keys
def forward(self, model, sample, reduce=True, log_pred=False):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(target_list=sample["target_list"], **sample["net_input"])
loss = 0.
sample_size = 0
logging_output = {}
reduction = "sum" if reduce else "none"
loss_m_list = []
logp_m_list, targ_m_list = net_output['logit_m_list'], net_output['target_m_list']
for i, (logp_m, targ_m) in enumerate(zip(logp_m_list, targ_m_list)):
loss_m = F.cross_entropy(logp_m, targ_m, reduction=reduction)
loss_m_list.append(loss_m)
logging_output[f"loss_m_{i}"] = loss_m.detach().item()
if self.pred_masked_weight > 0:
loss += self.pred_masked_weight * sum(loss_m_list)
sample_size += targ_m_list[0].numel()
loss_u_list = []
logp_u_list, targ_u_list = net_output['logit_u_list'], net_output['target_u_list']
for i, (logp_u, targ_u) in enumerate(zip(logp_u_list, targ_u_list)):
loss_u = F.cross_entropy(logp_u, targ_u, reduction=reduction)
loss_u_list.append(loss_u)
logging_output[f"loss_u_{i}"] = loss_u.detach().item()
if self.pred_nomask_weight > 0:
loss += self.pred_nomask_weight * sum(loss_u_list)
sample_size += targ_u_list[0].numel()
if self.loss_weights is not None:
assert hasattr(model, "get_extra_losses")
extra_losses, names = model.get_extra_losses(net_output)
if torch.is_tensor(extra_losses):
extra_losses = [extra_losses]
names = [names]
if len(self.loss_weights) == 1 and len(extra_losses) != 1:
self.loss_weights = [self.loss_weights[0]] * len(extra_losses)
assert len(extra_losses) == len(self.loss_weights), f"{len(extra_losses)}, {len(self.loss_weights)}"
for p, n, coef in zip(extra_losses, names, self.loss_weights):
if coef != 0 and p is not None:
p = coef * p.float() * sample_size
loss += p
logging_output[f"loss_{n}"] = p.item()
logging_output = {
"loss": loss.item() if reduce else loss,
"ntokens": sample_size,
"nsentences": sample["id"].numel(),
"sample_size": sample_size,
**logging_output,
}
for lk in self.log_keys:
if lk in net_output:
logging_output[lk] = float((net_output[lk]))
with torch.no_grad():
for i, logp_m in enumerate(logp_m_list):
# corr_m, count_m = compute_correct(logp_m)
if logp_m.numel() == 0:
corr_m, count_m = 0, 0
else:
corr_m, count_m = (logp_m.argmax(dim=-1)==targ_m_list[i]).sum().item(), len(targ_m_list[i])
logging_output[f"correct_m_{i}"] = corr_m
logging_output[f"count_m_{i}"] = count_m
for i, logp_u in enumerate(logp_u_list):
if logp_u.numel() == 0:
corr_u, count_u = 0, 0
else:
corr_u, count_u = (logp_u.argmax(dim=-1)==targ_u_list[i]).sum().item(), len(targ_u_list[i])
logging_output[f"correct_u_{i}"] = corr_u
logging_output[f"count_u_{i}"] = count_u
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training (copied from normal cross entropy)."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar("loss", loss_sum / sample_size / math.log(2), sample_size, round=3)
if sample_size != ntokens:
metrics.log_scalar("nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3)
metrics.log_derived("ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg))
else:
metrics.log_derived("ppl", lambda meters: utils.get_perplexity(meters["loss"].avg))
counts = {}
for lk in logging_outputs[0].keys():
if lk.startswith("count_"):
val = sum(log[lk] for log in logging_outputs)
metrics.log_scalar(lk, val)
counts[lk] = val
for lk in logging_outputs[0].keys():
if lk.startswith("loss_"):
val = sum(log[lk] for log in logging_outputs)
metrics.log_scalar(lk, val / sample_size / math.log(2), round=3)
elif lk.startswith("correct_"):
val = sum(log[lk] for log in logging_outputs)
metrics.log_scalar(lk, val / counts[re.sub("correct", "count", lk)])
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
raise NotImplementedError()
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return False
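# Illustrative sketch only (not part of the criterion above): the masked-frame
# term is a plain cross entropy over the frames selected by the mask, and the
# logged accuracy is an argmax match against the same targets, exactly as in
# `forward`. The shapes below are made-up example values.
def _example_masked_frame_loss():
    import torch
    import torch.nn.functional as F

    logp_m = torch.randn(6, 100)          # 6 masked frames, 100 cluster targets
    targ_m = torch.randint(0, 100, (6,))  # one label per masked frame
    loss_m = F.cross_entropy(logp_m, targ_m, reduction="sum")
    corr_m = (logp_m.argmax(dim=-1) == targ_m).sum().item()
    count_m = len(targ_m)
    return loss_m, corr_m, count_m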
| av_hubert-main | avhubert/hubert_criterion.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from collections import OrderedDict

import torch
import torch.nn as nn
logger = logging.getLogger(__name__)
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
def downsample_basic_block( inplanes, outplanes, stride ):
return nn.Sequential(
nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(outplanes),
)
def downsample_basic_block_v2( inplanes, outplanes, stride ):
return nn.Sequential(
nn.AvgPool2d(kernel_size=stride, stride=stride, ceil_mode=True, count_include_pad=False),
nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(outplanes),
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, relu_type = 'relu' ):
super(BasicBlock, self).__init__()
assert relu_type in ['relu','prelu']
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
if relu_type == 'relu':
self.relu1 = nn.ReLU(inplace=True)
self.relu2 = nn.ReLU(inplace=True)
elif relu_type == 'prelu':
self.relu1 = nn.PReLU(num_parameters=planes)
self.relu2 = nn.PReLU(num_parameters=planes)
else:
raise Exception('relu type not implemented')
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu2(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, relu_type = 'relu', gamma_zero = False, avg_pool_downsample = False):
self.inplanes = 64
self.relu_type = relu_type
self.gamma_zero = gamma_zero
self.downsample_block = downsample_basic_block_v2 if avg_pool_downsample else downsample_basic_block
super(ResNet, self).__init__()
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d(1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
if self.gamma_zero:
for m in self.modules():
if isinstance(m, BasicBlock ):
m.bn2.weight.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = self.downsample_block( inplanes = self.inplanes,
outplanes = planes * block.expansion,
stride = stride )
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, relu_type = self.relu_type))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, relu_type = self.relu_type))
return nn.Sequential(*layers)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
return x
class ResEncoder(nn.Module):
def __init__(self, relu_type, weights):
super(ResEncoder, self).__init__()
self.frontend_nout = 64
self.backend_out = 512
frontend_relu = nn.PReLU(num_parameters=self.frontend_nout) if relu_type == 'prelu' else nn.ReLU()
self.frontend3D = nn.Sequential(
nn.Conv3d(1, self.frontend_nout, kernel_size=(5, 7, 7), stride=(1, 2, 2), padding=(2, 3, 3), bias=False),
nn.BatchNorm3d(self.frontend_nout),
frontend_relu,
nn.MaxPool3d( kernel_size=(1, 3, 3), stride=(1, 2, 2), padding=(0, 1, 1)))
self.trunk = ResNet(BasicBlock, [2, 2, 2, 2], relu_type=relu_type)
if weights is not None:
logger.info(f"Load {weights} for resnet")
std = torch.load(weights, map_location=torch.device('cpu'))['model_state_dict']
frontend_std, trunk_std = OrderedDict(), OrderedDict()
for key, val in std.items():
new_key = '.'.join(key.split('.')[1:])
if 'frontend3D' in key:
frontend_std[new_key] = val
if 'trunk' in key:
trunk_std[new_key] = val
self.frontend3D.load_state_dict(frontend_std)
self.trunk.load_state_dict(trunk_std)
def forward(self, x):
B, C, T, H, W = x.size()
x = self.frontend3D(x)
Tnew = x.shape[2]
x = self.threeD_to_2D_tensor(x)
x = self.trunk(x)
x = x.view(B, Tnew, x.size(1))
x = x.transpose(1, 2).contiguous()
return x
def threeD_to_2D_tensor(self, x):
n_batch, n_channels, s_time, sx, sy = x.shape
x = x.transpose(1, 2).contiguous()
return x.reshape(n_batch*s_time, n_channels, sx, sy)
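# Illustrative sketch only: rough shape flow through ResEncoder. The 3D frontend
# keeps the time dimension (stride 1 in time) while the ResNet trunk collapses
# each frame to a single 512-dim vector, so a video clip comes out as
# [B, 512, T]. The batch size and clip length below are made-up example values.
def _example_resencoder_shapes():
    encoder = ResEncoder(relu_type='prelu', weights=None)
    clip = torch.randn(2, 1, 5, 88, 88)  # [B, C=1, T=5, H=88, W=88]
    feats = encoder(clip)
    assert feats.shape == (2, 512, 5)
    return feats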
| av_hubert-main | avhubert/resnet.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys,logging
import contextlib
import tempfile
from argparse import Namespace
from typing import Any, Optional
import torch
import torch.nn as nn
from dataclasses import dataclass, field
from fairseq import checkpoint_utils, tasks, utils
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.models import BaseFairseqModel, FairseqEncoder, FairseqEncoderDecoderModel, register_model
from fairseq.models.hubert.hubert import MASKING_DISTRIBUTION_CHOICES
from fairseq.tasks import FairseqTask
from omegaconf import II, MISSING
DBG=True if len(sys.argv) == 1 else False
if DBG:
from hubert import AVHubertModel
from decoder import TransformerDecoder
else:
from .hubert import AVHubertModel
from .decoder import TransformerDecoder
logger = logging.getLogger(__name__)
@dataclass
class AVHubertAsrConfig(FairseqDataclass):
w2v_path: str = field(
default=MISSING, metadata={"help": "path to hubert model"}
)
no_pretrained_weights: bool = field(
default=False,
metadata={"help": "if true, does not load pretrained weights"},
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
final_dropout: float = field(
default=0.0,
metadata={
"help": "dropout after transformer and before final projection"
},
)
dropout: float = field(
default=0.0,
metadata={"help": "dropout probability inside hubert model"},
)
attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights "
"inside hubert model"
},
)
activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN "
"inside hubert model"
},
)
# masking
apply_mask: bool = field(
default=False, metadata={"help": "apply masking during fine-tuning"}
)
mask_length: int = field(
default=10, metadata={"help": "repeat the mask indices multiple times"}
)
mask_prob: float = field(
default=0.5,
metadata={
"help": "probability of replacing a token with mask "
"(normalized by length)"
},
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose masks"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
# channel masking
mask_channel_length: int = field(
default=10,
metadata={"help": "length of the mask for features (channels)"},
)
mask_channel_prob: float = field(
default=0.0,
metadata={"help": "probability of replacing a feature with 0"},
)
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indices"
},
)
no_mask_channel_overlap: bool = field(
default=False,
metadata={"help": "whether to allow channel masks to overlap"},
)
freeze_finetune_updates: int = field(
default=0,
metadata={"help": "dont finetune hubert for this many updates"},
)
feature_grad_mult: float = field(
default=0.0,
metadata={"help": "reset feature grad mult in hubert to this"},
)
layerdrop: float = field(
default=0.0,
metadata={"help": "probability of dropping a layer in hubert"},
)
normalize: bool = II("task.normalize")
data: str = II("task.data")
# this holds the loaded hubert args
w2v_args: Any = None
@dataclass
class AVHubertCtcConfig(AVHubertAsrConfig):
pass
@register_model("av_hubert_ctc", dataclass=AVHubertCtcConfig)
class AVHubertCtc(BaseFairseqModel):
def __init__(self, cfg: AVHubertCtcConfig, w2v_encoder: BaseFairseqModel):
super().__init__()
self.cfg = cfg
self.w2v_encoder = w2v_encoder
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: AVHubertCtcConfig, task: FairseqTask):
"""Build a new model instance."""
w2v_encoder = HubertEncoder(cfg, task.target_dictionary)
return cls(cfg, w2v_encoder)
def get_normalized_probs(self, net_output, log_probs):
"""Get normalized probabilities (or log probs) from a net's output."""
logits = net_output["encoder_out"]
if log_probs:
return utils.log_softmax(logits.float(), dim=-1)
else:
return utils.softmax(logits.float(), dim=-1)
def get_logits(self, net_output):
logits = net_output["encoder_out"]
padding = net_output["encoder_padding_mask"]
if padding is not None and padding.any():
padding = padding.T
logits[padding][..., 0] = 0
logits[padding][..., 1:] = float("-inf")
return logits
def forward(self, **kwargs):
x = self.w2v_encoder(**kwargs)
return x
@dataclass
class AVHubertSeq2SeqConfig(AVHubertAsrConfig):
decoder_embed_dim: int = field(
default=768, metadata={"help": "decoder embedding dimension"}
)
decoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(
default=6, metadata={"help": "num of decoder layers"}
)
decoder_layerdrop: float = field(
default=0.0, metadata={"help": "decoder layerdrop chance"}
)
decoder_attention_heads: int = field(
default=4, metadata={"help": "num decoder attention heads"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
decoder_normalize_before: bool = field(
default=False,
metadata={"help": "apply layernorm before each decoder block"},
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings "
"(outside self attention)"
},
)
decoder_dropout: float = field(
default=0.0, metadata={"help": "dropout probability in the decoder"}
)
decoder_attention_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability for attention weights "
"inside the decoder"
},
)
decoder_activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN "
"inside the decoder"
},
)
max_target_positions: int = field(
default=2048, metadata={"help": "max target positions"}
)
share_decoder_input_output_embed: bool = field(
default=False,
metadata={"help": "share decoder input and output embeddings"},
)
no_scale_embedding: bool = field(default=True, metadata={'help': 'scale embedding'})
class HubertEncoder(FairseqEncoder):
def __init__(self, cfg: AVHubertAsrConfig, tgt_dict=None):
self.apply_mask = cfg.apply_mask
arg_overrides = {
"dropout": cfg.dropout,
"activation_dropout": cfg.activation_dropout,
"dropout_input": cfg.dropout_input,
"attention_dropout": cfg.attention_dropout,
"mask_length": cfg.mask_length,
"mask_prob": cfg.mask_prob,
"mask_selection": cfg.mask_selection,
"mask_other": cfg.mask_other,
"no_mask_overlap": cfg.no_mask_overlap,
"mask_channel_length": cfg.mask_channel_length,
"mask_channel_prob": cfg.mask_channel_prob,
"mask_channel_selection": cfg.mask_channel_selection,
"mask_channel_other": cfg.mask_channel_other,
"no_mask_channel_overlap": cfg.no_mask_channel_overlap,
"encoder_layerdrop": cfg.layerdrop,
"feature_grad_mult": cfg.feature_grad_mult,
}
if cfg.w2v_args is None:
state = checkpoint_utils.load_checkpoint_to_cpu(
cfg.w2v_path, arg_overrides
)
w2v_args = state.get("cfg", None)
if w2v_args is None:
w2v_args = convert_namespace_to_omegaconf(state["args"])
cfg.w2v_args = w2v_args
else:
state = None
w2v_args = cfg.w2v_args
if isinstance(w2v_args, Namespace):
cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(
w2v_args
)
assert cfg.normalize == w2v_args.task.normalize, (
"Fine-tuning works best when data normalization is the same. "
"Please check that --normalize is set or unset for "
"both pre-training and here"
)
w2v_args.task.data = cfg.data
task = tasks.setup_task(w2v_args.task)
model = task.build_model(w2v_args.model)
if state is not None and not cfg.no_pretrained_weights:
# set strict=False because we omit some modules
model.load_state_dict(state["model"], strict=False)
model.remove_pretraining_modules()
super().__init__(task.source_dictionary)
d = model.encoder.embedding_dim
self.w2v_model = model
self.final_dropout = nn.Dropout(cfg.final_dropout)
self.freeze_finetune_updates = cfg.freeze_finetune_updates
self.num_updates = 0
if tgt_dict is not None:
self.proj = Linear(d, len(tgt_dict))
elif getattr(cfg, "decoder_embed_dim", d) != d:
self.proj = Linear(d, cfg.decoder_embed_dim)
else:
self.proj = None
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def forward(self, source, padding_mask, tbc=True, **kwargs):
w2v_args = {
"source": source,
"padding_mask": padding_mask,
"mask": self.apply_mask and self.training,
}
ft = self.freeze_finetune_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
x, padding_mask = self.w2v_model.extract_finetune(**w2v_args)
if tbc:
# B x T x C -> T x B x C
x = x.transpose(0, 1)
x = self.final_dropout(x)
if self.proj:
x = self.proj(x)
return {
"encoder_out": x, # T x B x C
"encoder_padding_mask": padding_mask, # B x T
"padding_mask": padding_mask,
}
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = encoder_out[
"encoder_out"
].index_select(1, new_order)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(0, new_order)
return encoder_out
def max_positions(self):
"""Maximum input length supported by the encoder."""
return None
def upgrade_state_dict_named(self, state_dict, name):
return state_dict
class HubertEncoderWrapper(FairseqEncoder):
def __init__(self, w2v_model):
super().__init__(None)
self.w2v_model = w2v_model
def forward(self, source, padding_mask, **kwargs):
w2v_args = {
"source": source,
"padding_mask": padding_mask,
}
x, padding_mask = self.w2v_model.extract_finetune(**w2v_args)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
return {
"encoder_out": x, # T x B x C
"encoder_padding_mask": padding_mask, # B x T
"padding_mask": padding_mask
}
def reorder_encoder_out(self, encoder_out, new_order):
if encoder_out["encoder_out"] is not None:
encoder_out["encoder_out"] = encoder_out[
"encoder_out"
].index_select(1, new_order)
if encoder_out["encoder_padding_mask"] is not None:
encoder_out["encoder_padding_mask"] = encoder_out[
"encoder_padding_mask"
].index_select(0, new_order)
if encoder_out["padding_mask"] is not None:
encoder_out["padding_mask"] = encoder_out[
"padding_mask"
].index_select(0, new_order)
return encoder_out
@register_model("av_hubert_seq2seq", dataclass=AVHubertSeq2SeqConfig)
class AVHubertSeq2Seq(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder, tgt_dict, cfg):
super().__init__(encoder, decoder)
self.cfg = cfg
self.freeze_finetune_updates = cfg.freeze_finetune_updates
@classmethod
def build_model(cls, cfg, task):
"""Build a new model instance."""
arg_overrides = {
"dropout": cfg.dropout,
"activation_dropout": cfg.activation_dropout,
"dropout_input": cfg.dropout_input,
"attention_dropout": cfg.attention_dropout,
"mask_length": cfg.mask_length,
"mask_prob": cfg.mask_prob,
"mask_selection": cfg.mask_selection,
"mask_other": cfg.mask_other,
"no_mask_overlap": cfg.no_mask_overlap,
"mask_channel_length": cfg.mask_channel_length,
"mask_channel_prob": cfg.mask_channel_prob,
"mask_channel_selection": cfg.mask_channel_selection,
"mask_channel_other": cfg.mask_channel_other,
"no_mask_channel_overlap": cfg.no_mask_channel_overlap,
"encoder_layerdrop": cfg.layerdrop,
"feature_grad_mult": cfg.feature_grad_mult,
}
if cfg.w2v_args is None:
state = checkpoint_utils.load_checkpoint_to_cpu(
cfg.w2v_path, arg_overrides
)
w2v_args = state.get("cfg", None)
if w2v_args is None:
w2v_args = convert_namespace_to_omegaconf(state["args"])
cfg.w2v_args = w2v_args
else:
state = None
w2v_args = cfg.w2v_args
if isinstance(w2v_args, Namespace):
cfg.w2v_args = w2v_args = convert_namespace_to_omegaconf(
w2v_args
)
assert cfg.normalize == w2v_args.task.normalize, (
"Fine-tuning works best when data normalization is the same. "
"Please check that --normalize is set or unset for "
"both pre-training and here"
)
w2v_args.task.data = cfg.data
task_pretrain = tasks.setup_task(w2v_args.task)
if state is not None:
task_pretrain.load_state_dict(state['task_state'])
encoder_ = task_pretrain.build_model(w2v_args.model)
encoder = HubertEncoderWrapper(encoder_)
if state is not None and not cfg.no_pretrained_weights:
# set strict=False because we omit some modules
del state['model']['mask_emb']
encoder.w2v_model.load_state_dict(state["model"], strict=False)
encoder.w2v_model.remove_pretraining_modules()
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
def build_embedding(dictionary, embed_dim):
num_embeddings = len(dictionary)
padding_idx = dictionary.pad()
emb = Embedding(num_embeddings, embed_dim, padding_idx=padding_idx)
return emb
decoder_embed_tokens = build_embedding(tgt_dict, cfg.decoder_embed_dim)
decoder = TransformerDecoder(cfg, tgt_dict, decoder_embed_tokens)
return AVHubertSeq2Seq(encoder, decoder, tgt_dict, cfg)
def forward(self, **kwargs):
ft = self.freeze_finetune_updates <= self.num_updates
with torch.no_grad() if not ft else contextlib.ExitStack():
output = self.encoder(**kwargs)
decoder_out = self.decoder(prev_output_tokens=kwargs['prev_output_tokens'], encoder_out=output)
return decoder_out
def upgrade_state_dict_named(self, state_dict, name):
super().upgrade_state_dict_named(state_dict, name)
return state_dict
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
super().set_num_updates(num_updates)
self.num_updates = num_updates
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
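# Illustrative sketch only: `freeze_finetune_updates` above is implemented with a
# conditional context manager -- `torch.no_grad()` while the pre-trained encoder
# is still frozen, and a no-op `contextlib.ExitStack()` once enough updates have
# passed. The toy module and threshold below are made-up example values.
def _example_conditional_freeze(num_updates, freeze_until=100):
    encoder = nn.Linear(4, 4)
    x = torch.randn(2, 4)
    ft = freeze_until <= num_updates
    with torch.no_grad() if not ft else contextlib.ExitStack():
        out = encoder(x)
    # while frozen, `out` carries no gradient history (requires_grad is False)
    return out.requires_grad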
| av_hubert-main | avhubert/hubert_asr.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import cv2
import torch
import random
import numpy as np
from typing import Dict, List, Optional, Tuple
def load_video(path):
for i in range(3):
try:
cap = cv2.VideoCapture(path)
frames = []
while True:
ret, frame = cap.read()
if ret:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
frames.append(frame)
else:
break
frames = np.stack(frames)
return frames
except Exception:
print(f"failed loading {path} ({i} / 3)")
if i == 2:
raise ValueError(f"Unable to load {path}")
class Compose(object):
"""Compose several preprocess together.
Args:
preprocess (list of ``Preprocess`` objects): list of preprocess to compose.
"""
def __init__(self, preprocess):
self.preprocess = preprocess
def __call__(self, sample):
for t in self.preprocess:
sample = t(sample)
return sample
def __repr__(self):
format_string = self.__class__.__name__ + '('
for t in self.preprocess:
format_string += '\n'
format_string += ' {0}'.format(t)
format_string += '\n)'
return format_string
class Normalize(object):
"""Normalize a ndarray image with mean and standard deviation.
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, frames):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized Tensor image.
"""
frames = (frames - self.mean) / self.std
return frames
def __repr__(self):
return self.__class__.__name__+'(mean={0}, std={1})'.format(self.mean, self.std)
class CenterCrop(object):
"""Crop the given image at the center
"""
def __init__(self, size):
self.size = size
def __call__(self, frames):
"""
Args:
img (numpy.ndarray): Images to be cropped.
Returns:
numpy.ndarray: Cropped image.
"""
t, h, w = frames.shape
th, tw = self.size
delta_w = int(round((w - tw))/2.)
delta_h = int(round((h - th))/2.)
frames = frames[:, delta_h:delta_h+th, delta_w:delta_w+tw]
return frames
class RandomCrop(object):
"""Crop the given image at the center
"""
def __init__(self, size):
self.size = size
def __call__(self, frames):
"""
Args:
img (numpy.ndarray): Images to be cropped.
Returns:
numpy.ndarray: Cropped image.
"""
t, h, w = frames.shape
th, tw = self.size
delta_w = random.randint(0, w-tw)
delta_h = random.randint(0, h-th)
frames = frames[:, delta_h:delta_h+th, delta_w:delta_w+tw]
return frames
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
class HorizontalFlip(object):
"""Flip image horizontally.
"""
def __init__(self, flip_ratio):
self.flip_ratio = flip_ratio
def __call__(self, frames):
"""
Args:
img (numpy.ndarray): Images to be flipped with a probability flip_ratio
Returns:
            numpy.ndarray: Flipped images.
"""
t, h, w = frames.shape
if random.random() < self.flip_ratio:
for index in range(t):
frames[index] = cv2.flip(frames[index], 1)
return frames
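# Illustrative sketch only: these transforms are meant to be chained with
# `Compose`, mirroring how the dataset code builds its video pipeline (scale to
# [0, 1], crop to the model's input size, optionally flip, then standardize).
# The mean/std values and frame sizes below are made-up example values.
def _example_video_transform():
    transform = Compose([
        Normalize(0.0, 255.0),
        RandomCrop((88, 88)),
        HorizontalFlip(0.5),
        Normalize(0.421, 0.165),
    ])
    frames = np.random.randint(0, 255, size=(5, 96, 96)).astype(np.float32)  # [T, H, W]
    return transform(frames)  # [T, 88, 88]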
def compute_mask_indices(
shape: Tuple[int, int],
padding_mask: Optional[torch.Tensor],
mask_prob: float,
mask_length: int,
mask_type: str = "static",
mask_other: float = 0.0,
min_masks: int = 0,
no_overlap: bool = False,
min_space: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape
Args:
        shape: the shape for which to compute masks.
should be of size 2 where first element is batch size and 2nd is timesteps
padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by
number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
however due to overlaps, the actual number will be smaller (unless no_overlap is True)
mask_type: how to compute mask lengths
static = fixed size
uniform = sample from uniform distribution [mask_other, mask_length*2]
normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
            poisson = sample from poisson distribution with lambda = mask length
min_masks: minimum number of masked spans
        no_overlap: if true, will use an alternative recursive algorithm that prevents spans from overlapping
min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
"""
bsz, all_sz = shape
mask = np.full((bsz, all_sz), False)
all_num_mask = int(
# add a random number for probabilistic rounding
mask_prob * all_sz / float(mask_length)
+ np.random.rand()
)
all_num_mask = max(min_masks, all_num_mask)
mask_idcs = []
for i in range(bsz):
if padding_mask is not None:
sz = all_sz - padding_mask[i].long().sum().item()
num_mask = int(
# add a random number for probabilistic rounding
mask_prob * sz / float(mask_length)
+ np.random.rand()
)
num_mask = max(min_masks, num_mask)
else:
sz = all_sz
num_mask = all_num_mask
if mask_type == "static":
lengths = np.full(num_mask, mask_length)
elif mask_type == "uniform":
lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
elif mask_type == "normal":
lengths = np.random.normal(mask_length, mask_other, size=num_mask)
lengths = [max(1, int(round(x))) for x in lengths]
elif mask_type == "poisson":
lengths = np.random.poisson(mask_length, size=num_mask)
lengths = [int(round(x)) for x in lengths]
else:
raise Exception("unknown mask selection " + mask_type)
if sum(lengths) == 0:
lengths[0] = min(mask_length, sz - 1)
if no_overlap:
mask_idc = []
def arrange(s, e, length, keep_length):
span_start = np.random.randint(s, e - length)
mask_idc.extend(span_start + i for i in range(length))
new_parts = []
if span_start - s - min_space >= keep_length:
new_parts.append((s, span_start - min_space + 1))
if e - span_start - keep_length - min_space > keep_length:
new_parts.append((span_start + length + min_space, e))
return new_parts
parts = [(0, sz)]
min_length = min(lengths)
for length in sorted(lengths, reverse=True):
lens = np.fromiter(
(e - s if e - s >= length + min_space else 0 for s, e in parts),
                    int,  # np.int is removed in recent NumPy; plain int keeps the original behavior
)
l_sum = np.sum(lens)
if l_sum == 0:
break
probs = lens / np.sum(lens)
c = np.random.choice(len(parts), p=probs)
s, e = parts.pop(c)
parts.extend(arrange(s, e, length, min_length))
mask_idc = np.asarray(mask_idc)
else:
min_len = min(lengths)
if sz - min_len <= num_mask:
min_len = sz - num_mask - 1
mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
]
)
mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))
min_len = min([len(m) for m in mask_idcs])
batch_indexes, starts, ends = [], [], []
for i, mask_idc in enumerate(mask_idcs):
if len(mask_idc) > min_len:
mask_idc = np.random.choice(mask_idc, min_len, replace=False)
mask[i, mask_idc] = True
vals, run_starts, run_lengths = find_runs(mask[i])
start_indices, lengths = run_starts[vals == True], run_lengths[vals == True]
starts.append(start_indices)
ends.append(start_indices+lengths)
batch_indexes.append(np.zeros([len(start_indices)])+i)
return mask, np.concatenate(starts).astype(np.int64), np.concatenate(ends).astype(np.int64), np.concatenate(batch_indexes).astype(np.int64)
def find_runs(x):
"""Find runs of consecutive items in an array."""
# ensure array
x = np.asanyarray(x)
if x.ndim != 1:
raise ValueError('only 1D array supported')
n = x.shape[0]
# handle empty array
if n == 0:
return np.array([]), np.array([]), np.array([])
else:
# find run starts
loc_run_start = np.empty(n, dtype=bool)
loc_run_start[0] = True
np.not_equal(x[:-1], x[1:], out=loc_run_start[1:])
run_starts = np.nonzero(loc_run_start)[0]
# find run values
run_values = x[loc_run_start]
# find run lengths
run_lengths = np.diff(np.append(run_starts, n))
return run_values, run_starts, run_lengths
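# Illustrative sketch only: a minimal call to compute_mask_indices. For a batch
# of two 20-frame sequences with mask_prob=0.5 and mask_length=3 it returns a
# boolean [B, T] mask plus flattened start/end/batch indices for every masked
# span (the extra outputs this file adds on top of the fairseq helper). All
# sizes below are made-up example values.
def _example_compute_mask_indices():
    mask, starts, ends, batch_idx = compute_mask_indices(
        shape=(2, 20),
        padding_mask=None,
        mask_prob=0.5,
        mask_length=3,
        mask_type="static",
    )
    assert mask.shape == (2, 20)
    assert len(starts) == len(ends) == len(batch_idx)
    return mask, starts, ends, batch_idx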
| av_hubert-main | avhubert/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import os
import sys
import time
from typing import Any, List, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data import data_utils
from fairseq.data.fairseq_dataset import FairseqDataset
from python_speech_features import logfbank
from scipy.io import wavfile
DBG=True if len(sys.argv) == 1 else False
if DBG:
import utils as custom_utils
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "DEBUG").upper(),
stream=sys.stdout,
)
else:
from . import utils as custom_utils
logger = logging.getLogger(__name__)
def load_audio_visual(manifest_path, max_keep, min_keep, frame_rate, label_paths, label_rates, tol=0.1):
def is_audio_label_aligned(audio_dur, label_durs):
return all([abs(audio_dur - label_dur)<tol for label_dur in label_durs])
n_long, n_short, n_unaligned = 0, 0, 0
names, inds, sizes = [], [], []
dur_from_label_list = []
is_seq_label = any([x==-1 for x in label_rates])
for label_path, label_rate in zip(label_paths, label_rates):
label_lengths = [len(line.rstrip().split())/label_rate for line in open(label_path).readlines()]
dur_from_label_list.append(label_lengths)
dur_from_label_list = list(zip(*dur_from_label_list))
with open(manifest_path) as f:
root = f.readline().strip()
for ind, line in enumerate(f):
items = line.strip().split("\t")
            sz = int(items[-2])
if min_keep is not None and sz < min_keep:
n_short += 1
elif max_keep is not None and sz > max_keep:
n_long += 1
elif (not is_seq_label) and (not is_audio_label_aligned(sz/frame_rate, dur_from_label_list[ind])):
n_unaligned += 1
else:
video_path = items[1]
audio_path = items[2]
audio_id = items[0]
names.append((video_path, audio_path+':'+audio_id))
inds.append(ind)
sizes.append(sz)
tot = ind + 1
logger.info(
(
f"max_keep={max_keep}, min_keep={min_keep}, "
f"loaded {len(names)}, skipped {n_short} short and {n_long} long and {n_unaligned} unaligned, "
f"longest-loaded={max(sizes)}, shortest-loaded={min(sizes)}"
)
)
return root, names, inds, tot, sizes
def load_label(label_path, inds, tot):
with open(label_path) as f:
labels = [line.rstrip() for line in f]
assert (
len(labels) == tot
), f"number of labels does not match ({len(labels)} != {tot})"
labels = [labels[i] for i in inds]
return labels
def load_label_offset(label_path, inds, tot):
with open(label_path) as f:
code_lengths = [len(line.encode("utf-8")) for line in f]
assert (
len(code_lengths) == tot
), f"number of labels does not match ({len(code_lengths)} != {tot})"
offsets = list(itertools.accumulate([0] + code_lengths))
offsets = [(offsets[i], offsets[i + 1]) for i in inds]
return offsets
def verify_label_lengths(
audio_sizes,
audio_rate,
label_path,
label_rate,
inds,
tot,
tol=0.1, # tolerance in seconds
):
if label_rate < 0:
logger.info(f"{label_path} is sequence label. skipped")
return
with open(label_path) as f:
lengths = [len(line.rstrip().split()) for line in f]
assert len(lengths) == tot
lengths = [lengths[i] for i in inds]
num_invalid = 0
for i, ind in enumerate(inds):
dur_from_audio = audio_sizes[i] / audio_rate
dur_from_label = lengths[i] / label_rate
if abs(dur_from_audio - dur_from_label) > tol:
logger.warning(
(
f"audio and label duration differ too much "
f"(|{dur_from_audio} - {dur_from_label}| > {tol}) "
f"in line {ind+1} of {label_path}. Check if `label_rate` "
f"is correctly set (currently {label_rate}). "
f"num. of samples = {audio_sizes[i]}; "
f"label length = {lengths[i]}"
)
)
num_invalid += 1
if num_invalid > 0:
logger.warning(
f"total {num_invalid} (audio, label) pairs with mismatched lengths"
)
class AVHubertDataset(FairseqDataset):
def __init__(
self,
manifest_path: str,
sample_rate: float,
label_paths: List[str],
label_rates: Union[List[float], float], # -1 for sequence labels
pad_list: List[str],
eos_list: List[str],
label_processors: Optional[List[Any]] = None,
max_keep_sample_size: Optional[int] = None,
min_keep_sample_size: Optional[int] = None,
max_sample_size: Optional[int] = None,
shuffle: bool = True,
pad_audio: bool = False,
normalize: bool = False,
store_labels: bool = True,
random_crop: bool = False,
single_target: bool = False,
stack_order_audio: int=1,
skip_verify: bool=False,
image_mean: float=0,
image_std: float=1,
image_crop_size: int=88,
image_aug: bool=False,
modalities: Optional[List[str]]=None,
is_s2s=False,
noise_fn=None,
noise_prob=0,
noise_snr=0,
noise_num=1
):
self.label_rates = (
[label_rates for _ in range(len(label_paths))]
if isinstance(label_rates, int)
else label_rates
)
self.modalities = set(modalities)
self.audio_root, self.names, inds, tot, self.sizes = load_audio_visual(manifest_path, max_keep_sample_size, min_keep_sample_size, frame_rate=sample_rate, label_paths=label_paths, label_rates=self.label_rates)
self.sample_rate = sample_rate
self.stack_order_audio = stack_order_audio
self.shuffle = shuffle
self.random_crop = random_crop
self.num_labels = len(label_paths)
self.pad_list = pad_list
self.eos_list = eos_list
self.label_processors = label_processors
self.single_target = single_target
self.store_labels = store_labels
self.is_s2s = is_s2s
self.noise_wav, self.noise_prob, self.noise_snr, self.noise_num = [ln.strip() for ln in open(noise_fn).readlines()] if noise_fn is not None else [], noise_prob, noise_snr, noise_num
assert self.single_target == (self.label_rates[0] == -1), f"single target should be equivalent to sequence label (label_rate==-1)"
if store_labels:
self.label_list = [load_label(p, inds, tot) for p in label_paths]
else:
self.label_paths = label_paths
self.label_offsets_list = [
load_label_offset(p, inds, tot) for p in label_paths
]
assert (
label_processors is None
or len(label_processors) == self.num_labels
)
if not skip_verify:
for label_path, label_rate in zip(label_paths, self.label_rates):
verify_label_lengths(self.sizes, self.sample_rate, label_path, label_rate, inds, tot)
else:
logger.info(f"Skip label alignment verifying")
self.max_sample_size = (
max_sample_size if max_sample_size is not None else sys.maxsize
)
self.pad_audio = pad_audio
self.normalize = normalize
if image_aug:
self.transform = custom_utils.Compose([
custom_utils.Normalize( 0.0,255.0 ),
custom_utils.RandomCrop((image_crop_size, image_crop_size)),
custom_utils.HorizontalFlip(0.5),
custom_utils.Normalize(image_mean, image_std) ])
else:
self.transform = custom_utils.Compose([
custom_utils.Normalize( 0.0,255.0 ),
custom_utils.CenterCrop((image_crop_size, image_crop_size)),
custom_utils.Normalize(image_mean, image_std) ])
logger.info(f"image transform: {self.transform}")
logger.info(
f"pad_audio={pad_audio}, random_crop={random_crop}, "
f"normalize={normalize}, max_sample_size={self.max_sample_size}, "
f"seqs2seq data={self.is_s2s},")
logger.info(
f"Noise wav: {noise_fn}->{len(self.noise_wav)} wav, Prob: {self.noise_prob}, SNR: {self.noise_snr}, Number of mixture: {self.noise_num}"
)
def get_label(self, index, label_idx):
if self.store_labels:
label = self.label_list[label_idx][index]
else:
with open(self.label_paths[label_idx]) as f:
offset_s, offset_e = self.label_offsets_list[label_idx][index]
f.seek(offset_s)
label = f.read(offset_e - offset_s)
if self.label_processors is not None:
label = self.label_processors[label_idx](label)
return label
def get_labels(self, index):
return [self.get_label(index, i) for i in range(self.num_labels)]
def load_feature(self, mix_name):
"""
Load image and audio feature
Returns:
video_feats: numpy.ndarray of shape [T, H, W, 1], audio_feats: numpy.ndarray of shape [T, F]
"""
def stacker(feats, stack_order):
"""
Concatenating consecutive audio frames
Args:
feats - numpy.ndarray of shape [T, F]
            stack_order - int (number of neighboring frames to concatenate)
Returns:
feats - numpy.ndarray of shape [T', F']
"""
feat_dim = feats.shape[1]
if len(feats) % stack_order != 0:
res = stack_order - len(feats) % stack_order
res = np.zeros([res, feat_dim]).astype(feats.dtype)
feats = np.concatenate([feats, res], axis=0)
feats = feats.reshape((-1, stack_order, feat_dim)).reshape(-1, stack_order*feat_dim)
return feats
video_fn, audio_fn = mix_name
if 'video' in self.modalities:
video_feats = self.load_video(video_fn) # [T, H, W, 1]
else:
video_feats = None
if 'audio' in self.modalities:
audio_fn = audio_fn.split(':')[0]
sample_rate, wav_data = wavfile.read(audio_fn)
assert sample_rate == 16_000 and len(wav_data.shape) == 1
if np.random.rand() < self.noise_prob:
wav_data = self.add_noise(wav_data)
audio_feats = logfbank(wav_data, samplerate=sample_rate).astype(np.float32) # [T, F]
audio_feats = stacker(audio_feats, self.stack_order_audio) # [T/stack_order_audio, F*stack_order_audio]
else:
audio_feats = None
if audio_feats is not None and video_feats is not None:
diff = len(audio_feats) - len(video_feats)
if diff < 0:
audio_feats = np.concatenate([audio_feats, np.zeros([-diff, audio_feats.shape[-1]], dtype=audio_feats.dtype)])
elif diff > 0:
audio_feats = audio_feats[:-diff]
return video_feats, audio_feats
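    # Illustrative sketch only: `stacker` above groups every `stack_order_audio`
    # consecutive log-filterbank frames into one feature vector (zero-padding the
    # tail if needed), e.g. 100 frames of 26-dim features with stack_order=4
    # become 25 frames of 104-dim features. The numbers are made-up examples.
    @staticmethod
    def _example_stack_order():
        feats = np.zeros([100, 26], dtype=np.float32)  # [T, F]
        stack_order, feat_dim = 4, feats.shape[1]
        if len(feats) % stack_order != 0:
            res = stack_order - len(feats) % stack_order
            feats = np.concatenate([feats, np.zeros([res, feat_dim], dtype=feats.dtype)], axis=0)
        stacked = feats.reshape((-1, stack_order, feat_dim)).reshape(-1, stack_order * feat_dim)
        assert stacked.shape == (25, 104)
        return stacked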
def load_video(self, audio_name):
feats = custom_utils.load_video(os.path.join(self.audio_root, audio_name))
feats = self.transform(feats)
feats = np.expand_dims(feats, axis=-1)
return feats
def select_noise(self):
rand_indexes = np.random.randint(0, len(self.noise_wav), size=self.noise_num)
noise_wav = []
for x in rand_indexes:
noise_wav.append(wavfile.read(self.noise_wav[x])[1].astype(np.float32))
if self.noise_num == 1:
return noise_wav[0]
else:
min_len = min([len(x) for x in noise_wav])
noise_wav = [x[:min_len] for x in noise_wav]
noise_wav = np.floor(np.stack(noise_wav).mean(axis=0))
return noise_wav
def add_noise(self, clean_wav):
clean_wav = clean_wav.astype(np.float32)
noise_wav = self.select_noise()
if type(self.noise_snr) == int or type(self.noise_snr) == float:
snr = self.noise_snr
elif type(self.noise_snr) == tuple:
snr = np.random.randint(self.noise_snr[0], self.noise_snr[1]+1)
clean_rms = np.sqrt(np.mean(np.square(clean_wav), axis=-1))
if len(clean_wav) > len(noise_wav):
ratio = int(np.ceil(len(clean_wav)/len(noise_wav)))
noise_wav = np.concatenate([noise_wav for _ in range(ratio)])
if len(clean_wav) < len(noise_wav):
start = 0
noise_wav = noise_wav[start: start + len(clean_wav)]
noise_rms = np.sqrt(np.mean(np.square(noise_wav), axis=-1))
adjusted_noise_rms = clean_rms / (10**(snr/20))
adjusted_noise_wav = noise_wav * (adjusted_noise_rms / noise_rms)
mixed = clean_wav + adjusted_noise_wav
#Avoid clipping noise
max_int16 = np.iinfo(np.int16).max
min_int16 = np.iinfo(np.int16).min
if mixed.max(axis=0) > max_int16 or mixed.min(axis=0) < min_int16:
if mixed.max(axis=0) >= abs(mixed.min(axis=0)):
reduction_rate = max_int16 / mixed.max(axis=0)
else :
reduction_rate = min_int16 / mixed.min(axis=0)
mixed = mixed * (reduction_rate)
mixed = mixed.astype(np.int16)
return mixed
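    # Illustrative sketch only: the SNR arithmetic used in `add_noise` above. To
    # mix noise at a target SNR (in dB), the noise is rescaled so that the ratio
    # of clean RMS to noise RMS equals 10 ** (snr / 20). The waveforms below are
    # made-up example values.
    @staticmethod
    def _example_snr_scaling(snr=10):
        clean_wav = np.random.randn(16000).astype(np.float32)
        noise_wav = np.random.randn(16000).astype(np.float32)
        clean_rms = np.sqrt(np.mean(np.square(clean_wav)))
        noise_rms = np.sqrt(np.mean(np.square(noise_wav)))
        adjusted_noise_rms = clean_rms / (10 ** (snr / 20))
        return clean_wav + noise_wav * (adjusted_noise_rms / noise_rms)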
def __getitem__(self, index):
video_feats, audio_feats = self.load_feature(self.names[index])
audio_feats, video_feats = torch.from_numpy(audio_feats.astype(np.float32)) if audio_feats is not None else None, torch.from_numpy(video_feats.astype(np.float32)) if video_feats is not None else None
if self.normalize and 'audio' in self.modalities:
with torch.no_grad():
audio_feats = F.layer_norm(audio_feats, audio_feats.shape[1:])
labels = self.get_labels(index)
fid = self.names[index][1].split(':')[1]
return {"id": index, 'fid': fid, "video_source": video_feats, 'audio_source': audio_feats, "label_list": labels}
def __len__(self):
return len(self.sizes)
def crop_to_max_size(self, wav, target_size, start=None):
size = len(wav)
diff = size - target_size
if diff <= 0:
return wav, 0
# longer utterances
if start is None:
start, end = 0, target_size
if self.random_crop:
start = np.random.randint(0, diff + 1)
end = size - diff + start
else:
end = start + target_size
return wav[start:end], start
def collater(self, samples):
samples = [s for s in samples if s["id"] is not None]
if len(samples) == 0:
return {}
audio_source, video_source = [s["audio_source"] for s in samples], [s["video_source"] for s in samples]
if audio_source[0] is None:
audio_source = None
if video_source[0] is None:
video_source = None
if audio_source is not None:
audio_sizes = [len(s) for s in audio_source]
else:
audio_sizes = [len(s) for s in video_source]
if self.pad_audio:
audio_size = min(max(audio_sizes), self.max_sample_size)
else:
audio_size = min(min(audio_sizes), self.max_sample_size)
if audio_source is not None:
collated_audios, padding_mask, audio_starts = self.collater_audio(audio_source, audio_size)
else:
collated_audios, audio_starts = None, None
if video_source is not None:
collated_videos, padding_mask, audio_starts = self.collater_audio(video_source, audio_size, audio_starts)
else:
collated_videos = None
targets_by_label = [
[s["label_list"][i] for s in samples]
for i in range(self.num_labels)
]
targets_list, lengths_list, ntokens_list = self.collater_label(
targets_by_label, audio_size, audio_starts
)
source = {"audio": collated_audios, "video": collated_videos}
net_input = {"source": source, "padding_mask": padding_mask}
batch = {
"id": torch.LongTensor([s["id"] for s in samples]),
"net_input": net_input,
"utt_id": [s['fid'] for s in samples]
}
if self.single_target:
batch["target_lengths"] = lengths_list[0]
batch["ntokens"] = ntokens_list[0]
if self.is_s2s:
batch['target'], net_input['prev_output_tokens'] = targets_list[0][0], targets_list[0][1]
else:
batch["target"] = targets_list[0]
else:
batch["target_lengths_list"] = lengths_list
batch["ntokens_list"] = ntokens_list
batch["target_list"] = targets_list
return batch
def collater_audio(self, audios, audio_size, audio_starts=None):
audio_feat_shape = list(audios[0].shape[1:])
collated_audios = audios[0].new_zeros([len(audios), audio_size]+audio_feat_shape)
padding_mask = (
            torch.BoolTensor(len(audios), audio_size).fill_(False)
)
start_known = audio_starts is not None
audio_starts = [0 for _ in audios] if not start_known else audio_starts
for i, audio in enumerate(audios):
diff = len(audio) - audio_size
if diff == 0:
collated_audios[i] = audio
elif diff < 0:
assert self.pad_audio
collated_audios[i] = torch.cat(
[audio, audio.new_full([-diff]+audio_feat_shape, 0.0)]
)
padding_mask[i, diff:] = True
else:
collated_audios[i], audio_starts[i] = self.crop_to_max_size(
audio, audio_size, audio_starts[i] if start_known else None
)
if len(audios[0].shape) == 2:
collated_audios = collated_audios.transpose(1, 2) # [B, T, F] -> [B, F, T]
else:
collated_audios = collated_audios.permute((0, 4, 1, 2, 3)).contiguous() # [B, T, H, W, C] -> [B, C, T, H, W]
return collated_audios, padding_mask, audio_starts
def collater_frm_label(
self, targets, audio_size, audio_starts, label_rate, pad
):
assert label_rate > 0
s2f = label_rate / self.sample_rate # num label per sample
frm_starts = [int(round(s * s2f)) for s in audio_starts]
frm_size = int(round(audio_size * s2f))
if not self.pad_audio:
rem_size = [len(t) - s for t, s in zip(targets, frm_starts)]
frm_size = min(frm_size, *rem_size)
targets = [t[s: s + frm_size] for t, s in zip(targets, frm_starts)]
logger.debug(f"audio_starts={audio_starts}")
logger.debug(f"frame_starts={frm_starts}")
logger.debug(f"frame_size={frm_size}")
lengths = torch.LongTensor([len(t) for t in targets])
ntokens = lengths.sum().item()
targets = data_utils.collate_tokens(
targets, pad_idx=pad, left_pad=False
)
return targets, lengths, ntokens
def collater_seq_label(self, targets, pad):
lengths = torch.LongTensor([len(t) for t in targets])
ntokens = lengths.sum().item()
targets = data_utils.collate_tokens(
targets, pad_idx=pad, left_pad=False
)
return targets, lengths, ntokens
def collater_seq_label_s2s(self, targets, pad):
lengths = torch.LongTensor([len(t) for t in targets])
ntokens = lengths.sum().item()
pad, eos = self.label_processors[0].dictionary.pad(), self.label_processors[0].dictionary.eos()
targets_ = data_utils.collate_tokens(targets, pad_idx=pad, eos_idx=eos, left_pad=False)
prev_output_tokens = data_utils.collate_tokens(targets, pad_idx=pad, eos_idx=eos, left_pad=False, move_eos_to_beginning=True)
return (targets_, prev_output_tokens), lengths, ntokens
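    # Illustrative sketch only: for seq2seq targets the collater returns both the
    # padded target batch and `prev_output_tokens`, i.e. the same tokens with the
    # EOS rotated to the front for teacher forcing. Shown here on a single toy
    # sequence; the token ids and eos value are made-up examples.
    @staticmethod
    def _example_prev_output_tokens(eos=2):
        target = torch.LongTensor([5, 6, 7, eos])
        prev_output_tokens = torch.cat([torch.LongTensor([eos]), target[:-1]])
        assert prev_output_tokens.tolist() == [eos, 5, 6, 7]
        return target, prev_output_tokens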
def collater_label(self, targets_by_label, audio_size, audio_starts):
targets_list, lengths_list, ntokens_list = [], [], []
itr = zip(targets_by_label, self.label_rates, self.pad_list)
for targets, label_rate, pad in itr:
if label_rate == -1:
if self.is_s2s:
targets, lengths, ntokens = self.collater_seq_label_s2s(targets, pad)
else:
targets, lengths, ntokens = self.collater_seq_label(targets, pad)
else:
targets, lengths, ntokens = self.collater_frm_label(
targets, audio_size, audio_starts, label_rate, pad
)
targets_list.append(targets)
lengths_list.append(lengths)
ntokens_list.append(ntokens)
return targets_list, lengths_list, ntokens_list
def num_tokens(self, index):
return self.size(index)
def size(self, index):
if self.pad_audio:
return self.sizes[index]
return min(self.sizes[index], self.max_sample_size)
def ordered_indices(self):
if self.shuffle:
order = [np.random.permutation(len(self))]
else:
order = [np.arange(len(self))]
order.append(self.sizes)
return np.lexsort(order)[::-1]
| av_hubert-main | avhubert/hubert_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os,sys
import logging
from typing import Dict, List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
from dataclasses import dataclass, field
from fairseq import utils
from fairseq.data.data_utils import compute_mask_indices
from fairseq.data.dictionary import Dictionary
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import BaseFairseqModel, register_model
from fairseq.models.wav2vec.wav2vec2 import (
ConvFeatureExtractionModel,
TransformerEncoder,
)
from fairseq.modules import GradMultiply, LayerNorm
from copy import deepcopy
DBG=True if len(sys.argv) == 1 else False
if DBG:
from hubert_pretraining import (
AVHubertPretrainingConfig,
AVHubertPretrainingTask,
)
from resnet import ResEncoder
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
from utils import compute_mask_indices
from decoder import TransformerDecoder
else:
from .hubert_pretraining import (
AVHubertPretrainingConfig,
AVHubertPretrainingTask,
)
from .resnet import ResEncoder
from .utils import compute_mask_indices
from .decoder import TransformerDecoder
from omegaconf import II
logger = logging.getLogger(__name__)
EXTRACTOR_MODE_CHOICES = ChoiceEnum(["default", "layer_norm"])
MASKING_DISTRIBUTION_CHOICES = ChoiceEnum(
["static", "uniform", "normal", "poisson"]
)
@dataclass
class AVHubertConfig(FairseqDataclass):
label_rate: int = II("task.label_rate")
input_modality: str = II("task.input_modality")
extractor_mode: EXTRACTOR_MODE_CHOICES = field(
default="default",
metadata={
"help": "mode for feature extractor. default has a single group "
"norm with d groups in the first conv block, whereas layer_norm "
"has layer norms in every block (meant to use with normalize=True)"
},
)
encoder_layers: int = field(
default=12, metadata={"help": "num encoder layers in the transformer"}
)
encoder_embed_dim: int = field(
default=768, metadata={"help": "encoder embedding dimension"}
)
encoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "encoder embedding dimension for FFN"}
)
encoder_attention_heads: int = field(
default=12, metadata={"help": "num encoder attention heads"}
)
activation_fn: ChoiceEnum(utils.get_available_activation_fns()) = field(
default="gelu", metadata={"help": "activation function to use"}
)
# dropouts
dropout: float = field(
default=0.1,
metadata={"help": "dropout probability for the transformer"},
)
attention_dropout: float = field(
default=0.1,
metadata={"help": "dropout probability for attention weights"},
)
activation_dropout: float = field(
default=0.0,
metadata={"help": "dropout probability after activation in FFN"},
)
encoder_layerdrop: float = field(
default=0.0,
metadata={"help": "probability of dropping a tarnsformer layer"},
)
dropout_input: float = field(
default=0.0,
metadata={"help": "dropout to apply to the input (after feat extr)"},
)
dropout_features: float = field(
default=0.0,
metadata={
"help": "dropout to apply to the features (after feat extr)"
},
)
final_dim: int = field(
default=0,
metadata={
"help": "project final representations and targets to this many "
"dimensions. set to encoder_embed_dim is <= 0"
},
)
untie_final_proj: bool = field(
default=False,
metadata={"help": "use separate projection for each target"},
)
layer_norm_first: bool = field(
default=False,
metadata={"help": "apply layernorm first in the transformer"},
)
conv_feature_layers: str = field(
default="[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2",
metadata={
"help": "string describing convolutional feature extraction "
"layers in form of a python list that contains "
"[(dim, kernel_size, stride), ...]"
},
)
conv_bias: bool = field(
default=False, metadata={"help": "include bias in conv encoder"}
)
logit_temp: float = field(
default=0.1, metadata={"help": "temperature to divide logits by"}
)
target_glu: bool = field(
default=False, metadata={"help": "adds projection + glu to targets"}
)
feature_grad_mult: float = field(
default=1.0,
metadata={"help": "multiply feature extractor var grads by this"},
)
# masking
mask_length_audio: int = field(default=10, metadata={"help": "mask length"})
mask_prob_audio: float = field(
default=0.65,
metadata={"help": "probability of replacing a token with mask"},
)
mask_length_image: int = field(default=10, metadata={"help": "mask length"})
mask_prob_image: float = field(
default=0.65,
metadata={"help": "probability of replacing a token with mask"},
)
mask_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static", metadata={"help": "how to choose mask length"}
)
mask_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_overlap: bool = field(
default=False, metadata={"help": "whether to allow masks to overlap"}
)
mask_min_space: int = field(
default=1,
metadata={
"help": "min space between spans (if no overlap is enabled)"
},
)
# channel masking
mask_channel_length: int = field(
default=10,
metadata={"help": "length of the mask for features (channels)"},
)
mask_channel_prob: float = field(
default=0.0,
metadata={"help": "probability of replacing a feature with 0"},
)
mask_channel_selection: MASKING_DISTRIBUTION_CHOICES = field(
default="static",
metadata={"help": "how to choose mask length for channel masking"},
)
mask_channel_other: float = field(
default=0,
metadata={
"help": "secondary mask argument "
"(used for more complex distributions), "
"see help in compute_mask_indicesh"
},
)
no_mask_channel_overlap: bool = field(
default=False,
metadata={"help": "whether to allow channel masks to overlap"},
)
mask_channel_min_space: int = field(
default=1,
metadata={
"help": "min space between spans (if no overlap is enabled)"
},
)
# positional embeddings
conv_pos: int = field(
default=128,
metadata={
"help": "number of filters for convolutional positional embeddings"
},
)
conv_pos_groups: int = field(
default=16,
metadata={
"help": "number of groups for convolutional positional embedding"
},
)
latent_temp: Tuple[float, float, float] = field(
default=(2, 0.5, 0.999995),
metadata={"help": "legacy (to be removed)"},
)
# loss computation
skip_masked: bool = field(
default=False,
metadata={"help": "skip computing losses over masked frames"},
)
skip_nomask: bool = field(
default=False,
metadata={"help": "skip computing losses over unmasked frames"},
)
resnet_relu_type: str = field(default='prelu', metadata={"help": 'relu type for resnet'})
resnet_weights: Optional[str] = field(default=None, metadata={"help": 'resnet weights'})
sim_type: str = field(default='cosine', metadata={"help": 'similarity type'})
sub_encoder_layers: int = field(default=0, metadata={'help': 'number of transformer layers for single modality'})
audio_feat_dim: int = field(default=-1, metadata={'help': 'audio feature dimension'})
modality_dropout: float = field(default=0, metadata={'help': 'drop one modality'})
audio_dropout: float = field(default=0, metadata={'help': 'drop audio feature'})
modality_fuse: str = field(default='concat', metadata={'help': 'fusing two modalities: add,concat'})
    selection_type: str = field(default='same_other_seq', metadata={'help': 'type of selecting images, same_other_seq: replace masked span with span from another sequence, same_seq: replace masked span with span of the same sequence'})
    masking_type: str = field(default='input', metadata={'help': 'input or feature masking'})
decoder_embed_dim: int = field(
default=768, metadata={"help": "decoder embedding dimension"}
)
decoder_ffn_embed_dim: int = field(
default=3072, metadata={"help": "decoder embedding dimension for FFN"}
)
decoder_layers: int = field(
default=6, metadata={"help": "num of decoder layers"}
)
decoder_layerdrop: float = field(
default=0.0, metadata={"help": "decoder layerdrop chance"}
)
decoder_attention_heads: int = field(
default=4, metadata={"help": "num decoder attention heads"}
)
decoder_learned_pos: bool = field(
default=False,
metadata={"help": "use learned positional embeddings in the decoder"},
)
decoder_normalize_before: bool = field(
default=False,
metadata={"help": "apply layernorm before each decoder block"},
)
no_token_positional_embeddings: bool = field(
default=False,
metadata={
"help": "if set, disables positional embeddings "
"(outside self attention)"
},
)
decoder_dropout: float = field(
default=0.1, metadata={"help": "dropout probability in the decoder"}
)
decoder_attention_dropout: float = field(
default=0.1,
metadata={
"help": "dropout probability for attention weights "
"inside the decoder"
},
)
decoder_activation_dropout: float = field(
default=0.0,
metadata={
"help": "dropout probability after activation in FFN "
"inside the decoder"
},
)
max_target_positions: int = field(
default=2048, metadata={"help": "max target positions"}
)
share_decoder_input_output_embed: bool = field(
default=False,
metadata={"help": "share decoder input and output embeddings"},
)
    no_scale_embedding: bool = field(default=True, metadata={'help': 'do not scale embeddings (disable sqrt(dim) scaling)'})
class SubModel(nn.Module):
def __init__(self, resnet=None, input_dim=None, cfg=None):
super().__init__()
self.resnet = resnet
self.proj = nn.Linear(input_dim, cfg.encoder_embed_dim)
self.encoder = TransformerEncoder(cfg) if cfg.encoder_layers > 0 else None
def forward(self, x):
if self.resnet is not None:
x = self.resnet(x)
x = self.proj(x.transpose(1, 2))
if self.encoder is not None:
x = self.encoder(x)[0].transpose(1, 2)
else:
x = x.transpose(1, 2)
return x
@register_model("av_hubert", dataclass=AVHubertConfig)
class AVHubertModel(BaseFairseqModel):
def __init__(
self,
cfg: AVHubertConfig,
task_cfg: AVHubertPretrainingConfig,
dictionaries: List[Dictionary],
**kwargs
) -> None:
super().__init__()
logger.info(f"HubertModel Config: {cfg}")
feature_ds_rate = 1
self.feat2tar_ratio = cfg.label_rate * feature_ds_rate / task_cfg.sample_rate
sub_cfg = deepcopy(cfg)
sub_cfg.encoder_layers = sub_cfg.sub_encoder_layers
resnet = ResEncoder(relu_type=cfg.resnet_relu_type, weights=cfg.resnet_weights)
self.feature_extractor_audio = SubModel(resnet=None, input_dim=cfg.audio_feat_dim, cfg=sub_cfg)
self.feature_extractor_video = SubModel(resnet=resnet, input_dim=resnet.backend_out, cfg=sub_cfg)
self.modality_dropout, self.audio_dropout = cfg.modality_dropout, cfg.audio_dropout
self.modality_fuse = cfg.modality_fuse
self.encoder_embed_dim = cfg.encoder_embed_dim
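        # The fused feature dimension depends on the fusion mode: concatenation
        # doubles the per-modality embedding size, additive fusion keeps it unchanged.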
if self.modality_fuse == 'concat':
self.embed = cfg.encoder_embed_dim * 2
elif self.modality_fuse == 'add':
self.embed = cfg.encoder_embed_dim
self.post_extract_proj = (
nn.Linear(self.embed, cfg.encoder_embed_dim)
if self.embed != cfg.encoder_embed_dim
else None
)
self.mask_prob_image, self.mask_prob_audio = cfg.mask_prob_image, cfg.mask_prob_audio
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length_image, self.mask_length_audio = cfg.mask_length_image, cfg.mask_length_audio
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.logit_temp = cfg.logit_temp
self.skip_masked = cfg.skip_masked
self.skip_nomask = cfg.skip_nomask
self.sim_type = cfg.sim_type
self.selection_type = cfg.selection_type
self.masking_type = cfg.masking_type
final_dim = (
cfg.final_dim if cfg.final_dim > 0 else cfg.encoder_embed_dim
)
self.mask_emb = nn.Parameter(
torch.FloatTensor(cfg.audio_feat_dim).uniform_() if self.masking_type == 'input' else torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
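        # mask_emb lives in the raw audio-feature space for input-level masking
        # (dim = audio_feat_dim) and in the encoder embedding space for
        # feature-level masking (dim = encoder_embed_dim).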
self.encoder = TransformerEncoder(cfg)
self.layer_norm = LayerNorm(self.embed)
self.target_glu = None
if cfg.target_glu:
self.target_glu = nn.Sequential(
nn.Linear(final_dim, final_dim * 2), nn.GLU()
)
self.untie_final_proj = cfg.untie_final_proj
if self.untie_final_proj:
self.final_proj = nn.Linear(
cfg.encoder_embed_dim, final_dim * len(dictionaries)
)
else:
self.final_proj = nn.Linear(cfg.encoder_embed_dim, final_dim)
# modules below are not needed during fine-tuning
if any([d is None for d in dictionaries]):
logger.info(
"cannot find dictionary. assume will be used for fine-tuning"
)
else:
self.num_classes = [len(d) for d in dictionaries]
self.label_embs_concat = nn.Parameter(
torch.FloatTensor(sum(self.num_classes), final_dim)
)
nn.init.uniform_(self.label_embs_concat)
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
super().upgrade_state_dict_named(state_dict, name)
return state_dict
@classmethod
def build_model(cls, cfg: AVHubertConfig, task: AVHubertPretrainingTask):
"""Build a new model instance."""
kwargs = {}
model = AVHubertModel(cfg, task.cfg, task.dictionaries, **kwargs)
return model
def apply_input_mask(self, x, padding_mask, target_list):
B, C, T = x.shape[:3]
        is_audio = len(x.shape) == 3
if is_audio:
mask_prob, mask_length = self.mask_prob_audio, self.mask_length_audio
else:
mask_prob, mask_length = self.mask_prob_image, self.mask_length_image
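        # Masked spans are filled differently per modality: audio spans use the
        # learned mask embedding, while video spans are replaced with frames from
        # another sequence ('same_other_seq') or another span of the same
        # sequence ('same_seq').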
if mask_prob > 0:
mask_indices, starts, ends, batch_indexes = compute_mask_indices(
(B, T),
padding_mask,
mask_prob,
mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices_np = mask_indices
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x = x.transpose(1, 2).contiguous() # [B, T, C, H, W]
if B == 1:
x[mask_indices] = 0
elif is_audio:
x[mask_indices] = self.mask_emb
elif self.selection_type == 'same_other_seq':
perm = (torch.arange(B) + torch.randint(low=1, high=B, size=(1,))) % B
x_perm = x[perm]
x[mask_indices] = x_perm[mask_indices]
elif self.selection_type == 'same_seq':
batch_indexes_, other_indexes = [], []
for batch_index, start, end in zip(batch_indexes, starts, ends):
length = end-start
other_start = np.setdiff1d(np.arange(T), np.arange(max(0, start-length), end))
if len(other_start) > 0:
other_start = np.random.choice(other_start, size=1)
else:
other_start = 0
other_end = other_start + length
other_indexes.append(np.arange(other_start, other_end).clip(max=T-1))
batch_indexes_.append(np.zeros([length], dtype=np.int64)+batch_index)
batch_indexes, other_indexes = np.concatenate(batch_indexes_), np.concatenate(other_indexes)
x[mask_indices] = x[batch_indexes, other_indexes]
x = x.transpose(1, 2).contiguous()
else:
mask_indices = None
if self.mask_channel_prob > 0:
logger.info(f"No mask channel prob for input masking")
return x, mask_indices
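    # Feature-level masking: spans of encoder-input features are replaced with the
    # learned mask embedding, optionally followed by channel (feature-dimension) masking.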
def apply_feature_mask(self, x, padding_mask, target_list):
B, T, C = x.shape
        assert self.mask_prob_audio == self.mask_prob_image and self.mask_length_audio == self.mask_length_image, "masking prob/length must be the same for image and audio when using feature masking"
mask_prob, mask_length = self.mask_prob_audio, self.mask_length_image
if mask_prob > 0:
mask_indices, _, _, _ = compute_mask_indices(
(B, T),
padding_mask,
mask_prob,
mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x[mask_indices] = self.mask_emb
else:
mask_indices = None
if self.mask_channel_prob > 0:
mask_channel_indices, _, _, _ = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
return x, mask_indices
def forward_features(self, source: torch.Tensor, modality: str) -> torch.Tensor:
extractor = eval(f"self.feature_extractor_{modality}")
if self.feature_grad_mult > 0:
features = extractor(source)
if self.feature_grad_mult != 1.0:
features = GradMultiply.apply(features, self.feature_grad_mult)
else:
with torch.no_grad():
features = extractor(source)
return features
def forward_targets(
self, features: torch.Tensor, mask_indices: torch.Tensor, target_list: List[torch.Tensor],
) -> Tuple[torch.Tensor, torch.Tensor]:
# Trim features to ensure labels exist and then get aligned labels
feat_tsz = features.size(2)
targ_tsz = min([t.size(1) for t in target_list])
if self.feat2tar_ratio * feat_tsz > targ_tsz:
feat_tsz = int(targ_tsz / self.feat2tar_ratio)
features = features[..., :feat_tsz]
if mask_indices is not None:
mask_indices = mask_indices[..., :feat_tsz]
target_inds = torch.arange(feat_tsz).float() * self.feat2tar_ratio
target_list = [t[:, target_inds.long()] for t in target_list]
return features, mask_indices, target_list
def forward_padding_mask(
self, features: torch.Tensor, padding_mask: torch.Tensor,
) -> torch.Tensor:
extra = padding_mask.size(1) % features.size(1)
if extra > 0:
padding_mask = padding_mask[:, :-extra]
padding_mask = padding_mask.view(
padding_mask.size(0), features.size(1), -1
)
padding_mask = padding_mask.all(-1)
return padding_mask
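    # Logits between projected features and label embeddings are either raw dot
    # products or cosine similarities, scaled by 1 / logit_temp.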
def compute_logits(self, feats, emb_mat):
# feats: [B, T, F], emb_mat: [V, F]
if self.sim_type == 'dot':
logits = torch.matmul(feats, emb_mat.transpose(0, 1))
elif self.sim_type == 'cosine':
batch_size, timesteps, emb_dim = feats.size()
feats_ = feats.view(-1, emb_dim)
nom = (feats_.unsqueeze(dim=1) * emb_mat.unsqueeze(dim=0)).sum(dim=-1) # [B*T, V]
denom = (feats_**2).sum(dim=-1).sqrt().unsqueeze(dim=1) * (emb_mat**2).sum(dim=-1).sqrt().unsqueeze(dim=0) # [B*T, V]
logits = (nom/denom.clamp(min=1e-6)).view(batch_size, timesteps, -1)
else:
raise NotImplementedError
logits = logits / self.logit_temp
return logits
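    # Pretraining forward pass: (optionally) mask the raw inputs, extract per-modality
    # features, fuse them, run the shared transformer encoder, then score masked and
    # unmasked frames against the cluster-label embeddings.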
def forward(
self,
source: torch.Tensor,
target_list: Optional[List[torch.Tensor]] = None,
padding_mask: Optional[torch.Tensor] = None,
mask: bool = True,
features_only: bool = False,
output_layer: Optional[int] = None
) -> Dict[str, torch.Tensor]:
"""output layer is 1-based"""
src_audio, src_video = source['audio'], source['video']
if mask and self.masking_type == 'input':
src_video, mask_indices_video = self.apply_input_mask(src_video, padding_mask, target_list)
src_audio, mask_indices_audio = self.apply_input_mask(src_audio, padding_mask, target_list)
mask_indices = torch.logical_or(mask_indices_audio, mask_indices_video)
else:
src_audio, src_video, mask_indices = src_audio, src_video, None
features_audio = self.forward_features(src_audio, modality='audio') # features: [B, F, T]
features_video = self.forward_features(src_video, modality='video')
modality_drop_prob, audio_drop_prob = np.random.random(), np.random.random()
if self.training:
if modality_drop_prob < self.modality_dropout:
if audio_drop_prob < self.audio_dropout:
features_audio = 0 * features_audio
else:
features_video = 0 * features_video
if self.modality_fuse == 'concat':
features = torch.cat([features_audio, features_video], dim=1)
elif self.modality_fuse == 'add':
features = features_audio + features_video
if target_list is not None:
features, mask_indices, target_list = self.forward_targets(features, mask_indices, target_list)
features_pen = features.float().pow(2).mean()
features = features.transpose(1, 2)
features = self.layer_norm(features)
if padding_mask is not None:
padding_mask = self.forward_padding_mask(features, padding_mask)
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
features = self.dropout_input(features)
if self.masking_type == 'feature' and mask:
x, mask_indices = self.apply_feature_mask(features, padding_mask, target_list)
else:
x = features
# feature: (B, T, D), float
# target: (B, T), long
# x: (B, T, D), float
# padding_mask: (B, T), bool
# mask_indices: (B, T), bool
x, _ = self.encoder(
x,
padding_mask=padding_mask,
layer=None if output_layer is None else output_layer - 1
)
if features_only:
return {"x": x, "padding_mask": padding_mask, "features": features}
label_embs_list = self.label_embs_concat.split(self.num_classes, 0)
proj_x = self.final_proj(x)
if self.untie_final_proj:
proj_x_list = proj_x.chunk(len(self.num_classes), dim=-1)
else:
proj_x_list = [proj_x for _ in self.num_classes]
logit_list = [self.compute_logits(proj, emb).view(-1, num_class) for proj, emb, num_class in zip(proj_x_list, label_embs_list, self.num_classes)] # [[B*T, V]]
mask, unmask = torch.logical_and(mask_indices, ~padding_mask).view(-1), torch.logical_and(~mask_indices, ~padding_mask).view(-1) # [B*T]
logit_m_list, logit_u_list = [logit[mask] for logit in logit_list], [logit[unmask] for logit in logit_list]
target_m_list, target_u_list = [target.view(-1)[mask].long() for target in target_list], [target.view(-1)[unmask].long() for target in target_list]
result = {
"logit_m_list": logit_m_list,
"logit_u_list": logit_u_list,
"target_m_list": target_m_list,
"target_u_list": target_u_list,
"padding_mask": padding_mask,
"features_pen": features_pen,
}
return result
def extract_features(
self,
source: torch.Tensor,
padding_mask: Optional[torch.Tensor] = None,
mask: bool = False,
ret_conv: bool = False,
output_layer: Optional[int] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
res = self.forward(
source,
padding_mask=padding_mask,
mask=mask,
features_only=True,
output_layer=output_layer,
)
feature = res["features"] if ret_conv else res["x"]
return feature, res["padding_mask"]
def extract_finetune(self, source, padding_mask=None, mask=False, ret_conv=False, output_layer=None):
src_audio, src_video = source['audio'], source['video']
if mask and self.masking_type == 'input':
src_video, mask_indices_video = self.apply_input_mask(src_video, padding_mask, target_list=None)
src_audio, mask_indices_audio = self.apply_input_mask(src_audio, padding_mask, target_list=None)
mask_indices = torch.logical_or(mask_indices_audio, mask_indices_video) # mask_indices not used in fine-tuning
else:
src_audio, src_video, mask_indices = src_audio, src_video, None
if src_audio is not None and src_video is None:
features_audio = self.forward_features(src_audio, modality='audio') # features: [B, F, T]
features_video = features_audio.new_zeros(features_audio.size(0), self.encoder_embed_dim, features_audio.size(-1))
elif src_audio is None and src_video is not None:
features_video = self.forward_features(src_video, modality='video')
features_audio = features_video.new_zeros(features_video.size(0), self.encoder_embed_dim, features_video.size(-1))
elif src_audio is not None and src_video is not None:
features_video = self.forward_features(src_video, modality='video')
features_audio = self.forward_features(src_audio, modality='audio') # features: [B, F, T]
if self.modality_fuse == 'concat':
features = torch.cat([features_audio, features_video], dim=1)
elif self.modality_fuse == 'add':
features = features_audio + features_video
features_pen = features.float().pow(2).mean()
features = features.transpose(1, 2)
features = self.layer_norm(features)
unmasked_features = features.clone()
if padding_mask is not None:
padding_mask = self.forward_padding_mask(features, padding_mask)
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
features = self.dropout_input(features)
unmasked_features = self.dropout_features(unmasked_features)
x = features
mask_indices = None
# feature: (B, T, D), float
# target: (B, T), long
# x: (B, T, D), float
# padding_mask: (B, T), bool
# mask_indices: (B, T), bool
x, _ = self.encoder(
x,
padding_mask=padding_mask,
layer=None if output_layer is None else output_layer - 1
)
return x, padding_mask
def get_extra_losses(self, net_output):
extra_losses = []
names = []
if "features_pen" in net_output:
extra_losses.append(net_output["features_pen"])
names.append("features_pen")
return extra_losses, names
def remove_pretraining_modules(self):
self.target_glu = None
self.final_proj = None
def get_logits(self, net_output, is_masked=True):
raise NotImplementedError
def get_targets(self, net_output, is_masked=True):
raise NotImplementedError
def compute_nce(self, x, pos, negs):
neg_is_pos = (pos == negs).all(-1)
pos = pos.unsqueeze(0)
targets = torch.cat([pos, negs], dim=0)
logits = torch.cosine_similarity(
x.float(), targets.float(), dim=-1
).type_as(x)
logits /= self.logit_temp
if neg_is_pos.any():
logits[1:][neg_is_pos] = float("-inf")
logits = logits.transpose(0, 1) # (num_x, num_cls+1)
return logits
| av_hubert-main | avhubert/hubert.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
import argparse
import torch
from fairseq.data import Dictionary, encoders
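# Usage sketch (hypothetical paths): python fix_state.py /path/to/ckpt1.pt /path/to/ckpt2.pt
# Each checkpoint is updated in place with the task state (dictionaries / tokenizer) it needs.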
def add_task_state(ckpt_path):
std = torch.load(ckpt_path)
cfg = std['cfg']
if cfg['model']['_name'] == 'av_hubert':
dictionaries = [Dictionary.load(f"{cfg['task']['label_dir']}/dict.{label}.txt") for label in cfg['task']['labels']]
std['cfg']['task']['fine_tuning'] = False
std['task_state'] = {'dictionaries': dictionaries}
print(dictionaries, std['cfg']['task'])
else:
prt = torch.load(std['cfg']['model']['w2v_path'])
std['cfg']['model']['w2v_args'] = prt['cfg']
std['cfg']['task']['fine_tuning'] = True
dictionaries = [Dictionary.load(f"{prt['cfg']['task']['label_dir']}/dict.{label}.txt") for label in prt['cfg']['task']['labels']]
target_dictionary = Dictionary.load(f"{cfg['task']['label_dir']}/dict.wrd.txt")
tokenizer_fn = std['cfg']['task']['tokenizer_bpe_model']
bpe_args = argparse.Namespace(**{'bpe': 'sentencepiece', f"sentencepiece_model": tokenizer_fn})
bpe_tokenizer = encoders.build_bpe(bpe_args)
std['task_state'] = {'dictionaries': dictionaries, 'target_dictionary': target_dictionary, 's2s_tokenizer': bpe_tokenizer}
torch.save(std, ckpt_path)
return
if __name__ == '__main__':
ckpt_paths = sys.argv[1:]
for ckpt_path in ckpt_paths:
add_task_state(ckpt_path)
| av_hubert-main | avhubert/misc/fix_state.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import numpy as np
import joblib
import torch
import tqdm
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("dump_km_label")
class ApplyKmeans(object):
def __init__(self, km_path):
self.km_model = joblib.load(km_path)
self.C_np = self.km_model.cluster_centers_.transpose()
self.Cnorm_np = (self.C_np ** 2).sum(0, keepdims=True)
self.C = torch.from_numpy(self.C_np)
self.Cnorm = torch.from_numpy(self.Cnorm_np)
if torch.cuda.is_available():
self.C = self.C.cuda()
self.Cnorm = self.Cnorm.cuda()
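    # Nearest-centroid assignment using ||x - c||^2 = ||x||^2 - 2 x·c + ||c||^2,
    # evaluated against all cluster centers at once.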
def __call__(self, x):
if isinstance(x, torch.Tensor):
dist = (
x.pow(2).sum(1, keepdim=True)
- 2 * torch.matmul(x, self.C)
+ self.Cnorm
)
return dist.argmin(dim=1).cpu().numpy()
else:
dist = (
(x ** 2).sum(1, keepdims=True)
- 2 * np.matmul(x, self.C_np)
+ self.Cnorm_np
)
return np.argmin(dist, axis=1)
def get_feat_iterator(feat_dir, split, nshard, rank):
feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy"
leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len"
with open(leng_path, "r") as f:
lengs = [int(line.rstrip()) for line in f]
offsets = [0] + np.cumsum(lengs[:-1]).tolist()
def iterate():
feat = np.load(feat_path, mmap_mode="r")
assert feat.shape[0] == (offsets[-1] + lengs[-1])
for offset, leng in zip(offsets, lengs):
yield feat[offset: offset + leng]
return iterate, len(lengs)
def dump_label(feat_dir, split, km_path, nshard, rank, lab_dir):
apply_kmeans = ApplyKmeans(km_path)
generator, num = get_feat_iterator(feat_dir, split, nshard, rank)
iterator = generator()
lab_path = f"{lab_dir}/{split}_{rank}_{nshard}.km"
os.makedirs(lab_dir, exist_ok=True)
with open(lab_path, "w") as f:
for feat in tqdm.tqdm(iterator, total=num):
# feat = torch.from_numpy(feat).cuda()
lab = apply_kmeans(feat).tolist()
f.write(" ".join(map(str, lab)) + "\n")
logger.info("finished successfully")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("feat_dir")
parser.add_argument("split")
parser.add_argument("km_path")
parser.add_argument("nshard", type=int)
parser.add_argument("rank", type=int)
parser.add_argument("lab_dir")
args = parser.parse_args()
logging.info(str(args))
dump_label(**vars(args))
| av_hubert-main | avhubert/clustering/dump_km_label.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
import os
import sys
import fairseq
import soundfile as sf
import torch
import torch.nn.functional as F
import tqdm
from npy_append_array import NpyAppendArray
import numpy as np
from python_speech_features import logfbank
from scipy.io import wavfile
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("dump_hubert_feature")
class HubertFeatureReader(object):
def __init__(self, ckpt_path, layer, max_chunk=1600000, custom_utils=None):
(
model,
cfg,
task,
) = fairseq.checkpoint_utils.load_model_ensemble_and_task([ckpt_path])
self.model = model[0].eval().cuda()
self.task = task
self.layer = layer
self.max_chunk = max_chunk
self.stack_order_audio = self.task.cfg.stack_order_audio
image_crop_size, image_mean, image_std = self.task.cfg.image_crop_size, self.task.cfg.image_mean, self.task.cfg.image_std
self.transform = custom_utils.Compose([
custom_utils.Normalize( 0.0,255.0 ),
custom_utils.CenterCrop((image_crop_size, image_crop_size)),
custom_utils.Normalize(image_mean, image_std) ])
self.custom_utils = custom_utils
logger.info(f"TASK CONFIG:\n{self.task.cfg}")
logger.info(f" max_chunk = {self.max_chunk}")
logger.info(f"Transform: {self.transform}")
def load_feature(self, mix_name, ref_len=None):
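        # stacker concatenates every `stack_order` consecutive audio frames into one,
        # zero-padding the tail; e.g. with stack_order 4 the 100 fps filterbank stream
        # (default 10 ms hop) lines up with 25 fps video.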
def stacker(feats, stack_order):
feat_dim = feats.shape[1]
if len(feats) % stack_order != 0:
res = stack_order - len(feats) % stack_order
res = np.zeros([res, feat_dim]).astype(feats.dtype)
feats = np.concatenate([feats, res], axis=0)
feats = feats.reshape((-1, stack_order, feat_dim)).reshape(-1, stack_order*feat_dim)
return feats
video_fn, audio_fn = mix_name
video_feats = self.load_image(video_fn)
audio_fn = audio_fn.split(':')[0]
sample_rate, wav_data = wavfile.read(audio_fn)
assert sample_rate == 16_000 and len(wav_data.shape) == 1
audio_feats = logfbank(wav_data, samplerate=sample_rate).astype(np.float32)
audio_feats = stacker(audio_feats, self.stack_order_audio)
diff = len(audio_feats) - len(video_feats)
if diff < 0:
audio_feats = np.concatenate([audio_feats, np.zeros([-diff, audio_feats.shape[-1]], dtype=audio_feats.dtype)])
elif diff > 0:
audio_feats = audio_feats[:-diff]
return video_feats, audio_feats
def load_image(self, audio_name):
feats = self.custom_utils.load_video(audio_name)
feats = self.transform(feats)
feats = np.expand_dims(feats, axis=-1)
return feats
def get_feats(self, path, ref_len=None):
video_feats, audio_feats = self.load_feature(path, ref_len)
with torch.no_grad():
audio_feats, video_feats = torch.from_numpy(audio_feats.astype(np.float32)).cuda(), torch.from_numpy(video_feats.astype(np.float32)).cuda()
if self.task.cfg.normalize:
audio_feats = F.layer_norm(audio_feats, audio_feats.shape[1:])
video_feats = video_feats.unsqueeze(dim=0).permute((0, 4, 1, 2, 3)).contiguous()
audio_feats = audio_feats.unsqueeze(dim=0).transpose(1, 2)
source = {'audio': audio_feats, 'video': video_feats}
if self.layer == 0:
ret_conv, output_layer = True, None
else:
ret_conv, output_layer = False, self.layer
feat, _ = self.model.extract_features(
source=source,
padding_mask=None,
mask=False,
output_layer=output_layer,
ret_conv=ret_conv
# output_layer=self.layer,
)
return feat.squeeze(dim=0)
def get_path_iterator(tsv, nshard, rank):
with open(tsv, "r") as f:
root = f.readline().rstrip()
lines = [line.rstrip() for line in f]
tot = len(lines)
shard_size = math.ceil(tot / nshard)
start, end = rank * shard_size, min((rank + 1) * shard_size, tot)
assert start < end, "start={start}, end={end}"
logger.info(
f"rank {rank} of {nshard}, process {end-start} "
f"({start}-{end}) out of {tot}"
)
lines = lines[start:end]
def iterate():
for line in lines:
items = line.strip().split("\t")
# audio_path = f"{items[1]}:{items[0]}"
yield (items[1], items[2]+':'+items[0]), int(items[3])
return iterate, len(lines)
def dump_feature(
tsv_dir, split, ckpt_path, layer, nshard, rank, feat_dir, max_chunk, custom_utils=None, **kwargs
):
reader = HubertFeatureReader(ckpt_path, layer, max_chunk, custom_utils=custom_utils)
generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank)
iterator = generator()
feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy"
leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len"
os.makedirs(feat_dir, exist_ok=True)
if os.path.exists(feat_path):
os.remove(feat_path)
feat_f = NpyAppendArray(feat_path)
with open(leng_path, "w") as leng_f:
for path, nsample in tqdm.tqdm(iterator, total=num):
feat = reader.get_feats(path, nsample)
feat_f.append(feat.cpu().numpy())
leng_f.write(f"{len(feat)}\n")
logger.info("finished successfully")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("tsv_dir")
parser.add_argument("split")
parser.add_argument("ckpt_path")
parser.add_argument("layer", type=int)
parser.add_argument("nshard", type=int)
parser.add_argument("rank", type=int)
parser.add_argument("feat_dir")
parser.add_argument("--max_chunk", type=int, default=1600000)
parser.add_argument("--user_dir", type=str, default=None)
args = parser.parse_args()
logger.info(args)
fairseq.utils.import_user_module(args)
sys.path.append(args.user_dir)
import utils as custom_utils
kwargs = vars(args)
kwargs.update({'custom_utils': custom_utils})
dump_feature(**kwargs)
| av_hubert-main | avhubert/clustering/dump_hubert_feature.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os, subprocess
import submitit
import argparse
from argparse import Namespace
def dump_av_hubert(*args, **kwargs):
from dump_hubert_feature import dump_feature
import fairseq
import sys
av_hubert_dir = os.path.join(os.getcwd(), '..')
fairseq.utils.import_user_module(Namespace(user_dir=av_hubert_dir))
sys.path.append(av_hubert_dir)
import utils as custom_utils
kwargs.update({'custom_utils': custom_utils})
args = args[0]
dump_feature(*args, **kwargs)
return
def dump_mfcc(*args, **kwargs):
from dump_mfcc_feature import dump_feature
args = args[0]
dump_feature(*args, **kwargs)
return
def run_kmeans(*args, **kwargs):
import sys
from learn_kmeans import learn_kmeans
learn_kmeans(*args, **kwargs)
return
def apply_kmeans(*args, **kwargs):
import sys
from dump_km_label import dump_label
args = args[0]
dump_label(*args, **kwargs)
return
def concatenate(*args, **kwargs):
from concat import main as concat_fn
args = args[0]
concat_fn(*args, **kwargs)
return
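# Pipeline overview: dump MFCC or AV-HuBERT features shard-by-shard on SLURM,
# fit a MiniBatchKMeans model on a subsample, apply it to every shard, then
# concatenate the per-shard label files and write dict.km.txt with dummy counts.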
def main():
parser = argparse.ArgumentParser(description='clustering', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--tsv', type=str, help='tsv dir')
parser.add_argument('--output', type=str, help='output dir (labels)')
parser.add_argument('--ckpt', type=str, help='checkpoint of last iteration')
parser.add_argument('--nlayer', type=int, default=12, help='layer index for clustering')
parser.add_argument('--ncluster', type=int, default=500, help='number of clusters')
parser.add_argument('--nshard', type=int, default=100, help='number of shards')
parser.add_argument('--percent', type=float, default=0.05, help='Percentage for clustering')
parser.add_argument('--mfcc', action='store_true', help='extracting MFCC feature')
parser.add_argument('--slurm-partition', type=str, help='slurm partitions')
args = parser.parse_args()
tsv_dir = args.tsv
output_dir = args.output
km_dir = output_dir
feat_dir = output_dir
ckpt_path = args.ckpt
nlayer = args.nlayer
nshard = args.nshard
n_clusters = args.ncluster
slurm_partition = args.slurm_partition
is_mfcc = args.mfcc
timeout_min = 240
    percent = args.percent
log_folder = "log_submit/%j"
km_path = f"{km_dir}/kmeans.mdl"
os.makedirs(output_dir, exist_ok=True)
ext = submitit.AutoExecutor(folder=log_folder)
args_array = []
if is_mfcc:
print(f"Dump MFCC feature")
for rank in range(nshard):
args = [tsv_dir, 'train', nshard, rank, output_dir]
args_array.append(args)
args_array.append([tsv_dir, 'valid', 1, 0, output_dir])
ext.update_parameters(timeout_min=60, slurm_partition=slurm_partition, cpus_per_task=1, slurm_array_parallelism=100)
jobs = ext.map_array(dump_mfcc, args_array)
else:
print(f"Dump AV-Hubert feature")
for rank in range(nshard):
args = [tsv_dir, 'train', ckpt_path, nlayer, nshard, rank, output_dir, 1600000]
args_array.append(args)
args_array.append([tsv_dir, 'valid', ckpt_path, nlayer, 1, 0, output_dir, 1600000])
ext.update_parameters(timeout_min=60, slurm_partition=slurm_partition, cpus_per_task=1, gpus_per_node=1, slurm_array_parallelism=100)
jobs = ext.map_array(dump_av_hubert, args_array)
[job.result() for job in jobs]
print(f"Learn K-means")
percent, batch_size = percent, 20000
ext.update_parameters(timeout_min=timeout_min, slurm_partition=slurm_partition, cpus_per_task=8, mem_gb=128)
args, kwargs = [feat_dir, 'train', nshard, km_path, n_clusters], vars(Namespace(seed=0, percent=percent, init="k-means++", max_iter=100, batch_size=batch_size, tol=0.0, n_init=20, reassignment_ratio=0.0, max_no_improvement=100))
print(args, kwargs)
job = ext.submit(run_kmeans, *args, **kwargs)
job.result()
print(f"Apply K-means")
args_array = []
for rank in range(nshard):
args = [feat_dir, 'train', km_path, nshard, rank, output_dir]
args_array.append(args)
args_array.append([feat_dir, 'valid', km_path, 1, 0, output_dir])
ext.update_parameters(timeout_min=10, slurm_partition=slurm_partition, cpus_per_task=1, slurm_array_parallelism=500)
jobs = ext.map_array(apply_kmeans, args_array)
[job.result() for job in jobs]
print(f"Concatenate labels")
cont = f"for rank in $(seq 0 {nshard-1}); do cat {output_dir}/train_${{rank}}_{nshard}.km; done > {output_dir}/train.km"
print(cont)
subprocess.call(cont, shell=True)
cont = f"cp {output_dir}/valid*.km {output_dir}/valid.km"
print(cont)
subprocess.call(cont, shell=True)
with open(f"{output_dir}/dict.km.txt", 'w') as fo:
for i in range(n_clusters):
fo.write(f"{i} {10000}\n")
print(f"Please delete intermediate files to save space: rm {output_dir}/*npy")
return
if __name__ == '__main__':
main()
| av_hubert-main | avhubert/clustering/submit_cluster.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import sys
import numpy as np
from sklearn.cluster import MiniBatchKMeans
import joblib
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("learn_kmeans")
def get_km_model(
n_clusters,
init,
max_iter,
batch_size,
tol,
max_no_improvement,
n_init,
reassignment_ratio,
):
return MiniBatchKMeans(
n_clusters=n_clusters,
init=init,
max_iter=max_iter,
batch_size=batch_size,
verbose=1,
compute_labels=False,
tol=tol,
max_no_improvement=max_no_improvement,
init_size=None,
n_init=n_init,
reassignment_ratio=reassignment_ratio,
)
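# Each shard is subsampled at the utterance level: `percent` of the utterances are
# drawn at random and their frames concatenated, keeping k-means fitting memory bounded.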
def load_feature_shard(feat_dir, split, nshard, rank, percent):
feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy"
leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len"
with open(leng_path, "r") as f:
lengs = [int(line.rstrip()) for line in f]
offsets = [0] + np.cumsum(lengs[:-1]).tolist()
if percent < 0:
return np.load(feat_path, mmap_mode="r")
else:
nsample = int(np.ceil(len(lengs) * percent))
indices = np.random.choice(len(lengs), nsample, replace=False)
feat = np.load(feat_path, mmap_mode="r")
sampled_feat = np.concatenate(
[feat[offsets[i]: offsets[i] + lengs[i]] for i in indices], axis=0
)
logger.info(
(
f"sampled {nsample} utterances, {len(sampled_feat)} frames "
f"from shard {rank}/{nshard}"
)
)
return sampled_feat
def load_feature(feat_dir, split, nshard, seed, percent):
assert percent <= 1.0
feat = np.concatenate(
[
load_feature_shard(feat_dir, split, nshard, r, percent)
for r in range(nshard)
],
axis=0,
)
logging.info(f"loaded feature with dimension {feat.shape}")
return feat
def learn_kmeans(
feat_dir,
split,
nshard,
km_path,
n_clusters,
seed,
percent,
init,
max_iter,
batch_size,
tol,
n_init,
reassignment_ratio,
max_no_improvement,
):
np.random.seed(seed)
feat = load_feature(feat_dir, split, nshard, seed, percent)
km_model = get_km_model(
n_clusters,
init,
max_iter,
batch_size,
tol,
max_no_improvement,
n_init,
reassignment_ratio,
)
km_model.fit(feat)
joblib.dump(km_model, km_path)
inertia = -km_model.score(feat) / len(feat)
logger.info("total intertia: %.5f", inertia)
logger.info("finished successfully")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("feat_dir", type=str)
parser.add_argument("split", type=str)
parser.add_argument("nshard", type=int)
parser.add_argument("km_path", type=str)
parser.add_argument("n_clusters", type=int)
parser.add_argument("--seed", default=0, type=int)
parser.add_argument(
"--percent", default=-1, type=float, help="sample a subset; -1 for all"
)
parser.add_argument("--init", default="k-means++")
parser.add_argument("--max_iter", default=100, type=int)
parser.add_argument("--batch_size", default=10000, type=int)
parser.add_argument("--tol", default=0.0, type=float)
parser.add_argument("--max_no_improvement", default=100, type=int)
parser.add_argument("--n_init", default=20, type=int)
parser.add_argument("--reassignment_ratio", default=0.0, type=float)
args = parser.parse_args()
logging.info(str(args))
learn_kmeans(**vars(args))
| av_hubert-main | avhubert/clustering/learn_kmeans.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
import os
import sys
import soundfile as sf
import torch
import torchaudio
import tqdm
from npy_append_array import NpyAppendArray
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("dump_mfcc_feature")
class MfccFeatureReader(object):
def __init__(self, sample_rate):
self.sample_rate = sample_rate
def read_audio(self, path, ref_len=None):
wav, sr = sf.read(path)
assert sr == self.sample_rate, sr
if wav.ndim == 2:
wav = wav.mean(-1)
assert wav.ndim == 1, wav.ndim
if ref_len is not None and abs(ref_len - len(wav)) > 160:
logging.warning(f"ref {ref_len} != read {len(wav)} ({path})")
return wav
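    # 13 MFCCs (torchaudio/Kaldi default) plus deltas and delta-deltas give a
    # 39-dimensional frame-level feature, returned as (time, freq).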
def get_feats(self, path, ref_len=None):
x = self.read_audio(path, ref_len)
with torch.no_grad():
x = torch.from_numpy(x).float()
x = x.view(1, -1)
mfccs = torchaudio.compliance.kaldi.mfcc(
waveform=x,
sample_frequency=self.sample_rate,
use_energy=False,
) # (time, freq)
mfccs = mfccs.transpose(0, 1) # (freq, time)
deltas = torchaudio.functional.compute_deltas(mfccs)
ddeltas = torchaudio.functional.compute_deltas(deltas)
concat = torch.cat([mfccs, deltas, ddeltas], dim=0)
            concat = concat.transpose(0, 1).contiguous() # (time, freq)
return concat
def get_path_iterator(tsv, nshard, rank):
with open(tsv, "r") as f:
root = f.readline().rstrip()
lines = [line.rstrip() for line in f]
tot = len(lines)
shard_size = math.ceil(tot / nshard)
start, end = rank * shard_size, min((rank + 1) * shard_size, tot)
assert start < end, "start={start}, end={end}"
logger.info(
f"rank {rank} of {nshard}, process {end-start} "
f"({start}-{end}) out of {tot}"
)
lines = lines[start:end]
def iterate():
for line in lines:
_, video_path, wav_path, nsample_video, nsample_wav = line.split("\t")
yield f"{root}/{wav_path}", int(nsample_wav)
return iterate, len(lines)
def dump_feature(tsv_dir, split, nshard, rank, feat_dir, sample_rate=16_000):
reader = MfccFeatureReader(sample_rate)
generator, num = get_path_iterator(f"{tsv_dir}/{split}.tsv", nshard, rank)
iterator = generator()
feat_path = f"{feat_dir}/{split}_{rank}_{nshard}.npy"
leng_path = f"{feat_dir}/{split}_{rank}_{nshard}.len"
os.makedirs(feat_dir, exist_ok=True)
if os.path.exists(feat_path):
os.remove(feat_path)
feat_f = NpyAppendArray(feat_path)
with open(leng_path, "w") as leng_f:
for path, nsample in tqdm.tqdm(iterator, total=num):
feat = reader.get_feats(path, nsample)
feat_f.append(feat.cpu().numpy())
leng_f.write(f"{len(feat)}\n")
logger.info("finished successfully")
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("tsv_dir")
parser.add_argument("split")
parser.add_argument("nshard", type=int)
parser.add_argument("rank", type=int)
parser.add_argument("feat_dir")
parser.add_argument("--sample_rate", type=int, default=16000)
args = parser.parse_args()
logger.info(args)
dump_feature(**vars(args))
| av_hubert-main | avhubert/clustering/dump_mfcc_feature.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import glob
import shutil
import subprocess
from tqdm import tqdm
from pathlib import Path
from gen_subword import gen_vocab
from tempfile import NamedTemporaryFile
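# Builds fairseq-style manifests for LRS3: a sentencepiece vocabulary, then
# {train,valid,test}.tsv / .wrd files for the 30h (trainval only) and 433h
# (trainval + pretrain) setups; each tsv row lists id, video path, audio path and frame counts.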
def main():
import argparse
parser = argparse.ArgumentParser(description='LRS3 tsv preparation', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--lrs3', type=str, help='lrs3 root dir')
parser.add_argument('--valid-ids', type=str, help='a list of valid ids')
    parser.add_argument('--vocab-size', type=int, default=1000, help='sentencepiece vocabulary size')
args = parser.parse_args()
file_list, label_list = f"{args.lrs3}/file.list", f"{args.lrs3}/label.list"
assert os.path.isfile(file_list) , f"{file_list} not exist -> run lrs3_prepare.py first"
assert os.path.isfile(label_list) , f"{label_list} not exist -> run lrs3_prepare.py first"
nframes_audio_file, nframes_video_file = f"{args.lrs3}/nframes.audio", f"{args.lrs3}/nframes.video"
assert os.path.isfile(nframes_audio_file) , f"{nframes_audio_file} not exist -> run count_frames.py first"
assert os.path.isfile(nframes_video_file) , f"{nframes_video_file} not exist -> run count_frames.py first"
print(f"Generating sentencepiece units")
vocab_size = args.vocab_size
vocab_dir = (Path(f"{args.lrs3}")/f"spm{vocab_size}").absolute()
# out_root = Path(vocab_dir).absolute()
vocab_dir.mkdir(exist_ok=True)
spm_filename_prefix = f"spm_unigram{vocab_size}"
with NamedTemporaryFile(mode="w") as f:
label_text = [ln.strip() for ln in open(label_list).readlines()]
for t in label_text:
f.write(t.lower() + "\n")
        f.flush()  # make sure the temp file is fully written before sentencepiece reads it
        gen_vocab(Path(f.name), vocab_dir/spm_filename_prefix, 'unigram', args.vocab_size)
vocab_path = (vocab_dir/spm_filename_prefix).as_posix()+'.txt'
audio_dir, video_dir = f"{args.lrs3}/audio", f"{args.lrs3}/video"
def setup_target(target_dir, train, valid, test):
for name, data in zip(['train', 'valid', 'test'], [train, valid, test]):
with open(f"{target_dir}/{name}.tsv", 'w') as fo:
fo.write('/\n')
for fid, _, nf_audio, nf_video in data:
fo.write('\t'.join([fid, os.path.abspath(f"{video_dir}/{fid}.mp4"), os.path.abspath(f"{audio_dir}/{fid}.wav"), str(nf_video), str(nf_audio)])+'\n')
with open(f"{target_dir}/{name}.wrd", 'w') as fo:
for _, label, _, _ in data:
fo.write(f"{label}\n")
shutil.copyfile(vocab_path, f"{target_dir}/dict.wrd.txt")
return
fids, labels = [x.strip() for x in open(file_list).readlines()], [x.strip().lower() for x in open(label_list).readlines()]
nfs_audio, nfs_video = [x.strip() for x in open(nframes_audio_file).readlines()], [x.strip() for x in open(nframes_video_file).readlines()]
valid_fids = set([x.strip() for x in open(args.valid_ids).readlines()])
train_all, train_sub, valid, test = [], [], [], []
for fid, label, nf_audio, nf_video in zip(fids, labels, nfs_audio, nfs_video):
part = fid.split('/')[0]
# print(part)
if part == 'test':
test.append([fid, label, nf_audio, nf_video])
else:
if fid in valid_fids:
valid.append([fid, label, nf_audio, nf_video])
else:
train_all.append([fid, label, nf_audio, nf_video])
if part == 'trainval':
train_sub.append([fid, label, nf_audio, nf_video])
dir_30h = f"{args.lrs3}/30h_data"
print(f"Set up 30h dir")
os.makedirs(dir_30h, exist_ok=True)
setup_target(dir_30h, train_sub, valid, test)
dir_433h = f"{args.lrs3}/433h_data"
print(f"Set up 433h dir")
os.makedirs(dir_433h, exist_ok=True)
setup_target(dir_433h, train_all, valid, test)
return
if __name__ == '__main__':
main()
| av_hubert-main | avhubert/preparation/lrs3_manifest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from scipy.io import wavfile
from tqdm import tqdm
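# Babble noise is built by truncating the selected wavs to the shortest one and
# averaging them sample-wise into a single int16 track.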
def mix_audio(wav_fns):
wav_data = [wavfile.read(wav_fn)[1] for wav_fn in wav_fns]
wav_data_ = []
min_len = min([len(x) for x in wav_data])
for item in wav_data:
wav_data_.append(item[:min_len])
wav_data = np.stack(wav_data_).mean(axis=0).astype(np.int16)
return wav_data
def main():
import argparse
parser = argparse.ArgumentParser(description='Generating babble and speech noise from LRS3', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--lrs3', type=str, help='lrs3 root dir')
args = parser.parse_args()
tsv_fn = os.path.join(args.lrs3, '433h_data', 'train.tsv')
output_wav = os.path.join(args.lrs3, 'noise', 'babble', 'noise.wav')
output_tsvs = [os.path.join(args.lrs3, 'noise', 'babble', 'valid.tsv'), os.path.join(args.lrs3, 'noise', 'babble', 'test.tsv')]
os.makedirs(os.path.dirname(output_wav), exist_ok=True)
for output_tsv in output_tsvs:
os.makedirs(os.path.dirname(output_tsv), exist_ok=True)
print(f"Generating babble noise -> {output_tsvs}")
num_samples = 30
sample_rate = 16_000
min_len = 15*sample_rate
lns = open(tsv_fn).readlines()[1:]
wav_fns = [(ln.strip().split('\t')[2], int(ln.strip().split('\t')[-1])) for ln in lns]
wav_fns = list(filter(lambda x: x[1]>min_len, wav_fns))
indexes = np.random.permutation(len(wav_fns))[:num_samples]
wav_fns = [wav_fns[i][0] for i in indexes]
wav_data = mix_audio(wav_fns)
wavfile.write(output_wav, sample_rate, wav_data)
for output_tsv in output_tsvs:
with open(output_tsv, 'w') as fo:
fo.write(os.path.abspath(output_wav)+'\n')
min_len = 20*sample_rate
speech_tsv_dir, speech_wav_dir = os.path.join(args.lrs3, 'noise', 'speech'), os.path.join(args.lrs3, 'noise', 'speech', 'wav')
os.makedirs(speech_tsv_dir, exist_ok=True)
os.makedirs(speech_wav_dir, exist_ok=True)
print(f'Generating speech noise -> {speech_tsv_dir}')
lns = open(tsv_fn).readlines()[1:]
wav_fns = [(ln.strip().split('\t')[2], int(ln.strip().split('\t')[-1])) for ln in lns]
wav_fns = list(filter(lambda x: x[1]>min_len, wav_fns))
wav_fns = [x[0] for x in wav_fns]
print(f"# speech noise audios: {len(wav_fns)}")
noise_fns = []
for wav_fn in tqdm(wav_fns):
sample_rate, wav_data = wavfile.read(wav_fn)
wav_data = wav_data[:min_len]
filename = '_'.join(wav_fn.split('/')[-2:])
noise_fn = f"{speech_wav_dir}/{filename}"
noise_fns.append(noise_fn)
wavfile.write(noise_fn, sample_rate, wav_data.astype(np.int16))
num_train, num_valid, num_test = int(len(noise_fns)*0.6), int(len(noise_fns)*0.2), int(len(noise_fns)*0.2)
prev = 0
for split in ['train', 'valid', 'test']:
split_fns = []
num_x, tsv_x = eval(f"num_{split}"), f"{speech_tsv_dir}/{split}.tsv"
for fn in noise_fns[prev: prev+num_x]:
split_fns.append(os.path.abspath(fn))
with open(tsv_x, 'w') as fo:
fo.write('\n'.join(split_fns)+'\n')
prev += num_x
return
if __name__ == '__main__':
main()
| av_hubert-main | avhubert/preparation/lrs3_noise.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os, sys, glob, subprocess, json, math
import numpy as np
from scipy.io import wavfile
from os.path import basename, dirname
from tqdm import tqdm
import tempfile, shutil
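# Usage sketch (hypothetical paths):
#   python vox_prepare.py --vox /data/voxceleb2 --ffmpeg /usr/bin/ffmpeg --step 1
#   python vox_prepare.py --vox /data/voxceleb2 --ffmpeg /usr/bin/ffmpeg --step 2 --rank 0 --nshard 10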
def get_filelist(root_dir):
fids = []
for split in ['dev', 'test']:
all_fns = glob.glob(f"{root_dir}/{split}/mp4/*/*/*mp4")
for fn in all_fns:
fids.append('/'.join(fn.split('/')[-5:])[:-4])
output_fn = f"{root_dir}/file.list"
with open(output_fn, 'w') as fo:
fo.write('\n'.join(fids)+'\n')
return
def prep_wav(root_dir, wav_dir, flist, ffmpeg, rank, nshard):
input_dir, output_dir = root_dir, wav_dir
os.makedirs(output_dir, exist_ok=True)
fids = [ln.strip() for ln in open(flist).readlines()]
num_per_shard = math.ceil(len(fids)/nshard)
start_id, end_id = num_per_shard*rank, num_per_shard*(rank+1)
fids = fids[start_id: end_id]
print(f"{len(fids)} videos")
for i, fid in enumerate(tqdm(fids)):
video_fn = f"{input_dir}/{fid}.mp4"
audio_fn = f"{output_dir}/{fid}.wav"
os.makedirs(os.path.dirname(audio_fn), exist_ok=True)
cmd = ffmpeg + " -i " + video_fn + " -f wav -vn -y " + audio_fn + ' -loglevel quiet'
# print(cmd)
subprocess.call(cmd, shell=True)
# print(f"{video_fn} -> {audio_fn}")
return
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='VoxCeleb2 data preparation', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--vox', type=str, help='VoxCeleb2 dir')
parser.add_argument('--ffmpeg', type=str, help='ffmpeg path')
parser.add_argument('--step', type=int, help='Steps(1: get file list, 2: extract audio)')
parser.add_argument('--rank', type=int, help='rank id')
parser.add_argument('--nshard', type=int, help='number of shards')
args = parser.parse_args()
if args.step == 1:
print(f"Get file list")
get_filelist(args.vox)
elif args.step == 2:
print(f"Extract audio")
output_dir = f"{args.vox}/audio"
manifest = f"{args.vox}/file.list"
prep_wav(args.vox, output_dir, manifest, args.ffmpeg, args.rank, args.nshard)
| av_hubert-main | avhubert/preparation/vox_prepare.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import cv2, math, os
import submitit
import tempfile
import shutil
from tqdm import tqdm
from scipy.io import wavfile
def count_frames(fids, audio_dir, video_dir):
total_num_frames = []
for fid in tqdm(fids):
wav_fn = f"{audio_dir}/{fid}.wav"
video_fn = f"{video_dir}/{fid}.mp4"
num_frames_audio = len(wavfile.read(wav_fn)[1])
cap = cv2.VideoCapture(video_fn)
num_frames_video = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
total_num_frames.append([num_frames_audio, num_frames_video])
return total_num_frames
def check(fids, audio_dir, video_dir):
missing = []
for fid in tqdm(fids):
wav_fn = f"{audio_dir}/{fid}.wav"
video_fn = f"{video_dir}/{fid}.mp4"
is_file = os.path.isfile(wav_fn) and os.path.isfile(video_fn)
if not is_file:
missing.append(fid)
return missing
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='count number of frames (on slurm)', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--root', type=str, help='root dir')
parser.add_argument('--manifest', type=str, help='a list of filenames')
parser.add_argument('--nshard', type=int, default=1, help='number of shards')
parser.add_argument('--slurm_partition', type=str, default='cpu', help='slurm partition')
args = parser.parse_args()
fids = [ln.strip() for ln in open(args.manifest).readlines()]
print(f"{len(fids)} files")
audio_dir, video_dir = f"{args.root}/audio", f"{args.root}/video"
tmp_dir = tempfile.mkdtemp(dir='./')
executor = submitit.AutoExecutor(folder=tmp_dir)
executor.update_parameters(slurm_array_parallelism=100, slurm_partition=args.slurm_partition, timeout_min=240)
ranks = list(range(0, args.nshard))
fids_arr = []
num_per_shard = math.ceil(len(fids)/args.nshard)
for rank in ranks:
sub_fids = fids[rank*num_per_shard: (rank+1)*num_per_shard]
if len(sub_fids) > 0:
fids_arr.append(sub_fids)
jobs = executor.map_array(check, fids_arr, [audio_dir for _ in fids_arr], [video_dir for _ in fids_arr])
missing_fids = [job.result() for job in jobs]
missing_fids = [x for item in missing_fids for x in item]
if len(missing_fids) > 0:
print(f"Some audio/video files not exist, see {args.root}/missing.list")
with open(f"{args.root}/missing.list", 'w') as fo:
fo.write('\n'.join(missing_fids)+'\n')
shutil.rmtree(tmp_dir)
else:
jobs = executor.map_array(count_frames, fids_arr, [audio_dir for _ in fids_arr], [video_dir for _ in fids_arr])
num_frames = [job.result() for job in jobs]
audio_num_frames, video_num_frames = [], []
for item in num_frames:
audio_num_frames.extend([x[0] for x in item])
video_num_frames.extend([x[1] for x in item])
with open(f"{args.root}/nframes.audio", 'w') as fo:
fo.write(''.join([f"{x}\n" for x in audio_num_frames]))
with open(f"{args.root}/nframes.video", 'w') as fo:
fo.write(''.join([f"{x}\n" for x in video_num_frames]))
shutil.rmtree(tmp_dir)
| av_hubert-main | avhubert/preparation/count_frames_slurm.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys, os, glob, subprocess, shutil, math
from datetime import timedelta
import tempfile
from collections import OrderedDict
from pydub import AudioSegment
from tqdm import tqdm
def read_csv(csv_file, delimit=','):
lns = open(csv_file, 'r').readlines()
keys = lns[0].strip().split(delimit)
df = {key: [] for key in keys}
for ln in lns[1:]:
ln = ln.strip().split(delimit)
for j, key in enumerate(keys):
df[key].append(ln[j])
return df
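# make_short_manifest splits the long 'pretrain' videos into clips at word gaps
# longer than 0.4 s, keeping videos already under 15 s whole, and writes
# id,text,start,end rows to short-pretrain.csv.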
def make_short_manifest(pretrain_dir, output_fn):
subdirs = os.listdir(pretrain_dir)
min_interval = 0.4
max_duration = 15
df = {'fid': [], 'sent': [], 'start': [], 'end': []}
for subdir in tqdm(subdirs):
txt_fns = glob.glob(os.path.join(pretrain_dir, subdir+'/*txt'))
for txt_fn in txt_fns:
fid = os.path.relpath(txt_fn, pretrain_dir)[:-4]
lns = open(txt_fn).readlines()
raw_text = lns[0].strip().split(':')[-1].strip()
conf = lns[1].strip().split(':')[-1].strip()
word_intervals = []
for i_line, ln in enumerate(lns):
if ln[:4] == 'WORD':
start_index = i_line
break
for ln in lns[start_index+1:]:
word, start, end, score = ln.strip().split()
word_intervals.append([word, float(start), float(end)])
if word_intervals[-1][-1] < max_duration:
df['fid'].append(fid)
df['sent'].append(raw_text)
df['start'].append(0)
df['end'].append(-1)
continue
sents, cur_sent = [], []
for i_word, (word, start, end) in enumerate(word_intervals):
if i_word == 0:
cur_sent.append([word, start, end])
else:
assert start >= cur_sent[-1][-1], f"{fid} , {word}, start-{start}, prev-{cur_sent[-1][-1]}"
if start - cur_sent[-1][-1] > min_interval:
sents.append(cur_sent)
cur_sent = [[word, start, end]]
else:
cur_sent.append([word, start, end])
if len(cur_sent) > 0:
sents.append(cur_sent)
for i_sent, sent in enumerate(sents):
df['fid'].append(fid+'_'+str(i_sent))
sent_words = ' '.join([x[0] for x in sent])
if i_sent == 0:
sent_start = 0
else:
sent_start = (sent[0][1] + sents[i_sent-1][-1][2])/2
if i_sent == len(sents)-1:
sent_end = -1
else:
sent_end = (sent[-1][2] + sents[i_sent+1][0][1])/2
df['sent'].append(sent_words)
df['start'].append(sent_start)
df['end'].append(sent_end)
durations = [y-x for x, y in zip(df['start'], df['end'])]
num_long = len(list(filter(lambda x: x > 15, durations)))
print(f"Percentage of >15 second: {100*num_long/len(durations)}%")
num_long = len(list(filter(lambda x: x > 20, durations)))
print(f"Percentage of >20 second: {100*num_long/len(durations)}%")
with open(output_fn, 'w') as fo:
fo.write('id,text,start,end\n')
for i in range(len(df['fid'])):
fo.write(','.join([df['fid'][i], df['sent'][i], '%.3f' % (df['start'][i]), '%.3f' % (df['end'][i])])+'\n')
return
def trim_video_frame(csv_fn, raw_dir, output_dir, ffmpeg, rank, nshard):
df = read_csv(csv_fn)
raw2fid = OrderedDict()
decimal, fps = 9, 25
for fid, start, end in zip(df['id'], df['start'], df['end']):
if '_' in fid:
raw_fid = '_'.join(fid.split('_')[:-1])
else:
raw_fid = fid
if raw_fid in raw2fid:
raw2fid[raw_fid].append([fid, start, end])
else:
raw2fid[raw_fid] = [[fid, start, end]]
i_raw = -1
num_per_shard = math.ceil(len(raw2fid.keys())/nshard)
start_id, end_id = num_per_shard*rank, num_per_shard*(rank+1)
fid_info_shard = list(raw2fid.items())[start_id: end_id]
print(f"Total videos in current shard: {len(fid_info_shard)}/{len(raw2fid.keys())}")
for raw_fid, fid_info in tqdm(fid_info_shard):
i_raw += 1
raw_path = os.path.join(raw_dir, raw_fid+'.mp4')
tmp_dir = tempfile.mkdtemp()
cmd = ffmpeg + " -i " + raw_path + " " + tmp_dir + '/%0' + str(decimal) + 'd.png -loglevel quiet'
subprocess.call(cmd, shell=True)
num_frames = len(glob.glob(tmp_dir+'/*png'))
for fid, start_sec, end_sec in fid_info:
sub_dir = os.path.join(tmp_dir, fid)
os.makedirs(sub_dir, exist_ok=True)
start_sec, end_sec = float(start_sec), float(end_sec)
if end_sec == -1:
end_sec = 24*3600
start_frame_id, end_frame_id = int(start_sec*fps), min(int(end_sec*fps), num_frames)
imnames = [tmp_dir+'/'+str(x+1).zfill(decimal)+'.png' for x in range(start_frame_id, end_frame_id)]
for ix, imname in enumerate(imnames):
shutil.copyfile(imname, sub_dir+'/'+str(ix).zfill(decimal)+'.png')
output_path = os.path.join(output_dir, fid+'.mp4')
os.makedirs(os.path.dirname(output_path), exist_ok=True)
cmd = [ffmpeg, "-i", sub_dir+'/%0'+str(decimal)+'d.png', "-y", "-crf", "20", output_path, "-loglevel", "quiet"]
pipe = subprocess.call(cmd, stdout = subprocess.PIPE, stderr = subprocess.STDOUT) # subprocess.PIPE
shutil.rmtree(tmp_dir)
return
def trim_audio(csv_fn, raw_dir, output_dir, ffmpeg, rank, nshard):
df = read_csv(csv_fn)
raw2fid = OrderedDict()
for fid, start, end in zip(df['id'], df['start'], df['end']):
if '_' in fid:
raw_fid = '_'.join(fid.split('_')[:-1])
else:
raw_fid = fid
if raw_fid in raw2fid:
raw2fid[raw_fid].append([fid, start, end])
else:
raw2fid[raw_fid] = [[fid, start, end]]
i_raw = -1
num_per_shard = math.ceil(len(raw2fid.keys())/nshard)
start_id, end_id = num_per_shard*rank, num_per_shard*(rank+1)
fid_info_shard = list(raw2fid.items())[start_id: end_id]
print(f"Total audios in current shard: {len(fid_info_shard)}/{len(raw2fid.keys())}")
for raw_fid, fid_info in tqdm(fid_info_shard):
i_raw += 1
tmp_dir = tempfile.mkdtemp()
wav_path = os.path.join(tmp_dir, 'tmp.wav')
cmd = ffmpeg + " -i " + os.path.join(raw_dir, raw_fid+'.mp4') + " -f wav -vn -y " + wav_path + ' -loglevel quiet'
subprocess.call(cmd, shell=True)
raw_audio = AudioSegment.from_wav(wav_path)
for fid, start_sec, end_sec in fid_info:
start_sec, end_sec = float(start_sec), float(end_sec)
if end_sec == -1:
end_sec = 24*3600
t1, t2 = int(start_sec*1000), int(end_sec*1000)
new_audio = raw_audio[t1: t2]
output_path = os.path.join(output_dir, fid+'.wav')
os.makedirs(os.path.dirname(output_path), exist_ok=True)
new_audio.export(output_path, format="wav")
shutil.rmtree(tmp_dir)
return
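# Driver for the pretrain subset: step 1 builds the sentence-level csv manifest,
# step 2 trims video and audio according to it.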
def trim_pretrain(root_dir, ffmpeg, rank=0, nshard=1, step=1):
pretrain_dir = os.path.join(root_dir, 'pretrain')
print(f"Trim original videos in pretrain")
csv_fn = os.path.join(root_dir, 'short-pretrain.csv')
if step == 1:
print(f"Step 1. Make csv file {csv_fn}")
make_short_manifest(pretrain_dir, csv_fn)
else:
print(f"Step 2. Trim video and audio")
output_video_dir, output_audio_dir = os.path.join(root_dir, 'short-pretrain'), os.path.join(root_dir, 'audio/short-pretrain/')
os.makedirs(output_video_dir, exist_ok=True)
os.makedirs(output_audio_dir, exist_ok=True)
trim_video_frame(csv_fn, pretrain_dir, output_video_dir, ffmpeg, rank, nshard)
trim_audio(csv_fn, pretrain_dir, output_audio_dir, ffmpeg, rank, nshard)
return
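# Extract the audio track of every trainval/test video into a wav file under
# {lrs3_root}/audio/, sharded across (rank, nshard).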
def prep_wav(lrs3_root, ffmpeg, rank, nshard):
output_dir = f"{lrs3_root}/audio/"
video_fns = glob.glob(lrs3_root + '/trainval/*/*mp4') + glob.glob(lrs3_root + '/test/*/*mp4')
video_fns = sorted(video_fns)
num_per_shard = math.ceil(len(video_fns)/nshard)
start_id, end_id = num_per_shard*rank, num_per_shard*(rank+1)
video_fns = video_fns[start_id: end_id]
print(f"{len(video_fns)} videos")
# subdirs = os.listdir(input_dir)
for video_fn in tqdm(video_fns):
base_name = '/'.join(video_fn.split('/')[-3:])
audio_fn = os.path.join(output_dir, base_name.replace('mp4', 'wav'))
os.makedirs(os.path.dirname(audio_fn), exist_ok=True)
cmd = ffmpeg + " -i " + video_fn + " -f wav -vn -y " + audio_fn + ' -loglevel quiet'
subprocess.call(cmd, shell=True)
return
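# Build file.list and label.list: collect video ids and transcripts from the trainval/test
# .txt files plus the sentence-level short-pretrain csv.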
def get_file_label(lrs3_root):
video_ids_total, labels_total = [], []
for split in ['trainval', 'test']:
subdirs = os.listdir(os.path.join(lrs3_root, split))
for subdir in tqdm(subdirs):
video_fns = glob.glob(os.path.join(lrs3_root, split, subdir, '*mp4'))
video_ids = ['/'.join(x.split('/')[-3:])[:-4] for x in video_fns]
for video_id in video_ids:
txt_fn = os.path.join(lrs3_root, video_id+'.txt')
label = open(txt_fn).readlines()[0].split(':')[1].strip()
labels_total.append(label)
video_ids_total.append(video_id)
pretrain_csv = os.path.join(lrs3_root, 'short-pretrain.csv')
df = read_csv(pretrain_csv)
for video_id, label in zip(df['id'], df['text']):
video_ids_total.append(os.path.join('short-pretrain', video_id))
labels_total.append(label)
video_id_fn, label_fn = os.path.join(lrs3_root, 'file.list'), os.path.join(lrs3_root, 'label.list')
print(video_id_fn, label_fn)
with open(video_id_fn, 'w') as fo:
fo.write('\n'.join(video_ids_total)+'\n')
with open(label_fn, 'w') as fo:
fo.write('\n'.join(labels_total)+'\n')
return
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='LRS3 preprocess pretrain dir', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--lrs3', type=str, help='lrs3 root dir')
parser.add_argument('--ffmpeg', type=str, help='path to ffmpeg')
parser.add_argument('--rank', type=int, help='rank id')
parser.add_argument('--nshard', type=int, help='number of shards')
parser.add_argument('--step', type=int, help='Steps (1: split labels, 2: trim video/audio, 3: prep audio for trainval/test, 4: get labels and file list)')
args = parser.parse_args()
if args.step <= 2:
trim_pretrain(args.lrs3, args.ffmpeg, args.rank, args.nshard, step=args.step)
elif args.step == 3:
print(f"Extracting audio for trainval/test")
prep_wav(args.lrs3, args.ffmpeg, args.rank, args.nshard)
elif args.step == 4:
get_file_label(args.lrs3)
| av_hubert-main | avhubert/preparation/lrs3_prepare.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys,os,pickle,math
import cv2,dlib,time
import numpy as np
from tqdm import tqdm
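# Read all frames of a video into a numpy array (skvideo is imported in __main__,
# after its FFmpeg path has been set).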
def load_video(path):
videogen = skvideo.io.vread(path)
frames = np.array([frame for frame in videogen])
return frames
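# Detect 68-point dlib facial landmarks for every frame of each video in the current
# manifest shard and pickle the per-frame landmark list; falls back to the CNN detector
# when the frontal detector finds no face.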
def detect_face_landmarks(face_predictor_path, cnn_detector_path, root_dir, landmark_dir, flist_fn, rank, nshard):
def detect_landmark(image, detector, cnn_detector, predictor):
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
rects = detector(gray, 1)
if len(rects) == 0:
rects = cnn_detector(gray)
rects = [d.rect for d in rects]
coords = None
for (_, rect) in enumerate(rects):
shape = predictor(gray, rect)
coords = np.zeros((68, 2), dtype=np.int32)
for i in range(0, 68):
coords[i] = (shape.part(i).x, shape.part(i).y)
return coords
detector = dlib.get_frontal_face_detector()
cnn_detector = dlib.cnn_face_detection_model_v1(cnn_detector_path)
predictor = dlib.shape_predictor(face_predictor_path)
    input_dir = root_dir
    output_dir = landmark_dir
fids = [ln.strip() for ln in open(flist_fn).readlines()]
num_per_shard = math.ceil(len(fids)/nshard)
start_id, end_id = num_per_shard*rank, num_per_shard*(rank+1)
fids = fids[start_id: end_id]
print(f"{len(fids)} files")
for fid in tqdm(fids):
output_fn = os.path.join(output_dir, fid+'.pkl')
video_path = os.path.join(input_dir, fid+'.mp4')
frames = load_video(video_path)
landmarks = []
for frame in frames:
landmark = detect_landmark(frame, detector, cnn_detector, predictor)
landmarks.append(landmark)
os.makedirs(os.path.dirname(output_fn), exist_ok=True)
pickle.dump(landmarks, open(output_fn, 'wb'))
return
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='detecting facial landmarks', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--root', type=str, help='root dir')
parser.add_argument('--landmark', type=str, help='landmark dir')
parser.add_argument('--manifest', type=str, help='a list of filenames')
parser.add_argument('--cnn_detector', type=str, help='path to cnn detector (download and unzip from: http://dlib.net/files/mmod_human_face_detector.dat.bz2)')
parser.add_argument('--face_predictor', type=str, help='path to face predictor (download and unzip from: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2)')
parser.add_argument('--rank', type=int, help='rank id')
parser.add_argument('--nshard', type=int, help='number of shards')
parser.add_argument('--ffmpeg', type=str, help='ffmpeg path')
args = parser.parse_args()
import skvideo
skvideo.setFFmpegPath(os.path.dirname(args.ffmpeg))
print(skvideo.getFFmpegPath())
import skvideo.io
detect_face_landmarks(args.face_predictor, args.cnn_detector, args.root, args.landmark, args.manifest, args.rank, args.nshard)
| av_hubert-main | avhubert/preparation/detect_landmark.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from tempfile import NamedTemporaryFile
import csv
from pathlib import Path
import zipfile
from functools import reduce
from multiprocessing import cpu_count
from typing import Any, Dict, List, Optional, Union
import numpy as np
# import pandas as pd
import sentencepiece as sp
# from fairseq.data.audio.audio_utils import (
# _convert_to_mono, _get_kaldi_fbank, _get_torchaudio_fbank
# )
# import torch
from tqdm import tqdm
UNK_TOKEN, UNK_TOKEN_ID = "<unk>", 3
BOS_TOKEN, BOS_TOKEN_ID = "<s>", 0
EOS_TOKEN, EOS_TOKEN_ID = "</s>", 2
PAD_TOKEN, PAD_TOKEN_ID = "<pad>", 1
def gen_vocab(
input_path: Path, output_path_prefix: Path, model_type="bpe",
vocab_size=1000, special_symbols: Optional[List[str]] = None
):
# Train SentencePiece Model
arguments = [
f"--input={input_path.as_posix()}",
f"--model_prefix={output_path_prefix.as_posix()}",
f"--model_type={model_type}",
f"--vocab_size={vocab_size}",
"--character_coverage=1.0",
f"--num_threads={cpu_count()}",
f"--unk_id={UNK_TOKEN_ID}",
f"--bos_id={BOS_TOKEN_ID}",
f"--eos_id={EOS_TOKEN_ID}",
f"--pad_id={PAD_TOKEN_ID}",
]
if special_symbols is not None:
_special_symbols = ",".join(special_symbols)
arguments.append(f"--user_defined_symbols={_special_symbols}")
sp.SentencePieceTrainer.Train(" ".join(arguments))
# Export fairseq dictionary
spm = sp.SentencePieceProcessor()
spm.Load(output_path_prefix.as_posix() + ".model")
vocab = {i: spm.IdToPiece(i) for i in range(spm.GetPieceSize())}
assert (
vocab.get(UNK_TOKEN_ID) == UNK_TOKEN
and vocab.get(PAD_TOKEN_ID) == PAD_TOKEN
and vocab.get(BOS_TOKEN_ID) == BOS_TOKEN
and vocab.get(EOS_TOKEN_ID) == EOS_TOKEN
)
vocab = {
i: s
for i, s in vocab.items()
if s not in {UNK_TOKEN, BOS_TOKEN, EOS_TOKEN, PAD_TOKEN}
}
with open(output_path_prefix.as_posix() + ".txt", "w") as f_out:
for _, s in sorted(vocab.items(), key=lambda x: x[0]):
f_out.write(f"{s} 1\n")
return
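# A minimal usage sketch (not part of the original script; the paths below are
# hypothetical): train a 1000-unit SentencePiece model from a plain-text file with one
# transcript per line, then write the matching fairseq dictionary next to it.
if __name__ == "__main__":
    out_dir = Path("spm1000")  # hypothetical output directory
    out_dir.mkdir(parents=True, exist_ok=True)
    gen_vocab(
        Path("label.list"),  # hypothetical input: one transcript per line
        out_dir / "spm_unigram1000",
        model_type="unigram",
        vocab_size=1000,
    )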
| av_hubert-main | avhubert/preparation/gen_subword.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
## Based on: https://github.com/mpc001/Lipreading_using_Temporal_Convolutional_Networks/blob/master/preprocessing/crop_mouth_from_video.py
""" Crop Mouth ROIs from videos for lipreading"""
import os,pickle,shutil,tempfile
import math
import cv2
import glob
import subprocess
import argparse
import numpy as np
from collections import deque
from skimage import transform as tf
from tqdm import tqdm
# -- Landmark interpolation:
def linear_interpolate(landmarks, start_idx, stop_idx):
start_landmarks = landmarks[start_idx]
stop_landmarks = landmarks[stop_idx]
delta = stop_landmarks - start_landmarks
for idx in range(1, stop_idx-start_idx):
landmarks[start_idx+idx] = start_landmarks + idx/float(stop_idx-start_idx) * delta
return landmarks
# -- Face Transformation
def warp_img(src, dst, img, std_size):
tform = tf.estimate_transform('similarity', src, dst) # find the transformation matrix
warped = tf.warp(img, inverse_map=tform.inverse, output_shape=std_size) # warp
    warped = warped * 255 # note output from warp is double image (value range [0,1])
warped = warped.astype('uint8')
return warped, tform
def apply_transform(transform, img, std_size):
warped = tf.warp(img, inverse_map=transform.inverse, output_shape=std_size)
warped = warped * 255 # note output from warp is double image (value range [0,1])
warped = warped.astype('uint8')
return warped
def get_frame_count(filename):
cap = cv2.VideoCapture(filename)
total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
cap.release()
return total
def read_video(filename):
cap = cv2.VideoCapture(filename)
while(cap.isOpened()):
ret, frame = cap.read() # BGR
if ret:
yield frame
else:
break
cap.release()
# -- Crop
def cut_patch(img, landmarks, height, width, threshold=5):
center_x, center_y = np.mean(landmarks, axis=0)
if center_y - height < 0:
center_y = height
if center_y - height < 0 - threshold:
raise Exception('too much bias in height')
if center_x - width < 0:
center_x = width
if center_x - width < 0 - threshold:
raise Exception('too much bias in width')
if center_y + height > img.shape[0]:
center_y = img.shape[0] - height
if center_y + height > img.shape[0] + threshold:
raise Exception('too much bias in height')
if center_x + width > img.shape[1]:
center_x = img.shape[1] - width
if center_x + width > img.shape[1] + threshold:
raise Exception('too much bias in width')
cutted_img = np.copy(img[ int(round(center_y) - round(height)): int(round(center_y) + round(height)),
int(round(center_x) - round(width)): int(round(center_x) + round(width))])
return cutted_img
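# Write a sequence of ROI frames to target_path as a 25 fps mp4 by dumping PNGs into a
# temporary directory and re-encoding them with ffmpeg.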
def write_video_ffmpeg(rois, target_path, ffmpeg):
os.makedirs(os.path.dirname(target_path), exist_ok=True)
decimals = 10
fps = 25
tmp_dir = tempfile.mkdtemp()
for i_roi, roi in enumerate(rois):
cv2.imwrite(os.path.join(tmp_dir, str(i_roi).zfill(decimals)+'.png'), roi)
list_fn = os.path.join(tmp_dir, "list")
with open(list_fn, 'w') as fo:
fo.write("file " + "'" + tmp_dir+'/%0'+str(decimals)+'d.png' + "'\n")
## ffmpeg
if os.path.isfile(target_path):
os.remove(target_path)
cmd = [ffmpeg, "-f", "concat", "-safe", "0", "-i", list_fn, "-q:v", "1", "-r", str(fps), '-y', '-crf', '20', target_path]
    subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
# rm tmp dir
shutil.rmtree(tmp_dir)
return
def load_args(default_config=None):
parser = argparse.ArgumentParser(description='Lipreading Pre-processing', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--video-direc', default=None, help='raw video directory')
parser.add_argument('--landmark-direc', default=None, help='landmark directory')
parser.add_argument('--filename-path', help='list of detected video and its subject ID')
parser.add_argument('--save-direc', default=None, help='the directory of saving mouth ROIs')
# -- mean face utils
parser.add_argument('--mean-face', type=str, help='reference mean face (download from: https://github.com/mpc001/Lipreading_using_Temporal_Convolutional_Networks/blob/master/preprocessing/20words_mean_face.npy)')
# -- mouthROIs utils
parser.add_argument('--crop-width', default=96, type=int, help='the width of mouth ROIs')
parser.add_argument('--crop-height', default=96, type=int, help='the height of mouth ROIs')
parser.add_argument('--start-idx', default=48, type=int, help='the start of landmark index')
parser.add_argument('--stop-idx', default=68, type=int, help='the end of landmark index')
parser.add_argument('--window-margin', default=12, type=int, help='window margin for smoothed_landmarks')
parser.add_argument('--ffmpeg', type=str, help='ffmpeg path')
parser.add_argument('--rank', type=int, help='rank id')
parser.add_argument('--nshard', type=int, help='number of shards')
args = parser.parse_args()
return args
def crop_patch(video_pathname, landmarks, mean_face_landmarks, stablePntsIDs, STD_SIZE, window_margin, start_idx, stop_idx, crop_height, crop_width):
"""Crop mouth patch
    :param str video_pathname: pathname of the video file
:param list landmarks: interpolated landmarks
"""
frame_idx = 0
num_frames = get_frame_count(video_pathname)
frame_gen = read_video(video_pathname)
margin = min(num_frames, window_margin)
while True:
try:
frame = frame_gen.__next__() ## -- BGR
except StopIteration:
break
if frame_idx == 0:
q_frame, q_landmarks = deque(), deque()
sequence = []
q_landmarks.append(landmarks[frame_idx])
q_frame.append(frame)
if len(q_frame) == margin:
smoothed_landmarks = np.mean(q_landmarks, axis=0)
cur_landmarks = q_landmarks.popleft()
cur_frame = q_frame.popleft()
# -- affine transformation
trans_frame, trans = warp_img( smoothed_landmarks[stablePntsIDs, :],
mean_face_landmarks[stablePntsIDs, :],
cur_frame,
STD_SIZE)
trans_landmarks = trans(cur_landmarks)
# -- crop mouth patch
sequence.append( cut_patch( trans_frame,
trans_landmarks[start_idx:stop_idx],
crop_height//2,
crop_width//2,))
if frame_idx == len(landmarks)-1:
while q_frame:
cur_frame = q_frame.popleft()
# -- transform frame
trans_frame = apply_transform( trans, cur_frame, STD_SIZE)
# -- transform landmarks
trans_landmarks = trans(q_landmarks.popleft())
# -- crop mouth patch
sequence.append( cut_patch( trans_frame,
trans_landmarks[start_idx:stop_idx],
crop_height//2,
crop_width//2,))
return np.array(sequence)
frame_idx += 1
return None
def landmarks_interpolate(landmarks):
"""Interpolate landmarks
    :param list landmarks: landmarks detected in raw videos
"""
valid_frames_idx = [idx for idx, _ in enumerate(landmarks) if _ is not None]
if not valid_frames_idx:
return None
for idx in range(1, len(valid_frames_idx)):
if valid_frames_idx[idx] - valid_frames_idx[idx-1] == 1:
continue
else:
landmarks = linear_interpolate(landmarks, valid_frames_idx[idx-1], valid_frames_idx[idx])
valid_frames_idx = [idx for idx, _ in enumerate(landmarks) if _ is not None]
# -- Corner case: keep frames at the beginning or at the end failed to be detected.
if valid_frames_idx:
landmarks[:valid_frames_idx[0]] = [landmarks[valid_frames_idx[0]]] * valid_frames_idx[0]
landmarks[valid_frames_idx[-1]:] = [landmarks[valid_frames_idx[-1]]] * (len(landmarks) - valid_frames_idx[-1])
valid_frames_idx = [idx for idx, _ in enumerate(landmarks) if _ is not None]
assert len(valid_frames_idx) == len(landmarks), "not every frame has landmark"
return landmarks
if __name__ == '__main__':
args = load_args()
# -- mean face utils
STD_SIZE = (256, 256)
mean_face_landmarks = np.load(args.mean_face)
stablePntsIDs = [33, 36, 39, 42, 45]
lines = open(args.filename_path).readlines()
fids = [ln.strip() for ln in lines]
num_per_shard = math.ceil(len(fids)/args.nshard)
start_id, end_id = num_per_shard*args.rank, num_per_shard*(args.rank+1)
fids = fids[start_id: end_id]
for filename_idx, filename in enumerate(tqdm(fids)):
video_pathname = os.path.join(args.video_direc, filename+'.mp4')
landmarks_pathname = os.path.join(args.landmark_direc, filename+'.pkl')
dst_pathname = os.path.join(args.save_direc, filename+'.mp4')
assert os.path.isfile(video_pathname), "File does not exist. Path input: {}".format(video_pathname)
assert os.path.isfile(landmarks_pathname), "File does not exist. Path input: {}".format(landmarks_pathname)
if os.path.exists(dst_pathname):
continue
landmarks = pickle.load(open(landmarks_pathname, 'rb'))
# -- pre-process landmarks: interpolate frames not being detected.
preprocessed_landmarks = landmarks_interpolate(landmarks)
if not preprocessed_landmarks:
print(f"resizing {filename}")
frame_gen = read_video(video_pathname)
frames = [cv2.resize(x, (args.crop_width, args.crop_height)) for x in frame_gen]
write_video_ffmpeg(frames, dst_pathname, args.ffmpeg)
continue
# -- crop
sequence = crop_patch(video_pathname, preprocessed_landmarks, mean_face_landmarks, stablePntsIDs, STD_SIZE, window_margin=args.window_margin, start_idx=args.start_idx, stop_idx=args.stop_idx, crop_height=args.crop_height, crop_width=args.crop_width)
assert sequence is not None, "cannot crop from {}.".format(filename)
# -- save
os.makedirs(os.path.dirname(dst_pathname), exist_ok=True)
write_video_ffmpeg(sequence, dst_pathname, args.ffmpeg)
print('Done.')
| av_hubert-main | avhubert/preparation/align_mouth.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import tempfile
import shutil
import submitit
import os, sys, subprocess, glob, re
import numpy as np
from collections import defaultdict
from scipy.io import wavfile
from tqdm import tqdm
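# Split MUSAN speech/music/noise recordings longer than 10 seconds into ~10-second chunks
# under {musan_root}/short-musan (shorter files are skipped); sharded across (rank, nshard).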
def split_musan(musan_root, rank, nshard):
wav_fns = glob.glob(f"{musan_root}/speech/*/*wav") + glob.glob(f"{musan_root}/music/*/*wav") + glob.glob(f"{musan_root}/noise/*/*wav")
num_per_shard = math.ceil(len(wav_fns)/nshard)
start_id, end_id = num_per_shard*rank, num_per_shard*(rank+1)
wav_fns = wav_fns[start_id: end_id]
print(f"{len(wav_fns)} raw audios")
output_dir = f"{musan_root}/short-musan"
dur = 10
for wav_fn in tqdm(wav_fns):
sample_rate, wav_data = wavfile.read(wav_fn)
assert sample_rate == 16_000 and len(wav_data.shape) == 1
if len(wav_data) > dur * sample_rate:
num_split = int(np.ceil(len(wav_data) / (dur*sample_rate)))
for i in range(num_split):
filename = '/'.join(wav_fn.split('/')[-3:])[:-4]
output_wav_fn = os.path.join(output_dir, filename + f'-{i}.wav')
sub_data = wav_data[i*dur*sample_rate: (i+1)*dur*sample_rate]
os.makedirs(os.path.dirname(output_wav_fn), exist_ok=True)
wavfile.write(output_wav_fn, sample_rate, sub_data.astype(np.int16))
return
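# Mix several wav files into one signal by truncating to the shortest file and averaging
# the waveforms.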
def mix_audio(wav_fns):
wav_data = [wavfile.read(wav_fn)[1] for wav_fn in wav_fns]
wav_data_ = []
min_len = min([len(x) for x in wav_data])
for item in wav_data:
wav_data_.append(item[:min_len])
wav_data = np.stack(wav_data_).mean(axis=0).astype(np.int16)
return wav_data
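# Group MUSAN speech files by speaker and write random 80/10/10 train/valid/test speaker
# splits to {musan_root}/speech/spk.{split}.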
def get_speaker_info(musan_root):
wav_fns = glob.glob(f"{musan_root}/speech/*/*wav")
spk2wav = {}
for wav_fn in tqdm(wav_fns):
speaker = '-'.join(os.path.basename(wav_fn).split('-')[:-1])
if speaker not in spk2wav:
spk2wav[speaker] = []
spk2wav[speaker].append(wav_fn)
speakers = sorted(list(spk2wav.keys()))
print(f"{len(speakers)} speakers")
np.random.shuffle(speakers)
output_dir = f"{musan_root}/speech/"
num_train, num_valid = int(len(speakers)*0.8), int(len(speakers)*0.1)
train_speakers, valid_speakers, test_speakers = speakers[:num_train], speakers[num_train: num_train+num_valid], speakers[num_train+num_valid:]
for split in ['train', 'valid', 'test']:
speakers = eval(f"{split}_speakers")
with open(f"{output_dir}/spk.{split}", 'w') as fo:
fo.write('\n'.join(speakers)+'\n')
return
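# Synthesize babble noise by mixing 30 random speech files from each split's speakers;
# writes the mixtures to {musan_root}/babble/wav/, sharded across (rank, nshard).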
def make_musan_babble(musan_root, rank, nshard):
babble_dir = f"{musan_root}/babble/wav/"
num_per_mixture = 30
sample_rate = 16_000
num_train, num_valid, num_test = 8000, 1000, 1000
os.makedirs(babble_dir, exist_ok=True)
wav_fns = glob.glob(f"{musan_root}/speech/*/*wav")
spk2wav = {}
for wav_fn in tqdm(wav_fns):
speaker = '-'.join(os.path.basename(wav_fn).split('-')[:-1])
if speaker not in spk2wav:
spk2wav[speaker] = []
spk2wav[speaker].append(wav_fn)
for split in ['train', 'valid', 'test']:
speakers = [ln.strip() for ln in open(f"{musan_root}/speech/spk.{split}").readlines()]
num_split = eval(f"num_{split}")
wav_fns = []
for x in speakers:
wav_fns.extend(spk2wav[x])
print(f"{split} -> # speaker {len(speakers)}, # wav {len(wav_fns)}")
num_per_shard = math.ceil(num_split/nshard)
start_id, end_id = num_per_shard*rank, num_per_shard*(rank+1)
for i in tqdm(range(num_split)):
if not (i >= start_id and i < end_id):
continue
np.random.seed(i)
perm = np.random.permutation(len(wav_fns))[:num_per_mixture]
output_fn = f"{babble_dir}/{split}-{str(i+1).zfill(5)}.wav"
wav_data = mix_audio([wav_fns[x] for x in perm])
wavfile.write(output_fn, sample_rate, wav_data)
return
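# Count the number of audio samples in each wav file of the current shard.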
def count_frames(wav_fns, rank, nshard):
num_per_shard = math.ceil(len(wav_fns)/nshard)
start_id, end_id = num_per_shard*rank, num_per_shard*(rank+1)
wav_fns = wav_fns[start_id: end_id]
nfs = []
for wav_fn in tqdm(wav_fns):
sample_rate, wav_data = wavfile.read(wav_fn)
nfs.append(len(wav_data))
return nfs
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='MUSAN audio preparation', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--musan', type=str, help='MUSAN root')
parser.add_argument('--nshard', type=int, default=1, help='number of shards')
parser.add_argument('--slurm_partition', type=str, default='cpu', help='slurm partition')
args = parser.parse_args()
tmp_dir = tempfile.mkdtemp(dir='./')
executor = submitit.AutoExecutor(folder=tmp_dir)
executor.update_parameters(slurm_array_parallelism=100, slurm_partition=args.slurm_partition, timeout_min=240)
ranks = list(range(0, args.nshard))
print(f"Split raw audio")
jobs = executor.map_array(split_musan, [args.musan for _ in ranks], ranks, [args.nshard for _ in ranks])
[job.result() for job in jobs]
short_musan = os.path.join(args.musan, 'short-musan')
print(f"Get speaker info")
get_speaker_info(short_musan)
print(f"Mix audio")
jobs = executor.map_array(make_musan_babble, [short_musan for _ in ranks], ranks, [args.nshard for _ in ranks])
[job.result() for job in jobs]
print(f"Count number of frames")
wav_fns = glob.glob(f"{short_musan}/babble/*/*wav") + glob.glob(f"{short_musan}/music/*/*wav") + glob.glob(f"{short_musan}/noise/*/*wav")
jobs = executor.map_array(count_frames, [wav_fns for _ in ranks], ranks, [args.nshard for _ in ranks])
nfs = [job.result() for job in jobs]
nfs_ = []
for nf in nfs:
nfs_.extend(nf)
nfs = nfs_
num_frames_fn = f"{short_musan}/nframes.audio"
with open(num_frames_fn, 'w') as fo:
for wav_fn, nf in zip(wav_fns, nfs):
fo.write(os.path.abspath(wav_fn)+'\t'+str(nf)+'\n')
shutil.rmtree(tmp_dir)
| av_hubert-main | avhubert/preparation/musan_prepare.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math, time
import os, sys, subprocess, glob, re
import numpy as np
from collections import defaultdict
from scipy.io import wavfile
from tqdm import tqdm
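# Write train/valid/test tsv manifests for babble/music/noise clips whose duration falls
# between 3 and 11 seconds.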
def make_musan_tsv(musan_root, output_dir):
os.makedirs(output_dir, exist_ok=True)
sample_rate = 16_000
min_dur, max_dur = 3*sample_rate, 11*sample_rate
part_ratios = zip(['train', 'valid', 'test'], [0.8, 0.1, 0.1])
all_fns = {}
nfs = f"{musan_root}/nframes.audio"
nfs = dict([x.strip().split('\t') for x in open(nfs).readlines()])
for category in ['babble', 'music', 'noise']:
wav_fns = glob.glob(f"{musan_root}/{category}/*/*wav")
target_fns = []
for wav_fn in tqdm(wav_fns):
dur = int(nfs[os.path.abspath(wav_fn)])
if dur >= min_dur and dur < max_dur:
target_fns.append(wav_fn)
print(f"{category}: {len(target_fns)}/{len(wav_fns)}")
all_fns[category] = target_fns
output_subdir = f"{output_dir}/{category}"
os.makedirs(output_subdir, exist_ok=True)
num_train, num_valid, num_test = int(0.8*len(target_fns)), int(0.1*len(target_fns)), int(0.1*len(target_fns))
if category in {'music', 'noise'}:
np.random.shuffle(target_fns)
train_fns, valid_fns, test_fns = target_fns[:num_train], target_fns[num_train:num_train+num_valid], target_fns[num_train+num_valid:]
elif category == 'babble':
train_fns, valid_fns, test_fns = [], [], []
for wav_fn in target_fns:
split_id = os.path.basename(wav_fn)[:-4].split('-')[0]
if split_id == 'train':
train_fns.append(wav_fn)
elif split_id == 'valid':
valid_fns.append(wav_fn)
elif split_id == 'test':
test_fns.append(wav_fn)
for x in ['train', 'valid', 'test']:
x_fns = eval(f"{x}_fns")
x_fns = [os.path.abspath(x_fn) for x_fn in x_fns]
print(os.path.abspath(output_subdir), x, len(x_fns))
with open(f"{output_subdir}/{x}.tsv", 'w') as fo:
fo.write('\n'.join(x_fns)+'\n')
return
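# Merge the per-category tsv manifests into a single "all" manifest, oversampling each
# category's training list to 20k entries.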
def combine(input_tsv_dirs, output_dir):
output_subdir = f"{output_dir}/all"
os.makedirs(output_subdir, exist_ok=True)
num_train_per_cat = 20_000
train_fns, valid_fns, test_fns = [], [], []
for input_tsv_dir in input_tsv_dirs:
train_fn, valid_fn, test_fn = [ln.strip() for ln in open(f"{input_tsv_dir}/train.tsv").readlines()], [ln.strip() for ln in open(f"{input_tsv_dir}/valid.tsv").readlines()], [ln.strip() for ln in open(f"{input_tsv_dir}/test.tsv").readlines()]
num_repeats = int(np.ceil(num_train_per_cat/len(train_fn)))
train_fn_ = []
for i in range(num_repeats):
train_fn_.extend(train_fn)
train_fn = train_fn_[:num_train_per_cat]
train_fns.extend(train_fn)
valid_fns.extend(valid_fn)
test_fns.extend(test_fn)
for x in ['train', 'valid', 'test']:
x_fns = eval(f"{x}_fns")
print(os.path.abspath(output_subdir), x, len(x_fns))
with open(f"{output_subdir}/{x}.tsv", 'w') as fo:
fo.write('\n'.join(x_fns)+'\n')
return
def main():
import argparse
parser = argparse.ArgumentParser(description='Set up noise manifest', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--musan', type=str, help='MUSAN root')
parser.add_argument('--lrs3', type=str, help='LRS3 root')
args = parser.parse_args()
short_musan, output_tsv_dir = f"{args.musan}/short-musan", f"{args.musan}/tsv"
print(f"Make tsv for babble, music, noise")
make_musan_tsv(short_musan, output_tsv_dir)
print(f"Combine tsv")
input_tsv_dirs = [f"{output_tsv_dir}/{x}" for x in ['noise', 'music', 'babble']] + [f"{args.lrs3}/noise/speech"]
combine(input_tsv_dirs, output_tsv_dir)
return
if __name__ == '__main__':
main()
| av_hubert-main | avhubert/preparation/noise_manifest.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import cv2, math, os
import tempfile
import shutil
from tqdm import tqdm
from scipy.io import wavfile
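# For every file id, count the audio samples in its wav and the frames in its mp4.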
def count_frames(fids, audio_dir, video_dir):
total_num_frames = []
for fid in tqdm(fids):
wav_fn = f"{audio_dir}/{fid}.wav"
video_fn = f"{video_dir}/{fid}.mp4"
num_frames_audio = len(wavfile.read(wav_fn)[1])
cap = cv2.VideoCapture(video_fn)
num_frames_video = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
total_num_frames.append([num_frames_audio, num_frames_video])
return total_num_frames
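# Return the file ids whose wav or mp4 file is missing.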
def check(fids, audio_dir, video_dir):
missing = []
for fid in tqdm(fids):
wav_fn = f"{audio_dir}/{fid}.wav"
video_fn = f"{video_dir}/{fid}.mp4"
is_file = os.path.isfile(wav_fn) and os.path.isfile(video_fn)
if not is_file:
missing.append(fid)
return missing
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='count number of frames', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--root', type=str, help='root dir')
parser.add_argument('--manifest', type=str, help='a list of filenames')
parser.add_argument('--nshard', type=int, default=1, help='number of shards')
parser.add_argument('--rank', type=int, default=0, help='rank id')
args = parser.parse_args()
fids = [ln.strip() for ln in open(args.manifest).readlines()]
print(f"{len(fids)} files")
audio_dir, video_dir = f"{args.root}/audio", f"{args.root}/video"
ranks = list(range(0, args.nshard))
fids_arr = []
num_per_shard = math.ceil(len(fids)/args.nshard)
for rank in ranks:
sub_fids = fids[rank*num_per_shard: (rank+1)*num_per_shard]
if len(sub_fids) > 0:
fids_arr.append(sub_fids)
if args.rank >= len(fids_arr):
open(f"{args.root}/nframes.audio.{args.rank}", 'w').write('')
open(f"{args.root}/nframes.video.{args.rank}", 'w').write('')
else:
fids = fids_arr[args.rank]
missing_fids = check(fids, audio_dir, video_dir)
if len(missing_fids) > 0:
print(f"Some audio/video files not exist, see {args.root}/missing.list.{args.rank}")
with open(f"{args.root}/missing.list.{args.rank}", 'w') as fo:
fo.write('\n'.join(missing_fids)+'\n')
else:
num_frames = count_frames(fids, audio_dir, video_dir)
audio_num_frames = [x[0] for x in num_frames]
video_num_frames = [x[1] for x in num_frames]
with open(f"{args.root}/nframes.audio.{args.rank}", 'w') as fo:
fo.write(''.join([f"{x}\n" for x in audio_num_frames]))
with open(f"{args.root}/nframes.video.{args.rank}", 'w') as fo:
fo.write(''.join([f"{x}\n" for x in video_num_frames]))
| av_hubert-main | avhubert/preparation/count_frames.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import glob
import shutil
import subprocess
from tqdm import tqdm
from pathlib import Path
def main():
import argparse
parser = argparse.ArgumentParser(description='VoxCeleb2 tsv preparation', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--vox', type=str, help='VoxCeleb2 root dir')
parser.add_argument('--en-ids', type=str, help='a list of English-utterance ids')
args = parser.parse_args()
file_list = f"{args.vox}/file.list"
assert os.path.isfile(file_list) , f"{file_list} not exist -> run vox_prepare.py first"
nframes_audio_file, nframes_video_file = f"{args.vox}/nframes.audio", f"{args.vox}/nframes.video"
assert os.path.isfile(nframes_audio_file) , f"{nframes_audio_file} not exist -> run count_frames.py first"
assert os.path.isfile(nframes_video_file) , f"{nframes_video_file} not exist -> run count_frames.py first"
audio_dir, video_dir = f"{args.vox}/audio", f"{args.vox}/video"
def setup_target(target_dir, train):
for name, data in zip(['train'], [train]):
with open(f"{target_dir}/{name}.tsv", 'w') as fo:
fo.write('/\n')
for fid, nf_audio, nf_video in data:
fo.write('\t'.join([fid, os.path.abspath(f"{video_dir}/{fid}.mp4"), os.path.abspath(f"{audio_dir}/{fid}.wav"), str(nf_video), str(nf_audio)])+'\n')
return
fids = [x.strip() for x in open(file_list).readlines()]
nfs_audio, nfs_video = [x.strip() for x in open(nframes_audio_file).readlines()], [x.strip() for x in open(nframes_video_file).readlines()]
en_fids = set([x.strip() for x in open(args.en_ids).readlines()])
train_all, train_sub = [], []
for fid, nf_audio, nf_video in zip(fids, nfs_audio, nfs_video):
if fid in en_fids:
train_sub.append([fid, nf_audio, nf_video])
train_all.append([fid, nf_audio, nf_video])
dir_en = f"{args.vox}/en_data"
print(f"Set up English-only dir")
os.makedirs(dir_en, exist_ok=True)
setup_target(dir_en, train_sub)
dir_all = f"{args.vox}/all_data"
print(f"Set up all data dir")
os.makedirs(dir_all, exist_ok=True)
setup_target(dir_all, train_all)
return
if __name__ == '__main__':
main()
| av_hubert-main | avhubert/preparation/vox_manifest.py |
import numpy as np
import torch
class RandomEmbedding(torch.nn.Embedding):
"""A class used for efficiently storing random circulant embeddings.
For a n-by-d embedding matrix, we let each d-by-d submatrix be a
circulant matrix parameterized by a d-dimensional random vector.
To add further variability between the rows of these circulant
submatrices, we multiply each circulant submatrix by a diagonal
matrix with random {+1,-1} values along the diagonal. This follows
the convention of (Arora et al., 2020) and (Yu et al., 2017).
    Note that if d does not divide n evenly, then the final circulant
submatrix will only be partially used.
...
References
----------
S. Arora, A. May, J. Zhang, C. Ré.
Contextual Embeddings: When Are They Worth It? ACL 2020.
F. Yu, A. Bhaskara, S. Kumar, Y. Gong, S. Chang.
On Binary Embedding Using Circulant Matrices. JMLR 2017.
...
Attributes
----------
num_embeddings : int
The size of the embedding vocabulary.
embedding_dim : int
The dimension of each embedding vector.
avg_embedding_norm : float
The average norm of a row in the embedding matrix (default 1).
rand_weight : tensor (dtype = torch.float)
A random and fixed float tensor storing the parameters of the
circulant submatrices of the embedding matrix. Its shape is
(b,embedding_dim), where b = ceil(num_embeddings/embedding_dim).
Each row of rand_weight corresponds to the parameters for one of
the circulant submatrices.
rand_signs : tensor (dtype = torch.bool)
A random and fixed boolean tensor which flips the signs of
columns of the circulant matrix. Its shape is (b,embedding_dim),
where b = ceil(num_embeddings/embedding_dim). For the i^th
circulant submatrix, we multiply it by a diagonal matrix whose
diagonal is given by the i^th row of rand_signs.
ind : tensor (dtype = torch.long)
A fixed tensor storing the indices [0,...,embedding_dim - 1],
which is used for accessing a full row of the embedding matrix
at a time in the forward method.
...
Methods
-------
forward(input)
Takes a tensor (dtype = torch.long) of indices as input, and
returns the corresponding rows of the random embedding matrix.
"""
def __init__(self, num_embeddings, embedding_dim, avg_embedding_norm=1):
"""Initializes the random circulant embedding matrix.
Note that although RandomEmbedding is a subclass of
nn.Embedding, this constructor ignores the padding_idx,
norm_type, scale_grad_by_freq, sparse, and _weight arguments
which can normally be passed to the constructor of the
nn.Embedding class.
Parameters
----------
num_embeddings : int
The size of the embedding vocabulary.
embedding_dim : int
The dimension of each embedding vector.
avg_embedding_norm : float
The desired average L2 norm of a row in the embedding matrix
(default 1).
"""
# we pass in a 0 for num_embeddings and embedding_dim to the superclass
# constructor so that it doesn't instantiate a large embedding weight
# matrix.
super().__init__(0, 0)
# Now we initialize num_embeddings and embedding_dim properly
self.num_embeddings, self.embedding_dim = num_embeddings, embedding_dim
self.avg_embedding_norm = avg_embedding_norm
n, d = self.num_embeddings, self.embedding_dim
# b is the number of different d-by-d circulant blocks in the matrix.
b = int(np.ceil(n/d))
# self.weight is a learnable parameter in nn.Embedding. We set it to
# None here because we don't need any learnable parameters.
self.weight = None
# Each of the b random d-dimensional rows of rand_weight represents
# the parameters for one of the b circulant submatrices of the
# random embedding matrix.
rand_weight = torch.randn(b, d)
# We now normalize rand_weight so that the average L2 row norm for
# the embedding matrix is equal to avg_embedding_norm. To compute the
# average norm of the rows of this circulant embedding matrix, we count
# the L2 norm of each row of rand_weight[:b-1,:] d times (because
# there are d rows in the embedding matrix that have the same norm as
# each of these rows), and we count the L2 norm of
# rand_weight[b-1,:] (n-(b-1)*d) times. This is because when d does
# not divide n evenly, the last row of rand_weight will only be
# repeated this many times in the embedding matrix.
curr_avg_norm = (d * torch.sum(rand_weight[:b-1,:].norm(dim=1)) +
(n - (b-1) * d) * rand_weight[b-1,:].norm()) / n
rand_weight *= avg_embedding_norm / curr_avg_norm.item()
# ind is used to access a full row of the circulant embedding
# matrix at a time.
# rand_signs is used to randomly change the signs of the columns of
# the rows of the embedding matrix.
ind = torch.arange(d)
rand_signs = torch.randint(2, (b,d), dtype=torch.bool)
# Register these tensors as buffers, so they stay fixed during training.
self.register_buffer('rand_weight', rand_weight)
self.register_buffer('ind', ind)
self.register_buffer('rand_signs', rand_signs)
def forward(self, input):
"""Returns the requested rows of the embedding matrix.
Parameters
----------
input : torch.LongTensor
A tensor of indices specifying which rows of the embedding
matrix should be returned by the forward method. The values
of input must all be between 0 and self.num_embeddings - 1.
Returns
-------
tensor (dtype = torch.float)
A tensor containing the rows of the embedding matrix
specified by the indices in the input tensor. The returned
tensor has shape (input.shape, self.embedding_dim).
Raises
------
TypeError
If input tensor is not of type torch.long.
ValueError
If input tensor has any negative values, or values greater
than self.num_embeddings - 1.
"""
if input.dtype != torch.long:
raise TypeError('Input must be of type torch.long')
if (torch.sum(input >= self.num_embeddings).item() != 0 or
torch.sum(input < 0).item() != 0):
raise ValueError('Entries of input tensor must all be non-negative '
'integers less than self.num_embeddings')
d = self.embedding_dim
input_us = input.unsqueeze(-1)
# Given the input tensor of indices (of shape input.shape), we must
# return the corresponding d-dimensional rows of the circulant random
# embedding matrix. Thus, the output of this forward
# method will have shape (input.shape,d).
# For each index in input, we first figure out what circulant block it
# belongs to (input_us//d), and then access the corresponding row
# (x_0,...,x_{d-1}) of self.rand_weight in the order
# (x_i,x_{i-1},...,x_0,x_{d-1},x_{d-2}...x_{i+1}), where i is equal to
# input_us % d.
# After extracting this row, we multiply it entrywise by the row of the
# rand_signs matrix corresponding to this circulant block.
# Note that we index self.rand_weight with (input.shape,1) and
# (input.shape,d) shaped index tensors, so the output has shape
# (input.shape,d). Similarly, we index the first dimension of
# self.rand_signs with a tensor of shape (input.shape), so the output
        # is also of shape (input.shape,d).
return (self.rand_weight[input_us // d, (input_us - self.ind) % d] *
(self.rand_signs[input // d, :] * 2.0 - 1.0))
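# A minimal usage sketch (not part of the original module; the sizes are illustrative):
# instantiate a random circulant embedding for a 10,000-word vocabulary and look up a
# small batch of indices.
if __name__ == "__main__":
    emb = RandomEmbedding(num_embeddings=10_000, embedding_dim=64, avg_embedding_norm=1.0)
    indices = torch.tensor([[0, 1, 2], [9_999, 5, 7]], dtype=torch.long)
    vectors = emb(indices)  # shape: (2, 3, 64)
    print(vectors.shape, vectors.norm(dim=-1).mean().item())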
| random_embedding-master | random_embedding.py |
import unittest
import time
import torch
import numpy as np
from random_embedding import RandomEmbedding
class RandomEmbeddingTest(unittest.TestCase):
def test_forward(self):
for device in ['cuda','cpu']:
if device=='cpu' or (device=='cuda' and torch.cuda.is_available()):
                print('Testing device: {}'.format(device))
t1 = time.perf_counter()
for (n,d) in [(300,3),(301,3),(3,301),(30000,800)]:
avg_norm = 3
emb = RandomEmbedding(n,d,avg_embedding_norm=avg_norm)
if device == 'cuda':
emb.cuda()
x = torch.tensor(range(n), dtype=torch.int64, device=device)
out = emb(x)
self.check_embedding_output(emb,out,n,d,avg_norm)
x2 = torch.tensor([range(n),range(n)], dtype=torch.int64,
device=device)
out = emb(x2)
self.assertTrue(out.shape == (2,n,d))
self.check_embedding_output(emb,out[0,:],n,d,avg_norm)
self.check_embedding_output(emb,out[1,:],n,d,avg_norm)
t2 = time.perf_counter()
print('Device: {}, time elapsed: {:.3f}s'.format(device, t2-t1))
def check_embedding_output(self,emb,out,n,d,avg_norm):
num_blocks = int(np.ceil(n/d))
n_ceil = num_blocks * d
# Test that shapes/dimensions are correct
self.assertTrue(out.shape == (n,d))
self.assertTrue(emb.embedding_dim == d and
emb.num_embeddings == n)
# Ensure that the (1) number of unique elements in the output
# matches what it should be for a circulant matrix (at most n_ceil),
# and (2) that the average row norm of output tensor is equal to
# avg_norm.
self.assertTrue(torch.abs(out).unique().numel() <= n_ceil)
self.assertTrue(
np.isclose(torch.mean(out.norm(dim=1)).item(), avg_norm)
)
# Check that in each of the d x d blocks of the output, that (1) the
# diagonal always contains only a single unique absolute value, and
# (2) that the # of unique absolute values in the block is <= d.
for i in range(num_blocks):
block = out[i*d:(i+1)*d,:]
self.assertTrue(
torch.abs(torch.diag(block)).unique().numel() == 1
)
self.assertTrue(torch.abs(block).unique().numel() <= d)
if __name__ == "__main__":
unittest.main()
| random_embedding-master | random_embedding_test.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from transformers import BertModel, BertTokenizer
import torch
from typing import List, Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss
from transformers.modeling_outputs import SequenceClassifierOutput
from transformers.models.bert.modeling_bert import BertPreTrainedModel
from transformers.models.bert.configuration_bert import BertConfig
class_labels = [
"adoring",
"amused",
"angered",
"approving",
"excited",
"saddened",
"scared",
]
class CAREBERT(BertPreTrainedModel):
def __init__(self, config: BertConfig, model_load_path: str = "./care_bert.pth"):
super().__init__(config)
self.config = config
self.bert = BertModel(config)
if model_load_path is not None:
checkpoint = torch.load(model_load_path)
self.bert.load_state_dict(checkpoint["model_state_dict"])
print(f"Loaded from old {model_load_path}")
classifier_dropout = (
config.classifier_dropout
if config.classifier_dropout is not None
else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
head_mask: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Run predictions for a list of texts, returning a list of the list of affects predicted for each example.
def predict(
examples: List[str], threshold: float = 0.5, model_load_path="./care_bert.pth"
) -> List[List[str]]:
model = CAREBERT.from_pretrained(
"bert-base-uncased",
num_labels=7,
model_load_path=model_load_path,
)
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
encoding = tokenizer(
examples,
padding="max_length",
truncation=True,
max_length=256,
return_tensors="pt",
)
# forward pass
outs = model(**encoding, return_dict=False)
logits = outs[0]
pred_bools = [pl > threshold for pl in logits]
predictions = []
for pred_bool in pred_bools:
affects = [class_labels[i] for i in range(len(pred_bool)) if pred_bool[i]]
predictions.append(affects)
return predictions
if __name__ == "__main__":
examples = ["Warriors against the Miami Heat!!!", "That was so hilarious"]
print(predict(examples))
| care-main | care_bert.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import re
import lexicon_filtering
import nltk
import string
from typing import List, Dict, Tuple
tokenizer = nltk.data.load("tokenizers/punkt/PY3/english.pickle")
lexicon_map, multi_word_phrases = lexicon_filtering.get_hardcoded_lexicon()
all_AA_keys = set(
list(lexicon_map.keys()) + multi_word_phrases
) # The list of all indicators
# List of negation words that are not permitted.
negation_words = [
"weren't",
"wasn't",
"don't",
"aren't",
"can't",
"neither",
"if",
"couldn't",
"not",
"shouldn't",
"wouldn't",
"stop",
"people think",
"you think",
"nobody",
"no one",
]
# List of exaggerators as described in line 246 of the paper.
exaggerator_synonyms = (
"(?:a\s|an\s)*("
+ "|".join(
[
"soo*\s",
"re*a*lly*\s",
"ve*ry*\s",
"extre*mely*\s",
"su*per*\s",
"pre*tty*\s",
"the\smost\s",
"one\sof\sthe\smost\s",
"abso*lu*tely\s",
"such\sa\s",
"alwa*ys*\s",
"ju*st*\s",
"espe*cia*lly*\s",
"friggin.\s",
"fuckin.\s",
"friggin\s",
"fuckin\s",
"by\sfar\sthe\smo*st\s",
"probably\sthe\smo*st\s",
"too*\s",
"a\slittle\s",
"a\s*lo*t\s",
"more\s",
"quite\spossibly\sthe\smo*st\s",
"actually\s",
"ki*nd*\sof\s",
"freakin.\s",
"freakin\s",
"bit\s",
"currently\s",
"recently\s",
"lately\s",
"honestly\s",
"truly\s",
"unbelievably\s",
"insanely\s",
"seriously\s",
]
)
+ ")*(?:a\s|an\s)*"
)
# Additional sub-patterns used in CARE patterns
singular_subjective_pronouns = "(" + "|".join(["he", "she"]) + ")"
plural_subjective_pronouns = "(" + "|".join(["they", "you", "u"]) + ")"
singular_demonstrative_pronouns = "(" + "|".join(["that", "this"]) + ")"
plural_demonstrative_pronouns = "(" + "|".join(["these", "those"]) + ")"
beginning = r"(\.|!|but\s|however\s|oh\sno\s|oh\s|oh\sman\s|oh\ssnap\s|omg\s|wow\s|jesus|holy\scrap\s|for\ssome\sreason\s|,|^)\s*(?:funny\senough\s|holy\sshit\s|damn\s|oh\sshit\s)*"
ending = "\s*([^\s]*)\s*([^\s]*)\s*([^\s]*)"
# ending = "\s*([a-z]*)\s*([a-z]*)\s*([a-z]*)"
# Map of CARE pattern names to their respective regular expressions.
regex_name_to_pattern = {
"individual": beginning
+ "(i)(\s|\sam\s|'m\s|m\s|'ve\s|\shave\s|\shave\snever\s.een\s)"
+ exaggerator_synonyms
+ ending,
"individual_feel": beginning
+ "(i\sfeel\s)(like\s)*"
+ exaggerator_synonyms
+ ending,
"we": beginning + "(we)(\sare|'re|re|have|'ve)\s" + exaggerator_synonyms + ending,
"we_feel": beginning + "(we\sfeel\s)(like\s)" + exaggerator_synonyms + ending,
"heshe": beginning
+ singular_subjective_pronouns
+ "(\sis|'s|s)\s"
+ exaggerator_synonyms
+ ending,
"it": beginning + "(it)" + "(\sis|'s|s)\s" + exaggerator_synonyms + ending,
"theyyou": beginning
+ plural_subjective_pronouns
+ "(\sare|'re|re)\s"
+ exaggerator_synonyms
+ ending,
"this_is": beginning
+ "(this|that)\s(?:story\s|situation\s)*(is\s|was\s|\s)"
+ exaggerator_synonyms
+ ending,
"hisher_story": beginning
+ "(his|her)\s(?:story\s|situation\s)*(is\s|was\s|\s)"
+ exaggerator_synonyms
+ ending,
"noun_is": beginning
+ "(?:the\s)"
+ "([a-z']+)"
+ "\s(is)\s"
+ exaggerator_synonyms
+ ending,
"this_really": beginning
+ singular_demonstrative_pronouns
+ "\s(re*a*lly*)\s"
+ "(is\s|was\s|\s)*"
+ ending,
"this_makes_me": beginning
+ singular_demonstrative_pronouns
+ "\s(makes\sme\sfeel|made\sme|made\sme\sfeel|makes\sme)\s"
+ exaggerator_synonyms
+ ending,
"these_are": beginning
+ plural_demonstrative_pronouns
+ "\s(are|were|)\s"
+ exaggerator_synonyms
+ ending,
"these_really": beginning
+ plural_demonstrative_pronouns
+ "\s(really)"
+ "\s(are\s|were\s|)*"
+ ending,
"these_make_me": beginning
+ plural_demonstrative_pronouns
+ "\s(make\sme|make\sme\sfeel|made\sme|made\sme\sfeel)\s"
+ exaggerator_synonyms
+ ending,
"made_me": beginning
+ "(makes\sme|made\sme)\s(feel\s)*"
+ exaggerator_synonyms
+ ending,
"feeling": beginning + "()()(feeling\s)" + exaggerator_synonyms + ending,
"my_heart": beginning + "(my\sheart\sis)" + exaggerator_synonyms + ending,
"sovery": beginning
+ "()()("
+ "|".join(["soo*\s", "very\s", "extremely\s"])
+ ")+"
+ ending,
"what_a": beginning + "(what\s)(a|an)\s" + exaggerator_synonyms + ending,
"how": beginning + "()()(how\s)" + exaggerator_synonyms + ending,
"some_people": beginning
+ "(some\speople\s|humans\s|society\s)(is\s|are\s|make\sme\s)"
+ exaggerator_synonyms
+ ending,
"freeform": beginning + "()()()" + ending,
}
# Helper function to skip duplicate affects that can occur from matching multiple patterns.
def get_set(
matches: List, affects: List[str], indicators: List[str]
) -> Tuple[List[str], List[str], List[str]]:
output_matches = []
output_indicators = []
seen = set()
for i, affect in enumerate(affects):
if affect in seen:
continue
else:
seen.add(affect)
output_matches.append(matches[i])
output_indicators.append(indicators[i])
return output_matches, list(seen), output_indicators
# Function for getting a list of all matches, all affects, and all indicators from a given piece of text.
def get_regex_match_all(text: str) -> List[str]:
if type(text) == list:
sentences = text
else:
sentences = tokenizer.tokenize(text)
all_matches = []
all_affects = []
all_indicators = []
for sentence in sentences:
matches, affects, indicators = get_regex_match(sentence)
if len(affects) > 0:
matches, affects, indicators = get_set(matches, affects, indicators)
all_affects.extend(affects)
all_matches.extend(matches)
all_indicators.extend(indicators)
return all_affects
# Check that the pattern and keyword combination is not forbidding.
def is_valid_regex_pattern(regex_name: str, affect: str, keyword: str) -> bool:
if regex_name in lexicon_filtering.affect_to_prohibited_patterns[affect]:
return False
if regex_name == "freeform" and len(keyword.split(" ")) == 1:
return False
return True
# Clean the text of punctuation, numbers, and extra spaces, and make lower case.
def clean_text(text: str) -> str:
# remove numbers
text_nonum = re.sub(r"\d+", "", text)
# remove punctuations and convert characters to lower case
text_nopunct = "".join(
[
char.lower()
for char in text_nonum
if char not in string.punctuation or char == "'" or char == ","
]
)
# substitute multiple whitespace with single whitespace
# Also, removes leading and trailing whitespaces
text_no_doublespace = re.sub("\s+", " ", text_nopunct).strip()
return text_no_doublespace
# Apply regular expression matching to a single sentence.
def get_regex_match(sentence: str) -> Tuple[List[str], List[str], List[str]]:
matches = []
affects = []
indicators = []
if "but" in sentence:
sentence = sentence[sentence.index("but") + 4 :]
if "however" in sentence:
sentence = sentence[sentence.index("however") + 8 :]
sentence = clean_text(sentence)
for regex_name, regex_pattern in regex_name_to_pattern.items():
regex = re.compile(regex_pattern)
match = regex.search(sentence.lower())
if match is not None and len(match.groups()) > 0:
# Make sure that the given group is a noun if the regular expression name is 'noun_is'.
if regex_name == "noun_is":
if match.groups()[0] != "":
if nltk.pos_tag([match.groups()[0]])[0][1] != "NN":
if (
match.groups()[1] != ""
and nltk.pos_tag([match.groups()[1]])[0][1] != "NN"
):
continue
elif match.groups()[0] == "":
if (
match.groups()[1] != ""
and nltk.pos_tag([match.groups()[1]])[0][1] != "NN"
):
continue
index = 4 # This is the index of the group defining the start of the indicator phrase
if index > len(match.groups()):
continue
indicator = match.groups()[index : len(match.groups())]
indicator = [
x.rstrip().lstrip() for x in indicator if x != "" and x is not None
]
for negator in negation_words:
if negator in indicator:
joined_indicator = " ".join(indicator)
if (
"can't stop laughing" in joined_indicator
or "cannot stop laughing" in joined_indicator
):
continue
else:
indicator = []
keyword = ""
for i, word in enumerate(indicator):
word = word.replace(",", "").rstrip().lstrip()
if word in all_AA_keys:
if word in multi_word_phrases:
two_words = " ".join(indicator[:-1])
if two_words in lexicon_map:
keyword = two_words
three_words = two_words + " " + indicator[-1]
if three_words in lexicon_map:
keyword = three_words
elif word in lexicon_map:
keyword = word
if keyword != "" and is_valid_regex_pattern(
regex_name, lexicon_map[keyword], keyword
):
matches.append(
" ".join(
[
x.rstrip().lstrip()
for x in match.groups()
if x is not None and x != "" and x != " "
]
)
)
affects.append(lexicon_map[keyword])
indicators.append(regex_name + ": " + keyword)
return matches, affects, indicators
return matches, affects, indicators
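# A minimal usage sketch (not part of the original module; the sentences are
# illustrative): run the CARE patterns over example comments and print the affects
# they indicate.
if __name__ == "__main__":
    for comment in [
        "This is so hilarious, I can't stop laughing.",
        "That made me really sad.",
    ]:
        print(comment, "->", get_regex_match_all(comment))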
| care-main | regex_pipeline.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
from functools import partial
import requests
import pandas as pd
import os
from typing import Dict, List
import multiprocessing
import utils
import argparse
# Metadata parameters to save when downloading the post metadata
parameters_to_save = [
"id",
"num_comments",
"is_original_content",
"parent_id",
"link_id",
"subreddit",
"permalink",
"subreddit_type",
"category",
"url",
"submission-type",
"lang",
"title",
"selftext",
"header_title",
"submit_text",
"metadata",
]
# This function uses pushshift.io to download all metadata the posts in the CARE database. data_file should point to a csv containing the post ids in the CARE database. The parameter params_to_keep enumerates the parameters to save. Increase cpus_to_use if for more multiprocessing.
def download_all_sub_data(
sub_ids: List[str] = None,
data_file: str = None,
cpus_to_use: int = 2,
n: int = 10,
output_file: str = None,
chunked_folder: str = None,
params_to_keep: List[str] = utils.parameters_to_save,
) -> None:
if data_file is None:
data_file = "./care_db_ids_and_labels.csv"
if sub_ids is None:
assert os.path.exists(data_file)
sub_ids_df = pd.read_csv(data_file, sep="\t")
sub_ids = [x for x in list(sub_ids_df["id"]) if isinstance(x, str)]
pool = multiprocessing.Pool(cpus_to_use)
chunked_list = sorted([sub_ids[i : i + n] for i in range(0, len(sub_ids), n)])
func = partial(
download_sub_data_one_chunk,
output_file_path=chunked_folder,
chunked_list=chunked_list,
params_to_keep=params_to_keep,
)
pool.map(func, range(len(chunked_list)))
aggregate_chunks(output_file=output_file)
pool.close()
pool.join()
# Helper function for download_all_sub_data. By defaults it saves to care/data/chunks/post_id_metadata_{index}.json
def download_sub_data_one_chunk(
index: int,
chunked_list: List[List[str]],
attempt: int = 1,
output_file_path: str = None,
params_to_keep: List[str] = None,
) -> bool:
sub_ids = chunked_list[index]
if output_file_path is None:
output_file_path = f"./data/chunks/post_id_metadata_{index}.json"
if os.path.exists(output_file_path):
return True
if not os.path.exists(os.path.dirname(os.path.abspath(output_file_path))):
os.makedirs(os.path.dirname(os.path.abspath(output_file_path)))
if attempt == 5:
return False
try:
response = requests.get(
"https://api.pushshift.io/reddit/submission/search?ids=" + ",".join(sub_ids)
)
data = response.json()["data"]
if params_to_keep is not None:
filtered_data = []
for entry in data:
new_entry = {}
for param in params_to_keep:
if param in entry:
new_entry[param] = entry[param]
filtered_data.append(new_entry)
data = filtered_data
with open(f"{output_file_path}", "w", encoding="utf8") as fh:
fh.write(json.dumps(data) + "\n")
return True
    except Exception:
        # Retry (up to 5 attempts), preserving the parameter filter and propagating the result
        return download_sub_data_one_chunk(
            index, chunked_list, attempt=attempt + 1,
            output_file_path=output_file_path, params_to_keep=params_to_keep,
        )
# Aggregates all the downloads into one file. By default, it saves to care/data/post_id_metadata.json
def aggregate_chunks(
output_file_path: str = None, chunked_output_folder: str = None
) -> None:
if output_file_path is None:
output_file_path = f"./data/post_id_metadata.json"
if chunked_output_folder is None:
chunked_output_folder = f"./data/chunks/"
all_data = []
for file in os.listdir(chunked_output_folder):
with open(os.path.join(chunked_output_folder, file), "r") as fin:
data = json.load(fin)
all_data.extend(data)
with open(f"{output_file_path}", "w", encoding="utf8") as fh:
for example in all_data:
fh.write(json.dumps(example) + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--cpus",
type=int,
required=False,
default=2,
help=f"Number of cpus to use for multiprocessing.",
)
parser.add_argument(
"--n",
type=int,
required=False,
default=10,
help=f"Number of post ids for each job.",
)
parser.add_argument(
"--data_file", type=str, default=None, help="Path the to csv with post ids."
)
parser.add_argument(
"--output_file", type=str, default=None, help="Write the metadata to this file."
)
parser.add_argument(
"--chunk_dir",
type=str,
default=None,
help="Write the batch metadata to this directory. This can be deleted after aggregation.",
)
args = parser.parse_args()
download_all_sub_data(
data_file=args.data_file,
cpus_to_use=args.cpus,
n=args.n,
output_file=args.output_file,
chunked_folder=args.chunk_dir,
)
| care-main | download_posts.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import regex_pipeline
from typing import Dict, List
from collections import Counter
import pandas as pd
import utils
# Labels a post with an affect if at least t of its comments are labeled with that affect.
def label_posts(
post_id_to_comment_texts: Dict[str, List[str]], t: int = 5
) -> pd.DataFrame:
outputs = []
for post_id, comment_texts in post_id_to_comment_texts.items():
affects = []
for comment_text in comment_texts:
comment_affects = regex_pipeline.get_regex_match_all(comment_text)
affects.extend(comment_affects)
affect_map = dict(Counter(affects))
filtered_affect_map = {}
for k, v in utils.cluster_and_filter(affect_map).items():
if v >= t:
filtered_affect_map[k] = v
if len(filtered_affect_map) > 0:
outputs.append([post_id, filtered_affect_map])
return pd.DataFrame(outputs, columns=["post_id", "affect_map"])
if __name__ == "__main__":
example_dict = {
"1": ["This is so funny!!", "Cannot stop laughing at this.", "So hilarious"]
}
print(label_posts(example_dict, t=3))
| care-main | care_predict.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict
# Clustering into seven affective responses.
CLUSTER_MAP = {
"disgusted": "angered",
"saddened": "saddened",
"amused": "amused",
"angered": "angered",
"disappointed": "saddened",
"interested": "amused",
"impressed": "approving",
"excited": "excited",
"inspired": "approving",
"annoyed": "angered",
"admiring": "approving",
"scared": "scared",
"worried": "scared",
"anxious": "scared",
"adoring": "adoring",
"approving": "approving",
"attracted": "adoring",
"entertained": "amused",
}
CORE_AFFECTS = [
"adoring",
"angered",
"amused",
"approving",
"excited",
"saddened",
"scared",
]
# This function is for clustering according to the hierarchy defined in CLUSTER_MAP and/or filtering for the affects defined in CORE_AFFECTS.
def cluster_and_filter(
affect_map: Dict[str, int], cluster: bool = True, filter: bool = True
) -> Dict[str, int]:
new_affect_map = {}
for orig_k, orig_v in affect_map.items():
if not cluster or orig_k not in CLUSTER_MAP:
k = orig_k
else:
k = CLUSTER_MAP[orig_k]
if filter and k not in CORE_AFFECTS:
continue
if k not in new_affect_map:
new_affect_map[k] = 0
new_affect_map[k] += orig_v
return new_affect_map
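# Hedged usage sketch (not part of the original module): illustrates how
# cluster_and_filter collapses fine-grained affects into the core classes.
# The example counts are made up for illustration.
if __name__ == "__main__":
    example_affect_map = {"annoyed": 2, "disgusted": 1, "amused": 4, "awed": 3}
    # "annoyed" and "disgusted" cluster into "angered"; "awed" is dropped because
    # it is not one of the CORE_AFFECTS.
    print(cluster_and_filter(example_affect_map))  # {'angered': 3, 'amused': 4}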
| care-main | utils.py |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Tuple, Dict
# Map of keyword in the CARE lexicon to pattern combinations that are prohibited.
affect_to_prohibited_patterns = {
"disgusted": [],
"saddened": ["heshe", "theyyou"],
"amused": ["theyyou", "it"],
"angered": [],
"disappointed": ["heshe", "theyyou"],
"entertained": ["individual", "individual_feel", "we", "we_feel"],
"interested": ["hesheit", "theyyou"],
"impressed": [],
"excited": ["heshe", "theyyou", "some_people"],
"inspired": [],
"annoyed": [],
"admiring": [
"individual_feel",
"we_feel",
"heshe",
"it",
"theyyou",
"this_is",
"hisher_story",
"noun_is",
"this_really",
"these_are",
"these_really",
"feeling",
"what_a",
"some_people",
],
"scared": ["theyyou", "heshe"],
"worried": [],
"anxious": [],
"adoring": [
"individual",
"individual_feel",
"we",
"we_feel",
"this_makes_me",
"these_make_me",
"made_me",
"feeling",
],
"approving": [
"individual_feel",
"we",
"we_feel",
"this_makes_me",
"these_make_me",
"made_me",
"feeling",
],
"awed": ["heshe", "theyyou", "hisher_story", "some_people"],
"attracted": [
"individual",
"individual_feel",
"it",
"we",
"we_feel",
"this_is",
"hisher_story",
"noun_is",
"this_really",
"this_makes_me",
"these_are",
"these_really",
"these_make_me",
"made_me",
"feeling",
"sovery",
"how",
"some_people",
],
}
# Map of each class to keywords. This is the inverse mapping of the CARE lexicon, as defined in the paper.
affect_to_words = {
"disgusted": [
"gross",
"grosses me out",
"disgusting",
"disgusted",
"disgusts me",
"nasty",
"disgust",
"repulsive",
"repulses me",
],
"saddened": [
"depressing",
"that really sucks",
"saddening",
"saddens me",
"sad",
"sorry for your",
"sorry for them",
"sorry to hear",
"heartbreaking",
"heartbroken",
"tragic",
"painful to watch",
"painful to see",
"hard to see",
"hard to watch",
"unfortunate",
"depressed",
"depresses me",
],
"amused": [
"hilarious",
"funny",
"cracks me up",
"laugh",
"never laughed so",
"can't stop laughing",
"cannot stop laughing",
"the funniest thing",
],
"angered": [
"why i hate",
"fake",
"mislead",
"infuriated",
"infuriating",
"infuriates me",
"infuriate",
"fed up",
"furious",
"frustrate me",
"frustrates me",
"frustrated",
"frustrating",
"mad",
"angry",
"angers me",
"pissed me off",
"pisses me off",
"fuck the",
"fuck this",
"fuck them",
],
"disappointed": [
"disappointing",
"disappointed",
"let down",
"a bummer",
"letting down",
],
"entertained": ["entertaining"],
"interested": [
"intriguing",
"intrigues me",
"interesting",
"curious to see",
"talented",
"curious to know",
"intrigued",
],
"impressed": [
"brilliant",
"impressed",
"impressive",
"proud of you",
"impressive",
"impresses me",
],
"excited": [
"happy",
"ecstatic",
"excited",
"stoked",
"exciting",
"jazzed",
"excites me",
"excite",
"looking forward to",
],
"inspired": [
"forward to trying",
"inspired",
"inspiring",
"inspiration",
"inspires me",
"uplift",
"uplifts me",
"inspire",
"creative",
"motivated",
"encouraged",
"motivates me",
"encourages me",
"motivation",
"encouragement",
],
"annoyed": [
"sick of",
"annoy",
"annoys me",
"annoying",
"annoyed",
"annoyance",
"irritates me",
"irritating",
"agitates me",
"agitated",
"agitation",
"tired of this",
"getting ridiculous",
"tired of seeing",
"tired of hearing",
],
"admiring": ["admire you", "of admiration for", "admirable"],
"scared": [
"scare me",
"scared",
"scares me",
"freaks me out",
"freak me out",
"freaky",
"creepy",
],
"worried": ["worried", "worries me", "concerning", "concerns me"],
"anxious": ["anxious", "gives me anxiety", "nervous"],
"adoring": [
"adorable",
"the cutest",
"cute",
"adorbs",
"sweet",
"cutest thing",
],
"approving": [
"love this",
"love that",
"dope",
"fabulous",
"high five",
"excellent",
"amazing",
"damn good",
"fantastic",
"epic",
"wonderful",
"awesome",
"the best",
"the greatest",
],
"awed": [
"magnificent",
"awe inspiring",
"awe-inspiring",
"spectacular",
"breathtaking",
"majestic",
"incredible",
"in awe",
"awe-inspired",
],
"attracted": ["beautiful", "gorgeous", "handsome"],
}
# Creates the word-to-affect lexicon and collects the first tokens of its multi-word indicators.
def get_hardcoded_lexicon() -> Tuple[Dict[str, str], List[str]]:
words_to_affect = {x: k for k, v in affect_to_words.items() for x in v}
multi_word_phrases = [k.split(" ")[0] for k in words_to_affect.keys() if " " in k]
return words_to_affect, multi_word_phrases
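# Hedged usage sketch (not part of the original module): shows the shape of the
# inverted lexicon and of the multi-word indicator list returned above.
if __name__ == "__main__":
    words_to_affect, multi_word_phrases = get_hardcoded_lexicon()
    print(words_to_affect["hilarious"])      # -> "amused"
    print(words_to_affect["sorry to hear"])  # -> "saddened"
    print("sorry" in multi_word_phrases)     # True: first token of a multi-word key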
| care-main | lexicon_filtering.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
import torch_ac
# Function from https://github.com/ikostrikov/pytorch-a2c-ppo-acktr/blob/master/model.py
def init_params(m):
classname = m.__class__.__name__
if classname.find("Linear") != -1:
m.weight.data.normal_(0, 1)
m.weight.data *= 1 / torch.sqrt(m.weight.data.pow(2).sum(1, keepdim=True))
if m.bias is not None:
m.bias.data.fill_(0)
class ACModel(nn.Module, torch_ac.RecurrentACModel):
def __init__(self, obs_space, action_space, use_memory=False, use_text=False):
super().__init__()
# Decide which components are enabled
self.use_text = use_text
self.use_memory = use_memory
# Define image embedding
self.image_conv = nn.Sequential(
nn.Conv2d(3, 16, (2, 2)),
nn.ReLU(),
nn.MaxPool2d((2, 2)),
nn.Conv2d(16, 32, (2, 2)),
nn.ReLU(),
nn.Conv2d(32, 64, (2, 2)),
nn.ReLU()
)
n = obs_space["image"][0]
m = obs_space["image"][1]
self.image_embedding_size = ((n-1)//2-2)*((m-1)//2-2)*64
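        # e.g. for the default 7x7 MiniGrid view: ((7-1)//2 - 2) * ((7-1)//2 - 2) * 64 = 64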
# Define memory
if self.use_memory:
self.memory_rnn = nn.LSTMCell(self.image_embedding_size, self.semi_memory_size)
# Define text embedding
if self.use_text:
self.word_embedding_size = 32
self.word_embedding = nn.Embedding(obs_space["text"], self.word_embedding_size)
self.text_embedding_size = 128
self.text_rnn = nn.GRU(self.word_embedding_size, self.text_embedding_size, batch_first=True)
# Resize image embedding
self.embedding_size = self.semi_memory_size
if self.use_text:
self.embedding_size += self.text_embedding_size
# Define actor's model
self.actor = nn.Sequential(
nn.Linear(self.embedding_size, 64),
nn.Tanh(),
nn.Linear(64, action_space.n)
)
# Define critic's model
self.critic = nn.Sequential(
nn.Linear(self.embedding_size, 64),
nn.Tanh(),
nn.Linear(64, 1)
)
# Initialize parameters correctly
self.apply(init_params)
@property
def memory_size(self):
return 2*self.semi_memory_size
@property
def semi_memory_size(self):
return self.image_embedding_size
def forward(self, obs, memory):
x = obs.image.transpose(1, 3).transpose(2, 3)
x = self.image_conv(x)
x = x.reshape(x.shape[0], -1)
if self.use_memory:
hidden = (memory[:, :self.semi_memory_size], memory[:, self.semi_memory_size:])
hidden = self.memory_rnn(x, hidden)
embedding = hidden[0]
memory = torch.cat(hidden, dim=1)
else:
embedding = x
if self.use_text:
embed_text = self._get_embed_text(obs.text)
embedding = torch.cat((embedding, embed_text), dim=1)
x = self.actor(embedding)
dist = Categorical(logits=F.log_softmax(x, dim=1))
x = self.critic(embedding)
value = x.squeeze(1)
return dist, value, memory
def _get_embed_text(self, text):
_, hidden = self.text_rnn(self.word_embedding(text))
return hidden[-1]
| rl-starter-files-master | model.py |
import gym
import gym_minigrid
def make_env(env_key, seed=None):
env = gym.make(env_key)
env.seed(seed)
return env
| rl-starter-files-master | utils/env.py |
from .agent import *
from .env import *
from .format import *
from .other import *
from .storage import *
| rl-starter-files-master | utils/__init__.py |
import random
import numpy
import torch
import collections
def seed(seed):
random.seed(seed)
numpy.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
def synthesize(array):
d = collections.OrderedDict()
d["mean"] = numpy.mean(array)
d["std"] = numpy.std(array)
d["min"] = numpy.amin(array)
d["max"] = numpy.amax(array)
return d
| rl-starter-files-master | utils/other.py |
import os
import json
import numpy
import re
import torch
import torch_ac
import gym
import utils
def get_obss_preprocessor(obs_space):
# Check if obs_space is an image space
if isinstance(obs_space, gym.spaces.Box):
obs_space = {"image": obs_space.shape}
def preprocess_obss(obss, device=None):
return torch_ac.DictList({
"image": preprocess_images(obss, device=device)
})
# Check if it is a MiniGrid observation space
elif isinstance(obs_space, gym.spaces.Dict) and list(obs_space.spaces.keys()) == ["image"]:
obs_space = {"image": obs_space.spaces["image"].shape, "text": 100}
vocab = Vocabulary(obs_space["text"])
def preprocess_obss(obss, device=None):
return torch_ac.DictList({
"image": preprocess_images([obs["image"] for obs in obss], device=device),
"text": preprocess_texts([obs["mission"] for obs in obss], vocab, device=device)
})
preprocess_obss.vocab = vocab
else:
raise ValueError("Unknown observation space: " + str(obs_space))
return obs_space, preprocess_obss
def preprocess_images(images, device=None):
# Bug of Pytorch: very slow if not first converted to numpy array
images = numpy.array(images)
return torch.tensor(images, device=device, dtype=torch.float)
def preprocess_texts(texts, vocab, device=None):
var_indexed_texts = []
max_text_len = 0
for text in texts:
tokens = re.findall("([a-z]+)", text.lower())
var_indexed_text = numpy.array([vocab[token] for token in tokens])
var_indexed_texts.append(var_indexed_text)
max_text_len = max(len(var_indexed_text), max_text_len)
indexed_texts = numpy.zeros((len(texts), max_text_len))
for i, indexed_text in enumerate(var_indexed_texts):
indexed_texts[i, :len(indexed_text)] = indexed_text
return torch.tensor(indexed_texts, device=device, dtype=torch.long)
class Vocabulary:
"""A mapping from tokens to ids with a capacity of `max_size` words.
It can be saved in a `vocab.json` file."""
def __init__(self, max_size):
self.max_size = max_size
self.vocab = {}
def load_vocab(self, vocab):
self.vocab = vocab
def __getitem__(self, token):
        if token not in self.vocab:
if len(self.vocab) >= self.max_size:
raise ValueError("Maximum vocabulary capacity reached")
self.vocab[token] = len(self.vocab) + 1
return self.vocab[token]
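# Hedged usage sketch (not part of the original module): token ids start at 1,
# so 0 is effectively reserved for the padding used in preprocess_texts.
if __name__ == "__main__":
    vocab = Vocabulary(max_size=100)
    print(vocab["go"], vocab["to"], vocab["go"])              # 1 2 1
    print(preprocess_texts(["go to the door", "go"], vocab))  # second row is zero-padded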
| rl-starter-files-master | utils/format.py |
import csv
import os
import torch
import logging
import sys
import utils
def create_folders_if_necessary(path):
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
def get_storage_dir():
if "RL_STORAGE" in os.environ:
return os.environ["RL_STORAGE"]
return "storage"
def get_model_dir(model_name):
return os.path.join(get_storage_dir(), model_name)
def get_status_path(model_dir):
return os.path.join(model_dir, "status.pt")
def get_status(model_dir):
path = get_status_path(model_dir)
return torch.load(path)
def save_status(status, model_dir):
path = get_status_path(model_dir)
utils.create_folders_if_necessary(path)
torch.save(status, path)
def get_vocab(model_dir):
return get_status(model_dir)["vocab"]
def get_model_state(model_dir):
return get_status(model_dir)["model_state"]
def get_txt_logger(model_dir):
path = os.path.join(model_dir, "log.txt")
utils.create_folders_if_necessary(path)
logging.basicConfig(
level=logging.INFO,
format="%(message)s",
handlers=[
logging.FileHandler(filename=path),
logging.StreamHandler(sys.stdout)
]
)
return logging.getLogger()
def get_csv_logger(model_dir):
csv_path = os.path.join(model_dir, "log.csv")
utils.create_folders_if_necessary(csv_path)
csv_file = open(csv_path, "a")
return csv_file, csv.writer(csv_file)
| rl-starter-files-master | utils/storage.py |
import torch
import utils
from model import ACModel
class Agent:
"""An agent.
It is able:
- to choose an action given an observation,
- to analyze the feedback (i.e. reward and done state) of its action."""
def __init__(self, obs_space, action_space, model_dir, device=None, argmax=False, num_envs=1):
obs_space, self.preprocess_obss = utils.get_obss_preprocessor(obs_space)
self.acmodel = ACModel(obs_space, action_space)
self.device = device
self.argmax = argmax
self.num_envs = num_envs
if self.acmodel.recurrent:
            self.memories = torch.zeros(self.num_envs, self.acmodel.memory_size, device=self.device)
self.acmodel.load_state_dict(utils.get_model_state(model_dir))
self.acmodel.to(self.device)
self.acmodel.eval()
if hasattr(self.preprocess_obss, "vocab"):
self.preprocess_obss.vocab.load_vocab(utils.get_vocab(model_dir))
def get_actions(self, obss):
preprocessed_obss = self.preprocess_obss(obss, device=self.device)
with torch.no_grad():
if self.acmodel.recurrent:
dist, _, self.memories = self.acmodel(preprocessed_obss, self.memories)
else:
dist, _ = self.acmodel(preprocessed_obss)
if self.argmax:
actions = dist.probs.max(1, keepdim=True)[1]
else:
actions = dist.sample()
return actions.cpu().numpy()
def get_action(self, obs):
return self.get_actions([obs])[0]
def analyze_feedbacks(self, rewards, dones):
if self.acmodel.recurrent:
            masks = 1 - torch.tensor(dones, dtype=torch.float, device=self.device).unsqueeze(1)
self.memories *= masks
def analyze_feedback(self, reward, done):
return self.analyze_feedbacks([reward], [done])
| rl-starter-files-master | utils/agent.py |
import argparse
import time
import numpy
import torch
import utils
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--env", required=True,
help="name of the environment to be run (REQUIRED)")
parser.add_argument("--model", required=True,
help="name of the trained model (REQUIRED)")
parser.add_argument("--seed", type=int, default=0,
help="random seed (default: 0)")
parser.add_argument("--shift", type=int, default=0,
help="number of times the environment is reset at the beginning (default: 0)")
parser.add_argument("--argmax", action="store_true", default=False,
help="select the action with highest probability (default: False)")
parser.add_argument("--pause", type=float, default=0.1,
help="pause duration between two consequent actions of the agent (default: 0.1)")
parser.add_argument("--gif", type=str, default=None,
help="store output as gif with the given filename")
parser.add_argument("--episodes", type=int, default=1000000,
help="number of episodes to visualize")
args = parser.parse_args()
# Set seed for all randomness sources
utils.seed(args.seed)
# Set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Device: {device}\n")
# Load environment
env = utils.make_env(args.env, args.seed)
for _ in range(args.shift):
env.reset()
print("Environment loaded\n")
# Load agent
model_dir = utils.get_model_dir(args.model)
agent = utils.Agent(env.observation_space, env.action_space, model_dir, device, args.argmax)
print("Agent loaded\n")
# Run the agent
if args.gif:
from array2gif import write_gif
frames = []
# Create a window to view the environment
env.render('human')
for episode in range(args.episodes):
obs = env.reset()
while True:
env.render('human')
if args.gif:
frames.append(numpy.moveaxis(env.render("rgb_array"), 2, 0))
action = agent.get_action(obs)
obs, reward, done, _ = env.step(action)
agent.analyze_feedback(reward, done)
if done or env.window.closed:
break
if env.window.closed:
break
if args.gif:
print("Saving gif... ", end="")
write_gif(numpy.array(frames), args.gif+".gif", fps=1/args.pause)
print("Done.")
| rl-starter-files-master | scripts/visualize.py |
import argparse
import time
import datetime
import torch
import torch_ac
import tensorboardX
import sys
import utils
from model import ACModel
# Parse arguments
parser = argparse.ArgumentParser()
## General parameters
parser.add_argument("--algo", required=True,
help="algorithm to use: a2c | ppo (REQUIRED)")
parser.add_argument("--env", required=True,
help="name of the environment to train on (REQUIRED)")
parser.add_argument("--model", default=None,
help="name of the model (default: {ENV}_{ALGO}_{TIME})")
parser.add_argument("--seed", type=int, default=1,
help="random seed (default: 1)")
parser.add_argument("--log-interval", type=int, default=1,
help="number of updates between two logs (default: 1)")
parser.add_argument("--save-interval", type=int, default=10,
help="number of updates between two saves (default: 10, 0 means no saving)")
parser.add_argument("--procs", type=int, default=16,
help="number of processes (default: 16)")
parser.add_argument("--frames", type=int, default=10**7,
help="number of frames of training (default: 1e7)")
## Parameters for main algorithm
parser.add_argument("--epochs", type=int, default=4,
help="number of epochs for PPO (default: 4)")
parser.add_argument("--batch-size", type=int, default=256,
help="batch size for PPO (default: 256)")
parser.add_argument("--frames-per-proc", type=int, default=None,
help="number of frames per process before update (default: 5 for A2C and 128 for PPO)")
parser.add_argument("--discount", type=float, default=0.99,
help="discount factor (default: 0.99)")
parser.add_argument("--lr", type=float, default=0.001,
help="learning rate (default: 0.001)")
parser.add_argument("--gae-lambda", type=float, default=0.95,
help="lambda coefficient in GAE formula (default: 0.95, 1 means no gae)")
parser.add_argument("--entropy-coef", type=float, default=0.01,
help="entropy term coefficient (default: 0.01)")
parser.add_argument("--value-loss-coef", type=float, default=0.5,
help="value loss term coefficient (default: 0.5)")
parser.add_argument("--max-grad-norm", type=float, default=0.5,
help="maximum norm of gradient (default: 0.5)")
parser.add_argument("--optim-eps", type=float, default=1e-8,
help="Adam and RMSprop optimizer epsilon (default: 1e-8)")
parser.add_argument("--optim-alpha", type=float, default=0.99,
help="RMSprop optimizer alpha (default: 0.99)")
parser.add_argument("--clip-eps", type=float, default=0.2,
help="clipping epsilon for PPO (default: 0.2)")
parser.add_argument("--recurrence", type=int, default=1,
help="number of time-steps gradient is backpropagated (default: 1). If > 1, a LSTM is added to the model to have memory.")
parser.add_argument("--text", action="store_true", default=False,
help="add a GRU to the model to handle text input")
args = parser.parse_args()
args.mem = args.recurrence > 1
# Set run dir
date = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")
default_model_name = f"{args.env}_{args.algo}_seed{args.seed}_{date}"
model_name = args.model or default_model_name
model_dir = utils.get_model_dir(model_name)
# Load loggers and Tensorboard writer
txt_logger = utils.get_txt_logger(model_dir)
csv_file, csv_logger = utils.get_csv_logger(model_dir)
tb_writer = tensorboardX.SummaryWriter(model_dir)
# Log command and all script arguments
txt_logger.info("{}\n".format(" ".join(sys.argv)))
txt_logger.info("{}\n".format(args))
# Set seed for all randomness sources
utils.seed(args.seed)
# Set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
txt_logger.info(f"Device: {device}\n")
# Load environments
envs = []
for i in range(args.procs):
envs.append(utils.make_env(args.env, args.seed + 10000 * i))
txt_logger.info("Environments loaded\n")
# Load training status
try:
status = utils.get_status(model_dir)
except OSError:
status = {"num_frames": 0, "update": 0}
txt_logger.info("Training status loaded\n")
# Load observations preprocessor
obs_space, preprocess_obss = utils.get_obss_preprocessor(envs[0].observation_space)
if "vocab" in status:
preprocess_obss.vocab.load_vocab(status["vocab"])
txt_logger.info("Observations preprocessor loaded")
# Load model
acmodel = ACModel(obs_space, envs[0].action_space, args.mem, args.text)
if "model_state" in status:
acmodel.load_state_dict(status["model_state"])
acmodel.to(device)
txt_logger.info("Model loaded\n")
txt_logger.info("{}\n".format(acmodel))
# Load algo
if args.algo == "a2c":
algo = torch_ac.A2CAlgo(envs, acmodel, device, args.frames_per_proc, args.discount, args.lr, args.gae_lambda,
args.entropy_coef, args.value_loss_coef, args.max_grad_norm, args.recurrence,
args.optim_alpha, args.optim_eps, preprocess_obss)
elif args.algo == "ppo":
algo = torch_ac.PPOAlgo(envs, acmodel, device, args.frames_per_proc, args.discount, args.lr, args.gae_lambda,
args.entropy_coef, args.value_loss_coef, args.max_grad_norm, args.recurrence,
args.optim_eps, args.clip_eps, args.epochs, args.batch_size, preprocess_obss)
else:
raise ValueError("Incorrect algorithm name: {}".format(args.algo))
if "optimizer_state" in status:
algo.optimizer.load_state_dict(status["optimizer_state"])
txt_logger.info("Optimizer loaded\n")
# Train model
num_frames = status["num_frames"]
update = status["update"]
start_time = time.time()
while num_frames < args.frames:
# Update model parameters
update_start_time = time.time()
exps, logs1 = algo.collect_experiences()
logs2 = algo.update_parameters(exps)
logs = {**logs1, **logs2}
update_end_time = time.time()
num_frames += logs["num_frames"]
update += 1
# Print logs
if update % args.log_interval == 0:
fps = logs["num_frames"]/(update_end_time - update_start_time)
duration = int(time.time() - start_time)
return_per_episode = utils.synthesize(logs["return_per_episode"])
rreturn_per_episode = utils.synthesize(logs["reshaped_return_per_episode"])
num_frames_per_episode = utils.synthesize(logs["num_frames_per_episode"])
header = ["update", "frames", "FPS", "duration"]
data = [update, num_frames, fps, duration]
header += ["rreturn_" + key for key in rreturn_per_episode.keys()]
data += rreturn_per_episode.values()
header += ["num_frames_" + key for key in num_frames_per_episode.keys()]
data += num_frames_per_episode.values()
header += ["entropy", "value", "policy_loss", "value_loss", "grad_norm"]
data += [logs["entropy"], logs["value"], logs["policy_loss"], logs["value_loss"], logs["grad_norm"]]
txt_logger.info(
"U {} | F {:06} | FPS {:04.0f} | D {} | rR:μσmM {:.2f} {:.2f} {:.2f} {:.2f} | F:μσmM {:.1f} {:.1f} {} {} | H {:.3f} | V {:.3f} | pL {:.3f} | vL {:.3f} | ∇ {:.3f}"
.format(*data))
header += ["return_" + key for key in return_per_episode.keys()]
data += return_per_episode.values()
if status["num_frames"] == 0:
csv_logger.writerow(header)
csv_logger.writerow(data)
csv_file.flush()
for field, value in zip(header, data):
tb_writer.add_scalar(field, value, num_frames)
# Save status
if args.save_interval > 0 and update % args.save_interval == 0:
status = {"num_frames": num_frames, "update": update,
"model_state": acmodel.state_dict(), "optimizer_state": algo.optimizer.state_dict()}
if hasattr(preprocess_obss, "vocab"):
status["vocab"] = preprocess_obss.vocab.vocab
utils.save_status(status, model_dir)
txt_logger.info("Status saved")
| rl-starter-files-master | scripts/train.py |
import argparse
import time
import torch
from torch_ac.utils.penv import ParallelEnv
import utils
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--env", required=True,
help="name of the environment (REQUIRED)")
parser.add_argument("--model", required=True,
help="name of the trained model (REQUIRED)")
parser.add_argument("--episodes", type=int, default=100,
help="number of episodes of evaluation (default: 100)")
parser.add_argument("--seed", type=int, default=0,
help="random seed (default: 0)")
parser.add_argument("--procs", type=int, default=16,
help="number of processes (default: 16)")
parser.add_argument("--argmax", action="store_true", default=False,
help="action with highest probability is selected")
parser.add_argument("--worst-episodes-to-show", type=int, default=10,
help="how many worst episodes to show")
args = parser.parse_args()
# Set seed for all randomness sources
utils.seed(args.seed)
# Set device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"Device: {device}\n")
# Load environments
envs = []
for i in range(args.procs):
env = utils.make_env(args.env, args.seed + 10000 * i)
envs.append(env)
env = ParallelEnv(envs)
print("Environments loaded\n")
# Load agent
model_dir = utils.get_model_dir(args.model)
agent = utils.Agent(env.observation_space, env.action_space, model_dir, device, args.argmax, args.procs)
print("Agent loaded\n")
# Initialize logs
logs = {"num_frames_per_episode": [], "return_per_episode": []}
# Run agent
start_time = time.time()
obss = env.reset()
log_done_counter = 0
log_episode_return = torch.zeros(args.procs, device=device)
log_episode_num_frames = torch.zeros(args.procs, device=device)
while log_done_counter < args.episodes:
actions = agent.get_actions(obss)
obss, rewards, dones, _ = env.step(actions)
agent.analyze_feedbacks(rewards, dones)
log_episode_return += torch.tensor(rewards, device=device, dtype=torch.float)
log_episode_num_frames += torch.ones(args.procs, device=device)
for i, done in enumerate(dones):
if done:
log_done_counter += 1
logs["return_per_episode"].append(log_episode_return[i].item())
logs["num_frames_per_episode"].append(log_episode_num_frames[i].item())
mask = 1 - torch.tensor(dones, device=device, dtype=torch.float)
log_episode_return *= mask
log_episode_num_frames *= mask
end_time = time.time()
# Print logs
num_frames = sum(logs["num_frames_per_episode"])
fps = num_frames/(end_time - start_time)
duration = int(end_time - start_time)
return_per_episode = utils.synthesize(logs["return_per_episode"])
num_frames_per_episode = utils.synthesize(logs["num_frames_per_episode"])
print("F {} | FPS {:.0f} | D {} | R:μσmM {:.2f} {:.2f} {:.2f} {:.2f} | F:μσmM {:.1f} {:.1f} {} {}"
.format(num_frames, fps, duration,
*return_per_episode.values(),
*num_frames_per_episode.values()))
# Print worst episodes
n = args.worst_episodes_to_show
if n > 0:
print("\n{} worst episodes:".format(n))
indexes = sorted(range(len(logs["return_per_episode"])), key=lambda k: logs["return_per_episode"][k])
for i in indexes[:n]:
print("- episode {}: R={}, F={}".format(i, logs["return_per_episode"][i], logs["num_frames_per_episode"][i]))
| rl-starter-files-master | scripts/evaluate.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from setuptools import setup, find_packages
with open('README.md', 'r') as f:
long_description = f.read()
with open('requirements.txt', 'r') as f:
requirements = [line.strip() for line in f]
setup(
name='access',
version='0.2',
description='Controllable Sentence Simplification',
long_description=long_description,
long_description_content_type='text/markdown',
author='Louis Martin <[email protected]>',
url='https://github.com/facebookreasearch/access',
packages=find_packages(exclude=['resources']),
install_requires=requirements,
)
| access-main | setup.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from functools import wraps
import multiprocessing
import random
import re
from joblib import Parallel, delayed
import torch
from access.text import to_words
from access.utils.helpers import (open_files, yield_lines, yield_lines_in_parallel, get_temp_filepath, delete_files,
get_temp_filepaths)
def apply_line_method_to_file(line_method, input_filepath):
output_filepath = get_temp_filepath()
with open(input_filepath, 'r') as input_file, open(output_filepath, 'w') as output_file:
for line in input_file:
transformed_line = line_method(line.rstrip('\n'))
if transformed_line is not None:
output_file.write(transformed_line + '\n')
return output_filepath
def replace_lrb_rrb(text):
text = re.sub(r'-lrb-', '(', text, flags=re.IGNORECASE)
text = re.sub(r'-rrb-', ')', text, flags=re.IGNORECASE)
text = re.sub(r'-lsb-', '[', text, flags=re.IGNORECASE)
text = re.sub(r'-rsb-', ']', text, flags=re.IGNORECASE)
text = re.sub(r'-lcb-', '{', text, flags=re.IGNORECASE)
text = re.sub(r'-rcb-', '}', text, flags=re.IGNORECASE)
return text
def replace_lrb_rrb_file(filepath):
return apply_line_method_to_file(replace_lrb_rrb, filepath)
def to_lrb_rrb(text):
# TODO: Very basic
text = re.sub(r'((^| ))\( ', r'\1-lrb- ', text)
    text = re.sub(r' \)(( |$))', r' -rrb-\1', text)
return text
def replace_back_quotes(text):
return text.replace('`', "'")
def replace_double_quotes(text):
return text.replace("''", '"')
def normalize_quotes(text):
return replace_double_quotes(replace_back_quotes(text))
def to_lrb_rrb_file(input_filepath):
return apply_line_method_to_file(to_lrb_rrb, input_filepath)
def lowercase_file(filepath):
return apply_line_method_to_file(lambda line: line.lower(), filepath)
def concatenate_files(input_filepaths, output_filepath):
with open(output_filepath, 'w') as output_f:
for input_file in input_filepaths:
with open(input_file, 'r') as input_f:
for line in input_f:
output_f.write(line)
def split_file(input_filepath, output_filepaths, round_robin=False):
if not round_robin:
raise NotImplementedError('Splitting files is only implemented as round robin.')
with open_files(output_filepaths, 'w') as files:
# We write each line to a different file in a round robin fashion
for i, line in enumerate(yield_lines(input_filepath)):
files[i % len(output_filepaths)].write(line + '\n')
def merge_files(input_filepaths, output_filepath, round_robin=False):
if not round_robin:
return concatenate_files(input_filepaths, output_filepath)
with open(output_filepath, 'w') as f:
for lines in yield_lines_in_parallel(input_filepaths, strict=False):
for line in lines:
if line is None:
return
f.write(line + '\n')
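# Hedged usage sketch (illustration only; 'corpus.txt' is a hypothetical file):
# splitting with round_robin=True and merging the parts back with round_robin=True
# preserves the original line order, which get_parallel_file_pair_preprocessor
# below relies on.
# >>> parts = get_temp_filepaths(4)
# >>> split_file('corpus.txt', parts, round_robin=True)
# >>> merge_files(parts, 'corpus_roundtrip.txt', round_robin=True)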
def get_real_n_jobs(n_jobs):
n_cpus = multiprocessing.cpu_count()
if n_jobs < 0:
# Adopt same logic as joblib
n_jobs = n_cpus + 1 + n_jobs
if n_jobs > n_cpus:
        print(f'Requested n_jobs={n_jobs} > n_cpus={n_cpus}, setting n_jobs={n_cpus}')
n_jobs = n_cpus
assert 0 < n_jobs <= n_cpus
return n_jobs
def get_parallel_file_pair_preprocessor(file_pair_preprocessor, n_jobs):
if n_jobs == 1:
return file_pair_preprocessor
n_jobs = get_real_n_jobs(n_jobs)
@wraps(file_pair_preprocessor)
def parallel_file_pair_preprocessor(complex_filepath, simple_filepath, output_complex_filepath,
output_simple_filepath):
temp_complex_filepaths = get_temp_filepaths(n_jobs)
temp_simple_filepaths = get_temp_filepaths(n_jobs)
split_file(complex_filepath, temp_complex_filepaths, round_robin=True)
split_file(simple_filepath, temp_simple_filepaths, round_robin=True)
preprocessed_temp_complex_filepaths = get_temp_filepaths(n_jobs)
preprocessed_temp_simple_filepaths = get_temp_filepaths(n_jobs)
tasks = [
delayed(file_pair_preprocessor)(*paths)
for paths in zip(temp_complex_filepaths, temp_simple_filepaths, preprocessed_temp_complex_filepaths,
preprocessed_temp_simple_filepaths)
]
Parallel(n_jobs=n_jobs)(tasks)
merge_files(preprocessed_temp_complex_filepaths, output_complex_filepath, round_robin=True)
merge_files(preprocessed_temp_simple_filepaths, output_simple_filepath, round_robin=True)
delete_files(temp_complex_filepaths)
delete_files(temp_simple_filepaths)
delete_files(preprocessed_temp_complex_filepaths)
delete_files(preprocessed_temp_simple_filepaths)
return parallel_file_pair_preprocessor
def word_shuffle(words, max_swap=3):
noise = torch.rand(len(words)).mul_(max_swap)
permutation = torch.arange(len(words)).float().add_(noise).sort()[1]
return [words[i] for i in permutation]
def word_dropout(words, dropout_prob=0.1):
keep = torch.rand(len(words))
dropped_out_words = [word for i, word in enumerate(words) if keep[i] > dropout_prob]
if len(dropped_out_words) == 0:
return [words[random.randint(0, len(words) - 1)]]
return dropped_out_words
def word_blank(words, blank_prob=0.1):
keep = torch.rand(len(words))
return [word if keep[i] > blank_prob else '<BLANK>' for i, word in enumerate(words)]
def add_noise(sentence):
words = to_words(sentence)
words = word_shuffle(words, max_swap=3)
words = word_dropout(words, dropout_prob=0.1)
words = word_blank(words, blank_prob=0.1)
return ' '.join(words)
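# Hedged usage sketch (not part of the original module): add_noise is stochastic,
# so the output below varies with the torch seed.
if __name__ == "__main__":
    torch.manual_seed(0)
    print(add_noise('the quick brown fox jumps over the lazy dog'))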
| access-main | access/preprocess.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from functools import wraps
from pathlib import Path
import shutil
import tempfile
from imohash import hashfile
from access.fairseq.base import fairseq_generate
from access.preprocessors import ComposedPreprocessor, load_preprocessors
from access.utils.helpers import count_lines
def memoize_simplifier(simplifier):
memo = {}
@wraps(simplifier)
def wrapped(complex_filepath, pred_filepath):
complex_filehash = hashfile(complex_filepath, hexdigest=True)
previous_pred_filepath = memo.get(complex_filehash)
if previous_pred_filepath is not None and Path(previous_pred_filepath).exists():
assert count_lines(complex_filepath) == count_lines(previous_pred_filepath)
# Reuse previous prediction
shutil.copyfile(previous_pred_filepath, pred_filepath)
else:
simplifier(complex_filepath, pred_filepath)
# Save prediction
memo[complex_filehash] = pred_filepath
return wrapped
def get_fairseq_simplifier(exp_dir, reload_preprocessors=False, **kwargs):
'''Method factory'''
@memoize_simplifier
def fairseq_simplifier(complex_filepath, output_pred_filepath):
# Trailing spaces for markdown formatting
print('simplifier_type="fairseq_simplifier" ')
print(f'exp_dir="{exp_dir}" ')
fairseq_generate(complex_filepath, output_pred_filepath, exp_dir, **kwargs)
preprocessors = None
if reload_preprocessors:
preprocessors = load_preprocessors(exp_dir)
if preprocessors is not None:
fairseq_simplifier = get_preprocessed_simplifier(fairseq_simplifier, preprocessors)
return fairseq_simplifier
def get_preprocessed_simplifier(simplifier, preprocessors):
composed_preprocessor = ComposedPreprocessor(preprocessors)
@memoize_simplifier
@wraps(simplifier)
def preprocessed_simplifier(complex_filepath, output_pred_filepath):
print(f'preprocessors={preprocessors}')
preprocessed_complex_filepath = tempfile.mkstemp()[1]
composed_preprocessor.encode_file(complex_filepath, preprocessed_complex_filepath)
preprocessed_output_pred_filepath = tempfile.mkstemp()[1]
simplifier(preprocessed_complex_filepath, preprocessed_output_pred_filepath)
composed_preprocessor.decode_file(preprocessed_output_pred_filepath,
output_pred_filepath,
encoder_filepath=complex_filepath)
preprocessed_simplifier.__name__ = f'{preprocessed_simplifier.__name__}_{composed_preprocessor.get_suffix()}'
return preprocessed_simplifier
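# Hedged usage sketch (illustration only; 'experiments/my_exp' and the file names
# are placeholders): a fairseq simplifier wrapped with the preprocessors saved in
# the experiment directory.
# >>> simplifier = get_fairseq_simplifier('experiments/my_exp')
# >>> simplifier = get_preprocessed_simplifier(simplifier, load_preprocessors('experiments/my_exp'))
# >>> simplifier('complex.txt', 'pred.txt')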
| access-main | access/simplifiers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from functools import lru_cache
import Levenshtein
import numpy as np
from access.resources.paths import FASTTEXT_EMBEDDINGS_PATH
from access.resources.prepare import prepare_fasttext_embeddings
from access.text import (to_words, remove_punctuation_tokens, remove_stopwords, spacy_process)
from access.utils.helpers import yield_lines
@lru_cache(maxsize=1)
def get_word2rank(vocab_size=np.inf):
prepare_fasttext_embeddings()
# TODO: Decrease vocab size or load from smaller file
word2rank = {}
line_generator = yield_lines(FASTTEXT_EMBEDDINGS_PATH)
next(line_generator) # Skip the first line (header)
for i, line in enumerate(line_generator):
if (i + 1) > vocab_size:
break
word = line.split(' ')[0]
word2rank[word] = i
return word2rank
def get_rank(word):
return get_word2rank().get(word, len(get_word2rank()))
def get_log_rank(word):
return np.log(1 + get_rank(word))
def get_lexical_complexity_score(sentence):
words = to_words(remove_stopwords(remove_punctuation_tokens(sentence)))
words = [word for word in words if word in get_word2rank()]
if len(words) == 0:
return np.log(1 + len(get_word2rank())) # TODO: This is completely arbitrary
return np.quantile([get_log_rank(word) for word in words], 0.75)
def get_levenshtein_similarity(complex_sentence, simple_sentence):
return Levenshtein.ratio(complex_sentence, simple_sentence)
def get_dependency_tree_depth(sentence):
def get_subtree_depth(node):
if len(list(node.children)) == 0:
return 0
return 1 + max([get_subtree_depth(child) for child in node.children])
tree_depths = [get_subtree_depth(spacy_sentence.root) for spacy_sentence in spacy_process(sentence).sents]
if len(tree_depths) == 0:
return 0
return max(tree_depths)
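# Hedged usage sketch (not part of the original module): the Levenshtein feature
# is cheap to compute, while the word-rank and dependency-tree features trigger
# fastText / spaCy downloads on first use and are therefore left commented out.
if __name__ == "__main__":
    print(get_levenshtein_similarity('The cat sat on the mat because it was tired .',
                                     'The cat sat on the mat .'))
    # print(get_lexical_complexity_score('The cat sat on the mat .'))
    # print(get_dependency_tree_depth('The cat sat on the mat .'))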
| access-main | access/feature_extraction.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from abc import ABC
from functools import wraps, lru_cache
import hashlib
from pathlib import Path
import dill as pickle
import re
import shutil
from nevergrad.instrumentation import var
import numpy as np
import sentencepiece as spm
from access.feature_extraction import (get_lexical_complexity_score, get_levenshtein_similarity,
get_dependency_tree_depth)
from access.resources.paths import VARIOUS_DIR, get_data_filepath
from access.utils.helpers import (write_lines_in_parallel, yield_lines_in_parallel, add_dicts, get_default_args,
get_temp_filepath, safe_division, count_lines)
SPECIAL_TOKEN_REGEX = r'<[a-zA-Z\-_\d\.]+>'
PREPROCESSORS_REGISTRY = {}
def get_preprocessor_by_name(preprocessor_name):
return PREPROCESSORS_REGISTRY[preprocessor_name]
def get_preprocessors(preprocessor_kwargs):
preprocessors = []
for preprocessor_name, kwargs in preprocessor_kwargs.items():
preprocessors.append(get_preprocessor_by_name(preprocessor_name)(**kwargs))
return preprocessors
def extract_special_tokens(sentence):
    '''Split off any number of special tokens at the beginning of the sentence'''
match = re.match(fr'(^(?:{SPECIAL_TOKEN_REGEX} *)+) *(.*)$', sentence)
if match is None:
return '', sentence
special_tokens, sentence = match.groups()
return special_tokens.strip(), sentence
def remove_special_tokens(sentence):
return extract_special_tokens(sentence)[1]
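# Hedged illustration (not part of the original module) of the two helpers above:
# >>> extract_special_tokens('<LEVENSHTEIN_0.75> <LENGTHRATIO_0.8> the cat sat .')
# ('<LEVENSHTEIN_0.75> <LENGTHRATIO_0.8>', 'the cat sat .')
# >>> remove_special_tokens('<LEVENSHTEIN_0.75> the cat sat .')
# 'the cat sat .'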
def store_args(constructor):
@wraps(constructor)
def wrapped(self, *args, **kwargs):
if not hasattr(self, 'args') or not hasattr(self, 'kwargs'):
# TODO: Default args are not overwritten if provided as args
self.args = args
self.kwargs = add_dicts(get_default_args(constructor), kwargs)
return constructor(self, *args, **kwargs)
return wrapped
def dump_preprocessors(preprocessors, dir_path):
with open(Path(dir_path) / 'preprocessors.pickle', 'wb') as f:
pickle.dump(preprocessors, f)
def load_preprocessors(dir_path):
path = Path(dir_path) / 'preprocessors.pickle'
if not path.exists():
return None
with open(path, 'rb') as f:
return pickle.load(f)
class AbstractPreprocessor(ABC):
def __init_subclass__(cls, **kwargs):
'''Register all children in registry'''
super().__init_subclass__(**kwargs)
PREPROCESSORS_REGISTRY[cls.__name__] = cls
def __repr__(self):
args = getattr(self, 'args', ())
kwargs = getattr(self, 'kwargs', {})
args_repr = [repr(arg) for arg in args]
kwargs_repr = [f'{k}={repr(v)}' for k, v in sorted(kwargs.items(), key=lambda kv: kv[0])]
args_kwargs_str = ', '.join(args_repr + kwargs_repr)
return f'{self.__class__.__name__}({args_kwargs_str})'
def get_hash_string(self):
return self.__class__.__name__
def get_hash(self):
return hashlib.md5(self.get_hash_string().encode()).hexdigest()
def get_nevergrad_variables(self):
return {}
@property
def prefix(self):
return self.__class__.__name__.replace('Preprocessor', '')
def fit(self, complex_filepath, simple_filepath):
pass
def encode_sentence(self, sentence, encoder_sentence=None):
raise NotImplementedError
def decode_sentence(self, sentence, encoder_sentence=None):
raise NotImplementedError
def encode_sentence_pair(self, complex_sentence, simple_sentence):
if complex_sentence is not None:
complex_sentence = self.encode_sentence(complex_sentence)
if simple_sentence is not None:
simple_sentence = self.encode_sentence(simple_sentence)
return complex_sentence, simple_sentence
def encode_file(self, input_filepath, output_filepath, encoder_filepath=None):
if encoder_filepath is None:
# We will use an empty temporary file which will yield None for each line
encoder_filepath = get_temp_filepath(create=True)
with open(output_filepath, 'w') as f:
for input_line, encoder_line in yield_lines_in_parallel([input_filepath, encoder_filepath], strict=False):
f.write(self.encode_sentence(input_line, encoder_line) + '\n')
def decode_file(self, input_filepath, output_filepath, encoder_filepath=None):
if encoder_filepath is None:
# We will use an empty temporary file which will yield None for each line
encoder_filepath = get_temp_filepath(create=True)
with open(output_filepath, 'w') as f:
for encoder_sentence, input_sentence in yield_lines_in_parallel([encoder_filepath, input_filepath],
strict=False):
decoded_sentence = self.decode_sentence(input_sentence, encoder_sentence=encoder_sentence)
f.write(decoded_sentence + '\n')
def encode_file_pair(self, complex_filepath, simple_filepath, output_complex_filepath, output_simple_filepath):
'''Jointly encode a complex file and a simple file (can be aligned or not)'''
with write_lines_in_parallel([output_complex_filepath, output_simple_filepath], strict=False) as output_files:
for complex_line, simple_line in yield_lines_in_parallel([complex_filepath, simple_filepath], strict=False):
output_files.write(self.encode_sentence_pair(complex_line, simple_line))
class ComposedPreprocessor(AbstractPreprocessor):
@store_args
def __init__(self, preprocessors, sort=False):
if preprocessors is None:
preprocessors = []
if sort:
# Make sure preprocessors are always in the same order
preprocessors = sorted(preprocessors, key=lambda preprocessor: preprocessor.__class__.__name__)
self.preprocessors = preprocessors
def get_hash_string(self):
preprocessors_hash_strings = [preprocessor.get_hash_string() for preprocessor in self.preprocessors]
return f'ComposedPreprocessor(preprocessors={preprocessors_hash_strings})'
def get_suffix(self):
return '_'.join([p.prefix.lower() for p in self.preprocessors])
    def fit(self, complex_filepath, simple_filepath):
        for preprocessor in self.preprocessors:
            preprocessor.fit(complex_filepath, simple_filepath)
def encode_sentence(self, sentence, encoder_sentence=None):
for preprocessor in self.preprocessors:
sentence = preprocessor.encode_sentence(sentence, encoder_sentence)
return sentence
def decode_sentence(self, sentence, encoder_sentence=None):
for preprocessor in self.preprocessors:
sentence = preprocessor.decode_sentence(sentence, encoder_sentence)
return sentence
def encode_file(self, input_filepath, output_filepath, encoder_filepath=None):
for preprocessor in self.preprocessors:
intermediary_output_filepath = get_temp_filepath()
preprocessor.encode_file(input_filepath, intermediary_output_filepath, encoder_filepath)
input_filepath = intermediary_output_filepath
shutil.copyfile(input_filepath, output_filepath)
def decode_file(self, input_filepath, output_filepath, encoder_filepath=None):
for preprocessor in self.preprocessors:
intermediary_output_filepath = get_temp_filepath()
preprocessor.decode_file(input_filepath, intermediary_output_filepath, encoder_filepath)
input_filepath = intermediary_output_filepath
shutil.copyfile(input_filepath, output_filepath)
def encode_file_pair(self, complex_filepath, simple_filepath, output_complex_filepath, output_simple_filepath):
for preprocessor in self.preprocessors:
intermediary_output_complex_filepath = get_temp_filepath()
intermediary_output_simple_filepath = get_temp_filepath()
preprocessor.encode_file_pair(complex_filepath, simple_filepath, intermediary_output_complex_filepath,
intermediary_output_simple_filepath)
complex_filepath = intermediary_output_complex_filepath
simple_filepath = intermediary_output_simple_filepath
shutil.copyfile(complex_filepath, output_complex_filepath)
shutil.copyfile(simple_filepath, output_simple_filepath)
def encode_sentence_pair(self, complex_sentence, simple_sentence):
for preprocessor in self.preprocessors:
complex_sentence, simple_sentence = preprocessor.encode_sentence_pair(complex_sentence, simple_sentence)
return complex_sentence, simple_sentence
class FeaturePreprocessor(AbstractPreprocessor):
'''Prepend a computed feature at the beginning of the sentence'''
@store_args
def __init__(self, feature_name, get_feature_value, get_target_feature_value, bucket_size=0.05, noise_std=0):
self.get_feature_value = get_feature_value
self.get_target_feature_value = get_target_feature_value
self.bucket_size = bucket_size
self.noise_std = noise_std
self.feature_name = feature_name.upper()
def get_hash_string(self):
return (f'{self.__class__.__name__}(feature_name={repr(self.feature_name)}, bucket_size={self.bucket_size},'
f'noise_std={self.noise_std})')
def bucketize(self, value):
'''Round value to bucket_size to reduce the number of different values'''
return round(round(value / self.bucket_size) * self.bucket_size, 10)
def add_noise(self, value):
return value + np.random.normal(0, self.noise_std)
def get_feature_token(self, feature_value):
return f'<{self.feature_name}_{feature_value}>'
def encode_sentence(self, sentence, encoder_sentence=None):
desired_feature = self.bucketize(self.get_target_feature_value(remove_special_tokens(sentence)))
return f'{self.get_feature_token(desired_feature)} {sentence}'
def decode_sentence(self, sentence, encoder_sentence=None):
return sentence
def encode_sentence_pair(self, complex_sentence, simple_sentence):
feature = self.bucketize(
self.add_noise(
self.get_feature_value(remove_special_tokens(complex_sentence),
remove_special_tokens(simple_sentence))))
return f'{self.get_feature_token(feature)} {complex_sentence}', simple_sentence
class LevenshteinPreprocessor(FeaturePreprocessor):
@store_args
def __init__(self, target_ratio=0.8, bucket_size=0.05, noise_std=0):
self.target_ratio = target_ratio
super().__init__(self.prefix.upper(), self.get_feature_value, self.get_target_feature_value, bucket_size,
noise_std)
def get_nevergrad_variables(self):
return {'target_ratio': var.OrderedDiscrete(np.arange(0.4, 1 + 1e-6, self.bucket_size))}
def get_feature_value(self, complex_sentence, simple_sentence):
return get_levenshtein_similarity(complex_sentence, simple_sentence)
def get_target_feature_value(self, complex_sentence):
return self.target_ratio
class RatioPreprocessor(FeaturePreprocessor):
@store_args
def __init__(self, feature_extractor, target_ratio=0.8, bucket_size=0.05, noise_std=0):
self.feature_extractor = feature_extractor
self.target_ratio = target_ratio
super().__init__(self.prefix.upper(), self.get_feature_value, self.get_target_feature_value, bucket_size,
noise_std)
def get_nevergrad_variables(self):
return {'target_ratio': var.OrderedDiscrete(np.arange(0.4, 1.4 + 1e-6, self.bucket_size))}
def get_feature_value(self, complex_sentence, simple_sentence):
return min(safe_division(self.feature_extractor(simple_sentence), self.feature_extractor(complex_sentence)), 2)
def get_target_feature_value(self, complex_sentence):
return self.target_ratio
class LengthRatioPreprocessor(RatioPreprocessor):
@store_args
def __init__(self, *args, **kwargs):
super().__init__(len, *args, **kwargs)
class WordRankRatioPreprocessor(RatioPreprocessor):
@store_args
def __init__(self, *args, **kwargs):
super().__init__(get_lexical_complexity_score, *args, **kwargs)
class DependencyTreeDepthRatioPreprocessor(RatioPreprocessor):
@store_args
def __init__(self, *args, **kwargs):
super().__init__(get_dependency_tree_depth, *args, **kwargs)
class SentencePiecePreprocessor(AbstractPreprocessor):
@store_args
def __init__(self, vocab_size=10000, input_filepaths=None):
self.vocab_size = vocab_size
self.sentencepiece_model_path = VARIOUS_DIR / f'sentencepiece_model/sentencepiece_model_{self.vocab_size}.model'
self.input_filepaths = input_filepaths
if self.input_filepaths is None:
self.input_filepaths = [
get_data_filepath('wikilarge', 'train', 'complex'),
get_data_filepath('wikilarge', 'train', 'simple')
]
self.learn_sentencepiece()
@property
@lru_cache(maxsize=1)
def sp(self):
'''
        We need to use a property because SentencePieceProcessor cannot be pickled
> pickle.dumps(spm.SentencePieceProcessor())
----> TypeError: can't pickle SwigPyObject objects
'''
sp = spm.SentencePieceProcessor()
sp.Load(str(self.sentencepiece_model_path))
return sp
def get_hash_string(self):
return f'{self.__class__.__name__}(vocab_size={self.vocab_size})'
def learn_sentencepiece(self):
if self.sentencepiece_model_path.exists():
return
self.sentencepiece_model_path.parent.mkdir(parents=True, exist_ok=True)
sentencepiece_model_prefix = self.sentencepiece_model_path.parent / self.sentencepiece_model_path.stem
args_str = ' '.join([
f'--input={",".join([str(path) for path in self.input_filepaths])}',
f'--model_prefix={sentencepiece_model_prefix}',
f'--vocab_size={self.vocab_size}',
])
max_lines = 10**6
if sum([count_lines(filepath) for filepath in self.input_filepaths]) > max_lines:
args_str += f' --input_sentence_size={max_lines} --shuffle_input_sentence=true'
spm.SentencePieceTrainer.Train(args_str)
def fit(self, complex_filepath, simple_filepath):
# Args are not used
self.learn_sentencepiece()
def encode_sentence(self, sentence, encoder_sentence=None):
# TODO: Do we really need to extract the tokens
special_tokens, sentence = extract_special_tokens(sentence)
encoded_sentence = ' '.join(self.sp.EncodeAsPieces(sentence))
if special_tokens != '':
encoded_sentence = f'{special_tokens} {encoded_sentence}'
return encoded_sentence
def decode_sentence(self, sentence, encoder_sentence=None):
return self.sp.DecodePieces(sentence.split(' '))
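# Hedged usage sketch (not part of the original module): shows the control token
# that LevenshteinPreprocessor prepends at training time (encode_sentence_pair)
# and at inference time (encode_sentence). The sentences are made up.
if __name__ == "__main__":
    preprocessor = LevenshteinPreprocessor(target_ratio=0.75, bucket_size=0.05)
    print(preprocessor.encode_sentence_pair(
        'The cat sat on the mat because it was tired .',
        'The cat sat on the mat .'))
    print(preprocessor.encode_sentence('The cat sat on the mat because it was tired .'))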
| access-main | access/preprocessors.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from functools import lru_cache
import re
from string import punctuation
from nltk.tokenize.nist import NISTTokenizer
from nltk.corpus import stopwords as nltk_stopwords
import spacy
# TODO: #language_specific
stopwords = set(nltk_stopwords.words('english'))
@lru_cache(maxsize=100) # To speed up subsequent calls
def word_tokenize(sentence):
tokenizer = NISTTokenizer()
sentence = ' '.join(tokenizer.tokenize(sentence))
    # Rejoin special tokens that were split by the tokenizer by mistake: e.g. "< PERSON _ 1 >" -> "<PERSON_1>"
for match in re.finditer(r'< (?:[A-Z]+ _ )+\d+ >', sentence):
sentence = sentence.replace(match.group(), ''.join(match.group().split()))
return sentence
def to_words(sentence):
return sentence.split()
def remove_punctuation_characters(text):
return ''.join([char for char in text if char not in punctuation])
@lru_cache(maxsize=1000)
def is_punctuation(word):
return remove_punctuation_characters(word) == ''
@lru_cache(maxsize=100)
def remove_punctuation_tokens(text):
return ' '.join([w for w in to_words(text) if not is_punctuation(w)])
def remove_stopwords(text):
return ' '.join([w for w in to_words(text) if w.lower() not in stopwords])
@lru_cache(maxsize=1)
def get_spacy_model():
model = 'en_core_web_sm'
if not spacy.util.is_package(model):
spacy.cli.download(model)
spacy.cli.link(model, model, force=True, model_path=spacy.util.get_package_path(model))
    return spacy.load(model)  # equivalent CLI: python -m spacy download en_core_web_sm
@lru_cache(maxsize=10**6)
def spacy_process(text):
return get_spacy_model()(str(text))
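# Hedged usage sketch (not part of the original module): both helpers operate on
# whitespace-tokenized text.
if __name__ == "__main__":
    print(remove_punctuation_tokens('the cat sat on the mat .'))  # 'the cat sat on the mat'
    print(remove_stopwords('the cat sat on the mat'))             # 'cat sat mat'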
| access-main | access/text.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from itertools import product
from pathlib import Path
REPO_DIR = Path(__file__).resolve().parent.parent.parent
EXP_DIR = REPO_DIR / 'experiments'
RESOURCES_DIR = REPO_DIR / 'resources'
DATASETS_DIR = RESOURCES_DIR / 'datasets'
VARIOUS_DIR = RESOURCES_DIR / 'various'
FASTTEXT_EMBEDDINGS_PATH = VARIOUS_DIR / 'fasttext-vectors/wiki.en.vec'
MODELS_DIR = RESOURCES_DIR / 'models'
BEST_MODEL_DIR = MODELS_DIR / 'best_model'
LANGUAGES = ['complex', 'simple']
PHASES = ['train', 'valid', 'test']
def get_dataset_dir(dataset):
return DATASETS_DIR / dataset
def get_data_filepath(dataset, phase, language, i=None):
suffix = '' # Create suffix e.g. for multiple references
if i is not None:
suffix = f'.{i}'
filename = f'{dataset}.{phase}.{language}{suffix}'
return get_dataset_dir(dataset) / filename
def get_filepaths_dict(dataset):
return {(phase, language): get_data_filepath(dataset, phase, language)
for phase, language in product(PHASES, LANGUAGES)}
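# Illustrative sketch (made-up dataset/phase/language values): shows the file
# naming convention produced by get_data_filepath. Only the filename is checked
# because the parent directory depends on where the repository is checked out.
def _example_data_filepath():
    path = get_data_filepath('wikilarge', 'train', 'complex')
    assert path.name == 'wikilarge.train.complex'
    ref_path = get_data_filepath('turkcorpus', 'valid', 'simple.turk', i=3)
    assert ref_path.name == 'turkcorpus.valid.simple.turk.3'
    return path, ref_path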
| access-main | access/resources/paths.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import hashlib
from pathlib import Path
from access.preprocess import get_parallel_file_pair_preprocessor
from access.preprocessors import dump_preprocessors, load_preprocessors
from access.resources.paths import PHASES, get_dataset_dir, get_data_filepath, get_filepaths_dict
from access.utils.helpers import count_lines, read_lines, create_directory_or_skip
def yield_indexes_of_lines(filepath, lines):
lines = set(lines)
with Path(filepath).open('r') as f:
for idx, line in enumerate(f):
if line.strip('\n') in lines:
yield idx
def sort_files_by_line_count(filepaths):
return sorted(filepaths, key=lambda filepath: count_lines(filepath))
def has_lines_in_common(filepath1, filepath2):
[smallest_filepath, largest_filepath] = sort_files_by_line_count([filepath1, filepath2])
for idx in yield_indexes_of_lines(largest_filepath, read_lines(smallest_filepath)):
return True
return False
def get_preprocessed_dataset_name(dataset, preprocessor):
return '_' + hashlib.md5((dataset + preprocessor.get_hash()).encode()).hexdigest()
def create_preprocessed_dataset_one_preprocessor(dataset, preprocessor, n_jobs):
new_dataset = get_preprocessed_dataset_name(dataset, preprocessor)
with create_directory_or_skip(get_dataset_dir(new_dataset)):
print(f'Creating preprocessed dataset with {preprocessor}: {dataset} -> {new_dataset}')
new_dataset_dir = get_dataset_dir(new_dataset)
filepaths_dict = get_filepaths_dict(dataset)
new_filepaths_dict = get_filepaths_dict(new_dataset)
for phase in PHASES:
            if not filepaths_dict[phase, 'complex'].exists() or not filepaths_dict[phase, 'simple'].exists():
continue
parallel_file_pair_preprocessor = get_parallel_file_pair_preprocessor(
preprocessor.encode_file_pair,
n_jobs=n_jobs,
)
parallel_file_pair_preprocessor(filepaths_dict[phase, 'complex'], filepaths_dict[phase, 'simple'],
new_filepaths_dict[phase, 'complex'], new_filepaths_dict[phase, 'simple'])
previous_preprocessors = load_preprocessors(get_dataset_dir(dataset))
if previous_preprocessors is not None:
preprocessors = previous_preprocessors + [preprocessor]
else:
preprocessors = [preprocessor]
dump_preprocessors(preprocessors, new_dataset_dir)
with open(new_dataset_dir / 'original_dataset', 'w') as f:
f.write(dataset + '\n')
return new_dataset
def create_preprocessed_dataset(dataset, preprocessors, n_jobs=1):
for preprocessor in preprocessors:
# Fit preprocessor on input dataset
preprocessor.fit(get_data_filepath(dataset, 'train', 'complex'), get_data_filepath(dataset, 'train', 'simple'))
dataset = create_preprocessed_dataset_one_preprocessor(dataset, preprocessor, n_jobs)
return dataset
| access-main | access/resources/datasets.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import bz2
import gzip
import os
from pathlib import Path
import shutil
import sys
import tarfile
import tempfile
import time
from urllib.request import urlretrieve
import zipfile
import git
from tqdm import tqdm
def reporthook(count, block_size, total_size):
# Download progress bar
global start_time
if count == 0:
start_time = time.time()
return
duration = time.time() - start_time
progress_size_mb = count * block_size / (1024 * 1024)
speed = progress_size_mb / duration
percent = int(count * block_size * 100 / total_size)
msg = f'\r... {percent}% - {int(progress_size_mb)} MB - {speed:.2f} MB/s - {int(duration)}s'
sys.stdout.write(msg)
def download(url, destination_path):
print('Downloading...')
try:
urlretrieve(url, destination_path, reporthook)
sys.stdout.write('\n')
except (Exception, KeyboardInterrupt, SystemExit):
print('Rolling back: remove partially downloaded file')
os.remove(destination_path)
raise
def download_and_extract(url):
tmp_dir = Path(tempfile.mkdtemp())
compressed_filename = url.split('/')[-1]
compressed_filepath = tmp_dir / compressed_filename
download(url, compressed_filepath)
print('Extracting...')
return extract(compressed_filepath, tmp_dir)
def extract(filepath, output_dir):
# Infer extract method based on extension
extensions_to_methods = {
'.tar.gz': untar,
'.tar.bz2': untar,
'.tgz': untar,
'.zip': unzip,
'.gz': ungzip,
'.bz2': unbz2,
}
def get_extension(filename, extensions):
possible_extensions = [ext for ext in extensions if filename.endswith(ext)]
if len(possible_extensions) == 0:
raise Exception(f'File {filename} has an unknown extension')
# Take the longest (.tar.gz should take precedence over .gz)
return max(possible_extensions, key=lambda ext: len(ext))
filename = os.path.basename(filepath)
extension = get_extension(filename, list(extensions_to_methods))
extract_method = extensions_to_methods[extension]
# Extract files in a temporary dir then move the extracted item back to
    # the output dir in order to get the details of what was extracted
tmp_extract_dir = tempfile.mkdtemp()
# Extract
extract_method(filepath, output_dir=tmp_extract_dir)
extracted_items = os.listdir(tmp_extract_dir)
output_paths = []
for name in extracted_items:
extracted_path = os.path.join(tmp_extract_dir, name)
output_path = os.path.join(output_dir, name)
move_with_overwrite(extracted_path, output_path)
output_paths.append(output_path)
return output_paths
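# Illustrative sketch (made-up filenames): reproduces the "longest extension
# wins" rule used by get_extension() above with a standalone copy, so that
# '.tar.gz' takes precedence over '.gz'.
def _example_extension_precedence():
    extensions = ['.tar.gz', '.tar.bz2', '.tgz', '.zip', '.gz', '.bz2']
    def pick(filename):
        possible_extensions = [ext for ext in extensions if filename.endswith(ext)]
        return max(possible_extensions, key=lambda ext: len(ext))
    assert pick('dump.tar.gz') == '.tar.gz'  # not '.gz'
    assert pick('wiki.en.vec.gz') == '.gz'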
def move_with_overwrite(source_path, target_path):
if os.path.isfile(target_path):
os.remove(target_path)
if os.path.isdir(target_path) and os.path.isdir(source_path):
shutil.rmtree(target_path)
shutil.move(source_path, target_path)
def untar(compressed_path, output_dir):
with tarfile.open(compressed_path) as f:
f.extractall(output_dir)
def unzip(compressed_path, output_dir):
with zipfile.ZipFile(compressed_path, 'r') as f:
f.extractall(output_dir)
def ungzip(compressed_path, output_dir):
filename = os.path.basename(compressed_path)
assert filename.endswith('.gz')
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_path = os.path.join(output_dir, filename[:-3])
with gzip.open(compressed_path, 'rb') as f_in:
with open(output_path, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def unbz2(compressed_path, output_dir):
extract_filename = os.path.basename(compressed_path).replace('.bz2', '')
extract_path = os.path.join(output_dir, extract_filename)
with bz2.BZ2File(compressed_path, 'rb') as compressed_file, open(extract_path, 'wb') as extract_file:
for data in tqdm(iter(lambda: compressed_file.read(1024 * 1024), b'')):
extract_file.write(data)
def add_newline_at_end_of_file(file_path):
with open(file_path, 'r') as f:
last_character = f.readlines()[-1][-1]
if last_character == '\n':
return
print(f'Adding newline at the end of {file_path}')
with open(file_path, 'a') as f:
f.write('\n')
def git_clone(url, output_dir, overwrite=True):
    if Path(output_dir).exists():
        if not overwrite:
            return
        shutil.rmtree(output_dir)
    git.Repo.clone_from(url, output_dir)
def replace_lrb_rrb_file(filepath):
tmp_filepath = filepath + '.tmp'
with open(filepath, 'r') as input_file, open(tmp_filepath, 'w') as output_file:
for line in input_file:
output_file.write(line.replace('-lrb-', '(').replace('-rrb-', ')'))
os.rename(tmp_filepath, filepath)
| access-main | access/resources/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from glob import glob
import os
from pathlib import Path
import shutil
import tempfile
import numpy as np
from access.text import word_tokenize
from access.utils.helpers import (yield_lines_in_parallel, write_lines_in_parallel, create_directory_or_skip,
lock_directory)
from access.preprocess import replace_lrb_rrb, replace_lrb_rrb_file, normalize_quotes
from access.resources.utils import download_and_extract, add_newline_at_end_of_file, git_clone
from access.resources.paths import (FASTTEXT_EMBEDDINGS_PATH, get_dataset_dir, get_data_filepath, PHASES, MODELS_DIR,
BEST_MODEL_DIR)
def prepare_wikilarge():
dataset = 'wikilarge'
with create_directory_or_skip(get_dataset_dir(dataset)):
url = 'https://github.com/louismartin/dress-data/raw/master/data-simplification.tar.bz2'
extracted_path = download_and_extract(url)[0]
# Only rename files and put them in local directory architecture
for phase in PHASES:
for (old_language_name, new_language_name) in [('src', 'complex'), ('dst', 'simple')]:
old_path_glob = os.path.join(extracted_path, dataset, f'*.ori.{phase}.{old_language_name}')
globs = glob(old_path_glob)
assert len(globs) == 1
old_path = globs[0]
new_path = get_data_filepath(dataset, phase, new_language_name)
shutil.copyfile(old_path, new_path)
shutil.move(replace_lrb_rrb_file(new_path), new_path)
add_newline_at_end_of_file(new_path)
return dataset
def prepare_turkcorpus_lower():
dataset = 'turkcorpus_lower'
with create_directory_or_skip(get_dataset_dir(dataset)):
url = 'https://github.com/cocoxu/simplification.git'
output_dir = Path(tempfile.mkdtemp())
git_clone(url, output_dir)
print(output_dir)
print('Processing...')
# Only rename files and put them in local directory architecture
turkcorpus_lower_dir = output_dir / 'data/turkcorpus'
print(turkcorpus_lower_dir)
for (old_phase, new_phase) in [('test', 'test'), ('tune', 'valid')]:
for (old_language_name, new_language_name) in [('norm', 'complex'), ('simp', 'simple')]:
old_path = turkcorpus_lower_dir / f'{old_phase}.8turkers.tok.{old_language_name}'
new_path = get_data_filepath('turkcorpus_lower', new_phase, new_language_name)
shutil.copyfile(old_path, new_path)
add_newline_at_end_of_file(new_path)
shutil.move(replace_lrb_rrb_file(new_path), new_path)
for i in range(8):
old_path = turkcorpus_lower_dir / f'{old_phase}.8turkers.tok.turk.{i}'
new_path = get_data_filepath('turkcorpus_lower', new_phase, 'simple.turk', i=i)
shutil.copyfile(old_path, new_path)
add_newline_at_end_of_file(new_path)
shutil.move(replace_lrb_rrb_file(new_path), new_path)
print('Done.')
return dataset
def prepare_turkcorpus():
dataset = 'turkcorpus'
with create_directory_or_skip(get_dataset_dir(dataset)):
# Import here to avoid circular imports
from access.feature_extraction import get_levenshtein_similarity
prepare_turkcorpus_lower()
url = 'https://github.com/cocoxu/simplification.git'
output_dir = Path(tempfile.mkdtemp())
git_clone(url, output_dir)
print('Processing...')
# Only rename files and put them in local directory architecture
turkcorpus_truecased_dir = output_dir / 'data/turkcorpus/truecased'
for (old_phase, new_phase) in [('test', 'test'), ('tune', 'valid')]:
# (1) read the .tsv for which each line is tab separated:
# `idx, complex_sentence, *turk_sentences = line.split('\t')`
# (2) replace lrb and rrb, tokenize
# (3) Turk sentences are shuffled for each sample so need to realign them with turkcorpus lower
tsv_filepath = turkcorpus_truecased_dir / f'{old_phase}.8turkers.organized.tsv'
output_complex_filepath = get_data_filepath(dataset, new_phase, 'complex')
output_ref_filepaths = [get_data_filepath(dataset, new_phase, 'simple.turk', i) for i in range(8)]
# These files will be used to reorder the shuffled ref sentences
ordered_ref_filepaths = [
get_data_filepath('turkcorpus_lower', new_phase, 'simple.turk', i) for i in range(8)
]
with write_lines_in_parallel([output_complex_filepath] + output_ref_filepaths) as files:
input_filepaths = [tsv_filepath] + ordered_ref_filepaths
for tsv_line, *ordered_ref_sentences in yield_lines_in_parallel(input_filepaths):
sample_id, complex_sentence, *shuffled_ref_sentences = [
word_tokenize(normalize_quotes(replace_lrb_rrb(s))) for s in tsv_line.split('\t')
]
reordered_sentences = []
for ordered_ref_sentence in ordered_ref_sentences:
# Find the position of the ref_sentence in the shuffled sentences
similarities = [
get_levenshtein_similarity(ordered_ref_sentence.replace(' ', ''),
shuffled_ref_sentence.lower().replace(' ', ''))
for shuffled_ref_sentence in shuffled_ref_sentences
]
idx = np.argmax(similarities)
# A few sentences have differing punctuation marks
assert similarities[idx] > 0.98, \
f'{ordered_ref_sentence} != {shuffled_ref_sentences[idx].lower()} {similarities[idx]:.2f}'
reordered_sentences.append(shuffled_ref_sentences.pop(idx))
assert len(shuffled_ref_sentences) == 0
assert len(reordered_sentences) == 8
files.write([complex_sentence] + reordered_sentences)
return dataset
def prepare_fasttext_embeddings():
FASTTEXT_EMBEDDINGS_PATH.parent.mkdir(parents=True, exist_ok=True)
with lock_directory(FASTTEXT_EMBEDDINGS_PATH.parent):
if FASTTEXT_EMBEDDINGS_PATH.exists():
return
url = 'https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.en.300.vec.gz'
extracted_path = download_and_extract(url)[0]
shutil.move(extracted_path, FASTTEXT_EMBEDDINGS_PATH)
def prepare_models():
MODELS_DIR.mkdir(parents=True, exist_ok=True)
if not BEST_MODEL_DIR.exists():
url = 'http://dl.fbaipublicfiles.com/access/best_model.tar.gz'
extracted_path = download_and_extract(url)[0]
shutil.move(extracted_path, BEST_MODEL_DIR)
all_parameters_model_dir = MODELS_DIR / 'all_parameters_model'
if not all_parameters_model_dir.exists():
url = 'http://dl.fbaipublicfiles.com/access/all_parameters_model.tar.gz'
extracted_path = download_and_extract(url)[0]
shutil.move(extracted_path, all_parameters_model_dir)
return BEST_MODEL_DIR
| access-main | access/resources/prepare.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from collections import defaultdict
from functools import lru_cache
import shutil
from nevergrad.instrumentation import Instrumentation
from nevergrad.optimization import optimizerlib
import re
from access.evaluation.general import evaluate_simplifier_on_turkcorpus
from access.evaluation.utils import combine_metrics
from access.fairseq.base import (fairseq_preprocess, fairseq_train, fairseq_generate, get_fairseq_exp_dir,
)
from access.resources.datasets import has_lines_in_common
from access.preprocessors import get_preprocessors, get_preprocessor_by_name
from access.resources.datasets import create_preprocessed_dataset
from access.resources.paths import get_data_filepath, get_dataset_dir
from access.simplifiers import get_fairseq_simplifier, get_preprocessed_simplifier
from access.utils.training import (print_method_name, print_args, print_result, print_running_time,
)
from access.utils.helpers import get_allowed_kwargs
def check_dataset(dataset):
# Sanity check with evaluation dataset
assert not has_lines_in_common(get_data_filepath(dataset, 'train', 'complex'),
get_data_filepath('turkcorpus', 'valid', 'complex'))
assert not has_lines_in_common(get_data_filepath(dataset, 'train', 'complex'),
get_data_filepath('turkcorpus', 'test', 'complex'))
def prepare_exp_dir():
exp_dir = get_fairseq_exp_dir()
if exp_dir.exists():
# Remove exp dir to prevent conflicts with requeue and non deterministic args
# https://github.com/fairinternal/dfoptim/issues/126 #private
shutil.rmtree(exp_dir)
exp_dir.mkdir(parents=True)
return exp_dir
def get_simplifier(exp_dir, preprocessors_kwargs, generate_kwargs):
# TODO: Take kwargs as input and separate between get_preprocessors kwargs and generate_kwargs
preprocessors = get_preprocessors(preprocessors_kwargs)
simplifier = get_fairseq_simplifier(exp_dir, **generate_kwargs)
return get_preprocessed_simplifier(simplifier, preprocessors=preprocessors)
def find_best_parametrization(exp_dir, metrics_coefs, preprocessors_kwargs, parametrization_budget=64):
@lru_cache()
def evaluate_parametrization(**instru_kwargs):
# Note that we use default generate kwargs instead of provided one because they are faster
preprocessors_kwargs = instru_kwargs_to_preprocessors_kwargs(instru_kwargs)
simplifier = get_simplifier(exp_dir, preprocessors_kwargs=preprocessors_kwargs, generate_kwargs={})
scores = evaluate_simplifier_on_turkcorpus(simplifier, phase='valid')
return combine_metrics(scores['BLEU'], scores['SARI'], scores['FKGL'], metrics_coefs)
def preprocessors_kwargs_to_instru_kwargs(preprocessors_kwargs):
instru_kwargs = {}
for preprocessor_name, preprocessor_kwargs in preprocessors_kwargs.items():
assert '_' not in preprocessor_name
preprocessor = get_preprocessor_by_name(preprocessor_name)(**preprocessor_kwargs)
# First we set the values from preprocessors_kwargs which are constant
for kwarg_name, kwarg_value in preprocessor_kwargs.items():
instru_kwargs[f'{preprocessor_name}_{kwarg_name}'] = kwarg_value
# Then we overwrite some of these values with nevergrad variables when necessary
for kwarg_name, kwarg_value in preprocessor.get_nevergrad_variables().items():
instru_kwargs[f'{preprocessor_name}_{kwarg_name}'] = kwarg_value
return instru_kwargs
def instru_kwargs_to_preprocessors_kwargs(instru_kwargs):
preprocessors_kwargs = defaultdict(dict)
for key, value in instru_kwargs.items():
preprocessor_name, kwarg_name = re.match(r'([a-zA-Z0-9]+)_([a-z0-9_]+)', key).groups()
preprocessors_kwargs[preprocessor_name][kwarg_name] = value
return dict(preprocessors_kwargs)
instru_kwargs = preprocessors_kwargs_to_instru_kwargs(preprocessors_kwargs)
instru = Instrumentation(**instru_kwargs)
if instru.dimension == 0:
return preprocessors_kwargs
    # No need to search a lot when there are only a few parameters
parametrization_budget = min(32**instru.dimension, parametrization_budget)
optimizer = optimizerlib.ScrHammersleySearch(instrumentation=instru, budget=parametrization_budget, num_workers=1)
recommendation = optimizer.optimize(evaluate_parametrization, verbosity=0)
return instru_kwargs_to_preprocessors_kwargs(recommendation.kwargs)
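# Illustrative sketch (hypothetical preprocessor names and values): demonstrates
# the flat '{preprocessor_name}_{kwarg_name}' naming used by the nevergrad
# instrumentation above and the regex that splits it back into nested kwargs.
def _example_instru_kwargs_roundtrip():
    flat = {'LengthRatioPreprocessor_target_ratio': 0.8,
            'SentencePiecePreprocessor_vocab_size': 10000}
    nested = defaultdict(dict)
    for key, value in flat.items():
        preprocessor_name, kwarg_name = re.match(r'([a-zA-Z0-9]+)_([a-z0-9_]+)', key).groups()
        nested[preprocessor_name][kwarg_name] = value
    assert dict(nested) == {'LengthRatioPreprocessor': {'target_ratio': 0.8},
                            'SentencePiecePreprocessor': {'vocab_size': 10000}}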
def check_and_resolve_args(kwargs):
if kwargs.get('diverse_beam_groups_ratio', None) is not None:
diverse_beam_groups = max(int(kwargs['beam'] * kwargs['diverse_beam_groups_ratio']), 1)
print(f'diverse_beam_groups={diverse_beam_groups}')
assert kwargs['beam'] % diverse_beam_groups == 0
kwargs['diverse_beam_groups'] = diverse_beam_groups
else:
diverse_beam_groups = None
return kwargs
@print_method_name
@print_args
@print_result
@print_running_time
def fairseq_train_and_evaluate(dataset, metrics_coefs=[1, 1, 1], parametrization_budget=64, **kwargs):
check_dataset(dataset)
kwargs = check_and_resolve_args(kwargs)
exp_dir = prepare_exp_dir()
preprocessors_kwargs = kwargs.get('preprocessors_kwargs', {})
preprocessors = get_preprocessors(preprocessors_kwargs)
if len(preprocessors) > 0:
dataset = create_preprocessed_dataset(dataset, preprocessors, n_jobs=1)
shutil.copy(get_dataset_dir(dataset) / 'preprocessors.pickle', exp_dir)
preprocessed_dir = fairseq_preprocess(dataset)
train_kwargs = get_allowed_kwargs(fairseq_train, preprocessed_dir, exp_dir, **kwargs)
fairseq_train(preprocessed_dir, exp_dir=exp_dir, **train_kwargs)
# Evaluation
generate_kwargs = get_allowed_kwargs(fairseq_generate, 'complex_filepath', 'pred_filepath', exp_dir, **kwargs)
recommended_preprocessors_kwargs = find_best_parametrization(exp_dir, metrics_coefs, preprocessors_kwargs,
parametrization_budget)
print(f'recommended_preprocessors_kwargs={recommended_preprocessors_kwargs}')
simplifier = get_simplifier(exp_dir, recommended_preprocessors_kwargs, generate_kwargs)
scores = evaluate_simplifier_on_turkcorpus(simplifier, phase='valid')
print(f'scores={scores}')
score = combine_metrics(scores['BLEU'], scores['SARI'], scores['FKGL'], metrics_coefs)
return score
| access-main | access/fairseq/main.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from collections import defaultdict
import os
from pathlib import Path
import random
import re
import shutil
import tempfile
import time
from fairseq import options
from fairseq_cli import preprocess, train, generate
from access.resources.paths import get_dataset_dir, EXP_DIR
from access.utils.helpers import (log_stdout, lock_directory, create_directory_or_skip, yield_lines,
write_lines)
def get_fairseq_exp_dir(job_id=None):
if job_id is not None:
dir_name = f'slurmjob_{job_id}'
else:
dir_name = f'local_{int(time.time() * 1000)}'
    return Path(EXP_DIR) / 'fairseq' / dir_name
def fairseq_preprocess(dataset):
dataset_dir = get_dataset_dir(dataset)
with lock_directory(dataset_dir):
preprocessed_dir = dataset_dir / 'fairseq_preprocessed'
with create_directory_or_skip(preprocessed_dir):
preprocessing_parser = options.get_preprocessing_parser()
preprocess_args = preprocessing_parser.parse_args([
'--source-lang',
'complex',
'--target-lang',
'simple',
'--trainpref',
os.path.join(dataset_dir, f'{dataset}.train'),
'--validpref',
os.path.join(dataset_dir, f'{dataset}.valid'),
'--testpref',
os.path.join(dataset_dir, f'{dataset}.test'),
'--destdir',
str(preprocessed_dir),
'--output-format',
'raw',
])
preprocess.main(preprocess_args)
return preprocessed_dir
def fairseq_train(
preprocessed_dir,
exp_dir,
ngpus=None,
max_tokens=2000,
arch='fconv_iwslt_de_en',
pretrained_emb_path=None,
embeddings_dim=None,
# Transformer (decoder is the same as encoder for now)
encoder_embed_dim=512,
encoder_layers=6,
encoder_attention_heads=8,
# encoder_decoder_dim_ratio=1,
# share_embeddings=True,
max_epoch=50,
warmup_updates=None,
lr=0.1,
min_lr=1e-9,
dropout=0.2,
label_smoothing=0.1,
lr_scheduler='fixed',
weight_decay=0.0001,
criterion='label_smoothed_cross_entropy',
optimizer='nag',
validations_before_sari_early_stopping=10,
fp16=False):
exp_dir = Path(exp_dir)
with log_stdout(exp_dir / 'fairseq_train.stdout'):
preprocessed_dir = Path(preprocessed_dir)
exp_dir.mkdir(exist_ok=True, parents=True)
# Copy dictionaries to exp_dir for generation
shutil.copy(preprocessed_dir / 'dict.complex.txt', exp_dir)
shutil.copy(preprocessed_dir / 'dict.simple.txt', exp_dir)
train_parser = options.get_training_parser()
# if share_embeddings:
# assert encoder_decoder_dim_ratio == 1
args = [
'--task',
'translation',
preprocessed_dir,
'--raw-text',
'--source-lang',
'complex',
'--target-lang',
'simple',
'--save-dir',
os.path.join(exp_dir, 'checkpoints'),
'--clip-norm',
0.1,
'--criterion',
criterion,
'--no-epoch-checkpoints',
'--save-interval-updates',
5000, # Validate every n updates
'--validations-before-sari-early-stopping',
validations_before_sari_early_stopping,
'--arch',
arch,
# '--decoder-out-embed-dim', int(embeddings_dim * encoder_decoder_dim_ratio), # Output dim of decoder
'--max-tokens',
max_tokens,
'--max-epoch',
max_epoch,
'--lr-scheduler',
lr_scheduler,
'--dropout',
dropout,
'--lr',
lr,
'--lr-shrink',
0.5, # For reduce lr on plateau scheduler
'--min-lr',
min_lr,
'--weight-decay',
weight_decay,
'--optimizer',
optimizer,
'--label-smoothing',
label_smoothing,
'--seed',
random.randint(1, 1000),
# '--force-anneal', '200',
# '--distributed-world-size', '1',
]
if arch == 'transformer':
args.extend([
'--encoder-embed-dim',
encoder_embed_dim,
'--encoder-ffn-embed-dim',
4 * encoder_embed_dim,
'--encoder-layers',
encoder_layers,
'--encoder-attention-heads',
encoder_attention_heads,
'--decoder-layers',
encoder_layers,
'--decoder-attention-heads',
encoder_attention_heads,
])
        if pretrained_emb_path is not None:
            args.extend(['--encoder-embed-path', pretrained_emb_path])
            args.extend(['--decoder-embed-path', pretrained_emb_path])
if embeddings_dim is not None:
args.extend(['--encoder-embed-dim', embeddings_dim]) # Input and output dim of encoder
args.extend(['--decoder-embed-dim', embeddings_dim]) # Input dim of decoder
if ngpus is not None:
args.extend(['--distributed-world-size', ngpus])
# if share_embeddings:
# args.append('--share-input-output-embed')
if fp16:
args.append('--fp16')
if warmup_updates is not None:
args.extend(['--warmup-updates', warmup_updates])
args = [str(arg) for arg in args]
train_args = options.parse_args_and_arch(train_parser, args)
train.main(train_args)
def _fairseq_generate(complex_filepath,
output_pred_filepath,
checkpoint_paths,
complex_dictionary_path,
simple_dictionary_path,
beam=5,
hypothesis_num=1,
lenpen=1.,
diverse_beam_groups=None,
diverse_beam_strength=0.5,
sampling=False,
batch_size=128):
# exp_dir must contain checkpoints/checkpoint_best.pt, and dict.{complex,simple}.txt
# First copy input complex file to exp_dir and create dummy simple file
tmp_dir = Path(tempfile.mkdtemp())
new_complex_filepath = tmp_dir / 'tmp.complex-simple.complex'
dummy_simple_filepath = tmp_dir / 'tmp.complex-simple.simple'
shutil.copy(complex_filepath, new_complex_filepath)
shutil.copy(complex_filepath, dummy_simple_filepath)
shutil.copy(complex_dictionary_path, tmp_dir / 'dict.complex.txt')
shutil.copy(simple_dictionary_path, tmp_dir / 'dict.simple.txt')
generate_parser = options.get_generation_parser()
args = [
tmp_dir,
'--path',
':'.join([str(path) for path in checkpoint_paths]),
'--beam',
beam,
'--nbest',
hypothesis_num,
'--lenpen',
lenpen,
'--diverse-beam-groups',
diverse_beam_groups if diverse_beam_groups is not None else -1,
'--diverse-beam-strength',
diverse_beam_strength,
'--batch-size',
batch_size,
'--raw-text',
'--print-alignment',
'--gen-subset',
'tmp',
# We don't want to reload pretrained embeddings
'--model-overrides',
{
'encoder_embed_path': None,
'decoder_embed_path': None
},
]
if sampling:
args.extend([
'--sampling',
'--sampling-topk',
10,
])
args = [str(arg) for arg in args]
generate_args = options.parse_args_and_arch(generate_parser, args)
out_filepath = tmp_dir / 'generation.out'
with log_stdout(out_filepath, mute_stdout=True):
# evaluate model in batch mode
generate.main(generate_args)
# Retrieve translations
def parse_all_hypotheses(out_filepath):
hypotheses_dict = defaultdict(list)
for line in yield_lines(out_filepath):
match = re.match(r'^H-(\d+)\t-?\d+\.\d+\t(.*)$', line)
if match:
sample_id, hypothesis = match.groups()
hypotheses_dict[int(sample_id)].append(hypothesis)
# Sort in original order
return [hypotheses_dict[i] for i in range(len(hypotheses_dict))]
all_hypotheses = parse_all_hypotheses(out_filepath)
predictions = [hypotheses[hypothesis_num - 1] for hypotheses in all_hypotheses]
write_lines(predictions, output_pred_filepath)
os.remove(dummy_simple_filepath)
os.remove(new_complex_filepath)
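# Illustrative sketch (made-up hypothesis lines): shows how the 'H-' regex in
# parse_all_hypotheses matches fairseq generation output of the form
# 'H-<sample_id>\t<score>\t<tokens>'.
def _example_parse_hypothesis_lines():
    lines = ['H-0\t-0.52\tthe cat sat .', 'H-1\t-1.13\ta dog ran .']
    hypotheses_dict = defaultdict(list)
    for line in lines:
        match = re.match(r'^H-(\d+)\t-?\d+\.\d+\t(.*)$', line)
        if match:
            sample_id, hypothesis = match.groups()
            hypotheses_dict[int(sample_id)].append(hypothesis)
    assert hypotheses_dict[0] == ['the cat sat .']
    assert hypotheses_dict[1] == ['a dog ran .']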
def fairseq_generate(complex_filepath,
output_pred_filepath,
exp_dir,
beam=1,
hypothesis_num=1,
lenpen=1.,
diverse_beam_groups=None,
diverse_beam_strength=0.5,
sampling=False,
batch_size=128):
exp_dir = Path(exp_dir)
checkpoint_path = exp_dir / 'checkpoints/checkpoint_best.pt'
assert checkpoint_path.exists(), f'Generation failed, no checkpoint at {checkpoint_path}'
complex_dictionary_path = exp_dir / 'dict.complex.txt'
simple_dictionary_path = exp_dir / 'dict.simple.txt'
_fairseq_generate(complex_filepath,
output_pred_filepath, [checkpoint_path],
complex_dictionary_path=complex_dictionary_path,
simple_dictionary_path=simple_dictionary_path,
beam=beam,
hypothesis_num=hypothesis_num,
lenpen=lenpen,
diverse_beam_groups=diverse_beam_groups,
diverse_beam_strength=diverse_beam_strength,
sampling=sampling,
batch_size=batch_size)
| access-main | access/fairseq/base.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from contextlib import contextmanager, AbstractContextManager
from fcntl import flock, LOCK_EX, LOCK_UN
import inspect
import io
from itertools import zip_longest
from pathlib import Path
import shutil
import sys
import tempfile
import numpy as np
@contextmanager
def open_files(filepaths, mode='r'):
files = []
try:
files = [Path(filepath).open(mode) for filepath in filepaths]
yield files
finally:
[f.close() for f in files]
def yield_lines_in_parallel(filepaths, strip=True, strict=True, n_lines=float('inf')):
assert type(filepaths) == list
with open_files(filepaths) as files:
for i, parallel_lines in enumerate(zip_longest(*files)):
if i >= n_lines:
break
if None in parallel_lines:
assert not strict, f'Files don\'t have the same number of lines: {filepaths}, use strict=False'
if strip:
parallel_lines = [l.rstrip('\n') if l is not None else None for l in parallel_lines]
yield parallel_lines
class FilesWrapper:
'''Write to multiple open files at the same time'''
def __init__(self, files, strict=True):
self.files = files
self.strict = strict # Whether to raise an exception when a line is None
def write(self, lines):
assert len(lines) == len(self.files)
for line, f in zip(lines, self.files):
if line is None:
assert not self.strict
continue
f.write(line.rstrip('\n') + '\n')
@contextmanager
def write_lines_in_parallel(filepaths, strict=True):
with open_files(filepaths, 'w') as files:
yield FilesWrapper(files, strict=strict)
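# Illustrative sketch (throwaway temp files): round-trips two tiny parallel
# files through write_lines_in_parallel and yield_lines_in_parallel.
def _example_parallel_read_write():
    complex_path = Path(tempfile.mkstemp()[1])
    simple_path = Path(tempfile.mkstemp()[1])
    with write_lines_in_parallel([complex_path, simple_path]) as files:
        files.write(['a complex sentence', 'a simple sentence'])
        files.write(['another complex one', 'another simple one'])
    pairs = list(yield_lines_in_parallel([complex_path, simple_path]))
    assert pairs == [['a complex sentence', 'a simple sentence'],
                     ['another complex one', 'another simple one']]
    complex_path.unlink()
    simple_path.unlink()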
def write_lines(lines, filepath):
filepath = Path(filepath)
filepath.parent.mkdir(parents=True, exist_ok=True)
with filepath.open('w') as f:
for line in lines:
f.write(line + '\n')
def yield_lines(filepath, n_lines=float('inf'), prop=1):
if prop < 1:
assert n_lines == float('inf')
n_lines = int(prop * count_lines(filepath))
with open(filepath, 'r') as f:
for i, l in enumerate(f):
if i >= n_lines:
break
yield l.rstrip('\n')
def read_lines(filepath, n_lines=float('inf'), prop=1):
return list(yield_lines(filepath, n_lines, prop))
def count_lines(filepath):
n_lines = 0
with Path(filepath).open() as f:
for l in f:
n_lines += 1
return n_lines
@contextmanager
def open_with_lock(filepath, mode):
with open(filepath, mode) as f:
flock(f, LOCK_EX)
yield f
flock(f, LOCK_UN)
def get_lockfile_path(path):
path = Path(path)
if path.is_dir():
return path / '.lockfile'
if path.is_file():
return path.parent / f'.{path.name}.lockfile'
@contextmanager
def lock_directory(dir_path):
# TODO: Locking a directory should lock all files in that directory
# Right now if we lock foo/, someone else can lock foo/bar.txt
# TODO: Nested with lock_directory() should not be blocking
    assert Path(dir_path).exists(), f'Directory does not exist: {dir_path}'
lockfile_path = get_lockfile_path(dir_path)
with open_with_lock(lockfile_path, 'w'):
yield
def safe_division(a, b):
if b == 0:
return 0
return a / b
def harmonic_mean(values, coefs=None):
    if 0 in values:
        return 0
    values = np.array(values)
    if coefs is None:
        coefs = np.ones(values.shape)
    coefs = np.array(coefs)
    return np.sum(coefs) / np.dot(coefs, 1 / values)
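# Illustrative sketch (made-up numbers): the unweighted harmonic mean of 1 and 3
# is 2 * 1 * 3 / (1 + 3) = 1.5, any zero value collapses the mean to 0, and
# coefs weight the values (here the first counts twice): 3 / (2/1 + 1/3) = 9/7.
def _example_harmonic_mean():
    assert abs(harmonic_mean([1, 3]) - 1.5) < 1e-9
    assert harmonic_mean([0.5, 0.5, 0]) == 0
    assert abs(harmonic_mean([1, 3], coefs=[2, 1]) - 9 / 7) < 1e-9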
@contextmanager
def mute(mute_stdout=True, mute_stderr=True):
save_stdout = sys.stdout
save_stderr = sys.stderr
if mute_stdout:
sys.stdout = io.StringIO()
if mute_stderr:
sys.stderr = io.StringIO()
try:
yield
finally:
sys.stdout = save_stdout
sys.stderr = save_stderr
@contextmanager
def log_stdout(filepath, mute_stdout=False):
'''Context manager to write both to stdout and to a file'''
class MultipleStreamsWriter:
def __init__(self, streams):
self.streams = streams
def write(self, message):
for stream in self.streams:
stream.write(message)
def flush(self):
for stream in self.streams:
stream.flush()
save_stdout = sys.stdout
log_file = open(filepath, 'w')
if mute_stdout:
sys.stdout = MultipleStreamsWriter([log_file]) # Write to file only
else:
sys.stdout = MultipleStreamsWriter([save_stdout, log_file]) # Write to both stdout and file
try:
yield
finally:
sys.stdout = save_stdout
log_file.close()
def add_dicts(*dicts):
return {k: v for dic in dicts for k, v in dic.items()}
def get_default_args(func):
signature = inspect.signature(func)
return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty}
def get_allowed_kwargs(func, *args, **kwargs):
    expected_args = inspect.getfullargspec(func).args  # getargspec was removed in Python 3.11
allowed_kwargs = expected_args[len(args):]
return {k: v for k, v in kwargs.items() if k in allowed_kwargs}
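# Illustrative sketch (_train is a made-up stand-in for e.g. fairseq_train):
# positional arguments already supplied are skipped and keyword arguments the
# function does not accept are silently dropped.
def _example_get_allowed_kwargs():
    def _train(data_dir, exp_dir, lr=0.1, dropout=0.2):
        return data_dir, exp_dir, lr, dropout
    kwargs = {'lr': 0.01, 'beam': 5, 'dropout': 0.3}
    assert get_allowed_kwargs(_train, 'data', 'exp', **kwargs) == {'lr': 0.01, 'dropout': 0.3}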
class SkipWithBlock(Exception):
pass
class create_directory_or_skip(AbstractContextManager):
'''Context manager for creating a new directory (with rollback and skipping with block if exists)
In order to skip the execution of the with block if the dataset already exists, this context manager uses deep
magic from https://stackoverflow.com/questions/12594148/skipping-execution-of-with-block
'''
def __init__(self, dir_path, overwrite=False):
self.dir_path = Path(dir_path)
self.overwrite = overwrite
def __enter__(self):
if self.dir_path.exists():
self.directory_lock = lock_directory(self.dir_path)
self.directory_lock.__enter__()
files_in_directory = list(self.dir_path.iterdir())
if set(files_in_directory) in [set([]), set([self.dir_path / '.lockfile'])]:
# TODO: Quick hack to remove empty directories
self.directory_lock.__exit__(None, None, None)
print(f'Removing empty directory {self.dir_path}')
shutil.rmtree(self.dir_path)
else:
# Deep magic hack to skip the execution of the code inside the with block
# We set the trace to a dummy function
sys.settrace(lambda *args, **keys: None)
# Get the calling frame (sys._getframe(0) is the current frame)
frame = sys._getframe(1)
# Set the calling frame's trace to the one that raises the special exception
frame.f_trace = self.trace
return
print(f'Creating {self.dir_path}...')
self.dir_path.mkdir(parents=True, exist_ok=True)
self.directory_lock = lock_directory(self.dir_path)
self.directory_lock.__enter__()
def trace(self, frame, event, arg):
# This method is called when a new local scope is entered, i.e. right when the code in the with block begins
# The exception will therefore be caught by the __exit__()
raise SkipWithBlock()
def __exit__(self, type, value, traceback):
self.directory_lock.__exit__(type, value, traceback)
if type is not None:
if issubclass(type, SkipWithBlock):
return True # Suppress special SkipWithBlock exception
if issubclass(type, BaseException):
# Rollback
print(f'Error: Rolling back creation of directory {self.dir_path}')
shutil.rmtree(self.dir_path)
return False # Reraise the exception
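# Illustrative sketch (throwaway temp directory): the body of the with block
# runs only the first time, when the directory does not exist yet; on the
# second attempt the existing non-empty directory makes the block get skipped.
def _example_create_directory_or_skip():
    dir_path = Path(tempfile.mkdtemp()) / 'demo_dataset'
    runs = []
    for attempt in range(2):
        with create_directory_or_skip(dir_path):
            runs.append(attempt)
            (dir_path / 'data.txt').write_text('some content\n')
    assert runs == [0]
    shutil.rmtree(dir_path)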
def get_temp_filepath(create=False):
temp_filepath = Path(tempfile.mkstemp()[1])
if not create:
temp_filepath.unlink()
return temp_filepath
def get_temp_filepaths(n_filepaths, create=False):
return [get_temp_filepath(create=create) for _ in range(n_filepaths)]
def delete_files(filepaths):
for filepath in filepaths:
filepath = Path(filepath)
assert filepath.is_file()
filepath.unlink()
| access-main | access/utils/helpers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# TODO: Move to utils/training.py
from functools import wraps
import time
def print_method_name(func):
'''Decorator to print method name for logging purposes'''
@wraps(func) # To preserve the name and path for pickling purposes
def wrapped_func(*args, **kwargs):
print(f"method_name='{func.__name__}'")
return func(*args, **kwargs)
return wrapped_func
def print_args(func):
'''Decorator to print arguments of method for logging purposes'''
@wraps(func) # To preserve the name and path for pickling purposes
def wrapped_func(*args, **kwargs):
print(f'args={args}')
print(f'kwargs={kwargs}')
return func(*args, **kwargs)
return wrapped_func
def print_result(func):
'''Decorator to print result of method for logging purposes'''
@wraps(func) # To preserve the name and path for pickling purposes
def wrapped_func(*args, **kwargs):
result = func(*args, **kwargs)
print(f'result={result}')
return result
return wrapped_func
def print_running_time(func):
'''Decorator to print running time of method for logging purposes'''
@wraps(func) # To preserve the name and path for pickling purposes
def wrapped_func(*args, **kwargs):
start_time = time.time()
result = func(*args, **kwargs)
print(f'running_time={time.time() - start_time}')
return result
return wrapped_func
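# Illustrative sketch (_example_decorated_sum is a made-up stand-in): the
# decorators above are meant to be stacked, as fairseq_train_and_evaluate does
# in access/fairseq/main.py. Calling _example_decorated_sum(1, b=2) prints the
# method name, the arguments, the result (3) and the running time, then returns 3.
@print_method_name
@print_args
@print_result
@print_running_time
def _example_decorated_sum(a, b):
    return a + b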
| access-main | access/utils/training.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from access.utils.helpers import harmonic_mean
# Transforms take a value and cast it to a score between 0 and 1, the higher the better
def bleu_transform(bleu):
min_bleu = 0
max_bleu = 100
bleu = max(bleu, min_bleu)
bleu = min(bleu, max_bleu)
return (bleu - min_bleu) / (max_bleu - min_bleu)
def sari_transform(sari):
min_sari = 0
max_sari = 60
sari = max(sari, min_sari)
sari = min(sari, max_sari)
return (sari - min_sari) / (max_sari - min_sari)
def fkgl_transform(fkgl):
min_fkgl = 0
max_fkgl = 20
fkgl = max(fkgl, min_fkgl)
fkgl = min(fkgl, max_fkgl)
return 1 - (fkgl - min_fkgl) / (max_fkgl - min_fkgl)
def combine_metrics(bleu, sari, fkgl, coefs):
    # Combine into a single score between 0 and 1, lower is better
assert len(coefs) == 3
return 1 - harmonic_mean([bleu_transform(bleu), sari_transform(sari), fkgl_transform(fkgl)], coefs)
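# Illustrative sketch (made-up metric values): BLEU 75 -> 0.75, SARI 30 -> 0.5
# and FKGL 10 -> 0.5 after the transforms above; with equal coefficients the
# harmonic mean is 3 / (1/0.75 + 1/0.5 + 1/0.5) = 0.5625, so the combined score
# (lower is better) is 1 - 0.5625 = 0.4375.
def _example_combine_metrics():
    score = combine_metrics(bleu=75, sari=30, fkgl=10, coefs=[1, 1, 1])
    assert abs(score - 0.4375) < 1e-9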
| access-main | access/evaluation/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from easse.cli import evaluate_system_output
from access.preprocess import lowercase_file, to_lrb_rrb_file
from access.resources.paths import get_data_filepath
from access.utils.helpers import mute, get_temp_filepath
'''A simplifier is a method with signature: simplifier(complex_filepath, output_pred_filepath)'''
def get_prediction_on_turkcorpus(simplifier, phase):
source_filepath = get_data_filepath('turkcorpus', phase, 'complex')
pred_filepath = get_temp_filepath()
with mute():
simplifier(source_filepath, pred_filepath)
return pred_filepath
def evaluate_simplifier_on_turkcorpus(simplifier, phase):
pred_filepath = get_prediction_on_turkcorpus(simplifier, phase)
pred_filepath = lowercase_file(pred_filepath)
pred_filepath = to_lrb_rrb_file(pred_filepath)
return evaluate_system_output(f'turkcorpus_{phase}_legacy',
sys_sents_path=pred_filepath,
metrics=['bleu', 'sari_legacy', 'fkgl'],
quality_estimation=True)
| access-main | access/evaluation/general.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import fileinput
from access.preprocessors import get_preprocessors
from access.resources.prepare import prepare_models
from access.simplifiers import get_fairseq_simplifier, get_preprocessed_simplifier
from access.text import word_tokenize
from access.utils.helpers import yield_lines, write_lines, get_temp_filepath, mute
if __name__ == '__main__':
# Usage: python generate.py < my_file.complex
# Read from stdin
source_filepath = get_temp_filepath()
write_lines([word_tokenize(line) for line in fileinput.input()], source_filepath)
# Load best model
best_model_dir = prepare_models()
recommended_preprocessors_kwargs = {
'LengthRatioPreprocessor': {'target_ratio': 0.95},
'LevenshteinPreprocessor': {'target_ratio': 0.75},
'WordRankRatioPreprocessor': {'target_ratio': 0.75},
'SentencePiecePreprocessor': {'vocab_size': 10000},
}
preprocessors = get_preprocessors(recommended_preprocessors_kwargs)
simplifier = get_fairseq_simplifier(best_model_dir, beam=8)
simplifier = get_preprocessed_simplifier(simplifier, preprocessors=preprocessors)
# Simplify
pred_filepath = get_temp_filepath()
with mute():
simplifier(source_filepath, pred_filepath)
for line in yield_lines(pred_filepath):
print(line)
| access-main | scripts/generate.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from access.fairseq.main import fairseq_train_and_evaluate
from access.resources.prepare import prepare_wikilarge, prepare_turkcorpus
if __name__ == '__main__':
print('Training a model from scratch')
prepare_wikilarge()
prepare_turkcorpus()
kwargs = {
'arch': 'transformer',
'warmup_updates': 4000,
'parametrization_budget': 256,
'beam': 8,
'dataset': 'wikilarge',
'dropout': 0.2,
'fp16': False,
'label_smoothing': 0.54,
'lr': 0.00011,
'lr_scheduler': 'fixed',
'max_epoch': 100,
'max_tokens': 5000,
'metrics_coefs': [0, 1, 0],
'optimizer': 'adam',
'preprocessors_kwargs': {
'LengthRatioPreprocessor': {
'target_ratio': 0.8 # Default initial value
},
'LevenshteinPreprocessor': {
'target_ratio': 0.8 # Default initial value
},
'WordRankRatioPreprocessor': {
'target_ratio': 0.8 # Default initial value
},
'DependencyTreeDepthRatioPreprocessor': {
'target_ratio': 0.8 # Default initial value
},
'SentencePiecePreprocessor': {
'vocab_size': 10000
}
}
}
fairseq_train_and_evaluate(**kwargs)
| access-main | scripts/train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from access.evaluation.general import evaluate_simplifier_on_turkcorpus
from access.preprocessors import get_preprocessors
from access.resources.prepare import prepare_turkcorpus, prepare_models
from access.simplifiers import get_fairseq_simplifier, get_preprocessed_simplifier
if __name__ == '__main__':
print('Evaluating pretrained model')
prepare_turkcorpus()
best_model_dir = prepare_models()
recommended_preprocessors_kwargs = {
'LengthRatioPreprocessor': {'target_ratio': 0.95},
'LevenshteinPreprocessor': {'target_ratio': 0.75},
'WordRankRatioPreprocessor': {'target_ratio': 0.75},
'SentencePiecePreprocessor': {'vocab_size': 10000},
}
preprocessors = get_preprocessors(recommended_preprocessors_kwargs)
simplifier = get_fairseq_simplifier(best_model_dir, beam=8)
simplifier = get_preprocessed_simplifier(simplifier, preprocessors=preprocessors)
print(evaluate_simplifier_on_turkcorpus(simplifier, phase='test'))
| access-main | scripts/evaluate.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import click
from covid19_spread.data.usa import us_recurring
@click.group()
def cli():
pass
REGIONS = {"us": us_recurring.USARRecurring}
@cli.command()
@click.argument("region", type=click.Choice(REGIONS.keys()))
def install(region):
mod = REGIONS[region]()
mod.install()
@cli.command()
@click.argument("region", type=click.Choice(REGIONS.keys()))
def run(region):
mod = REGIONS[region]()
mod.refresh()
if __name__ == "__main__":
cli()
| covid19_spread-main | recurring.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import click
from covid19_spread.data.usa.convert import main as us_convert, SOURCES as US_SOURCES
import os
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
@click.group()
def cli():
pass
@cli.command()
@click.option("--metric", default="cases", type=click.Choice(["cases", "deaths"]))
@click.option("--with-features", is_flag=True)
@click.option("--source", default="nyt", type=click.Choice(US_SOURCES.keys()))
@click.option("--resolution", default="county", type=click.Choice(["county", "state"]))
def us(metric, with_features, source, resolution):
us_convert(metric, with_features, source, resolution)
if __name__ == "__main__":
cli()
| covid19_spread-main | prepare_data.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup, find_packages
setup(
name="covid19_spread",
version="0.1",
py_modules=["covid19_spread"],
install_requires=["Click",],
packages=find_packages(),
entry_points="""
[console_scripts]
cv=cv:cli
prepare-data=prepare_data:cli
recurring=recurring:cli
""",
)
| covid19_spread-main | setup.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
import click
import importlib
import itertools
import json
import pandas as pd
import os
import random
import shutil
import submitit
import tempfile
import torch as th
import re
import yaml
from argparse import Namespace
from datetime import datetime
from functools import partial
from glob import glob, iglob
from typing import Dict, Any, List, Optional
from contextlib import nullcontext, ExitStack
from covid19_spread import common
from covid19_spread import metrics
from covid19_spread.lib import cluster
from covid19_spread.lib.click_lib import DefaultGroup
from covid19_spread.lib.slurm_pool_executor import (
SlurmPoolExecutor,
JobStatus,
TransactionManager,
)
from covid19_spread.lib.slack import post_slack_message
from submitit.helpers import RsyncSnapshot
from covid19_spread.cross_val import load_config
import sqlite3
from ax.service.ax_client import AxClient
from ax.exceptions.generation_strategy import MaxParallelismReachedException
import time
import queue
import threading
def set_dict(d: Dict[str, Any], keys: List[str], v: Any):
"""
update a dict using a nested list of keys.
Ex:
x = {'a': {'b': {'c': 2}}}
set_dict(x, ['a', 'b'], 4) == {'a': {'b': 4}}
"""
if len(keys) > 0:
d[keys[0]] = set_dict(d[keys[0]], keys[1:], v)
return d
else:
return v
def mk_executor(
name: str, folder: str, extra_params: Dict[str, Any], ex=SlurmPoolExecutor, **kwargs
):
executor = (ex or submitit.AutoExecutor)(folder=folder, **kwargs)
executor.update_parameters(
job_name=name,
partition=cluster.PARTITION,
gpus_per_node=extra_params.get("gpus", 0),
cpus_per_task=extra_params.get("cpus", 3),
mem=f'{cluster.MEM_GB(extra_params.get("memgb", 20))}GB',
array_parallelism=extra_params.get("array_parallelism", 100),
time=extra_params.get("timeout", 12 * 60),
)
return executor
def ensemble(basedirs, cfg, module, prefix, outdir):
def _path(x):
return os.path.join(basedir, prefix + x)
means = []
stds = []
mean_deltas = []
kwargs = {"index_col": "date", "parse_dates": ["date"]}
stdfile = "std_closed_form.csv"
meanfile = "mean_closed_form.csv"
for basedir in basedirs:
if os.path.exists(_path(cfg["validation"]["output"])):
means.append(pd.read_csv(_path(cfg["validation"]["output"]), **kwargs))
if os.path.exists(_path(stdfile)):
stds.append(pd.read_csv(_path(stdfile), **kwargs))
mean_deltas.append(pd.read_csv(_path(meanfile), **kwargs))
if len(stds) > 0:
# Average the variance, and take square root
std = pd.concat(stds).pow(2).groupby(level=0).mean().pow(0.5)
std.to_csv(os.path.join(outdir, prefix + stdfile))
mean_deltas = pd.concat(mean_deltas).groupby(level=0).mean()
mean_deltas.to_csv(os.path.join(outdir, prefix + meanfile))
assert len(means) > 0, "All ensemble jobs failed!!!!"
mod = importlib.import_module("covid19_spread." + module).CV_CLS()
if len(stds) > 0:
pred_interval = cfg.get("prediction_interval", {})
piv = mod.run_prediction_interval(
os.path.join(outdir, prefix + meanfile),
os.path.join(outdir, prefix + stdfile),
pred_interval.get("intervals", [0.99, 0.95, 0.8]),
)
piv.to_csv(os.path.join(outdir, prefix + "piv.csv"), index=False)
mean = pd.concat(means).groupby(level=0).median()
outfile = os.path.join(outdir, prefix + cfg["validation"]["output"])
mean.to_csv(outfile, index_label="date")
# -- metrics --
metric_args = cfg[module].get("metrics", {})
df_val, json_val = mod.compute_metrics(
cfg[module]["data"], outfile, None, metric_args
)
df_val.to_csv(os.path.join(outdir, prefix + "metrics.csv"))
with open(os.path.join(outdir, prefix + "metrics.json"), "w") as fout:
json.dump(json_val, fout)
print(df_val)
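# Illustrative sketch (made-up per-model standard deviations): mirrors the
# std-pooling step in ensemble() above, where per-model standard deviations are
# squared (variance), averaged across models for each date, and square-rooted.
def _example_pool_stds():
    dates = pd.to_datetime(['2020-05-01', '2020-05-02'])
    std_a = pd.DataFrame({'region_x': [3.0, 4.0]}, index=dates)
    std_b = pd.DataFrame({'region_x': [4.0, 3.0]}, index=dates)
    pooled = pd.concat([std_a, std_b]).pow(2).groupby(level=0).mean().pow(0.5)
    # sqrt((9 + 16) / 2) = sqrt(12.5) for both dates
    assert abs(pooled.loc[dates[0], 'region_x'] - 12.5 ** 0.5) < 1e-9
    return pooled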
def run_cv(
module: str,
basedir: str,
cfg: Dict[str, Any],
prefix="",
basedate=None,
executor=None,
test_run: bool = False, # is this a test or validation run?
):
"""Runs cross validaiton for one set of hyperaparmeters"""
try:
basedir = basedir.replace("%j", submitit.JobEnvironment().job_id)
except Exception:
pass # running locally, basedir is fine...
os.makedirs(basedir, exist_ok=True)
print(f"CWD = {os.getcwd()}")
def _path(path):
return os.path.join(basedir, path)
log_configs(cfg, module, _path(prefix + f"{module}.yml"))
n_models = cfg[module]["train"].get("n_models", 1)
if n_models > 1:
launcher = map if executor is None else executor.map_array
fn = partial(
run_cv,
module,
prefix=prefix,
basedate=basedate,
executor=executor,
test_run=test_run,
)
configs = [
set_dict(copy.deepcopy(cfg), [module, "train", "n_models"], 1)
for _ in range(n_models)
]
basedirs = [os.path.join(basedir, f"job_{i}") for i in range(n_models)]
with ExitStack() as stack:
if executor is not None:
stack.enter_context(executor.set_folder(os.path.join(basedir, "%j")))
jobs = list(launcher(fn, basedirs, configs))
launcher = (
ensemble
if executor is None
else partial(executor.submit_dependent, jobs, ensemble)
)
ensemble_job = launcher(basedirs, cfg, module, prefix, basedir)
if executor is not None:
# Whatever jobs depend on "this" job, should be extended to the newly created jobs
executor.extend_dependencies(jobs + [ensemble_job])
return jobs + [ensemble_job]
# setup input/output paths
dset = cfg[module]["data"]
val_in = _path(prefix + "filtered_" + os.path.basename(dset))
val_test_key = "test" if test_run else "validation"
val_out = _path(prefix + cfg[val_test_key]["output"])
cfg[module]["train"]["fdat"] = val_in
mod = importlib.import_module("covid19_spread." + module).CV_CLS()
# -- store configs to reproduce results --
log_configs(cfg, module, _path(prefix + f"{module}.yml"))
ndays = 0 if test_run else cfg[val_test_key]["days"]
if basedate is not None:
# If we want to train from a particular basedate, then also subtract
        # out the difference in days. Ex: if ground truth contains data up to 5/20/2020
# but the basedate is 5/10/2020, then drop an extra 10 days in addition to validation.days
gt = metrics.load_ground_truth(dset)
assert gt.index.max() >= basedate
ndays += (gt.index.max() - basedate).days
filter_validation_days(dset, val_in, ndays)
# apply data pre-processing
preprocessed = _path(prefix + "preprocessed_" + os.path.basename(dset))
mod.preprocess(val_in, preprocessed, cfg[module].get("preprocess", {}))
mod.setup_tensorboard(basedir)
# setup logging
train_params = Namespace(**cfg[module]["train"])
n_models = getattr(train_params, "n_models", 1)
print(f"Training {n_models} models")
# -- train --
model = mod.run_train(
preprocessed, train_params, _path(prefix + cfg[module]["output"])
)
# -- simulate --
with th.no_grad():
sim_params = cfg[module].get("simulate", {})
# Returns the number of new cases for each day
df_forecast_deltas = mod.run_simulate(
preprocessed,
train_params,
model,
sim_params=sim_params,
days=cfg[val_test_key]["days"],
)
df_forecast = common.rebase_forecast_deltas(val_in, df_forecast_deltas)
mod.tb_writer.close()
print(f"Storing validation in {val_out}")
df_forecast.to_csv(val_out, index_label="date")
# -- metrics --
metric_args = cfg[module].get("metrics", {})
df_val, json_val = mod.compute_metrics(
cfg[module]["data"], val_out, model, metric_args
)
df_val.to_csv(_path(prefix + "metrics.csv"))
with open(_path(prefix + "metrics.json"), "w") as fout:
json.dump(json_val, fout)
print(df_val)
# -- prediction interval --
if "prediction_interval" in cfg and prefix == "final_model_":
try:
with th.no_grad():
# FIXME: refactor to use rebase_forecast_deltas
gt = metrics.load_ground_truth(val_in)
basedate = gt.index.max()
prev_day = gt.loc[[basedate]]
pred_interval = cfg.get("prediction_interval", {})
df_std, df_mean = mod.run_standard_deviation(
preprocessed,
train_params,
pred_interval.get("nsamples", 100),
pred_interval.get("intervals", [0.99, 0.95, 0.8]),
prev_day.values.T,
model,
pred_interval.get("batch_size", 8),
closed_form=True,
)
df_std.to_csv(_path(f"{prefix}std_closed_form.csv"), index_label="date")
df_mean.to_csv(
_path(f"{prefix}mean_closed_form.csv"), index_label="date"
)
piv = mod.run_prediction_interval(
_path(f"{prefix}mean_closed_form.csv"),
_path(f"{prefix}std_closed_form.csv"),
pred_interval.get("intervals", [0.99, 0.95, 0.8]),
)
piv.to_csv(_path(f"{prefix}piv.csv"), index=False)
except NotImplementedError:
pass # naive...
def filter_validation_days(dset: str, val_in: str, validation_days: int):
"""Filters validation days and writes output to val_in path"""
if dset.endswith(".csv"):
common.drop_k_days_csv(dset, val_in, validation_days)
elif dset.endswith(".h5"):
common.drop_k_days(dset, val_in, validation_days)
else:
raise RuntimeError(f"Unrecognized dataset extension: {dset}")
def load_model(model_pth, cv, args):
chkpnt = th.load(model_pth)
cv.initialize(args)
cv.func.load_state_dict(chkpnt)
return cv.func
def copy_assets(cfg, dir):
if isinstance(cfg, dict):
return {k: copy_assets(v, dir) for k, v in cfg.items()}
elif isinstance(cfg, list):
return [copy_assets(x, dir) for x in cfg]
elif isinstance(cfg, str) and os.path.exists(cfg):
new_pth = os.path.join(dir, "assets", os.path.basename(cfg))
shutil.copy(cfg, new_pth)
return new_pth
else:
return cfg
def log_configs(cfg: Dict[str, Any], module: str, path: str):
"""Logs configs for job for reproducibility"""
with open(path, "w") as f:
yaml.dump(cfg[module], f)
def run_best(config, module, remote, basedir, basedate=None, executor=None):
mod = importlib.import_module("covid19_spread." + module).CV_CLS()
sweep_config = load_config(os.path.join(basedir, "cfg.yml"))
best_runs = mod.model_selection(basedir, config=sweep_config[module], module=module)
if remote and executor is None:
executor = mk_executor(
"model_selection", basedir, config[module].get("resources", {})
)
with open(os.path.join(basedir, "model_selection.json"), "w") as fout:
json.dump([x._asdict() for x in best_runs], fout)
cfg = copy.deepcopy(config)
best_runs_df = pd.DataFrame(best_runs)
def run_cv_and_copy_results(tags, module, pth, cfg, prefix):
try:
jobs = run_cv(
module,
pth,
cfg,
prefix=prefix,
basedate=basedate,
executor=executor,
test_run=True,
)
def rest():
for tag in tags:
shutil.copy(
os.path.join(pth, f'final_model_{cfg["validation"]["output"]}'),
os.path.join(
os.path.dirname(pth), f"forecasts/forecast_{tag}.csv"
),
)
if "prediction_interval" in cfg:
piv_pth = os.path.join(
pth,
f'final_model_{cfg["prediction_interval"]["output_std"]}',
)
if os.path.exists(piv_pth):
shutil.copy(
piv_pth,
os.path.join(
os.path.dirname(pth), f"forecasts/std_{tag}.csv"
),
)
if cfg[module]["train"].get("n_models", 1) > 1 and executor is not None:
executor.submit_dependent(jobs, rest)
else:
rest()
except Exception as e:
msg = f"*Final run failed for {tags}*\nbasedir = {basedir}\nException was: {e}"
post_slack_message(channel="#cron_errors", text=msg)
raise e
for pth, tags in best_runs_df.groupby("pth")["name"].agg(list).items():
os.makedirs(os.path.join(os.path.dirname(pth), "forecasts"), exist_ok=True)
name = ",".join(tags)
print(f"Starting {name}: {pth}")
job_config = load_config(os.path.join(pth, module + ".yml"))
if "test" in cfg:
job_config["train"]["test_on"] = cfg["test"]["days"]
cfg[module] = job_config
launcher = run_cv_and_copy_results
if remote:
launcher = partial(executor.submit, run_cv_and_copy_results)
with executor.set_folder(pth) if remote else nullcontext():
launcher(tags, module, pth, cfg, "final_model_")
@click.group(cls=DefaultGroup, default_command="cv")
def cli():
pass
@cli.command()
@click.argument("chkpnts", nargs=-1)
@click.option("-remote", is_flag=True)
@click.option("-nsamples", type=click.INT)
@click.option("-batchsize", type=int)
@click.option("-closed-form", is_flag=True)
def prediction_interval(chkpnts, remote, nsamples, batchsize, closed_form):
def f(chkpnt_pth):
prefix = "final_model_" if "final_model_" in chkpnt_pth else ""
chkpnt = th.load(chkpnt_pth)
job_pth = os.path.dirname(chkpnt_pth)
cfg_pth = os.path.join(job_pth, "../cfg.yml")
if not os.path.exists(cfg_pth):
cfg_pth = os.path.join(job_pth, "../../cfg.yml")
cfg = load_config(cfg_pth)
module = cfg["this_module"]
job_config = load_config(os.path.join(job_pth, f"{prefix}{module}.yml"))
opt = Namespace(**job_config["train"])
mod = importlib.import_module("covid19_spread." + module).CV_CLS()
new_cases, regions, basedate, device = mod.initialize(opt)
model = mod.func
model.load_state_dict(chkpnt)
dset = os.path.join(
job_pth, prefix + "preprocessed_" + os.path.basename(job_config["data"])
)
val_in = os.path.join(
job_pth, prefix + "filtered_" + os.path.basename(job_config["data"])
)
gt = metrics.load_ground_truth(val_in)
prev_day = gt.loc[[pd.to_datetime(basedate)]]
pred_interval = cfg.get("prediction_interval", {})
df_std, df_mean = mod.run_standard_deviation(
dset,
opt,
nsamples or pred_interval.get("nsamples", 100),
pred_interval.get("intervals", [0.99, 0.95, 0.8]),
prev_day.values.T,
model,
batchsize or pred_interval.get("batch_size", 8),
closed_form=closed_form,
)
suffix = "_closed_form" if closed_form else ""
df_std.to_csv(
os.path.join(job_pth, f"{prefix}std{suffix}.csv"), index_label="date"
)
df_mean.to_csv(
os.path.join(job_pth, f"{prefix}mean{suffix}.csv"), index_label="date"
)
pred_intervals = mod.run_prediction_interval(
os.path.join(job_pth, f"{prefix}mean{suffix}.csv"),
os.path.join(job_pth, f"{prefix}std{suffix}.csv"),
pred_interval.get("intervals", [0.99, 0.95, 0.8]),
)
pred_intervals.to_csv(
os.path.join(job_pth, f"{prefix}piv{suffix}.csv"), index=False
)
if remote:
now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
folder = os.path.expanduser(f"~/.covid19/logs/{now}")
extra_params = {"gpus": 1, "cpus": 2, "memgb": 20, "timeout": 3600}
ex = mk_executor(
"prediction_interval", folder, extra_params, ex=submitit.AutoExecutor
)
ex.map_array(f, chkpnts)
print(folder)
else:
list(map(f, chkpnts))
@cli.command()
@click.argument("sweep_dirs", nargs=-1)
@click.argument("module")
@click.option("-remote", is_flag=True)
@click.option("-basedate", type=click.DateTime(), default=None)
def model_selection(sweep_dirs, module, remote, basedate):
executor = None
for sweep_dir in sweep_dirs:
cfg = load_config(os.path.join(sweep_dir, "cfg.yml"))
if executor is None:
executor = mk_executor(
"model_selection", sweep_dir, cfg[module].get("resources", {})
)
match = re.search(r"\d{4}-\d{2}-\d{2}", os.path.basename(sweep_dir))
if basedate is None and match:
basedate = pd.to_datetime(match.group(0))
run_best(cfg, module, remote, sweep_dir, basedate, executor=executor)
executor.launch(sweep_dir + "/workers", workers=4)
@cli.command()
@click.argument("config_pth")
@click.argument("module")
@click.option("-validate-only", type=click.BOOL, default=False)
@click.option("-remote", is_flag=True)
@click.option("-array-parallelism", type=click.INT, default=20)
@click.option("-max-jobs", type=click.INT, default=200)
@click.option("-basedir", default=None, help="Path to sweep base directory")
@click.option("-basedate", type=click.DateTime(), help="Date to treat as last date")
@click.option("-ablation", is_flag=True)
def cv(
config_pth: str,
module: str,
validate_only: bool,
remote: bool,
array_parallelism: int,
max_jobs: int,
basedir: str,
basedate: Optional[datetime] = None,
executor=None,
ablation=False,
):
"""
Run cross validation pipeline for a given module.
"""
# FIXME: This is a hack...
in_backfill = executor is not None
now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
user = cluster.USER
cfg = load_config(config_pth)
region = cfg["region"]
cfg["this_module"] = module
if basedir is None:
if remote:
basedir = f"{cluster.FS}/{user}/covid19/forecasts/{region}/{now}"
else:
basedir = f"/tmp/{user}/covid19/forecasts/{region}/{now}"
os.makedirs(basedir, exist_ok=True)
if not in_backfill:
# Copy any asset files into `basedir/assets`
os.makedirs(os.path.join(basedir, "assets"))
cfg[module] = copy_assets(cfg[module], basedir)
# Copy the dataset into the basedir
shutil.copy(cfg[module]["data"], basedir)
cfg[module]["data"] = os.path.join(basedir, os.path.basename(cfg[module]["data"]))
with open(os.path.join(basedir, "cfg.yml"), "w") as fout:
yaml.dump(cfg, fout)
    # If we are running an ablation, create new time features from the ablation field.
    # Each list entry is assumed to describe a single ablation: all features named in
    # that entry are dropped from the full feature set to perform the ablation.
if ablation:
feats = []
if not any([len(x) == 0 for x in cfg[module]["train"]["ablation"]]):
# Add a baseline ablation that uses all time features by default
cfg[module]["train"]["ablation"].append([])
all_feats = set(cfg[module]["train"]["time_features"][0])
for x in cfg[module]["train"]["ablation"]:
feats.append(list(all_feats - set(x)))
cfg[module]["train"]["time_features"] = feats
cfgs = []
sweep_params = [
([module, "train", k], v)
for k, v in cfg[module]["train"].items()
if isinstance(v, list)
]
sweep_params.extend(
[
([module, "preprocess", k], v)
for k, v in cfg[module].get("preprocess", {}).items()
if isinstance(v, list)
]
)
if len(sweep_params) == 0:
cfgs.append(cfg)
else:
random.seed(0)
keys, values = zip(*sweep_params)
for vals in itertools.product(*values):
clone = copy.deepcopy(cfg)
[set_dict(clone, ks, vs) for ks, vs in zip(keys, vals)]
cfgs.append(clone)
random.shuffle(cfgs)
cfgs = cfgs[:max_jobs]
print(f"Launching {len(cfgs)} jobs")
if remote:
extra = cfg[module].get("resources", {})
if executor is None:
executor = mk_executor(
f"cv_{region}",
basedir + "/%j",
{**extra, "array_parallelism": array_parallelism},
)
launcher = executor.map_array
else:
launcher = map
basedirs = [os.path.join(basedir, f"job_{i}") for i in range(len(cfgs))]
with ExitStack() as stack:
if not in_backfill:
stack.enter_context(
RsyncSnapshot(
snapshot_dir=basedir + "/snapshot",
exclude=["notebooks/*", "tests/*"],
)
)
jobs = list(
launcher(
partial(
run_cv, module, basedate=basedate, executor=executor, test_run=False
),
basedirs,
cfgs,
)
)
# Find the best model and retrain on the full dataset
launcher = (
partial(
executor.submit_dependent,
jobs,
run_best,
executor=copy.deepcopy(executor),
)
if remote
else run_best
)
if not validate_only:
job = launcher(cfg, module, remote, basedir, basedate=basedate)
jobs.append(job)
if remote:
executor.launch(basedir + "/workers", array_parallelism)
print(basedir)
return basedir, jobs
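# Minimal sketch (illustration only) of the sweep expansion performed in cv()
# above: every list-valued entry under the module's "train" (or "preprocess")
# section becomes a sweep dimension, and the cartesian product of those lists
# yields one config per job. The _assign helper is a local stand-in for the
# set_dict utility used above; the config values are made up.
def _example_expand_sweep():
    import copy
    import itertools

    def _assign(cfg, key_path, value):
        for k in key_path[:-1]:
            cfg = cfg[k]
        cfg[key_path[-1]] = value

    cfg = {"bar": {"train": {"lr": [0.01, 0.05], "window": [10, 25], "niters": 2000}}}
    sweep_params = [
        (["bar", "train", k], v)
        for k, v in cfg["bar"]["train"].items()
        if isinstance(v, list)
    ]
    keys, values = zip(*sweep_params)
    cfgs = []
    for vals in itertools.product(*values):
        clone = copy.deepcopy(cfg)
        for ks, vs in zip(keys, vals):
            _assign(clone, ks, vs)
        cfgs.append(clone)
    # 2 learning rates x 2 windows -> 4 configs
    return cfgs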
@cli.command()
@click.argument("config_pth")
@click.argument("module")
@click.option("-period", type=int, help="Number of days for sliding window")
@click.option(
"-start-date", type=click.DateTime(), default="2020-04-01", help="Start date"
)
@click.option("-dates", default=None, multiple=True, type=click.DateTime())
@click.option("-validate-only", type=click.BOOL, default=False, is_flag=True)
@click.option("-remote", is_flag=True)
@click.option("-array-parallelism", type=click.INT, default=20)
@click.option("-max-jobs", type=click.INT, default=200)
@click.option("-ablation", is_flag=True)
@click.pass_context
def backfill(
ctx: click.Context,
config_pth: str,
module: str,
period: Optional[int] = None,
start_date: Optional[datetime.date] = None,
dates: Optional[List[datetime.date]] = None,
validate_only: bool = False,
remote: bool = False,
array_parallelism: int = 20,
max_jobs: int = 200,
ablation: bool = False,
):
"""
Run the cross validation pipeline over multiple time points.
"""
config = common.mk_absolute_paths(load_config(config_pth))
# allow to set backfill dates in config (function argument overrides)
if not dates and "backfill" in config:
dates = list(pd.to_datetime(config["backfill"]))
assert (
dates is not None or period is not None
), "Must specify either dates or period"
gt = metrics.load_ground_truth(config[module]["data"])
if not dates:
assert period is not None
dates = pd.date_range(
start=start_date, end=gt.index.max(), freq=f"{period}D", closed="left"
)
print(
"Running backfill for "
+ ", ".join(map(lambda x: x.strftime("%Y-%m-%d"), dates))
)
# setup experiment environment
now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
experiment_id = f'{config["region"]}/{now}'
basedir = f"{cluster.FS}/{cluster.USER}/covid19/forecasts/{experiment_id}"
# setup executor
extra_params = config[module].get("resources", {})
executor = mk_executor(
f'backfill_{config["region"]}',
basedir,
{**extra_params, "array_parallelism": array_parallelism},
)
print(f"Backfilling in {basedir}")
# Copy any asset files into `basedir/assets`
os.makedirs(os.path.join(basedir, "assets"))
config[module] = copy_assets(config[module], basedir)
with RsyncSnapshot(
snapshot_dir=basedir + "/snapshot", exclude=["notebooks/*", "tests/*"],
), tempfile.NamedTemporaryFile() as tfile:
# Make sure that we use the CFG with absolute paths since we are now inside the snapshot directory
with open(tfile.name, "w") as fout:
yaml.dump(config, fout)
for date in dates:
print(f"Running CV for {date.date()}")
cv_params = {
k: v for k, v in ctx.params.items() if k in {p.name for p in cv.params}
}
cv_params["config_pth"] = tfile.name
with executor.nest(), executor.set_folder(
os.path.join(basedir, f"sweep_{date.date()}/%j")
):
_, jobs = ctx.invoke(
cv,
basedir=os.path.join(basedir, f"sweep_{date.date()}"),
basedate=date,
executor=executor,
**cv_params,
)
if remote:
executor.launch(basedir + "/workers", array_parallelism)
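# Minimal sketch (illustration only) of how backfill() picks its time points
# when explicit -dates are not given: a date range from -start-date up to the
# last ground-truth date, stepping every `period` days. It mirrors the
# pd.date_range call above (including closed="left"); the dates are made up.
def _example_backfill_dates():
    import pandas as pd

    start_date = pd.to_datetime("2020-04-01")
    last_gt_date = pd.to_datetime("2020-04-29")
    period = 7
    dates = pd.date_range(
        start=start_date, end=last_gt_date, freq=f"{period}D", closed="left"
    )
    # -> 2020-04-01, 2020-04-08, 2020-04-15, 2020-04-22
    return [d.strftime("%Y-%m-%d") for d in dates]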
@cli.command()
@click.argument("paths", nargs=-1)
def ensemble_jobs(paths):
for path in paths:
ms = json.load(open(os.path.join(path, "model_selection.json")))
ms = {x["name"]: x["pth"] for x in ms}
jobs = [
x for x in glob(os.path.join(ms["best_mae"], "job_*")) if os.path.isdir(x)
]
cfg = load_config(os.path.join(path, "cfg.yml"))
cfg["prediction_interval"]["intervals"] = [0.95, 0.8, 0.5]
ensemble(jobs, cfg, cfg["this_module"], "final_model_", ms["best_mae"])
@cli.command()
@click.argument("sweep_dirs", nargs=-1)
def progress(sweep_dirs):
for sweep_dir in sweep_dirs:
sweep_dir = os.path.realpath(sweep_dir)
db_file = next(iglob(os.path.join(sweep_dir, "**/.job.db"), recursive=True))
db_file = os.path.realpath(db_file)
conn = sqlite3.connect(db_file)
df = pd.read_sql(
f"SELECT status, worker_id FROM jobs WHERE id='{db_file}'", conn
)
msg = {
"sweep_dir": sweep_dir,
"success": int((df["status"] == JobStatus.success.value).sum()),
"failed": int((df["status"] == JobStatus.failure.value).sum()),
"pending": int((df["status"] == JobStatus.pending.value).sum()),
"running": int((df["status"] > len(JobStatus)).sum()),
}
print(json.dumps(msg, indent=4))
@cli.command()
@click.argument("sweep_dir")
@click.argument("workers", type=click.INT)
def add_workers(sweep_dir, workers):
DB = os.path.abspath(glob(f"{sweep_dir}/**/.job.db", recursive=True)[0])
cfg = load_config(glob(f"{sweep_dir}/**/cfg.yml", recursive=True)[0])
extra_params = cfg[cfg["this_module"]].get("resources", {})
executor = mk_executor(
"add_workers", os.path.dirname(DB), extra_params, db_pth=os.path.realpath(DB)
)
executor.launch(f"{sweep_dir}/workers", workers)
@cli.command()
@click.argument("sweep_dir")
@click.option("-workers", type=click.INT)
@click.option("-reset-running", is_flag=True, default=False)
def repair(sweep_dir, workers=None, reset_running=False):
db_file = next(iglob(os.path.join(sweep_dir, "**/.job.db"), recursive=True))
txn_manager = TransactionManager(os.path.realpath(db_file))
cond = ""
if reset_running:
cond = f" OR status >= {len(JobStatus)}"
txn_manager.run(
lambda conn: conn.execute(
f"""
UPDATE jobs SET status={JobStatus.pending}
WHERE id='{os.path.realpath(db_file)}' AND (status={JobStatus.failure} {cond})
"""
)
)
if workers is not None:
cfg = load_config(next(iglob(f"{sweep_dir}/**/cfg.yml", recursive=True)))
extra_params = cfg[cfg["this_module"]].get("resources", {})
executor = mk_executor(
"repair", sweep_dir, extra_params, db_pth=os.path.realpath(db_file)
)
executor.launch(os.path.join(sweep_dir, "workers"), workers or -1)
@cli.command()
@click.argument("sweep_dir")
@click.option(
"--type",
"-t",
type=click.Choice(["failure", "running", "pending", "success"]),
required=True,
)
def list_jobs(sweep_dir, type):
db_file = next(iglob(os.path.join(sweep_dir, "**/.job.db"), recursive=True))
db_file = os.path.realpath(db_file)
txn_manager = TransactionManager(db_file)
if type == "running":
cond = f"status >= {len(JobStatus)}"
else:
cond = f"status = {getattr(JobStatus, type)}"
with txn_manager as cur:
cur.execute(
f"""
SELECT pickle, worker_id FROM jobs WHERE id='{db_file}' AND {cond}
"""
)
for row in cur:
print(row)
@cli.command()
@click.argument("config_pth")
@click.argument("module")
@click.argument("basedate", type=click.DateTime())
@click.option("--iters", type=click.INT, default=300)
@click.option("--array-parallelism", type=click.INT, default=20)
@click.option("--resume")
def optimize(config_pth, module, basedate, iters, array_parallelism, resume):
cfg = load_config(config_pth)
ax_client = AxClient(enforce_sequential_optimization=False)
ax_client.create_experiment(
name="covid optimize",
parameters=cfg[module]["optimize"],
objective_name="mae",
choose_generation_strategy_kwargs={
"max_parallelism_override": int(array_parallelism / 5)
},
minimize=True,
)
region = cfg["region"]
cfg["this_module"] = module
now = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
user = cluster.USER
if resume is not None:
params_used = list(
json.load(open(os.path.join(resume, "best_params.json"))).keys()
)
for metrics_pth in glob(os.path.join(resume, "*", "metrics.csv")):
mets = pd.read_csv(metrics_pth, index_col="Measure")
mae = mets.loc["MAE"].mean()
cfg_ = load_config(os.path.join(os.path.dirname(metrics_pth), "bar.yml"))
params = {k: cfg_["train"][k] for k in params_used}
try:
_, idx = ax_client.attach_trial(params)
ax_client.complete_trial(idx, {"mae": mae})
except ValueError as e:
if "valid value for parameter" in str(e):
continue # this trial isn't valid for this grid, skip it...
raise e
basedir = f"{cluster.FS}/{user}/covid19/forecasts/{region}/{now}"
extra = cfg[module].get("resources", {})
executor = mk_executor(
f"cv_{region}",
basedir + "/%j",
{**extra, "array_parallelism": array_parallelism},
)
db_pth = executor.db_pth
def optimize_run(q, id, current_cfg):
executor = SlurmPoolExecutor(folder=basedir + "/%j", db_pth=db_pth)
executor.update_parameters(
job_name=f"cv_{region}",
partition=cluster.PARTITION,
gpus_per_node=extra.get("gpus", 0),
cpus_per_task=extra.get("cpus", 3),
mem=f'{cluster.MEM_GB(extra.get("memgb", 20))}GB',
array_parallelism=extra.get("array_parallelism", 100),
time=extra.get("timeout", 12 * 60),
)
job = executor.submit(
run_cv,
module=module,
basedir=basedir + "/%j",
cfg=current_cfg,
basedate=basedate,
executor=executor,
test_run=True,
)
result_pth = os.path.join(
os.path.dirname(str(job.paths.result_pickle)), "metrics.csv"
)
        while not os.path.exists(result_pth):
time.sleep(5)
metrics = pd.read_csv(result_pth, index_col="Measure")
q.put({"id": id, "parameters": parameters, "mae": metrics.loc["MAE"].mean()})
return {"mae": metrics.loc["MAE"].mean()}
q = queue.Queue()
waiting_for = 0
launched = False
for _ in range(iters):
while True:
try:
parameters, trial_idx = ax_client.get_next_trial()
break
except MaxParallelismReachedException:
if not launched:
executor.launch(
os.path.join(basedir, "workers"), workers=array_parallelism
)
launched = True
if waiting_for == 0 and q.qsize() == 0:
break
item = q.get()
ax_client.complete_trial(
trial_index=item["id"], raw_data={"mae": item["mae"]}
)
best_parameters, values = ax_client.get_best_parameters()
trials_df = ax_client.generation_strategy.trials_as_df
with open(os.path.join(basedir, "best_params.json"), "w") as fout:
print(json.dumps(best_parameters), file=fout)
with open(os.path.join(basedir, "ax_state.json"), "w") as fout:
print(json.dumps(ax_client.to_json_snapshot()), file=fout)
trials_df.to_csv(os.path.join(basedir, "trials.csv"), index=False)
current_cfg = copy.deepcopy(cfg)
current_cfg[module]["train"] = {**cfg[module]["train"], **parameters}
current_cfg[module]["train"] = {
k: v[0] if isinstance(v, list) else v
for k, v in current_cfg[module]["train"].items()
}
threading.Thread(target=optimize_run, args=(q, trial_idx, current_cfg)).start()
waiting_for += 1
if __name__ == "__main__":
cli()
| covid19_spread-main | cv.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from covid19_spread.bar import BARCV
import yaml
from argparse import Namespace
import torch as th
class TestBatchedInference:
def test_batched_inference(self):
with th.no_grad():
th.set_default_tensor_type(th.DoubleTensor)
th.manual_seed(0)
mod = BARCV()
cfg = yaml.safe_load(open("cv/us.yml"))
opt = Namespace(
**{
k: v[0] if isinstance(v, list) else v
for k, v in cfg["bar"]["train"].items()
}
)
opt.fdat = cfg["bar"]["data"]
cases, regions, basedate, device = mod.initialize(opt)
cases = cases.type(th.get_default_dtype())
tmax = cases.size(-1)
# torch.bmm can give small precision differences on the CPU when comparing
# batched vs. non-batched inputs. If we do too many simulation iterations,
            # this error can compound to highly noticeable values. Limit the number of
# iterations to a small value. Interestingly, on the GPU it isn't a problem...
sim = mod.func.simulate(tmax, cases, 5, deterministic=True)
sim_batched = mod.func.simulate(
tmax, cases.repeat(2, 1, 1).contiguous(), 5, deterministic=True
)
assert (sim - sim_batched[0]).abs().max().item() < 1e-7
| covid19_spread-main | tests/test_batched_bar_inference.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import cv
import pandas as pd
from click.testing import CliRunner
import pytest
class TestCV:
def test_load_config(self):
"""Checks configs are loaded correctly"""
job_config = cv.load_config("cv/us.yml")
assert "naive" in job_config
assert job_config["region"] == "us"
def test_run_cv(self, tmpdir):
"""Runs cv pipeline using a single set of paramters from cv/us.yml.
Run is stored in temporary directory using PyTest Fixture `tmpdir`
"""
job_config = cv.load_config("cv/us.yml")
cv.run_cv("naive", tmpdir, job_config)
def test_filter_validation_days(self, tmp_path):
"""Tests split of validation days using tmp_path fixtures"""
data_path = "covid19_spread/data/usa/data_cases.csv"
output_path = tmp_path / "val.csv"
cv.filter_validation_days(data_path, output_path, 7)
original_df = pd.read_csv(data_path, index_col="region")
filtered_df = pd.read_csv(output_path, index_col="region")
assert (original_df.shape[1] - filtered_df.shape[1]) == 7
@pytest.mark.integration
class TestCVIntegration:
def test_cv_naive_us(self, tmpdir):
"""Runs integration test with tmpdir fixture that's cleaned up after tests"""
runner = CliRunner()
result = runner.invoke(cv.cv, f"cv/us.yml naive -basedir {tmpdir}")
assert result.exit_code == 0
def test_cv_naive_basedate(self, tmpdir):
"""Runs integration test with tmpdir fixture that's cleaned up after tests"""
runner = CliRunner()
result = runner.invoke(
cv.cv, f"cv/us.yml naive -basedir {tmpdir} -basedate 2020-04-01"
)
assert result.exit_code == 0
| covid19_spread-main | tests/test_cv.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from covid19_spread import load
import pandas as pd
import pytest
DATA_PATH_US_CSV = "covid19_spread/data/usa/data_cases.csv"
DATA_PATH_NY_CSV = "covid19_spread/data/usa/data_cases_ny.csv"
class TestLoad:
@pytest.mark.parametrize("path", [DATA_PATH_US_CSV, DATA_PATH_NY_CSV])
def test_load_cases_by_region(self, path):
"""Confirms cases loaded are per region"""
cases_df = load.load_confirmed_by_region(path)
assert cases_df.index.name == "date"
assert type(cases_df.index) == pd.core.indexes.datetimes.DatetimeIndex
assert (cases_df >= 0).all().all()
regions = cases_df.columns
suffolk_present = (
"Suffolk County" in regions or "Suffolk County, New York" in regions
)
assert suffolk_present
@pytest.mark.parametrize("path", [DATA_PATH_US_CSV, DATA_PATH_NY_CSV])
def test_load_confirmed(self, path):
df = load.load_confirmed(path, None)
assert df.index.name == "date"
assert (df >= 0).all()
# should only have one column for total cases
assert len(df.shape) == 1
| covid19_spread-main | tests/test_load.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pandas as pd
from datetime import timedelta
def load_ground_truth(path):
df = pd.read_csv(path)
df = df.rename(columns={"region": "date"})
df.set_index("date", inplace=True)
df = df.transpose()
df.index = pd.to_datetime(df.index)
return df
def rmse(pred, gt):
return (pred - gt).pow(2).mean(axis=1).pow(1.0 / 2)
def mae(pred, gt):
return (pred - gt).abs().mean(axis=1)
def mape(pred, gt):
return ((pred - gt).abs() / gt.clip(1)).mean(axis=1)
def max_mae(pred, gt):
return (pred - gt).abs().max(axis=1)
def compute_metrics(df_true, df_pred, mincount=0, nanfill=False):
if isinstance(df_true, str):
df_true = load_ground_truth(df_true)
if isinstance(df_pred, str):
df_pred = pd.read_csv(df_pred, parse_dates=["date"], index_col="date")
return _compute_metrics(df_true, df_pred, mincount, nanfill=nanfill)
def _compute_metrics(df_true, df_pred, mincount=0, nanfill=False):
if nanfill:
cols = sorted(set(df_true.columns).difference(set(df_pred.columns)))
zeros = pd.DataFrame(np.zeros((len(df_pred), len(cols))), columns=cols)
zeros.index = df_pred.index
df_pred = pd.concat([df_pred, zeros], axis=1)
common_cols = list(set(df_true.columns).intersection(set(df_pred.columns)))
df_pred = df_pred[common_cols]
df_true = df_true[common_cols]
z = len(df_pred)
# print(df_pred.round(2))
basedate = df_pred.index.min()
pdate = basedate - timedelta(1)
diff = df_true.loc[pdate] - df_true.loc[basedate - timedelta(2)]
naive = [df_true.loc[pdate] + d * diff for d in range(1, z + 1)]
naive = pd.DataFrame(naive)
naive.index = df_pred.index
ix = df_pred.index.intersection(df_true.index)
df_pred = df_pred.loc[ix]
naive = naive.loc[ix]
gt = df_true.loc[ix]
# compute state level MAE
state_gt = gt.transpose().groupby(lambda x: x.split(", ")[-1]).sum()
state_pred = df_pred.transpose().groupby(lambda x: x.split(", ")[-1]).sum()
state_mae = (state_gt.sort_index() - state_pred.sort_index()).abs().mean(axis=0)
metrics = pd.DataFrame(
[
rmse(df_pred, gt),
mae(df_pred, gt),
mape(df_pred, gt),
rmse(naive, gt),
mae(naive, gt),
state_mae,
max_mae(df_pred, gt),
max_mae(naive, gt),
],
columns=df_pred.index.to_numpy(),
)
metrics["Measure"] = [
"RMSE",
"MAE",
"MAPE",
"RMSE_NAIVE",
"MAE_NAIVE",
"STATE_MAE",
"MAX_MAE",
"MAX_NAIVE_MAE",
]
metrics.set_index("Measure", inplace=True)
if metrics.shape[1] > 0:
metrics.loc["MAE_MASE"] = metrics.loc["MAE"] / metrics.loc["MAE_NAIVE"]
metrics.loc["RMSE_MASE"] = metrics.loc["RMSE"] / metrics.loc["RMSE_NAIVE"]
# Stack predictions onto last ground truth date.
# We'll take the diff and compute MAE on the new daily counts
stack = pd.concat(
[df_true.loc[[df_pred.index.min() - timedelta(days=1)]], df_pred]
)
stack_diff = stack.diff().loc[ix]
true_diff = df_true.diff().loc[ix]
metrics.loc["MAE_DELTAS"] = mae(stack_diff, true_diff)
metrics.loc["RMSE_DELTAS"] = rmse(stack_diff, true_diff)
return metrics
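# Minimal numeric sketch (illustration only) of the MASE-style ratios computed
# above: forecast error is normalized by the error of a naive baseline that
# linearly extrapolates the last observed daily increase. Numbers are made up.
def _example_mase():
    import numpy as np

    gt = np.array([110.0, 121.0, 133.0])    # future ground truth (cumulative)
    pred = np.array([112.0, 124.0, 137.0])  # model forecast
    last, prev = 100.0, 91.0                # last two observed days
    naive = last + np.arange(1, len(gt) + 1) * (last - prev)  # 109, 118, 127
    mae = np.abs(pred - gt).mean()          # 3.0
    mae_naive = np.abs(naive - gt).mean()   # ~3.33
    return mae / mae_naive                  # MAE_MASE < 1 beats the naive baseline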
| covid19_spread-main | covid19_spread/metrics.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pandas as pd
from .cross_val import CV
from . import load
from datetime import timedelta
def simulate(latest_count, latest_delta, latest_date, days):
"""Forecasts 7 days ahead using naive model for a single region:
day_n+1 prediction = day_n + day_n * (day_n - day_n-1 confirmed)
Args:
latest_delta (int): day_n - day_n-1 confirmed
latest_count (int): day_n confirmed
latest_date (datetime): last date with confirmed cases
days (int): number of days to forecast
Returns: dataframe of predictions
"""
forecast = {
-1: latest_count,
0: latest_count + latest_delta,
}
for day in range(1, days):
delta = forecast[day - 1] - forecast[day - 2]
forecast[day] = forecast[day - 1] + delta
# remove latest confirmed from prediction
forecast.pop(-1)
return forecast_to_dataframe(forecast, latest_date, days)
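# Worked example (illustration only) of the recursion above: with a latest
# count of 100 and a latest daily delta of 5, the forecast is the straight
# line 105, 110, 115, ... The date below is made up.
def _example_naive_simulate():
    import pandas as pd

    df = simulate(
        latest_count=100,
        latest_delta=5,
        latest_date=pd.to_datetime("2020-05-01"),
        days=3,
    )
    # df["total cases"].tolist() == [105, 110, 115]
    return df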
def forecast_to_dataframe(forecast, latest_date, days):
"""Converts dictionary of forecasts into dataframe with dates.
    forecast (dict): {0: predicted case count, 1: ...}
"""
prediction_end_date = latest_date + timedelta(days)
dates = pd.date_range(start=latest_date, end=prediction_end_date, closed="right")
forecast_list = [forecast[day] for day in range(days)]
df = pd.DataFrame.from_dict(zip(dates, forecast_list))
df.columns = ["date", "total cases"]
df = df.set_index("date")
return df
def train(region_cases_df):
"""Returns latest count, delta, date needed for forecasting"""
latest_count = region_cases_df[-1]
latest_delta = region_cases_df[-1] - region_cases_df[-2]
latest_date = pd.to_datetime(region_cases_df.index.max())
return latest_count, latest_delta, latest_date
def naive(data_path="data/usa/data.csv", days=7):
"""Performs region level naive forecasts"""
cases_df = load.load_confirmed_by_region(data_path)
regions = cases_df.columns
forecasts = []
for region in regions:
latest_count, latest_delta, latest_date = train(cases_df[region])
forecast_df = simulate(latest_count, latest_delta, latest_date, days)
forecast_df = forecast_df.rename(columns={"total cases": region})
forecasts.append(forecast_df)
df = pd.concat(forecasts, axis=1)
return df
class NaiveCV(CV):
def run_train(self, dset, train_params, model_out):
"""Returns delta between last two days and last confirmed total.
Args:
dset (str): path for confirmed cases
train_params (dict): training parameters
model_out (str): path for saving training checkpoints
Returns: list of (doubling_times (np.float64), regions (list of str))
"""
def run_simulate(self, dset, train_params, model, days, sim_params):
"""Returns new cases count predictions"""
forecast_df = naive(data_path=dset, days=days)
cases_df = load.load_confirmed_by_region(dset)
new_cases_forecast_df = (
pd.concat([cases_df, forecast_df])
.sort_index()
.diff()
.loc[forecast_df.index]
)
return new_cases_forecast_df
CV_CLS = NaiveCV
if __name__ == "__main__":
print(naive())
| covid19_spread-main | covid19_spread/naive.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pandas as pd
from numpy.linalg import norm
import os
import re
from covid19_spread.lib import cluster
from subprocess import check_call
from covid19_spread import metrics
from datetime import timedelta
def mk_absolute_paths(cfg):
if isinstance(cfg, dict):
return {k: mk_absolute_paths(v) for k, v in cfg.items()}
elif isinstance(cfg, list):
return list(map(mk_absolute_paths, cfg))
else:
return (
os.path.realpath(cfg)
if isinstance(cfg, str) and os.path.exists(cfg)
else cfg
)
def rebase_forecast_deltas(val_in, df_forecast_deltas):
gt = metrics.load_ground_truth(val_in)
# Ground truth for the day before our first forecast
prev_day = gt.loc[[df_forecast_deltas.index.min() - timedelta(days=1)]]
# Stack the first day ground truth on top of the forecasts
common_cols = set(df_forecast_deltas.columns).intersection(set(gt.columns))
stacked = pd.concat([prev_day[common_cols], df_forecast_deltas[common_cols]])
# Cumulative sum to compute total cases for the forecasts
df_forecast = stacked.sort_index().cumsum().iloc[1:]
return df_forecast
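# Minimal sketch (illustration only) of the rebasing performed above: daily
# forecast deltas are stacked on top of the previous day's cumulative ground
# truth, and a cumulative sum turns them back into total-case forecasts.
# Region name and numbers are made up.
def _example_rebase_deltas():
    import pandas as pd

    idx = pd.date_range("2020-05-02", periods=3)
    deltas = pd.DataFrame({"Suffolk, New York": [5.0, 6.0, 7.0]}, index=idx)
    prev_day = pd.DataFrame(
        {"Suffolk, New York": [100.0]}, index=[pd.Timestamp("2020-05-01")]
    )
    stacked = pd.concat([prev_day, deltas])
    totals = stacked.sort_index().cumsum().iloc[1:]
    # totals on 2020-05-02..04 == 105, 111, 118
    return totals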
def update_repo(repo, no_pull=False):
user = cluster.USER
match = re.search(r"([^(\/|:)]+)/([^(\/|:)]+)\.git", repo)
name = f"{match.group(1)}_{match.group(2)}"
data_pth = f"{cluster.FS}/{user}/covid19/data/{name}"
if not os.path.exists(data_pth):
check_call(["git", "clone", repo, data_pth])
if not no_pull:
check_call(["git", "checkout", "master"], cwd=data_pth)
check_call(["git", "pull"], cwd=data_pth)
return data_pth
def drop_k_days_csv(dset, outfile, days):
df = pd.read_csv(dset, index_col="region")
if days > 0:
df = df[sorted(df.columns)[:-days]]
df = drop_all_zero_csv(df)
df.to_csv(outfile)
def drop_all_zero_csv(df):
counts = df.sum(axis=1)
df = df[counts > 0]
return df
def smooth_csv(indset: str, outdset: str, days: int):
df = pd.read_csv(indset, index_col="region").transpose()
incident_cases = df.diff()
smooth = np.round(incident_cases.rolling(window=days, min_periods=1).mean())
smooth.iloc[0] = df.iloc[0]
smooth.cumsum(0).transpose().to_csv(outdset)
smooth = smooth_csv
def print_model_stats(mus, beta, S, U, V, A):
C = A - np.diag(np.diag(A))
print("beta =", beta)
print(f"\nNorms : U = {norm(U).item():.3f}, V = {norm(V):.3f}")
print(f"Max Element: U = {np.max(U).item():.3f}, V = {np.max(V):.3f}")
print(f"Avg Element: U = {np.mean(U).item():.3f}, V = {np.mean(V):.3f}")
print(f"\nSelf: max = {np.max(S):.3f}, avg = {np.mean(S):.3f}")
print(f"Cross: max = {np.max(C):.3f}, avg = {np.mean(C):.3f}")
def standardize_county_name(county):
return (
county.replace(" County", "")
.replace(" Parish", "")
.replace(" Municipality", "")
.replace(" Municipality", "")
.replace(" Borough", "")
)
| covid19_spread-main | covid19_spread/common.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import numpy as np
import pandas as pd
import warnings
from datetime import timedelta
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import NegativeBinomial, Normal, Poisson
from . import load
from .cross_val import CV
from .common import rebase_forecast_deltas
import yaml
from . import metrics
import click
import sys
from scipy.stats import nbinom, norm
from bisect import bisect_left, bisect_right
from tqdm import tqdm
import timeit
from typing import List
import os
warnings.filterwarnings("ignore", category=UserWarning)
class BetaRNN(nn.Module):
def __init__(self, M, layers, dim, input_dim, dropout=0.0):
# initialize parameters
super(BetaRNN, self).__init__()
self.h0 = nn.Parameter(th.zeros(layers, M, dim))
self.rnn = nn.RNN(input_dim, dim, layers, dropout=dropout)
self.v = nn.Linear(dim, 1, bias=False)
self.fpos = th.sigmoid
# initialize weights
nn.init.xavier_normal_(self.v.weight)
for p in self.rnn.parameters():
if p.dim() == 2:
nn.init.xavier_normal_(p)
def forward(self, x):
ht, hn = self.rnn(x, self.h0)
beta = self.fpos(self.v(ht))
return beta
def __repr__(self):
return str(self.rnn)
class BetaGRU(BetaRNN):
def __init__(self, M, layers, dim, input_dim, dropout=0.0):
super().__init__(M, layers, dim, input_dim, dropout)
self.rnn = nn.GRU(input_dim, dim, layers, dropout=dropout)
self.rnn.reset_parameters()
self.h0 = nn.Parameter(th.randn(layers, M, dim))
class BetaLSTM(BetaRNN):
def __init__(self, M, layers, dim, input_dim, dropout=0.0):
super().__init__(M, layers, dim, input_dim, dropout)
self.rnn = nn.LSTM(input_dim, dim, layers, dropout=dropout)
self.rnn.reset_parameters()
self.h0 = nn.Parameter(th.zeros(layers, M, dim))
self.c0 = nn.Parameter(th.randn(layers, M, dim))
def forward(self, x):
ht, (hn, cn) = self.rnn(x, (self.h0, self.c0))
beta = self.fpos(self.v(ht))
return beta
class BetaLatent(nn.Module):
def __init__(self, fbeta, regions, tmax, time_features):
"""
Params
======
- regions: names of regions (list)
- dim: dimensionality of hidden vector (int)
- layer: number of RNN layers (int)
- tmax: maximum observation time (float)
- time_features: tensor of temporal features (time x region x features)
"""
super(BetaLatent, self).__init__()
self.M = len(regions)
self.tmax = tmax
self.time_features = time_features
input_dim = 0
if time_features is not None:
input_dim += time_features.size(2)
self.fbeta = fbeta(self.M, input_dim)
def forward(self, t, ys):
x = []
if self.time_features is not None:
if self.time_features.size(0) > t.size(0):
f = self.time_features.narrow(0, 0, t.size(0))
else:
f = th.zeros(
t.size(0), self.M, self.time_features.size(2), device=t.device
)
f.copy_(self.time_features.narrow(0, -1, 1))
f.narrow(0, 0, self.time_features.size(0)).copy_(self.time_features)
x.append(f)
x = th.cat(x, dim=2)
beta = self.fbeta(x)
return beta.squeeze().t()
def apply(self, x):
ht, hn = self.rnn(x, self.h0)
return self.fpos(self.v(ht))
def __repr__(self):
return str(self.fbeta)
class BAR(nn.Module):
def __init__(
self,
regions,
beta,
window,
dist,
graph,
features,
self_correlation=True,
cross_correlation=True,
offset=None,
):
super(BAR, self).__init__()
self.regions = regions
self.M = len(regions)
self.beta = beta
self.features = features
self.self_correlation = self_correlation
self.cross_correlation = cross_correlation
self.window = window
self.z = nn.Parameter(th.ones((self.M, 7)).fill_(1))
self._alphas = nn.Parameter(th.zeros((self.M, self.M)).fill_(-3))
self.nu = nn.Parameter(th.ones((self.M, 1)).fill_(8))
self.scale = nn.Parameter(th.ones((self.M, 1)))
self._dist = dist
self.graph = graph
self.offset = offset
self.neighbors = self.M
self.adjdrop = nn.Dropout2d(0.1)
if graph is not None:
assert graph.size(0) == self.M, graph.size()
assert graph.size(1) == self.M, graph.size()
self.neighbors = graph.sum(axis=1)
if features is not None:
self.w_feat = nn.Linear(features.size(1), 1)
nn.init.xavier_normal_(self.w_feat.weight)
def dist(self, scores):
if self._dist == "poisson":
return Poisson(scores)
elif self._dist == "nb":
return NegativeBinomial(scores, logits=self.nu)
elif self._dist == "normal":
return Normal(scores, th.exp(self.nu))
else:
raise RuntimeError("Unknown loss")
def alphas(self):
alphas = self._alphas
if self.self_correlation:
with th.no_grad():
alphas.fill_diagonal_(-1e10)
return alphas
def metapopulation_weights(self):
alphas = self.alphas()
W = th.sigmoid(alphas)
W = W.squeeze(0).squeeze(-1).t()
if self.graph is not None:
W = W * self.graph
return W
def score(self, t, ys):
assert t.size(-1) == ys.size(-1), (t.size(), ys.size())
length = ys.size(-1) - self.window + 1
# beta evolution
beta = self.beta(t, ys)
Z = th.zeros(0).sum()
if self.self_correlation:
ws = F.softplus(self.z)
ws = ws.expand(self.M, self.z.size(1))
# self-correlation
Z = F.conv1d(
F.pad(ys.unsqueeze(0) if ys.ndim == 2 else ys, (self.z.size(1) - 1, 0)),
ws.unsqueeze(1),
groups=self.M,
)
Z = Z.squeeze(0)
Z = Z.div(float(self.z.size(1)))
# cross-correlation
Ys = th.zeros(0).sum(0)
W = th.zeros(1, 1)
if self.cross_correlation:
W = self.metapopulation_weights()
Ys = th.stack(
[
F.pad(ys.narrow(-1, i, length), (self.window - 1, 0))
for i in range(self.window)
]
)
orig_shape = Ys.shape
Ys = Ys.view(-1, Ys.size(-2), Ys.size(-1)) if Ys.ndim == 4 else Ys
Ys = (
th.bmm(W.unsqueeze(0).expand(Ys.size(0), self.M, self.M), Ys)
.view(orig_shape)
.mean(dim=0)
)
with th.no_grad():
self.train_stats = (Z.mean().item(), Ys.mean().item())
if self.features is not None:
Ys = Ys + F.softplus(self.w_feat(self.features))
Ys = beta * (Z + Ys) / self.neighbors
return Ys, beta, W
def simulate(self, tobs, ys, days, deterministic=True, return_stds=False):
preds = ys.clone()
self.eval()
assert tobs == preds.size(-1), (tobs, preds.size())
stds = []
for d in range(days):
t = th.arange(tobs + d, device=ys.device) + 1
s, _, _ = self.score(t, preds)
assert (s >= 0).all(), s.squeeze()
if deterministic:
y = self.dist(s).mean
else:
y = self.dist(s).sample()
assert (y >= 0).all(), y.squeeze()
y = y.narrow(-1, -1, 1).clamp(min=1e-8)
preds = th.cat([preds, y], dim=-1)
stds.append(self.dist(s).stddev)
preds = preds.narrow(-1, -days, days)
self.train()
if return_stds:
return preds, stds
return preds
def __repr__(self):
return f"bAR({self.window}) | {self.beta} | EX ({self.train_stats[0]:.1e}, {self.train_stats[1]:.1e})"
def train(model, new_cases, regions, optimizer, checkpoint, args):
print(args)
days_ahead = getattr(args, "days_ahead", 1)
M = len(regions)
device = new_cases.device
tmax = new_cases.size(1)
t = th.arange(tmax, device=device) + 1
size_pred = tmax - days_ahead
reg = th.tensor([0.0], device=device)
target = new_cases.narrow(1, days_ahead, size_pred)
start_time = timeit.default_timer()
for itr in range(1, args.niters + 1):
optimizer.zero_grad()
scores, beta, W = model.score(t, new_cases)
scores = scores.clamp(min=1e-8)
assert scores.dim() == 2, scores.size()
assert scores.size(1) == size_pred + 1
assert beta.size(0) == M
# compute loss
dist = model.dist(scores.narrow(1, days_ahead - 1, size_pred))
_loss = dist.log_prob(target)
loss = -_loss.sum(axis=1).mean()
stddev = model.dist(scores).stddev.mean()
# loss += stddev * args.weight_decay
# temporal smoothness
if args.temporal > 0:
reg = (
args.temporal * th.pow(beta[:, 1:] - beta[:, :-1], 2).sum(axis=1).mean()
)
# back prop
(loss + reg).backward()
# do AdamW-like update for Granger regularization
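        # The block below hand-derives the gradient of (mean(sigmoid(alphas)) - granger)^2
        # with respect to the off-diagonal alphas and applies it decoupled from Adam;
        # the diagonal is first set to logit(granger) so self-edges do not bias the
        # mean, and its gradient is zeroed out.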
if args.granger > 0:
with th.no_grad():
mu = np.log(args.granger / (1 - args.granger))
y = args.granger
n = th.numel(model._alphas)
ex = th.exp(-model._alphas)
model._alphas.fill_diagonal_(mu)
de = 2 * (model._alphas.sigmoid().mean() - y) * ex
nu = n * (ex + 1) ** 2
_grad = de / nu
_grad.fill_diagonal_(0)
r = args.lr * args.eta * n
model._alphas.copy_(model._alphas - r * _grad)
# make sure we have no NaNs
assert loss == loss, (loss, scores, _loss)
nn.utils.clip_grad_norm_(model.parameters(), 5)
# take gradient step
optimizer.step()
# control
if itr % 500 == 0:
time = timeit.default_timer() - start_time
with th.no_grad(), np.printoptions(precision=3, suppress=True):
length = scores.size(1) - 1
maes = th.abs(dist.mean - new_cases.narrow(1, 1, length))
z = model.z
nu = th.sigmoid(model.nu)
means = model.dist(scores).mean
W_spread = (W * (1 - W)).mean()
_err = W.mean() - args.granger
print(
f"[{itr:04d}] Loss {loss.item():.2f} | "
f"Temporal {reg.item():.5f} | "
f"MAE {maes.mean():.2f} | "
f"{model} | "
f"{args.loss} ({means[:, -1].min().item():.2f}, {means[:, -1].max().item():.2f}) | "
f"z ({z.min().item():.2f}, {z.mean().item():.2f}, {z.max().item():.2f}) | "
f"W ({W.min().item():.2f}, {W.mean().item():.2f}, {W.max().item():.2f}) | "
f"W_spread {W_spread:.2f} | mu_err {_err:.3f} | "
f"nu ({nu.min().item():.2f}, {nu.mean().item():.2f}, {nu.max().item():.2f}) | "
f"nb_stddev ({stddev.data.mean().item():.2f}) | "
f"scale ({th.exp(model.scale).mean():.2f}) | "
f"time = {time:.2f}s"
)
th.save(model.state_dict(), checkpoint)
start_time = timeit.default_timer()
print(f"Train MAE,{maes.mean():.2f}")
return model
def _get_arg(args, v, device, regions):
if hasattr(args, v):
print(getattr(args, v))
fs = []
for _file in getattr(args, v):
d = th.load(_file)
_fs = th.cat([d[r].unsqueeze(0) for r in regions], dim=0)
fs.append(_fs)
return th.cat(fs, dim=1).float().to(device)
else:
return None
def _get_dict(args, v, device, regions):
if hasattr(args, v):
_feats = []
for _file in getattr(args, v):
print(f"Loading {_file}")
d = th.load(_file)
feats = None
for i, r in enumerate(regions):
if r not in d:
continue
_f = d[r]
if feats is None:
feats = th.zeros(len(regions), d[r].size(0), _f.size(1))
feats[i, :, : _f.size(1)] = _f
_feats.append(feats.to(device).float())
return th.cat(_feats, dim=2)
else:
return None
class BARCV(CV):
def initialize(self, args):
device = th.device(
"cuda" if th.cuda.is_available() and getattr(args, "cuda", True) else "cpu"
)
cases, regions, basedate = load.load_confirmed_csv(args.fdat)
assert (cases == cases).all(), th.where(cases != cases)
# Cumulative max across time
cases = np.maximum.accumulate(cases, axis=1)
new_cases = th.zeros_like(cases)
new_cases.narrow(1, 1, cases.size(1) - 1).copy_(cases[:, 1:] - cases[:, :-1])
assert (new_cases >= 0).all(), new_cases[th.where(new_cases < 0)]
new_cases = new_cases.float().to(device)[:, args.t0 :]
print("Number of Regions =", new_cases.size(0))
print("Timeseries length =", new_cases.size(1))
print(
"Increase: max all = {}, max last = {}, min last = {}".format(
new_cases.max().item(),
new_cases[:, -1].max().item(),
new_cases[:, -1].min().item(),
)
)
tmax = new_cases.size(1) + 1
# adjust max window size to available data
args.window = min(args.window, new_cases.size(1) - 4)
# setup optional features
graph = (
th.load(args.graph).to(device).float() if hasattr(args, "graph") else None
)
features = _get_arg(args, "features", device, regions)
time_features = _get_dict(args, "time_features", device, regions)
if time_features is not None:
time_features = time_features.transpose(0, 1)
time_features = time_features.narrow(0, args.t0, new_cases.size(1))
print("Feature size = {} x {} x {}".format(*time_features.size()))
print(time_features.min(), time_features.max())
self.weight_decay = 0
# setup beta function
if args.decay.startswith("latent"):
dim, layers = args.decay[6:].split("_")
fbeta = lambda M, input_dim: BetaRNN(
M,
int(layers),
int(dim),
input_dim,
dropout=getattr(args, "dropout", 0.0),
)
beta_net = BetaLatent(fbeta, regions, tmax, time_features)
self.weight_decay = args.weight_decay
elif args.decay.startswith("lstm"):
dim, layers = args.decay[len("lstm") :].split("_")
fbeta = lambda M, input_dim: BetaLSTM(
M,
int(layers),
int(dim),
input_dim,
dropout=getattr(args, "dropout", 0.0),
)
beta_net = BetaLatent(fbeta, regions, tmax, time_features)
self.weight_decay = args.weight_decay
elif args.decay.startswith("gru"):
dim, layers = args.decay[len("gru") :].split("_")
fbeta = lambda M, input_dim: BetaGRU(
M,
int(layers),
int(dim),
input_dim,
dropout=getattr(args, "dropout", 0.0),
)
beta_net = BetaLatent(fbeta, regions, tmax, time_features)
self.weight_decay = args.weight_decay
else:
raise ValueError("Unknown beta function")
self.func = BAR(
regions,
beta_net,
args.window,
args.loss,
graph,
features,
self_correlation=getattr(args, "self_correlation", True),
cross_correlation=not getattr(args, "no_cross_correlation", False),
offset=cases[:, 0].unsqueeze(1).to(device).float(),
).to(device)
return new_cases, regions, basedate, device
def run_train(self, dset, args, checkpoint):
args.fdat = dset
new_cases, regions, _, device = self.initialize(args)
params = []
exclude = {
"z",
"nu",
"_alphas",
"_alpha_weights",
"beta.fbeta.h0",
"beta.fbeta.c0",
"beta.fbeta.conv.weight",
"beta.fbeta.conv.bias",
"scale",
}
for name, p in dict(self.func.named_parameters()).items():
wd = 0 if name in exclude else args.weight_decay
if wd != 0:
print(f"Regularizing {name} = {wd}")
params.append({"params": p, "weight_decay": wd})
optimizer = optim.AdamW(params, lr=args.lr, betas=[args.momentum, 0.999])
model = train(self.func, new_cases, regions, optimizer, checkpoint, args)
return model
def run_prediction_interval(
self, means_pth: str, stds_pth: str, intervals: List[float],
):
means = pd.read_csv(means_pth, index_col="date", parse_dates=["date"])
stds = pd.read_csv(stds_pth, index_col="date", parse_dates=["date"])
means_t = means.values
stds_t = stds.values
multipliers = np.array([norm.ppf(1 - (1 - x) / 2) for x in intervals])
result = np.empty((means_t.shape[0], means_t.shape[1], len(intervals), 3))
lower = means_t[:, :, None] - multipliers.reshape(1, 1, -1) * stds_t[:, :, None]
upper = means_t[:, :, None] + multipliers.reshape(1, 1, -1) * stds_t[:, :, None]
result = np.stack(
[np.clip(lower, a_min=0, a_max=None), upper, np.ones(lower.shape)], axis=-1,
)
cols = pd.MultiIndex.from_product(
[means.columns, intervals, ["lower", "upper", "fallback"]]
)
result_df = pd.DataFrame(result.reshape(result.shape[0], -1), columns=cols)
result_df["date"] = means.index
melted = result_df.melt(
id_vars=["date"], var_name=["location", "interval", "lower/upper"]
)
pivot = melted.pivot(
index=["date", "location", "interval"],
columns="lower/upper",
values="value",
).reset_index()
return pivot.merge(
means.reset_index().melt(
id_vars=["date"], var_name="location", value_name="mean"
),
on=["date", "location"],
).merge(
stds.reset_index().melt(
id_vars=["date"], var_name="location", value_name="std"
),
on=["date", "location"],
)
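# Minimal numeric sketch (illustration only) of the interval construction in
# run_prediction_interval above: for a coverage level p the half-width is
# norm.ppf(1 - (1 - p) / 2) standard deviations, and the lower bound is
# clipped at zero. The mean/std values are made up.
def _example_normal_interval(mean=50.0, std=10.0, coverage=0.95):
    import numpy as np
    from scipy.stats import norm

    mult = norm.ppf(1 - (1 - coverage) / 2)  # ~1.96 for 95% coverage
    lower = np.clip(mean - mult * std, a_min=0, a_max=None)
    upper = mean + mult * std
    return float(lower), float(upper)        # ~ (30.4, 69.6)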
CV_CLS = BARCV
@click.group()
def cli():
pass
@cli.command()
@click.argument("pth")
def simulate(pth):
chkpnt = th.load(pth)
mod = BARCV()
prefix = ""
if "final_model" in pth:
prefix = "final_model_"
cfg = yaml.safe_load(open(f"{os.path.dirname(pth)}/{prefix}bar.yml"))
args = argparse.Namespace(**cfg["train"])
new_cases, regions, basedate, device = mod.initialize(args)
mod.func.load_state_dict(chkpnt)
res = mod.func.simulate(new_cases.size(1), new_cases, args.test_on)
df = pd.DataFrame(res.cpu().data.numpy().transpose(), columns=regions)
df.index = pd.date_range(
start=pd.to_datetime(basedate) + timedelta(days=1), periods=len(df)
)
df = rebase_forecast_deltas(cfg["data"], df)
gt = pd.read_csv(cfg["data"], index_col="region").transpose()
gt.index = pd.to_datetime(gt.index)
print(metrics._compute_metrics(gt, df, nanfill=True))
def main(args):
parser = argparse.ArgumentParser("beta-AR")
parser.add_argument("-fdat", help="Path to confirmed cases", required=True)
parser.add_argument("-lr", type=float, default=5e-2)
parser.add_argument("-weight-decay", type=float, default=0)
parser.add_argument("-niters", type=int, default=2000)
parser.add_argument("-amsgrad", default=False, action="store_true")
parser.add_argument("-loss", default="lsq", choices=["nb", "poisson"])
parser.add_argument("-decay", default="exp")
parser.add_argument("-t0", default=10, type=int)
parser.add_argument("-fit-on", default=5, type=int)
parser.add_argument("-test-on", default=5, type=int)
parser.add_argument("-checkpoint", type=str, default="/tmp/bar_model.bin")
parser.add_argument("-window", type=int, default=25)
parser.add_argument("-momentum", type=float, default=0.99)
args = parser.parse_args()
mod = BARCV()
model = mod.run_train(args.fdat, args, args.checkpoint)
with th.no_grad():
forecast = mod.run_simulate(args, model)
if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] in cli.commands:
cli()
else:
main(sys.argv[1:])
| covid19_spread-main | covid19_spread/bar.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pandas as pd
import torch as th
import yaml
from pathlib import Path
import json
import os
def load_confirmed_csv(path):
df = pd.read_csv(path)
df.set_index("region", inplace=True)
basedate = df.columns[-1]
nodes = df.index.to_numpy()
cases = df.to_numpy()
return th.from_numpy(cases), nodes, basedate
def load_confirmed(path, regions):
"""Returns dataframe of total confirmed cases"""
df = load_confirmed_by_region(path, regions=regions)
return df.sum(axis=1)
def load_confirmed_by_region(path, regions=None, filter_unknown=True):
"""Loads csv file for confirmed cases by region"""
df = pd.read_csv(path, index_col=0, header=None)
# transpose so dates are along rows to match h5
df = df.T
# set date as index
df = df.rename(columns={"region": "date"})
df = df.set_index("date")
df.index = pd.to_datetime(df.index)
df = df.astype(float)
if regions is not None:
df = df[regions]
if filter_unknown:
df = df.loc[:, df.columns != "Unknown"]
return df
def load_backfill(
jobdir, model=None, indicator="model_selection.json", forecast="best_mae",
):
"""collect all forcasts from job dir"""
forecasts = {}
configs = []
for path in Path(jobdir).rglob(indicator):
date = str(path).split("/")[-2]
assert date.startswith("sweep_"), str(path)
jobs = [m["pth"] for m in json.load(open(path)) if m["name"] == forecast]
assert len(jobs) == 1, jobs
job = jobs[0]
date = date[6:]
forecasts[date] = os.path.join(job, "final_model_validation.csv")
cfg = yaml.safe_load(open(os.path.join(job, "../cfg.yml")))
cfg = yaml.safe_load(
open(os.path.join(job, f"{model or cfg['this_module']}.yml"))
)
cfg = cfg["train"]
cfg["date"] = date
cfg["job"] = job
configs.append(cfg)
configs = pd.DataFrame(configs)
configs.set_index("date", inplace=True)
return forecasts, configs
| covid19_spread-main | covid19_spread/load.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from typing import Dict, Any, List, Tuple
import pandas as pd
from datetime import timedelta
import torch as th
from tqdm import tqdm
import numpy as np
from .common import mk_absolute_paths
import yaml
from tensorboardX import SummaryWriter
from collections import namedtuple, defaultdict
from itertools import count
from . import common, metrics
import os
from glob import glob
import shutil
import json
BestRun = namedtuple("BestRun", ("pth", "name"))
def load_config(cfg_pth: str) -> Dict[str, Any]:
return mk_absolute_paths(yaml.load(open(cfg_pth), Loader=yaml.FullLoader))
class CV:
def run_simulate(
self,
dset: str,
args: Dict[str, Any],
model: Any,
days: int,
sim_params: Dict[str, Any],
) -> pd.DataFrame:
"""
Run a simulation given a trained model. This should return a pandas DataFrame with each
column corresponding to a location and each row corresponding to a date. The value
of each cell is the forecasted cases per day (*not* cumulative cases)
"""
args.fdat = dset
if model is None:
raise NotImplementedError
cases, regions, basedate, device = self.initialize(args)
tmax = cases.size(1)
test_preds = model.simulate(tmax, cases, days, **sim_params)
test_preds = test_preds.cpu().numpy()
df = pd.DataFrame(test_preds.T, columns=regions)
if basedate is not None:
base = pd.to_datetime(basedate)
ds = [base + timedelta(i) for i in range(1, days + 1)]
df["date"] = ds
df.set_index("date", inplace=True)
return df
def run_standard_deviation(
self,
dset,
args,
nsamples,
intervals,
orig_cases,
model=None,
batch_size=1,
closed_form=False,
):
with th.no_grad():
args.fdat = dset
if model is None:
raise NotImplementedError
cases, regions, basedate, device = self.initialize(args)
tmax = cases.size(1)
base = pd.to_datetime(basedate)
def mk_df(arr):
df = pd.DataFrame(arr, columns=regions)
df.index = pd.date_range(base + timedelta(days=1), periods=args.test_on)
return df
if closed_form:
preds, stds = model.simulate(
tmax, cases, args.test_on, deterministic=True, return_stds=True
)
stds = th.cat([x.narrow(-1, -1, 1) for x in stds], dim=-1)
return mk_df(stds.cpu().numpy().T), mk_df(preds.cpu().numpy().T)
samples = []
if batch_size > 1:
cases = cases.repeat(batch_size, 1, 1)
nsamples = nsamples // batch_size
for i in tqdm(range(nsamples)):
test_preds = model.simulate(tmax, cases, args.test_on, False)
test_preds = test_preds.cpu().numpy()
samples.append(test_preds)
samples = (
np.stack(samples, axis=0)
if batch_size <= 1
else np.concatenate(samples, axis=0)
)
return mk_df(np.std(samples, axis=0).T), mk_df(np.mean(samples, axis=0).T)
def run_train(self, dset, model_params, model_out):
"""
Train a model
"""
...
def preprocess(self, dset: str, preprocessed: str, preprocess_args: Dict[str, Any]):
"""
Perform any kind of model specific pre-processing.
"""
if "smooth" in preprocess_args:
common.smooth(dset, preprocessed, preprocess_args["smooth"])
else:
shutil.copy(dset, preprocessed)
def metric_df(self, basedir):
runs = []
for metrics_pth in glob(os.path.join(basedir, "*/metrics.csv")):
metrics = pd.read_csv(metrics_pth, index_col="Measure")
runs.append(
{
"pth": os.path.dirname(metrics_pth),
"mae": metrics.loc["MAE"][-1],
"rmse": metrics.loc["RMSE"][-1],
"mae_deltas": metrics.loc["MAE_DELTAS"].mean(),
"rmse_deltas": metrics.loc["RMSE_DELTAS"].mean(),
"state_mae": metrics.loc["STATE_MAE"][-1],
}
)
return pd.DataFrame(runs)
def model_selection(self, basedir: str, config, module) -> List[BestRun]:
"""
Evaluate a sweep returning a list of models to retrain on the full dataset.
"""
df = self.metric_df(basedir)
if "ablation" in config["train"]:
ablation_map = defaultdict(count().__next__)
ablations = []
for _, row in df.iterrows():
job_cfg = load_config(os.path.join(row.pth, f"{module}.yml"))
if (
job_cfg["train"]["ablation"] is not None
and len(job_cfg["train"]["ablation"]) > 0
):
ablation = ",".join(
os.path.basename(x) for x in job_cfg["train"]["ablation"]
)
else:
ablation = "null"
ablations.append(ablation)
ablation_map[ablation]
ablation_map = {k: f"ablation_{v}" for k, v in ablation_map.items()}
rev_map = {v: k for k, v in ablation_map.items()}
df["ablation"] = [ablation_map[x] for x in ablations]
with open(os.path.join(basedir, "ablation_map.json"), "w") as fout:
print(json.dumps(rev_map), file=fout)
best_runs = []
for key in ["mae", "rmse", "mae_deltas", "rmse_deltas"]:
best = df.loc[df.groupby("ablation")[key].idxmin()]
best_runs.extend(
[
BestRun(x.pth, f"best_{key}_{x.ablation}")
for _, x in best.iterrows()
]
)
return best_runs
return [
BestRun(df.sort_values(by="mae").iloc[0].pth, "best_mae"),
BestRun(df.sort_values(by="rmse").iloc[0].pth, "best_rmse"),
BestRun(df.sort_values(by="mae_deltas").iloc[0].pth, "best_mae_deltas"),
BestRun(df.sort_values(by="rmse_deltas").iloc[0].pth, "best_rmse_deltas"),
BestRun(df.sort_values(by="state_mae").iloc[0].pth, "best_state_mae"),
]
def compute_metrics(
self, gt: str, forecast: str, model: Any, metric_args: Dict[str, Any]
) -> Tuple[pd.DataFrame, Dict[str, Any]]:
return metrics.compute_metrics(gt, forecast).round(2), {}
def setup_tensorboard(self, basedir):
"""
Setup dir and writer for tensorboard logging
"""
self.tb_writer = SummaryWriter(logdir=basedir)
def run_prediction_interval(
self, means_pth: str, stds_pth: str, intervals: List[float]
):
...
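# Minimal sketch (illustration only) of the defaultdict(count().__next__) idiom
# used in CV.model_selection above: each distinct ablation string is lazily
# assigned the next integer id the first time it is seen. Names are made up.
def _example_ablation_ids():
    from collections import defaultdict
    from itertools import count

    ablation_map = defaultdict(count().__next__)
    for ablation in ["mobility.pt", "null", "mobility.pt", "testing.pt"]:
        ablation_map[ablation]
    # -> {"mobility.pt": 0, "null": 1, "testing.pt": 2}
    return {k: f"ablation_{v}" for k, v in ablation_map.items()}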
| covid19_spread-main | covid19_spread/cross_val.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import click
class DefaultGroup(click.Group):
ignore_unknown_options = True
def __init__(self, *args, **kwargs):
default_command = kwargs.pop("default_command", None)
super(DefaultGroup, self).__init__(*args, **kwargs)
self.default_cmd_name = None
if default_command is not None:
self.set_default_command(default_command)
    def set_default_command(self, command):
        if isinstance(command, str):
            cmd_name = command
        else:
            cmd_name = command.name
            self.add_command(command)
        self.default_cmd_name = cmd_name
def parse_args(self, ctx, args):
if not args and self.default_cmd_name is not None:
args.insert(0, self.default_cmd_name)
return super(DefaultGroup, self).parse_args(ctx, args)
def get_command(self, ctx, cmd_name):
if cmd_name not in self.commands and self.default_cmd_name is not None:
ctx.args0 = cmd_name
cmd_name = self.default_cmd_name
return super(DefaultGroup, self).get_command(ctx, cmd_name)
def resolve_command(self, ctx, args):
cmd_name, cmd, args = super(DefaultGroup, self).resolve_command(ctx, args)
args0 = getattr(ctx, "args0", None)
if args0 is not None:
args.insert(0, args0)
return cmd_name, cmd, args
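# Minimal usage sketch (illustration only): with DefaultGroup, invoking the
# group without naming a subcommand falls through to the default command, so
# `prog ARGS` behaves like `prog train ARGS`. Command and argument names here
# are made up.
def _example_default_group():
    @click.group(cls=DefaultGroup, default_command="train")
    def example_cli():
        pass

    @example_cli.command()
    @click.argument("config")
    def train(config):
        click.echo(f"training with {config}")

    return example_cli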
class OptionNArgs(click.Option):
def __init__(self, *args, **kwargs):
self.save_other_options = kwargs.pop("save_other_options", True)
nargs = kwargs.pop("nargs", -1)
assert nargs == -1, "nargs, if set, must be -1 not {}".format(nargs)
super(OptionNArgs, self).__init__(*args, **kwargs)
self._previous_parser_process = None
self._eat_all_parser = None
def add_to_parser(self, parser, ctx):
def parser_process(value, state):
# method to hook to the parser.process
done = False
value = [value]
if self.save_other_options:
# grab everything up to the next option
while state.rargs and not done:
for prefix in self._eat_all_parser.prefixes:
if state.rargs[0].startswith(prefix):
done = True
if not done:
value.append(state.rargs.pop(0))
else:
# grab everything remaining
value += state.rargs
state.rargs[:] = []
value = tuple(value)
# call the actual process
self._previous_parser_process(value, state)
retval = super(OptionNArgs, self).add_to_parser(parser, ctx)
for name in self.opts:
our_parser = parser._long_opt.get(name) or parser._short_opt.get(name)
if our_parser:
self._eat_all_parser = our_parser
self._previous_parser_process = our_parser.process
our_parser.process = parser_process
break
return retval
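# Minimal usage sketch (illustration only): an option built with OptionNArgs
# greedily consumes every following token up to the next option, e.g.
# `prog -features a.pt b.pt c.pt -remote`. Option names are made up.
def _example_option_nargs():
    @click.command()
    @click.option("-features", cls=OptionNArgs)
    @click.option("-remote", is_flag=True)
    def example_cmd(features, remote):
        click.echo(f"{features} remote={remote}")

    return example_cmd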
| covid19_spread-main | covid19_spread/lib/click_lib.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import submitit
from submitit.slurm.slurm import SlurmExecutor, SlurmJob
from submitit.core import core, utils
import uuid
import typing as tp
import time
import sys
import os
import sqlite3
import enum
import random
from contextlib import (
contextmanager,
redirect_stderr,
redirect_stdout,
AbstractContextManager,
)
import traceback
import itertools
import timeit
from covid19_spread.lib.context_managers import env_var
class TransactionManager(AbstractContextManager):
"""
Class for managing exclusive database transactions. This locks the entire
    database to ensure atomicity. Transactions can be nested; only the outermost
    level commits or rolls back, so inner transactions are effectively no-ops.
"""
def __init__(self, db_pth: str, nretries: int = 20):
self.retries = nretries
self.db_pth = db_pth
self.conn = None
self.cursor = None
self.nesting = 0
self.start_time = None
def __getstate__(self):
state = self.__dict__.copy()
state["nesting"] = 0
state["conn"] = None
state["cursor"] = None
return state
def __setstate__(self, state):
self.__dict__.update(state)
def run(self, txn, ntries: int = 100):
exn = None
for _ in range(ntries):
try:
with self as conn:
conn.execute("BEGIN EXCLUSIVE")
return txn(conn)
except Exception as e:
traceback.print_exc(file=sys.stdout)
sleep_time = random.randint(0, 10)
print(f"Transaction failed! Sleeping for {sleep_time} seconds")
time.sleep(sleep_time)
exn = e
print("Failed too many times!!!!")
raise exn
def __enter__(self):
print(f"Entering transaction, nesting = {self.nesting}")
self.nesting += 1
if self.conn is None:
self.conn = sqlite3.connect(self.db_pth)
self.cursor = self.conn.cursor()
self.start_time = timeit.default_timer()
return self.cursor
def __exit__(self, exc_type, exc_val, tb):
self.nesting -= 1
print(f"Exiting transaction, nesting = {self.nesting}")
if exc_type is not None:
traceback.print_exc(file=sys.stdout)
if self.nesting == 0:
if exc_type is None:
print("committing transaction")
self.conn.commit()
else:
print("Rolling back transaction")
self.conn.rollback()
self.cursor.close()
self.conn.close()
self.cursor = None
self.conn = None
print(f"Finished transaction in {timeit.default_timer() - self.start_time}")
self.start_time = None
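# Hedged usage sketch (the table below is made up): run() opens an EXCLUSIVE
# transaction, hands the cursor to the callback, commits on success, and retries
# with a short random sleep when another process holds the lock.
#
#   tm = TransactionManager("/tmp/example.db")
#   def txn(cur):
#       cur.execute("CREATE TABLE IF NOT EXISTS kv(k TEXT PRIMARY KEY, v TEXT)")
#       cur.execute("INSERT OR REPLACE INTO kv VALUES('greeting', 'hello')")
#   tm.run(txn)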
class JobStatus(enum.IntEnum):
pending = 0
success = 1
failure = 2
final = 3 # pending if all other jobs are finished
def __conform__(self, protocol):
if protocol is sqlite3.PrepareProtocol:
return self.value
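# Note: __conform__ implements sqlite3's adapter protocol, so JobStatus members
# can be bound directly as query parameters, e.g.
#   cur.execute("SELECT * FROM jobs WHERE status=?", (JobStatus.pending,))
# which is how the executor below inserts rows into the jobs table.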
class Worker:
def __init__(self, db_pth: str, worker_id: int):
self.db_pth = db_pth
self.worker_id = worker_id
self.sleep = 0
self.worker_finished = False
self.current_job = None
def fetch_ready_job(self, cur):
# Select a pending job that doesn't have any unfinished dependencies
query = f"""
SELECT
jobs.pickle,
jobs.job_id,
jobs.retry_count,
MIN(COALESCE(j2.status, {JobStatus.success})) AS min_status,
MAX(COALESCE(j2.status, {JobStatus.failure})) AS max_status
FROM jobs
LEFT JOIN dependencies USING(pickle)
LEFT JOIN jobs j2 ON dependencies.depends_on=j2.pickle
WHERE
jobs.status={JobStatus.pending} AND
jobs.id='{self.db_pth}' AND
(dependencies.id='{self.db_pth}' OR dependencies.id IS NULL) AND
(j2.id='{self.db_pth}' OR j2.id IS NULL)
GROUP BY jobs.pickle, jobs.job_id
HAVING MIN(COALESCE(j2.status, {JobStatus.success})) >= {JobStatus.success}
AND MAX(COALESCE(j2.status, {JobStatus.success})) <= {JobStatus.success}
LIMIT 1
"""
cur.execute(query)
return cur.fetchall()
def finished(self, cur):
cur.execute(
f"""
SELECT COUNT(1) FROM jobs
WHERE status NOT IN ({JobStatus.success}, {JobStatus.failure}) AND id='{self.db_pth}'
"""
)
return cur.fetchone()[0] == 0
def count_running(self, cur):
cur.execute(
f"SELECT COUNT(1) FROM jobs WHERE status > {len(JobStatus)} AND id='{self.db_pth}'"
)
return cur.fetchone()[0]
def get_final_jobs(self, cur):
cur.execute(
f"SELECT pickle, job_id, retry_count FROM jobs WHERE status={JobStatus.final} AND id='{self.db_pth}' LIMIT 1"
)
return cur.fetchall()
def checkpoint(self):
print(f"Worker {self.worker_id} checkpointing")
if self.current_job is not None:
pickle, job_id, retry_count = self.current_job
print(f"Worker {self.worker_id} setting {pickle} back to pending...")
transaction_manager = TransactionManager(self.db_pth)
# Set the job back to pending
transaction_manager.run(
lambda conn: conn.execute(
f"UPDATE jobs SET status={JobStatus.pending} WHERE pickle='{pickle}' AND id='{self.db_pth}'"
)
)
return submitit.helpers.DelayedSubmission(Worker(self.db_pth, self.worker_id))
def __call__(self):
self.worker_finished = False
worker_job_id = f"worker_{self.worker_id}"
running_status = (
len(JobStatus) + self.worker_id + 1
) # mark in progress with this code
transaction_manager = TransactionManager(self.db_pth)
while not self.worker_finished:
if self.sleep > 0:
print(f"Sleeping for {self.sleep} seconds...")
time.sleep(self.sleep)
print(f"Worker {self.worker_id} getting job to run")
def txn(conn):
ready = self.fetch_ready_job(conn)
status = JobStatus.pending
if len(ready) == 0: # no jobs ready
if self.finished(conn):
self.worker_finished = True
return None # all jobs are finished, exiting...
if self.count_running(conn) > 0:
self.sleep = min(max(self.sleep * 2, 1), 30)
return None
ready = self.get_final_jobs(conn)
status = JobStatus.final
if len(ready) == 0:
self.sleep = min(max(self.sleep * 2, 1), 30)
return None
print(
f"Worker {self.worker_id} is executing final_job: {ready[0][0]}"
)
pickle, job_id, retry_count = ready[0][0], ready[0][1], ready[0][2]
# Mark that we're working on this job.
conn.execute(
f"""
UPDATE jobs SET status={running_status}, worker_id='{worker_job_id}'
WHERE pickle='{pickle}' AND status={status} AND id='{self.db_pth}'
"""
)
return pickle, job_id, retry_count
res = transaction_manager.run(txn)
if res is None:
continue
self.current_job = res
self.sleep = 0
pickle, job_id, retry_count = res
print(f"Worker {self.worker_id} got job to run: {pickle}")
# Run the job
job_dir = os.path.dirname(pickle)
paths = utils.JobPaths(job_dir, job_id=job_id)
with paths.stderr.open("w", buffering=1) as stderr, paths.stdout.open(
"w", buffering=1
) as stdout:
with redirect_stderr(stderr), redirect_stdout(stdout):
try:
with env_var({"SLURM_PICKLE_PTH": str(pickle)}):
dl = utils.DelayedSubmission.load(pickle)
dl.result()
status = JobStatus.success
except Exception:
retry_count -= 1
print(f"Job failed, retry_count = {retry_count}")
status = (
JobStatus.failure if retry_count == 0 else JobStatus.pending
)
traceback.print_exc(file=sys.stderr)
print(f"Worker {self.worker_id} finished job with status {status}")
transaction_manager.run(
lambda conn: conn.execute(
f"UPDATE jobs SET status={status.value}, retry_count={retry_count} WHERE pickle='{pickle}' AND id='{self.db_pth}'"
)
)
self.current_job = None
print(f"Worker {self.worker_id} updated job status")
class SlurmPoolExecutor(SlurmExecutor):
def __init__(self, *args, **kwargs):
db_pth = kwargs.pop("db_pth", None)
super().__init__(*args, **kwargs)
self.launched = False
self.nested = False
os.makedirs(self.folder, exist_ok=True)
if db_pth is None:
# Place the actual database in ~/.slurm_pool/<unique_id>.db
unique_filename = str(uuid.uuid4())
self.db_pth = os.path.expanduser(f"~/.slurm_pool/{unique_filename}.db")
os.makedirs(os.path.dirname(self.db_pth), exist_ok=True)
if not os.path.exists(os.path.join(str(self.folder), ".job.db")):
os.symlink(self.db_pth, os.path.join(str(self.folder), ".job.db"))
else:
self.db_pth = db_pth
print(self.db_pth)
self.transaction_manager = TransactionManager(self.db_pth)
with self.transaction_manager as conn:
conn.execute(
"CREATE TABLE IF NOT EXISTS jobs(status int, pickle text, job_id text, worker_id text, id TEXT, retry_count INT)"
)
conn.execute("CREATE INDEX IF NOT EXISTS jobs_p_idx ON jobs(pickle)")
conn.execute("CREATE INDEX IF NOT EXISTS jobs_id_idx ON jobs(id)")
conn.execute(
"CREATE TABLE IF NOT EXISTS dependencies(pickle text, depends_on text, id TEXT)"
)
conn.execute("CREATE INDEX IF NOT EXISTS dep_p_idx ON dependencies(pickle)")
conn.execute(
"CREATE INDEX IF NOT EXISTS dep_d_idx ON dependencies(depends_on)"
)
conn.execute("CREATE INDEX IF NOT EXISTS dep_id_idx ON dependencies(id)")
def _submit_command(self, command):
tmp_uuid = uuid.uuid4().hex
tasks_ids = list(range(self._num_tasks()))
job = self.job_class(folder=self.folder, job_id=tmp_uuid, tasks=tasks_ids)
return job
def _internal_process_submissions(
self, delayed_submissions: tp.List[utils.DelayedSubmission]
) -> tp.List[core.Job[tp.Any]]:
if len(delayed_submissions) == 1:
jobs = super()._internal_process_submissions(delayed_submissions)
vals = (
JobStatus.pending,
str(jobs[0].paths.submitted_pickle),
jobs[0].job_id,
self.db_pth,
3,
)
with self.transaction_manager as conn:
conn.execute(
"INSERT INTO jobs(status, pickle, job_id, id, retry_count) VALUES(?, ?, ?, ?, ?)",
vals,
)
return jobs
# array
folder = utils.JobPaths.get_first_id_independent_folder(self.folder)
folder.mkdir(parents=True, exist_ok=True)
pickle_paths = []
for d in delayed_submissions:
pickle_path = folder / f"{uuid.uuid4().hex}.pkl"
d.timeout_countdown = self.max_num_timeout
d.dump(pickle_path)
pickle_paths.append(pickle_path)
n = len(delayed_submissions)
self._throttle()
tasks_ids = list(range(len(pickle_paths)))
jobs: tp.List[core.Job[tp.Any]] = [
SlurmJob(folder=self.folder, job_id=f"job_{a}", tasks=tasks_ids)
for a in range(n)
]
with self.transaction_manager as conn:
for job, pickle_path in zip(jobs, pickle_paths):
job.paths.move_temporary_file(pickle_path, "submitted_pickle")
vals = (
JobStatus.pending,
str(job.paths.submitted_pickle),
job.job_id,
self.db_pth,
3,
)
conn.execute(
"INSERT INTO jobs(status, pickle, job_id, id, retry_count) VALUES(?, ?, ?, ?, ?)",
vals,
)
return jobs
def submit(
self, fn: tp.Callable[..., core.R], *args: tp.Any, **kwargs: tp.Any
) -> core.Job[core.R]:
return self.transaction_manager.run(
lambda conn: super(SlurmPoolExecutor, self).submit(fn, *args, **kwargs)
)
def map_array(
self, fn: tp.Callable[..., core.R], *iterable: tp.Iterable[tp.Any]
) -> tp.List[core.Job[core.R]]:
return self.transaction_manager.run(
lambda conn: super(SlurmPoolExecutor, self).map_array(fn, *iterable)
)
def submit_dependent(
self,
depends_on: tp.List[core.Job],
fn: tp.Callable[..., core.R],
*args: tp.Any,
**kwargs: tp.Any,
) -> core.Job[core.R]:
ds = utils.DelayedSubmission(fn, *args, **kwargs)
def txn(conn):
job = self._internal_process_submissions([ds])[0]
for dep in depends_on:
vals = (
str(job.paths.submitted_pickle),
str(dep.paths.submitted_pickle),
self.db_pth,
)
conn.execute(
"INSERT INTO dependencies(pickle, depends_on, id) VALUES (?,?,?)",
vals,
)
return job
return self.transaction_manager.run(txn)
def launch(self, folder=None, workers: int = 2):
if not self.nested:
with self.transaction_manager as conn:
vals = (self.db_pth,)
conn.execute("SELECT COUNT(1) FROM jobs WHERE id=?", vals)
(njobs,) = conn.fetchone()
workers = njobs if workers == -1 else workers
ex = SlurmExecutor(folder or self.folder)
ex.update_parameters(**self.parameters)
self.launched = True
jobs = []
with ex.batch():
for i in range(workers):
jobs.append(ex.submit(Worker(self.db_pth, i)))
return jobs
def extend_dependencies(self, jobs: tp.List[core.Job]):
def txn(conn):
conn.execute(
"""
SELECT pickle
FROM dependencies
WHERE depends_on=? AND id=?
""",
(os.environ["SLURM_PICKLE_PTH"], self.db_pth),
)
my_deps = conn.fetchall()
for (pickle,), depends_on in itertools.product(my_deps, jobs):
vals = (
str(pickle),
str(depends_on.paths.submitted_pickle),
self.db_pth,
)
conn.execute(
"INSERT INTO dependencies (pickle, depends_on, id) VALUES(?,?,?)",
vals,
)
self.transaction_manager.run(txn)
@contextmanager
def nest(self):
self.nested = True
yield
self.nested = False
@contextmanager
def set_folder(self, folder):
old_folder = self.folder
self.folder = folder
yield
self.folder = old_folder
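# Hedged usage sketch (paths, function names, and update_parameters() kwargs are
# illustrative; use whatever your submitit SlurmExecutor setup accepts):
#
#   ex = SlurmPoolExecutor(folder="/tmp/sweep_logs", db_pth="/tmp/jobs.db")
#   ex.update_parameters(partition="learnfair", time=60)
#   job = ex.submit(train_fn, cfg)                # queued in the sqlite jobs table
#   ex.submit_dependent([job], eval_fn, cfg)      # runs only once `job` succeeds
#   ex.launch(workers=4)                          # starts 4 Slurm worker processes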
| covid19_spread-main | covid19_spread/lib/slurm_pool_executor.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import os
import copy
import sys
import typing as tp
@contextlib.contextmanager
def env_var(key_vals: tp.Dict[str, tp.Union[str, None]]):
"""
Context manager for manipulating environment variables. Environment is restored
upon exiting the context manager.
Params:
key_vals - mapping of environment variables to their values. If a value is
`None`, then it is deleted from the environment.
"""
old_dict = {k: os.environ.get(k, None) for k in key_vals.keys()}
for k, v in key_vals.items():
if v is None:
if k in os.environ:
del os.environ[k]
else:
os.environ[k] = v
yield
for k, v in old_dict.items():
if v is not None:
os.environ[k] = v
elif k in os.environ:
del os.environ[k]
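# Hedged usage sketch:
#
#   with env_var({"SLURM_PICKLE_PTH": "/tmp/job.pkl", "UNWANTED_VAR": None}):
#       ...  # SLURM_PICKLE_PTH is set and UNWANTED_VAR is removed in this block
#   # the previous environment is restored on exit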
@contextlib.contextmanager
def chdir(d):
old_dir = os.getcwd()
os.chdir(d)
yield
os.chdir(old_dir)
@contextlib.contextmanager
def sys_path(x):
old_path = copy.deepcopy(sys.path)
sys.path.insert(0, x)
yield
sys.path = old_path
| covid19_spread-main | covid19_spread/lib/context_managers.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import getpass
USER = getpass.getuser()
if os.path.exists(f"/checkpoint"):
FS = "/checkpoint"
PARTITION = "learnfair"
MEM_GB = lambda x: x
elif os.path.exists(f"/fsx"):
FS = "/fsx"
PARTITION = "compute"
MEM_GB = lambda x: 0
else:
FS = os.getcwd() # for CI
| covid19_spread-main | covid19_spread/lib/cluster.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from slack import WebClient
import os
import json
import warnings
def post_slack_message(channel, text):
cred_path = os.path.expanduser("~/.credentials.json")
if not os.path.exists(cred_path):
msg = "Could not find ~/.credentials.json with Slack credentials, not posting message..."
warnings.warn(msg, UserWarning)
return
credentials = json.load(open(cred_path))
if "slack" not in credentials or "bot_token" not in credentials["slack"]:
warnings.warn(
"Could not find Slack credentials in ~/.credentials.json", UserWarning
)
return
client = WebClient(token=credentials["slack"]["bot_token"])
client.chat_postMessage(channel=channel, text=text)
| covid19_spread-main | covid19_spread/lib/slack.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "../"))
import cv
import tempfile
from subprocess import check_call, check_output
import sqlite3
import click
import datetime
from covid19_spread.lib.context_managers import chdir
script_dir = os.path.dirname(os.path.realpath(__file__))
DB = os.path.join(script_dir, ".sweep.db")
def mk_db():
if not os.path.exists(DB):
conn = sqlite3.connect(DB)
conn.execute(
"""
CREATE TABLE sweeps(
path text primary key,
basedate text NOT NULL,
launch_time real NOT NULL,
module text NOT NULL,
slurm_job text,
id text
);
"""
)
conn.execute(
"""
CREATE TABLE submitted(
sweep_path text UNIQUE,
submitted_at real NOT NULL,
FOREIGN KEY(sweep_path) REFERENCES sweeps(path)
);
"""
)
class Recurring:
script_dir = script_dir
def __init__(self, force=False):
self.force = force
mk_db()
def get_id(self) -> str:
"""Return a unique ID to be used in the database"""
raise NotImplementedError
def update_data(self) -> None:
"""Fetch new data (should be idempotent)"""
raise NotImplementedError
def command(self) -> str:
"""The command to run in cron"""
raise NotImplementedError
def latest_date(self) -> datetime.date:
""""Return the latest date that we have data for"""
raise NotImplementedError
def module(self):
"""CV module to run"""
return "mhp"
def schedule(self) -> str:
"""Cron schedule"""
return "*/5 * * * *" # run every 5 minutes
def install(self) -> None:
"""Method to install cron job"""
crontab = check_output(["crontab", "-l"]).decode("utf-8")
marker = f"__JOB_{self.get_id()}__"
if marker in crontab:
raise ValueError(
"Cron job already installed, cleanup crontab"
" with `crontab -e` before installing again"
)
envs = (
check_output(["conda", "env", "list"]).decode("utf-8").strip().split("\n")
)
active = [e for e in envs if "*" in e]
conda_env = None
if len(active) == 1:
conda_env = f"source activate {active[0].split()[0]}"
with tempfile.NamedTemporaryFile() as tfile:
with open(tfile.name, "w") as fout:
print(crontab, file=fout)
print(f"# {marker}", file=fout)
user = os.environ["USER"]
script = os.path.realpath(__file__)
schedule = self.schedule()
stdoutfile = os.path.join(self.script_dir, f".{self.get_id()}.log")
stderrfile = os.path.join(self.script_dir, f".{self.get_id()}.err")
home = os.path.expanduser("~")
cmd = [
"source /etc/profile.d/modules.sh",
f"source {home}/.profile",
f"source {home}/.bash_profile",
f"source {home}/.bashrc",
conda_env,
"slack-on-fail " + self.command(),
]
cmd = [c for c in cmd if c is not None]
subject = f"ERROR in recurring sweep: {self.get_id()}"
envs = [
f'PATH="/usr/local/bin:/private/home/{user}/bin:/usr/sbin:$PATH"',
"__PROD__=1",
f"USER={user}",
]
print(
f'{schedule} {" ".join(envs)} bash -c "{" && ".join(cmd)} >> {stdoutfile} 2>> {stderrfile}"',
file=fout,
)
check_call(["crontab", tfile.name])
def refresh(self) -> None:
"""Check for new data, schedule a job if new data is found"""
self.update_data()
latest_date = self.latest_date()
conn = sqlite3.connect(DB)
res = conn.execute(
"SELECT path, launch_time FROM sweeps WHERE basedate=? AND id=?",
(str(latest_date), self.get_id()),
)
if not self.force:
for pth, launch_time in res:
launch_time = datetime.datetime.fromtimestamp(launch_time)
if os.path.exists(pth):
print(f"Already launched {pth} at {launch_time}, exiting...")
return
# This directory got deleted, remove it from the database...
conn.execute(
"DELETE FROM sweeps WHERE path=? AND id=?", (pth, self.get_id())
)
conn.commit()
sweep_path = self.launch_job()
vals = (
sweep_path,
str(latest_date),
datetime.datetime.now().timestamp(),
self.module(),
self.get_id(),
)
conn.execute(
"INSERT INTO sweeps(path, basedate, launch_time, module, id) VALUES (?,?,?,?,?)",
vals,
)
conn.commit()
def launch_job(self, **kwargs):
"""Launch the sweep job"""
# Launch the sweep
config = os.path.join(script_dir, f"../../cv/{kwargs.get('cv_config')}.yml")
with chdir(f"{script_dir}/../../"):
sweep_path, jobs = click.Context(cv.cv).invoke(
cv.cv,
config_pth=config,
module=kwargs.get("module", "bar"),
remote=True,
array_parallelism=kwargs.get("array_parallelism", 20),
)
return sweep_path
| covid19_spread-main | covid19_spread/data/recurring.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from covid19_spread.common import update_repo
import pandas
import re
import datetime
def get_index():
index = pandas.read_csv(
"https://storage.googleapis.com/covid19-open-data/v2/index.csv"
)
index = index[index["key"].str.match(r"^US_[A-Z]+_\d{5}$").fillna(False)]
index["fips"] = index["subregion2_code"].astype(str).str.zfill(5)
index["name"] = index["subregion2_name"]
return index
def get_nyt(metric="cases"):
print("NYT")
data_repo = update_repo("https://github.com/nytimes/covid-19-data.git")
df = pandas.read_csv(
os.path.join(data_repo, "us-counties.csv"), dtype={"fips": str}
)
index = get_index()
df = df.merge(index[["fips", "subregion1_name", "name"]], on="fips")
df["loc"] = df["subregion1_name"] + "_" + df["name"]
pivot = df.pivot_table(values=metric, columns=["loc"], index="date")
pivot = pivot.fillna(0)
pivot.index = pandas.to_datetime(pivot.index)
if metric == "deaths":
return pivot
# Swap out NYTimes NY state data with the NY DOH data.
NYSTATE_URL = (
"https://health.data.ny.gov/api/views/xdss-u53e/rows.csv?accessType=DOWNLOAD"
)
df = pandas.read_csv(NYSTATE_URL).rename(
columns={"Test Date": "date", "Cumulative Number of Positives": "cases"}
)
df["loc"] = "New York_" + df["County"]
df = df.pivot_table(values=metric, columns=["loc"], index="date")
df.columns = [x + " County" for x in df.columns]
# The NYT labels each date as the date the report comes out, not the date the data corresponds to.
# Add 1 day to the NYS DOH data to get it to align
df.index = pandas.to_datetime(df.index) + datetime.timedelta(days=1)
without_nystate = pivot[[c for c in pivot.columns if not c.startswith("New York")]]
last_date = min(without_nystate.index.max(), df.index.max())
df = df[df.index <= last_date]
without_nystate = without_nystate[without_nystate.index <= last_date]
assert (
df.index.max() == without_nystate.index.max()
), "NYT and DOH data don't matchup yet!"
# Only take NYT data up to the date for which we have nystate data
without_nystate = without_nystate[without_nystate.index <= df.index.max()]
return without_nystate.merge(
df, left_index=True, right_index=True, how="outer"
).fillna(0)
def get_google(metric="cases"):
index = get_index()
df = pandas.read_csv(
"https://storage.googleapis.com/covid19-open-data/v2/epidemiology.csv",
parse_dates=["date"],
)
merged = df.merge(index, on="key")
merged = merged[~merged["subregion2_name"].isnull()]
merged["loc"] = merged["subregion1_name"] + "_" + merged["name"]
value_col = "total_confirmed" if metric == "cases" else "total_deceased"
pivot = merged.pivot(values=value_col, index="date", columns="loc")
if pivot.iloc[-1].isnull().any():
pivot = pivot.iloc[:-1]
pivot.iloc[0] = pivot.iloc[0].fillna(0)
pivot = pivot.fillna(method="ffill")
return pivot
def get_jhu(metric="cases"):
urls = {
"cases": "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv",
"deaths": "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv",
}
df = pandas.read_csv(urls[metric])
df = df[~df["FIPS"].isnull()]
df["FIPS"] = df["FIPS"].apply(lambda x: str(int(x)).zfill(5))
index = get_index()
index["loc"] = index["subregion1_name"] + "_" + index["name"]
merged = df.merge(index[["fips", "loc"]], left_on="FIPS", right_on="fips")
date_cols = [c for c in merged.columns if re.match(r"\d+/\d+/\d+", c)]
transposed = merged[date_cols + ["loc"]].set_index("loc").transpose()
transposed.index = pandas.to_datetime(transposed.index)
return transposed.sort_index()
SOURCES = {
"nyt": get_nyt,
"google": get_google,
"jhu": get_jhu,
}
| covid19_spread-main | covid19_spread/data/usa/process_cases.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import numpy as np
import pandas as pd
import torch as th
from os import listdir
from os.path import isfile, join
from covid19_spread.data.usa.process_cases import SOURCES
import warnings
from covid19_spread.common import standardize_county_name
import os
import multiprocessing as mp
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
nyc_boroughs = [
"Bronx, New York",
"Kings, New York",
"Queens, New York",
"New York, New York",
"Richmond, New York",
]
def county_id(county, state):
return f"{county}, {state}"
def rename_nyc_boroughs(county_name):
if county_name in nyc_boroughs:
return "New York City, New York"
else:
return county_name
def merge_nyc_boroughs(df, ntypes):
df["region"] = df["region"].transform(rename_nyc_boroughs)
prev_len = len(df)
df = df.groupby(["region", "type"]).mean()
assert len(df) == prev_len - ntypes * 4, (prev_len, len(df))
df = df.reset_index()
print(df[df["region"] == "New York City, New York"])
return df
def process_time_features(df, pth, shift=0, merge_nyc=False, input_resolution="county"):
print(f"Processing {pth} at resolution: {input_resolution}")
time_features = pd.read_csv(pth)
if input_resolution == "county_state":
# Expand state level time features to each county in `df`
idx = df.rename_axis("county").reset_index()[["county"]]
idx["region"] = idx["county"].apply(lambda x: x.split(", ")[-1])
time_features = time_features.merge(idx, on="region").drop(columns="region")
time_features = time_features.rename(columns={"county": "region"})
time_feature_regions = time_features["region"].unique()
ncommon = len(df.index.intersection(time_feature_regions))
if ncommon != len(df):
missing = set(df.index).difference(set(time_feature_regions))
warnings.warn(
f"{pth}: Missing time features for the following regions: {list(missing)}"
)
if ncommon != len(time_feature_regions):
ignoring = set(time_feature_regions).difference(set(df.index))
warnings.warn(
f"{pth}: Ignoring time features for the following regions: {list(ignoring)}"
)
time_features = time_features[time_features["region"].isin(set(df.index))]
if merge_nyc:
time_features = merge_nyc_boroughs(
time_features, len(time_features["type"].unique())
)
# Transpose to have two level columns (region, type) and dates as index
time_features = time_features.set_index(["region", "type"]).transpose().sort_index()
time_features.index = pd.to_datetime(time_features.index)
# Trim prefix if it starts before the dates in `df`
time_features = time_features.loc[time_features.index >= df.columns.min()]
# Fill in dates that are missing in `time_features` that exist in `df`
time_features = time_features.reindex(df.columns)
# Shift time features UP by `shift` days
time_features = time_features.shift(shift)
# forward fill the missing values
time_features = time_features.fillna(method="ffill")
# Fill the beginning end with zeros if null
time_features = time_features.fillna(0)
time_features = time_features[time_features.columns.sort_values()]
feature_tensors = {
region: th.from_numpy(time_features[region].values)
for region in time_features.columns.get_level_values(0).unique()
}
if input_resolution == "county_state":
pth = pth.replace("state", "county_state")
th.save(feature_tensors, pth.replace(".csv", ".pt"))
def run_par(fs, args, kwargs, max_par=None):
if not isinstance(fs, list):
fs = [fs] * len(args)
if "MAX_PARALLELISM" in os.environ:
max_par = int(os.environ["MAX_PARALLELISM"])
print(f"Max parallelism = {max_par}")
if max_par is not None and max_par <= 1:
for _f, _args, _kwargs in zip(fs, args, kwargs):
_f(*_args, **_kwargs)
return
with mp.Pool(max_par) as pool:
results = [
pool.apply_async(f, args=a, kwds=k) for f, a, k in zip(fs, args, kwargs)
]
[r.get() for r in results]
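# Hedged usage sketch: fan one function out over several argument tuples with a
# process pool (the file names are illustrative).
#
#   run_par(
#       process_time_features,
#       [(df, "a.csv"), (df, "b.csv")],
#       [{}, {}],
#       max_par=2,
#   )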
def create_time_features():
from .symptom_survey import prepare as ss_prepare
from .fb import prepare as fb_prepare
from .google import prepare as google_prepare
from .testing import prepare as testing_prepare
fs = [ss_prepare, fb_prepare, google_prepare, testing_prepare]
run_par(fs, [()] * len(fs), [{}] * len(fs))
def main(metric, with_features, source, resolution):
df = SOURCES[source](metric)
df.index = pd.to_datetime(df.index)
dates = df.index
df.columns = [c.split("_")[1] + ", " + c.split("_")[0] for c in df.columns]
# drop all zero columns
df = df[df.columns[(df.sum(axis=0) != 0).values]]
df = df.transpose() # row for each county, columns correspond to dates...
# make sure counts are strictly increasing
df = df.cummax(axis=1)
# throw away all-zero columns, i.e., days with no cases
counts = df.sum(axis=0)
df = df.iloc[:, np.where(counts > 0)[0]]
if resolution == "state":
df = df.groupby(lambda x: x.split(", ")[-1]).sum()
df = df.drop(
index=["Virgin Islands", "Northern Mariana Islands", "Puerto Rico", "Guam"],
errors="ignore",
)
county_id = {c: i for i, c in enumerate(df.index)}
df.to_csv(f"{SCRIPT_DIR}/data_{metric}.csv", index_label="region")
df[df.index.str.endswith("New York")].to_csv(
f"{SCRIPT_DIR}/data_{metric}_ny.csv", index_label="region"
)
df[df.index.str.endswith("Florida")].to_csv(
f"{SCRIPT_DIR}/data_{metric}_fl.csv", index_label="region"
)
if resolution == "county":
# Build state graph...
adj = np.zeros((len(df), len(df)))
for _, g in df.groupby(lambda x: x.split(", ")[-1]):
idxs = np.array([county_id[c] for c in g.index])
adj[np.ix_(idxs, idxs)] = 1
print(adj)
th.save(th.from_numpy(adj), f"{SCRIPT_DIR}/state_graph.pt")
if with_features:
create_time_features()
res = resolution
merge_nyc = metric == "deaths" and res == "county"
features = [
(f"{SCRIPT_DIR}/testing/ratio_features_{res}.csv", 0, res),
(f"{SCRIPT_DIR}/testing/total_features_{res}.csv", 0, res),
(f"{SCRIPT_DIR}/fb/mobility_features_{res}_fb.csv", 5, res),
(f"{SCRIPT_DIR}/google/mobility_features_{res}_google.csv", 5, res),
(f"{SCRIPT_DIR}/google/weather_features_{res}.csv", 5, res),
(f"{SCRIPT_DIR}/google/epi_features_{res}.csv", 7, res),
(f"{SCRIPT_DIR}/google/epi_features_{res}.csv", 7, res),
]
if res == "state":
features.append((f"{SCRIPT_DIR}/google/hosp_features_{res}.csv", 0, res))
features.append((f"{SCRIPT_DIR}/shifted_features_{res}.csv", 0, res))
features.append((f"{SCRIPT_DIR}/google/vaccination_state.csv", 0, "state"))
else:
features.append(
(f"{SCRIPT_DIR}/google/vaccination_state.csv", 0, "county_state")
)
for signal, lag in [
(f"{SCRIPT_DIR}/symptom_survey/doctor-visits_smoothed_adj_cli-{{}}.csv", 2),
(f"{SCRIPT_DIR}/symptom_survey/fb-survey_smoothed_wcli-{{}}.csv", 0),
(
f"{SCRIPT_DIR}/symptom_survey/fb-survey_smoothed_hh_cmnty_cli-{{}}.csv",
0,
),
(
f"{SCRIPT_DIR}/symptom_survey/fb-survey_smoothed_wearing_mask_all-{{}}.csv",
5,
),
(
f"{SCRIPT_DIR}/symptom_survey/fb-survey_smoothed_wothers_masked-{{}}.csv",
5,
),
(
f"{SCRIPT_DIR}/symptom_survey/fb-survey_smoothed_wcovid_vaccinated_or_accept-{{}}.csv",
5,
),
(f"{SCRIPT_DIR}/fb/mobility_features_{{}}_fb.csv", 5),
(f"{SCRIPT_DIR}/google/mobility_features_{{}}_google.csv", 5),
]:
if res == "county":
features.append((signal.format("county"), lag, "county"))
features.append((signal.format("state"), lag, "county_state"))
else:
features.append((signal.format("state"), lag, "state"))
features = [(df, pth, lag, merge_nyc, r) for pth, lag, r in features]
run_par([process_time_features] * len(features), features, [{}] * len(features))
if __name__ == "__main__":
parser = argparse.ArgumentParser("US data")
parser.add_argument("-metric", default="cases", choices=["cases", "deaths"])
parser.add_argument("-with-features", default=False, action="store_true")
parser.add_argument("-source", choices=SOURCES.keys(), default="nyt")
parser.add_argument("-resolution", choices=["county", "state"], default="county")
opt = parser.parse_args()
main(opt.metric, opt.with_features, opt.source, opt.resolution)
| covid19_spread-main | covid19_spread/data/usa/convert.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from .. import recurring
import pandas
from ...lib.slack import post_slack_message
from datetime import date, datetime, timedelta
from .convert import main as convert
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
class USARRecurring(recurring.Recurring):
script_dir = SCRIPT_DIR
def get_id(self):
return "us-bar"
def command(self):
return f"recurring run us"
def module(self):
return "bar_time_features"
def schedule(self):
return "*/10 * * * *"
def update_data(self):
convert("cases", with_features=False, source="nyt", resolution="county")
def latest_date(self):
df = pandas.read_csv(f"{SCRIPT_DIR}/data_cases.csv", index_col="region")
max_date = pandas.to_datetime(df.columns).max().date()
if max_date < (date.today() - timedelta(days=1)) and datetime.now().hour > 17:
expected_date = date.today() - timedelta(days=1)
msg = f"*WARNING: new data for {expected_date} is still not available!*"
post_slack_message(channel="#cron_errors", text=msg)
return pandas.to_datetime(df.columns).max().date()
def launch_job(self, **kwargs):
# Make clean with features
convert("cases", with_features=True, source="nyt", resolution="county")
msg = f"*New Data Available for US: {self.latest_date()}*"
post_slack_message(channel="#new-data", text=msg)
return super().launch_job(
module="bar", cv_config="us", array_parallelism=90, **kwargs
)
| covid19_spread-main | covid19_spread/data/usa/us_recurring.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pandas as pd
from datetime import datetime
from covid19_spread.data.usa.process_cases import get_index
import os
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main():
print("Getting Google mobility data...")
cols = [
"date",
"region",
"retail_and_recreation_percent_change_from_baseline",
"grocery_and_pharmacy_percent_change_from_baseline",
"parks_percent_change_from_baseline",
"transit_stations_percent_change_from_baseline",
"workplaces_percent_change_from_baseline",
"residential_percent_change_from_baseline",
]
def get_county_mobility_google(fin=None):
# Google LLC "Google COVID-19 Community Mobility Reports."
# https://www.google.com/covid19/mobility/ Accessed: 2020-05-04.
# unfortunately, this is only relative to mobility on a baseline date
if fin is None:
fin = "https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv"
df_Gmobility_global = pd.read_csv(
fin, parse_dates=["date"], dtype={"census_fips_code": str}
)
df_Gmobility_usa = df_Gmobility_global.query("country_region_code == 'US'")
return df_Gmobility_usa
df = get_county_mobility_google()
df = df[~df["census_fips_code"].isnull()]
index = get_index()
index["region"] = index["subregion2_name"] + ", " + index["subregion1_name"]
df = df.merge(
index, left_on="census_fips_code", right_on="fips", suffixes=("", "_x")
)[list(df.columns) + ["region"]]
df = df[cols]
val_cols = [c for c in df.columns if c not in {"region", "date"}]
ratio = (1 + df.set_index(["region", "date"]) / 100).reset_index()
piv = ratio.pivot(index="date", columns="region", values=val_cols)
piv = piv.rolling(7, min_periods=1).mean().transpose()
piv.iloc[0] = piv.iloc[0].fillna(0)
piv = piv.fillna(0)
dfs = []
for k in piv.index.get_level_values(0).unique():
df = piv.loc[k].copy()
df["type"] = k
dfs.append(df)
df = pd.concat(dfs)
df = df[["type"] + sorted([c for c in df.columns if isinstance(c, datetime)])]
df.columns = [str(c.date()) if isinstance(c, datetime) else c for c in df.columns]
df.to_csv(f"{SCRIPT_DIR}/mobility_features_county_google.csv")
state = get_county_mobility_google()
state = state[(~state["sub_region_1"].isnull()) & (state["sub_region_2"].isnull())]
state["region"] = state["sub_region_1"]
state = state[cols]
ratio = (1 + state[cols].set_index(["region", "date"]) / 100).reset_index()
piv = ratio.pivot(index="date", columns="region", values=val_cols)
piv = piv.rolling(7, min_periods=1).mean().transpose()
piv.columns = [str(x.date()) for x in sorted(piv.columns)]
piv = piv.fillna(0).reset_index(level=0).rename(columns={"level_0": "type"})
piv.to_csv(f"{SCRIPT_DIR}/mobility_features_state_google.csv")
if __name__ == "__main__":
main()
| covid19_spread-main | covid19_spread/data/usa/google/process_mobility.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import pandas
from datetime import datetime
import os
from covid19_spread.data.usa.process_cases import get_index
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
def main():
index = pandas.read_csv(
"https://storage.googleapis.com/covid19-open-data/v2/index.csv"
)
state_index = index[(index["key"].str.match("^US_[A-Z]+$")).fillna(False)]
index = get_index()
def zscore(piv):
# z-zcore
piv = (piv - piv.mean(skipna=True)) / piv.std(skipna=True)
piv = piv.fillna(method="ffill").fillna(method="bfill")
# piv = piv.fillna(0)
return piv
def zero_one(df):
df = df.fillna(0)
# df = df.div(df.max(axis=0), axis=1)
df = df / df.max(axis=0)
df = df.fillna(0)
return df
def process_df(df, columns, resolution, func_normalize):
idx = state_index if resolution == "state" else index
merged = df.merge(idx, on="key")
if resolution == "state":
exclude = {"US_MP", "US_AS", "US_GU", "US_VI", "US_PR"}
merged = merged[~merged["key"].isin(exclude)]
merged["region"] = merged["subregion1_name"]
else:
merged["region"] = merged["name"] + ", " + merged["subregion1_name"]
piv = merged.pivot(index="date", columns="region", values=columns)
if func_normalize is not None:
piv = func_normalize(piv)
dfs = []
for k in piv.columns.get_level_values(0).unique():
dfs.append(piv[k].transpose())
dfs[-1]["type"] = k
df = pandas.concat(dfs)
df = df[["type"] + [c for c in df.columns if isinstance(c, datetime)]]
df.columns = [
str(c.date()) if isinstance(c, datetime) else c for c in df.columns
]
return df.fillna(0) # in case all values are NaN
def get_df(url):
if "weather" in url:
# This dataset is quite large. Iterate in chunks, and filter out non-US rows
chunks = []
for chunk in pandas.read_csv(url, parse_dates=["date"], chunksize=200000):
chunks.append(
chunk[~chunk["key"].isnull() & chunk["key"].str.startswith("US")]
)
df = pandas.concat(chunks)
else:
df = pandas.read_csv(url, parse_dates=["date"])
return df[~df["key"].isnull() & df["key"].str.startswith("US")]
def do_feature(url, columns, resolution, func_normalize, outfile):
print(f"Fetching {url}")
df = get_df(url)
vaccination = process_df(
df, columns=columns, resolution=resolution, func_normalize=func_normalize
)
vaccination = vaccination.reset_index().set_index(["region", "type"])
vaccination.to_csv(outfile, index_label=["region", "type"])
# --- Vaccination data ---
do_feature(
url="https://storage.googleapis.com/covid19-open-data/v2/vaccinations.csv",
columns=["new_persons_vaccinated", "total_persons_vaccinated"],
resolution="state",
func_normalize=zero_one,
outfile=os.path.join(SCRIPT_DIR, "vaccination_state.csv"),
)
# --- Hospitalizations ---
do_feature(
url="https://storage.googleapis.com/covid19-open-data/v2/hospitalizations.csv",
columns=[
"current_hospitalized",
"current_intensive_care",
"current_ventilator",
],
resolution="state",
func_normalize=lambda x: zero_one(x.clip(0, None)),
outfile=os.path.join(SCRIPT_DIR, "hosp_features_state.csv"),
)
# --- Weather features ---
do_feature(
url="https://storage.googleapis.com/covid19-open-data/v2/weather.csv",
columns=[
"average_temperature",
"minimum_temperature",
"maximum_temperature",
"rainfall",
"relative_humidity",
"dew_point",
],
resolution="state",
func_normalize=zscore,
outfile=os.path.join(SCRIPT_DIR, "weather_features_state.csv"),
)
do_feature(
url="https://storage.googleapis.com/covid19-open-data/v2/weather.csv",
columns=[
"average_temperature",
"minimum_temperature",
"maximum_temperature",
"rainfall",
"relative_humidity",
"dew_point",
],
resolution="county",
func_normalize=zscore,
outfile=os.path.join(SCRIPT_DIR, "weather_features_county.csv"),
)
# --- Epi features ---
do_feature(
url="https://storage.googleapis.com/covid19-open-data/v2/epidemiology.csv",
columns=["new_confirmed"],
resolution="state",
func_normalize=lambda x: zero_one(x.clip(0, None)),
outfile=os.path.join(SCRIPT_DIR, "epi_features_state.csv"),
)
do_feature(
url="https://storage.googleapis.com/covid19-open-data/v2/epidemiology.csv",
columns=["new_confirmed"],
resolution="county",
func_normalize=lambda x: zero_one(x.clip(0, None)),
outfile=os.path.join(SCRIPT_DIR, "epi_features_county.csv"),
)
# ---- Testing -----
print("Getting Google testing data...")
df = get_df("https://storage.googleapis.com/covid19-open-data/v2/epidemiology.csv")
testing = process_df(
df,
columns=["new_tested"],
resolution="state",
func_normalize=lambda x: zero_one(x.clip(0, None)),
)
testing.round(3).to_csv(f"{SCRIPT_DIR}/tested_total_state.csv")
df["ratio"] = df["new_confirmed"] / df["new_tested"]
testing = process_df(
df, columns=["ratio"], resolution="state", func_normalize=None,
)
testing.round(3).to_csv(f"{SCRIPT_DIR}/tested_ratio_state.csv")
if __name__ == "__main__":
main()
| covid19_spread-main | covid19_spread/data/usa/google/process_open_data.py |
#!/usr/bin/env python3
# Copyright (c) 2021-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from .process_mobility import main as mobility_main
from .process_open_data import main as open_data_main
def prepare():
mobility_main()
open_data_main()
| covid19_spread-main | covid19_spread/data/usa/google/__init__.py |