python_code (string, 0-992k chars) | repo_name (string, 8-46 chars) | file_path (string, 5-162 chars)
---|---|---|
from vlmo.datasets import WikibkDataset
from .datamodule_base import BaseDataModule
class WikibkDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return WikibkDataset
@property
def dataset_name(self):
return "wikibk"
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datamodules/wikibk_datamodule.py |
from vlmo.datasets import CocoCaptionKarpathyDataset
from .datamodule_base import BaseDataModule
class CocoCaptionKarpathyDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return CocoCaptionKarpathyDataset
@property
def dataset_cls_no_false(self):
return CocoCaptionKarpathyDataset
@property
def dataset_name(self):
return "coco"
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datamodules/coco_caption_karpathy_datamodule.py |
from vlmo.datasets import F30KCaptionKarpathyDataset
from .datamodule_base import BaseDataModule
class F30KCaptionKarpathyDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return F30KCaptionKarpathyDataset
@property
def dataset_cls_no_false(self):
return F30KCaptionKarpathyDataset
@property
def dataset_name(self):
return "f30k"
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/datamodules/f30k_caption_karpathy_datamodule.py |
import json
import pandas as pd
import pyarrow as pa
import os
from tqdm import tqdm
from collections import defaultdict
def process(root, iden, row):
texts = [r["sentence"] for r in row]
labels = [r["label"] for r in row]
split = iden.split("-")[0]
if iden.startswith("train"):
directory = row[0]["directory"]
path = f"{root}/images/train/{directory}/{iden}"
else:
path = f"{root}/{split}/{iden}"
with open(f"{path}-img0.png", "rb") as fp:
img0 = fp.read()
with open(f"{path}-img1.png", "rb") as fp:
img1 = fp.read()
return [img0, img1, texts, labels, iden]
def make_arrow(root, dataset_root):
train_data = list(
map(json.loads, open(f"{root}/nlvr2/data/train.json").readlines())
)
test1_data = list(
map(json.loads, open(f"{root}/nlvr2/data/test1.json").readlines())
)
dev_data = list(map(json.loads, open(f"{root}/nlvr2/data/dev.json").readlines()))
balanced_test1_data = list(
map(
json.loads,
open(f"{root}/nlvr2/data/balanced/balanced_test1.json").readlines(),
)
)
balanced_dev_data = list(
map(
json.loads,
open(f"{root}/nlvr2/data/balanced/balanced_dev.json").readlines(),
)
)
unbalanced_test1_data = list(
map(
json.loads,
open(f"{root}/nlvr2/data/unbalanced/unbalanced_test1.json").readlines(),
)
)
unbalanced_dev_data = list(
map(
json.loads,
open(f"{root}/nlvr2/data/unbalanced/unbalanced_dev.json").readlines(),
)
)
splits = [
"train",
"dev",
"test1",
"balanced_dev",
"balanced_test1",
"unbalanced_dev",
"unbalanced_test1",
]
datas = [
train_data,
dev_data,
test1_data,
balanced_dev_data,
balanced_test1_data,
unbalanced_dev_data,
unbalanced_test1_data,
]
annotations = dict()
for split, data in zip(splits, datas):
_annot = defaultdict(list)
for row in tqdm(data):
_annot["-".join(row["identifier"].split("-")[:-1])].append(row)
annotations[split] = _annot
for split in splits:
bs = [
process(root, iden, row) for iden, row in tqdm(annotations[split].items())
]
dataframe = pd.DataFrame(
bs, columns=["image_0", "image_1", "questions", "answers", "identifier"],
)
table = pa.Table.from_pandas(dataframe)
os.makedirs(dataset_root, exist_ok=True)
with pa.OSFile(f"{dataset_root}/nlvr2_{split}.arrow", "wb") as sink:
with pa.RecordBatchFileWriter(sink, table.schema) as writer:
writer.write_table(table)
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/utils/write_nlvr2.py |
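A minimal usage sketch for the writer above; the directory paths are placeholders and assume the NLVR2 annotations and images are laid out exactly as process() and make_arrow() expect.
import pyarrow as pa
from vlmo.utils.write_nlvr2 import make_arrow

# Placeholder paths; point them at the NLVR2 data checkout and an output directory.
make_arrow(root="/data/nlvr2_root", dataset_root="/data/vlmo_arrow")

# Read one shard back to verify the schema written above.
table = pa.ipc.RecordBatchFileReader(
    pa.memory_map("/data/vlmo_arrow/nlvr2_dev.arrow", "r")
).read_all()
print(table.schema)  # image_0, image_1, questions, answers, identifier (plus the pandas index column)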
import json
import pandas as pd
import pyarrow as pa
import gc
import random
import os
from tqdm import tqdm
from glob import glob
def path2rest(line):
return [
"None",
[line],
"wikibk",
"train",
]
def make_arrow(root, dataset_root):
for index in range(0, 50):
file_path = f"{root}/wikibk.{index}.txt"
all_sents = []
with open(file_path, "r", encoding="utf-8") as fp:
for line in fp:
all_sents.append(line.strip())
print(file_path)
print("Number of sentences: {}".format(len(all_sents)))
bs = [path2rest(line) for line in tqdm(all_sents)]
dataframe = pd.DataFrame(bs, columns=["image", "caption", "source", "split"],)
table = pa.Table.from_pandas(dataframe)
os.makedirs(dataset_root, exist_ok=True)
with pa.OSFile(f"{dataset_root}/wikibk_train_{index}.arrow", "wb") as sink:
with pa.RecordBatchFileWriter(sink, table.schema) as writer:
writer.write_table(table)
del dataframe
del table
del bs
gc.collect() | EXA-1-master | exa/models/unilm-master/vlmo/vlmo/utils/write_wikibk.py |
import json
import pandas as pd
import pyarrow as pa
import random
import os
from tqdm import tqdm
from glob import glob
from collections import defaultdict
def path2rest(path, iid2captions):
name = path.split("/")[-1]
iid = int(name[:-4])
with open(path, "rb") as fp:
binary = fp.read()
cdicts = iid2captions[iid]
captions = [c["phrase"] for c in cdicts]
widths = [c["width"] for c in cdicts]
heights = [c["height"] for c in cdicts]
xs = [c["x"] for c in cdicts]
ys = [c["y"] for c in cdicts]
return [
binary,
captions,
widths,
heights,
xs,
ys,
str(iid),
]
def make_arrow(root, dataset_root):
with open(f"{root}/annotations/region_descriptions.json", "r") as fp:
captions = json.load(fp)
iid2captions = defaultdict(list)
for cap in tqdm(captions):
cap = cap["regions"]
for c in cap:
iid2captions[c["image_id"]].append(c)
paths = list(glob(f"{root}/images/VG_100K/*.jpg")) + list(
glob(f"{root}/images/VG_100K_2/*.jpg")
)
random.shuffle(paths)
caption_paths = [
path for path in paths if int(path.split("/")[-1][:-4]) in iid2captions
]
if len(paths) == len(caption_paths):
print("all images have caption annotations")
else:
print("not all images have caption annotations")
print(
len(paths), len(caption_paths), len(iid2captions),
)
bs = [path2rest(path, iid2captions) for path in tqdm(caption_paths)]
dataframe = pd.DataFrame(
bs, columns=["image", "caption", "width", "height", "x", "y", "image_id"],
)
table = pa.Table.from_pandas(dataframe)
os.makedirs(dataset_root, exist_ok=True)
with pa.OSFile(f"{dataset_root}/vg.arrow", "wb") as sink:
with pa.RecordBatchFileWriter(sink, table.schema) as writer:
writer.write_table(table)
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/utils/write_vg.py |
import json
import pandas as pd
import pyarrow as pa
import random
import os
from tqdm import tqdm
from glob import glob
from collections import defaultdict
def path2rest(path, iid2captions, iid2split):
name = path.split("/")[-1]
with open(path, "rb") as fp:
binary = fp.read()
captions = iid2captions[name]
split = iid2split[name]
return [binary, captions, name, split]
def make_arrow(root, dataset_root):
with open(f"{root}/karpathy/dataset_flickr30k.json", "r") as fp:
captions = json.load(fp)
captions = captions["images"]
iid2captions = defaultdict(list)
iid2split = dict()
for cap in tqdm(captions):
filename = cap["filename"]
iid2split[filename] = cap["split"]
for c in cap["sentences"]:
iid2captions[filename].append(c["raw"])
paths = list(glob(f"{root}/flickr30k-images/*.jpg"))
random.shuffle(paths)
caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions]
if len(paths) == len(caption_paths):
print("all images have caption annotations")
else:
print("not all images have caption annotations")
print(
len(paths), len(caption_paths), len(iid2captions),
)
bs = [path2rest(path, iid2captions, iid2split) for path in tqdm(caption_paths)]
for split in ["train", "val", "test"]:
batches = [b for b in bs if b[-1] == split]
dataframe = pd.DataFrame(
batches, columns=["image", "caption", "image_id", "split"],
)
table = pa.Table.from_pandas(dataframe)
os.makedirs(dataset_root, exist_ok=True)
with pa.OSFile(
f"{dataset_root}/f30k_caption_karpathy_{split}.arrow", "wb"
) as sink:
with pa.RecordBatchFileWriter(sink, table.schema) as writer:
writer.write_table(table)
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/utils/write_f30k_karpathy.py |
import json
import os
import pandas as pd
import pyarrow as pa
import random
from tqdm import tqdm
from glob import glob
from collections import defaultdict
def path2rest(path, iid2captions, iid2split):
name = path.split("/")[-1]
with open(path, "rb") as fp:
binary = fp.read()
captions = iid2captions[name]
split = iid2split[name]
return [binary, captions, name, split]
def make_arrow(root, dataset_root):
with open(f"{root}/karpathy/dataset_coco.json", "r") as fp:
captions = json.load(fp)
captions = captions["images"]
iid2captions = defaultdict(list)
iid2split = dict()
for cap in tqdm(captions):
filename = cap["filename"]
iid2split[filename] = cap["split"]
for c in cap["sentences"]:
iid2captions[filename].append(c["raw"])
paths = list(glob(f"{root}/train2014/*.jpg")) + list(glob(f"{root}/val2014/*.jpg"))
random.shuffle(paths)
caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions]
if len(paths) == len(caption_paths):
print("all images have caption annotations")
else:
print("not all images have caption annotations")
print(
len(paths), len(caption_paths), len(iid2captions),
)
bs = [path2rest(path, iid2captions, iid2split) for path in tqdm(caption_paths)]
for split in ["train", "val", "restval", "test"]:
batches = [b for b in bs if b[-1] == split]
dataframe = pd.DataFrame(
batches, columns=["image", "caption", "image_id", "split"],
)
table = pa.Table.from_pandas(dataframe)
os.makedirs(dataset_root, exist_ok=True)
with pa.OSFile(
f"{dataset_root}/coco_caption_karpathy_{split}.arrow", "wb"
) as sink:
with pa.RecordBatchFileWriter(sink, table.schema) as writer:
writer.write_table(table)
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/utils/write_coco_karpathy.py |
import json
import pandas as pd
import pyarrow as pa
import gc
import random
import os
from tqdm import tqdm
from glob import glob
def path2rest(path, iid2captions):
split, _, name = path.split("/")[-3:]
split = split.split("_")[-1]
iid = name
with open(path, "rb") as fp:
binary = fp.read()
captions = iid2captions[iid]
return [
binary,
captions,
iid,
split,
]
def make_arrow(root, dataset_root):
for split in ["val", "train"]:
with open(f"{root}/{split}_annot.json", "r") as fp:
captions = json.load(fp)
iid2captions = dict()
for cap in tqdm(captions):
iid = cap[0].split("/")[-1]
iid2captions[iid] = [cap[1]]
paths = list(glob(f"{root}/images_{split}/*/*"))
random.shuffle(paths)
caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions]
if len(paths) == len(caption_paths):
print("all images have caption annotations")
else:
print("not all images have caption annotations")
print(
len(paths), len(caption_paths), len(iid2captions),
)
sub_len = int(len(caption_paths) // 100000)
subs = list(range(sub_len + 1))
for sub in subs:
sub_paths = caption_paths[sub * 100000 : (sub + 1) * 100000]
bs = [path2rest(path, iid2captions) for path in tqdm(sub_paths)]
dataframe = pd.DataFrame(
bs, columns=["image", "caption", "image_id", "split"],
)
table = pa.Table.from_pandas(dataframe)
os.makedirs(dataset_root, exist_ok=True)
with pa.OSFile(
f"{dataset_root}/conceptual_caption_{split}_{sub}.arrow", "wb"
) as sink:
with pa.RecordBatchFileWriter(sink, table.schema) as writer:
writer.write_table(table)
del dataframe
del table
del bs
gc.collect()
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/utils/write_conceptual_caption.py |
import json
import pandas as pd
import pyarrow as pa
import random
import os
from tqdm import tqdm
from glob import glob
from collections import defaultdict, Counter
from .glossary import normalize_word
def get_score(occurrences):
if occurrences == 0:
return 0.0
elif occurrences == 1:
return 0.3
elif occurrences == 2:
return 0.6
elif occurrences == 3:
return 0.9
else:
return 1.0
def path2rest(path, split, annotations, label2ans):
iid = int(path.split("/")[-1].split("_")[-1][:-4])
with open(path, "rb") as fp:
binary = fp.read()
_annot = annotations[split][iid]
_annot = list(_annot.items())
qids, qas = [a[0] for a in _annot], [a[1] for a in _annot]
questions = [qa[0] for qa in qas]
answers = [qa[1] for qa in qas] if "test" not in split else list(list())
answer_labels = (
[a["labels"] for a in answers] if "test" not in split else list(list())
)
answer_scores = (
[a["scores"] for a in answers] if "test" not in split else list(list())
)
answers = (
[[label2ans[l] for l in al] for al in answer_labels]
if "test" not in split
else list(list())
)
return [binary, questions, answers, answer_labels, answer_scores, iid, qids, split]
def make_arrow(root, dataset_root):
with open(f"{root}/v2_OpenEnded_mscoco_train2014_questions.json", "r") as fp:
questions_train2014 = json.load(fp)["questions"]
with open(f"{root}/v2_OpenEnded_mscoco_val2014_questions.json", "r") as fp:
questions_val2014 = json.load(fp)["questions"]
with open(f"{root}/v2_OpenEnded_mscoco_test2015_questions.json", "r") as fp:
questions_test2015 = json.load(fp)["questions"]
with open(f"{root}/v2_OpenEnded_mscoco_test-dev2015_questions.json", "r") as fp:
questions_test_dev2015 = json.load(fp)["questions"]
with open(f"{root}/v2_mscoco_train2014_annotations.json", "r") as fp:
annotations_train2014 = json.load(fp)["annotations"]
with open(f"{root}/v2_mscoco_val2014_annotations.json", "r") as fp:
annotations_val2014 = json.load(fp)["annotations"]
annotations = dict()
for split, questions in zip(
["train", "val", "test", "test-dev"],
[
questions_train2014,
questions_val2014,
questions_test2015,
questions_test_dev2015,
],
):
_annot = defaultdict(dict)
for q in tqdm(questions):
_annot[q["image_id"]][q["question_id"]] = [q["question"]]
annotations[split] = _annot
all_major_answers = list()
for split, annots in zip(
["train", "val"], [annotations_train2014, annotations_val2014],
):
_annot = annotations[split]
for q in tqdm(annots):
all_major_answers.append(q["multiple_choice_answer"])
all_major_answers = [normalize_word(word) for word in tqdm(all_major_answers)]
counter = {k: v for k, v in Counter(all_major_answers).items() if v >= 9}
ans2label = {k: i for i, k in enumerate(counter.keys())}
label2ans = list(counter.keys())
for split, annots in zip(
["train", "val"], [annotations_train2014, annotations_val2014],
):
_annot = annotations[split]
for q in tqdm(annots):
answers = q["answers"]
answer_count = {}
for answer in answers:
answer_ = answer["answer"]
answer_count[answer_] = answer_count.get(answer_, 0) + 1
labels = []
scores = []
for answer in answer_count:
if answer not in ans2label:
continue
labels.append(ans2label[answer])
score = get_score(answer_count[answer])
scores.append(score)
_annot[q["image_id"]][q["question_id"]].append(
{"labels": labels, "scores": scores,}
)
for split in ["train", "val"]:
filtered_annot = dict()
for ik, iv in annotations[split].items():
new_q = dict()
for qk, qv in iv.items():
if len(qv[1]["labels"]) != 0:
new_q[qk] = qv
if len(new_q) != 0:
filtered_annot[ik] = new_q
annotations[split] = filtered_annot
for split in [
"train",
"val",
"test",
"test-dev",
]:
annot = annotations[split]
split_name = {
"train": "train2014",
"val": "val2014",
"test": "test2015",
"test-dev": "test2015",
}[split]
paths = list(glob(f"{root}/{split_name}/*.jpg"))
random.shuffle(paths)
annot_paths = [
path
for path in paths
if int(path.split("/")[-1].split("_")[-1][:-4]) in annot
]
if len(paths) == len(annot_paths):
print("all images have caption annotations")
else:
print("not all images have caption annotations")
print(
len(paths), len(annot_paths), len(annot),
)
bs = [
path2rest(path, split, annotations, label2ans) for path in tqdm(annot_paths)
]
dataframe = pd.DataFrame(
bs,
columns=[
"image",
"questions",
"answers",
"answer_labels",
"answer_scores",
"image_id",
"question_id",
"split",
],
)
table = pa.Table.from_pandas(dataframe)
os.makedirs(dataset_root, exist_ok=True)
with pa.OSFile(f"{dataset_root}/vqav2_{split}.arrow", "wb") as sink:
with pa.RecordBatchFileWriter(sink, table.schema) as writer:
writer.write_table(table)
table = pa.ipc.RecordBatchFileReader(
pa.memory_map(f"{dataset_root}/vqav2_val.arrow", "r")
).read_all()
pdtable = table.to_pandas()
df1 = pdtable[:-1000]
df2 = pdtable[-1000:]
df1 = pa.Table.from_pandas(df1)
df2 = pa.Table.from_pandas(df2)
with pa.OSFile(f"{dataset_root}/vqav2_trainable_val.arrow", "wb") as sink:
with pa.RecordBatchFileWriter(sink, df1.schema) as writer:
writer.write_table(df1)
with pa.OSFile(f"{dataset_root}/vqav2_rest_val.arrow", "wb") as sink:
with pa.RecordBatchFileWriter(sink, df2.schema) as writer:
writer.write_table(df2)
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/utils/write_vqa.py |
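The soft scores above follow the common VQAv2 approximation of min(number of agreeing annotators, 3) / 3, stepped as 0.3/0.6/0.9/1.0. A small self-contained sketch of how an answer_count dict turns into labels and scores, assuming a toy ans2label vocabulary rather than the real one built from counts >= 9 above.
from collections import Counter
from vlmo.utils.write_vqa import get_score

ans2label = {"yes": 0, "no": 1}           # toy vocabulary for illustration only
human_answers = ["yes"] * 8 + ["no"] * 2  # ten hypothetical annotator answers
answer_count = Counter(human_answers)

labels = [ans2label[a] for a in answer_count if a in ans2label]
scores = [get_score(answer_count[a]) for a in answer_count if a in ans2label]
print(labels, scores)  # [0, 1] [1.0, 0.6]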
import json
import pandas as pd
import pyarrow as pa
import gc
import random
import os
from tqdm import tqdm
from glob import glob
def path2rest(path, iid2captions):
split, _, name = path.split("/")[-3:]
split = split.split("_")[-1]
iid = name
with open(path, "rb") as fp:
binary = fp.read()
captions = iid2captions[iid]
return [
binary,
captions,
iid,
split,
]
def make_arrow(root, dataset_root):
with open(f"{root}/annot.json", "r") as fp:
captions = json.load(fp)
iid2captions = dict()
for cap in tqdm(captions):
iid = cap[0].split("/")[-1]
iid2captions[iid] = [cap[1]]
paths = list(glob(f"{root}/images_train/*/*"))
random.shuffle(paths)
caption_paths = [path for path in paths if path.split("/")[-1] in iid2captions]
if len(paths) == len(caption_paths):
print("all images have caption annotations")
else:
print("not all images have caption annotations")
print(
len(paths), len(caption_paths), len(iid2captions),
)
sub_len = int(len(caption_paths) // 100000)
subs = list(range(sub_len + 1))
for sub in subs:
sub_paths = caption_paths[sub * 100000 : (sub + 1) * 100000]
bs = [path2rest(path, iid2captions) for path in tqdm(sub_paths)]
dataframe = pd.DataFrame(bs, columns=["image", "caption", "image_id", "split"],)
table = pa.Table.from_pandas(dataframe)
os.makedirs(dataset_root, exist_ok=True)
with pa.OSFile(f"{dataset_root}/sbu_{sub}.arrow", "wb") as sink:
with pa.RecordBatchFileWriter(sink, table.schema) as writer:
writer.write_table(table)
del dataframe
del table
del bs
gc.collect()
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/utils/write_sbu.py |
import re
contractions = {
"aint": "ain't",
"arent": "aren't",
"cant": "can't",
"couldve": "could've",
"couldnt": "couldn't",
"couldn'tve": "couldn't've",
"couldnt've": "couldn't've",
"didnt": "didn't",
"doesnt": "doesn't",
"dont": "don't",
"hadnt": "hadn't",
"hadnt've": "hadn't've",
"hadn'tve": "hadn't've",
"hasnt": "hasn't",
"havent": "haven't",
"hed": "he'd",
"hed've": "he'd've",
"he'dve": "he'd've",
"hes": "he's",
"howd": "how'd",
"howll": "how'll",
"hows": "how's",
"Id've": "I'd've",
"I'dve": "I'd've",
"Im": "I'm",
"Ive": "I've",
"isnt": "isn't",
"itd": "it'd",
"itd've": "it'd've",
"it'dve": "it'd've",
"itll": "it'll",
"let's": "let's",
"maam": "ma'am",
"mightnt": "mightn't",
"mightnt've": "mightn't've",
"mightn'tve": "mightn't've",
"mightve": "might've",
"mustnt": "mustn't",
"mustve": "must've",
"neednt": "needn't",
"notve": "not've",
"oclock": "o'clock",
"oughtnt": "oughtn't",
"ow's'at": "'ow's'at",
"'ows'at": "'ow's'at",
"'ow'sat": "'ow's'at",
"shant": "shan't",
"shed've": "she'd've",
"she'dve": "she'd've",
"she's": "she's",
"shouldve": "should've",
"shouldnt": "shouldn't",
"shouldnt've": "shouldn't've",
"shouldn'tve": "shouldn't've",
"somebody'd": "somebodyd",
"somebodyd've": "somebody'd've",
"somebody'dve": "somebody'd've",
"somebodyll": "somebody'll",
"somebodys": "somebody's",
"someoned": "someone'd",
"someoned've": "someone'd've",
"someone'dve": "someone'd've",
"someonell": "someone'll",
"someones": "someone's",
"somethingd": "something'd",
"somethingd've": "something'd've",
"something'dve": "something'd've",
"somethingll": "something'll",
"thats": "that's",
"thered": "there'd",
"thered've": "there'd've",
"there'dve": "there'd've",
"therere": "there're",
"theres": "there's",
"theyd": "they'd",
"theyd've": "they'd've",
"they'dve": "they'd've",
"theyll": "they'll",
"theyre": "they're",
"theyve": "they've",
"twas": "'twas",
"wasnt": "wasn't",
"wed've": "we'd've",
"we'dve": "we'd've",
"weve": "we've",
"werent": "weren't",
"whatll": "what'll",
"whatre": "what're",
"whats": "what's",
"whatve": "what've",
"whens": "when's",
"whered": "where'd",
"wheres": "where's",
"whereve": "where've",
"whod": "who'd",
"whod've": "who'd've",
"who'dve": "who'd've",
"wholl": "who'll",
"whos": "who's",
"whove": "who've",
"whyll": "why'll",
"whyre": "why're",
"whys": "why's",
"wont": "won't",
"wouldve": "would've",
"wouldnt": "wouldn't",
"wouldnt've": "wouldn't've",
"wouldn'tve": "wouldn't've",
"yall": "y'all",
"yall'll": "y'all'll",
"y'allll": "y'all'll",
"yall'd've": "y'all'd've",
"y'alld've": "y'all'd've",
"y'all'dve": "y'all'd've",
"youd": "you'd",
"youd've": "you'd've",
"you'dve": "you'd've",
"youll": "you'll",
"youre": "you're",
"youve": "you've",
}
manual_map = {
"none": "0",
"zero": "0",
"one": "1",
"two": "2",
"three": "3",
"four": "4",
"five": "5",
"six": "6",
"seven": "7",
"eight": "8",
"nine": "9",
"ten": "10",
}
articles = ["a", "an", "the"]
period_strip = re.compile(r"(?!<=\d)(\.)(?!\d)")  # raw strings avoid invalid-escape warnings
comma_strip = re.compile(r"(\d)(\,)(\d)")
punct = [
";",
r"/",
"[",
"]",
'"',
"{",
"}",
"(",
")",
"=",
"+",
"\\",
"_",
"-",
">",
"<",
"@",
"`",
",",
"?",
"!",
]
def normalize_word(token):
_token = token
for p in punct:
if (p + " " in token or " " + p in token) or (
re.search(comma_strip, token) != None
):
_token = _token.replace(p, "")
else:
_token = _token.replace(p, " ")
token = period_strip.sub("", _token, re.UNICODE)
_token = []
temp = token.lower().split()
for word in temp:
word = manual_map.setdefault(word, word)
if word not in articles:
_token.append(word)
for i, word in enumerate(_token):
if word in contractions:
_token[i] = contractions[word]
token = " ".join(_token)
token = token.replace(",", "")
return token
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/utils/glossary.py |
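A quick sanity check of the normalization above; given the punctuation, number, article, and contraction tables defined in this file, these inputs should come out roughly as shown.
from vlmo.utils.glossary import normalize_word

print(normalize_word("There are Two dogs."))  # expected: "there are 2 dogs"
print(normalize_word("isnt it 1,000?"))       # expected: "isn't it 1000"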
from .utils import (
inception_normalize,
MinMaxResize,
)
from torchvision import transforms
from .randaug import RandAugment
def pixelbert_transform(size=800):
longer = int((1333 / 800) * size)
return transforms.Compose(
[
MinMaxResize(shorter=size, longer=longer),
transforms.ToTensor(),
inception_normalize,
]
)
def pixelbert_transform_randaug(size=800):
longer = int((1333 / 800) * size)
trs = transforms.Compose(
[
MinMaxResize(shorter=size, longer=longer),
transforms.ToTensor(),
inception_normalize,
]
)
trs.transforms.insert(0, RandAugment(2, 9))
return trs
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/transforms/pixelbert.py |
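A minimal sketch of applying the transform above to a single image; example.jpg is a placeholder path, and the output shape depends on the aspect ratio after MinMaxResize snaps both sides to multiples of 32.
from PIL import Image
from vlmo.transforms.pixelbert import pixelbert_transform

img = Image.open("example.jpg").convert("RGB")  # placeholder image
tensor = pixelbert_transform(size=384)(img)     # shorter side ~384, longer side capped at int(1333/800*384)
print(tensor.shape)                             # e.g. torch.Size([3, 384, 512]) depending on the aspect ratio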
from .pixelbert import (
pixelbert_transform,
pixelbert_transform_randaug,
)
from .square_transform import (
square_transform,
square_transform_randaug,
)
_transforms = {
"pixelbert": pixelbert_transform,
"pixelbert_randaug": pixelbert_transform_randaug,
"square_transform": square_transform,
"square_transform_randaug": square_transform_randaug,
}
def keys_to_transforms(keys: list, size=224):
return [_transforms[key](size=size) for key in keys]
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/transforms/__init__.py |
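A sketch of building transforms by key, which is presumably how the datamodules consume this registry; the keys must be ones registered in _transforms above.
from vlmo.transforms import keys_to_transforms

train_tf = keys_to_transforms(["square_transform_randaug"], size=224)[0]
val_tf = keys_to_transforms(["square_transform"], size=224)[0]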
# code in this file is adapted from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import random
import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw
import numpy as np
import torch
from PIL import Image
def ShearX(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))
def ShearY(img, v): # [-0.3, 0.3]
assert -0.3 <= v <= 0.3
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))
def TranslateX(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert -0.45 <= v <= 0.45
if random.random() > 0.5:
v = -v
v = v * img.size[0]
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateXabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert 0 <= v
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateY(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert -0.45 <= v <= 0.45
if random.random() > 0.5:
v = -v
v = v * img.size[1]
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def TranslateYabs(img, v): # [-150, 150] => percentage: [-0.45, 0.45]
assert 0 <= v
if random.random() > 0.5:
v = -v
return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def Rotate(img, v): # [-30, 30]
assert -30 <= v <= 30
if random.random() > 0.5:
v = -v
return img.rotate(v)
def AutoContrast(img, _):
return PIL.ImageOps.autocontrast(img)
def Invert(img, _):
return PIL.ImageOps.invert(img)
def Equalize(img, _):
return PIL.ImageOps.equalize(img)
def Flip(img, _): # not from the paper
return PIL.ImageOps.mirror(img)
def Solarize(img, v): # [0, 256]
assert 0 <= v <= 256
return PIL.ImageOps.solarize(img, v)
def SolarizeAdd(img, addition=0, threshold=128):
img_np = np.array(img).astype(int)  # np.int was removed in newer NumPy versions
img_np = img_np + addition
img_np = np.clip(img_np, 0, 255)
img_np = img_np.astype(np.uint8)
img = Image.fromarray(img_np)
return PIL.ImageOps.solarize(img, threshold)
def Posterize(img, v): # [4, 8]
v = int(v)
v = max(1, v)
return PIL.ImageOps.posterize(img, v)
def Contrast(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Contrast(img).enhance(v)
def Color(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Color(img).enhance(v)
def Brightness(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Brightness(img).enhance(v)
def Sharpness(img, v): # [0.1,1.9]
assert 0.1 <= v <= 1.9
return PIL.ImageEnhance.Sharpness(img).enhance(v)
def Cutout(img, v): # [0, 60] => percentage: [0, 0.2]
assert 0.0 <= v <= 0.2
if v <= 0.0:
return img
v = v * img.size[0]
return CutoutAbs(img, v)
def CutoutAbs(img, v): # [0, 60] => percentage: [0, 0.2]
# assert 0 <= v <= 20
if v < 0:
return img
w, h = img.size
x0 = np.random.uniform(w)
y0 = np.random.uniform(h)
x0 = int(max(0, x0 - v / 2.0))
y0 = int(max(0, y0 - v / 2.0))
x1 = min(w, x0 + v)
y1 = min(h, y0 + v)
xy = (x0, y0, x1, y1)
color = (125, 123, 114)
# color = (0, 0, 0)
img = img.copy()
PIL.ImageDraw.Draw(img).rectangle(xy, color)
return img
def SamplePairing(imgs): # [0, 0.4]
def f(img1, v):
i = np.random.choice(len(imgs))
img2 = PIL.Image.fromarray(imgs[i])
return PIL.Image.blend(img1, img2, v)
return f
def Identity(img, v):
return img
def augment_list(): # 16 operations and their ranges
# https://github.com/google-research/uda/blob/master/image/randaugment/policies.py#L57
# l = [
# (Identity, 0., 1.0),
# (ShearX, 0., 0.3), # 0
# (ShearY, 0., 0.3), # 1
# (TranslateX, 0., 0.33), # 2
# (TranslateY, 0., 0.33), # 3
# (Rotate, 0, 30), # 4
# (AutoContrast, 0, 1), # 5
# (Invert, 0, 1), # 6
# (Equalize, 0, 1), # 7
# (Solarize, 0, 110), # 8
# (Posterize, 4, 8), # 9
# # (Contrast, 0.1, 1.9), # 10
# (Color, 0.1, 1.9), # 11
# (Brightness, 0.1, 1.9), # 12
# (Sharpness, 0.1, 1.9), # 13
# # (Cutout, 0, 0.2), # 14
# # (SamplePairing(imgs), 0, 0.4), # 15
# ]
# https://github.com/tensorflow/tpu/blob/8462d083dd89489a79e3200bcc8d4063bf362186/models/official/efficientnet/autoaugment.py#L505
l = [
(AutoContrast, 0, 1),
(Equalize, 0, 1),
# (Invert, 0, 1),
(Rotate, 0, 30),
(Posterize, 0, 4),
(Solarize, 0, 256),
(SolarizeAdd, 0, 110),
(Color, 0.1, 1.9),
(Contrast, 0.1, 1.9),
(Brightness, 0.1, 1.9),
(Sharpness, 0.1, 1.9),
(ShearX, 0.0, 0.3),
(ShearY, 0.0, 0.3),
# (CutoutAbs, 0, 40),
(TranslateXabs, 0.0, 100),
(TranslateYabs, 0.0, 100),
]
return l
class Lighting(object):
"""Lighting noise(AlexNet - style PCA - based noise)"""
def __init__(self, alphastd, eigval, eigvec):
self.alphastd = alphastd
self.eigval = torch.Tensor(eigval)
self.eigvec = torch.Tensor(eigvec)
def __call__(self, img):
if self.alphastd == 0:
return img
alpha = img.new().resize_(3).normal_(0, self.alphastd)
rgb = (
self.eigvec.type_as(img)
.clone()
.mul(alpha.view(1, 3).expand(3, 3))
.mul(self.eigval.view(1, 3).expand(3, 3))
.sum(1)
.squeeze()
)
return img.add(rgb.view(3, 1, 1).expand_as(img))
class CutoutDefault(object):
"""
Reference : https://github.com/quark0/darts/blob/master/cnn/utils.py
"""
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1:y2, x1:x2] = 0.0
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
class RandAugment:
def __init__(self, n, m):
self.n = n
self.m = m # [0, 30]
self.augment_list = augment_list()
def __call__(self, img):
ops = random.choices(self.augment_list, k=self.n)
for op, minval, maxval in ops:
val = (float(self.m) / 30) * float(maxval - minval) + minval
img = op(img, val)
return img
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/transforms/randaug.py |
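A minimal standalone use of RandAugment; example.jpg is a placeholder, and (2, 9) matches how pixelbert_transform_randaug inserts it above.
from PIL import Image
from vlmo.transforms.randaug import RandAugment

augment = RandAugment(2, 9)                      # 2 ops per call, magnitude 9 out of 30
img = Image.open("example.jpg").convert("RGB")   # placeholder image
aug_img = augment(img)                           # PIL image with the sampled ops applied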
from torchvision import transforms
from PIL import Image
class MinMaxResize:
def __init__(self, shorter=800, longer=1333):
self.min = shorter
self.max = longer
def __call__(self, x):
w, h = x.size
scale = self.min / min(w, h)
if h < w:
newh, neww = self.min, scale * w
else:
newh, neww = scale * h, self.min
if max(newh, neww) > self.max:
scale = self.max / max(newh, neww)
newh = newh * scale
neww = neww * scale
newh, neww = int(newh + 0.5), int(neww + 0.5)
newh, neww = newh // 32 * 32, neww // 32 * 32
return x.resize((neww, newh), resample=Image.BICUBIC)
class UnNormalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
"""
Args:
tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
Returns:
Tensor: Normalized image.
"""
for t, m, s in zip(tensor, self.mean, self.std):
t.mul_(s).add_(m)
# The normalize code -> t.sub_(m).div_(s)
return tensor
# This is the simple maximum entropy normalization performed in the Inception paper
inception_normalize = transforms.Compose(
[transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
# ViT uses simple non-biased inception normalization
# https://github.com/google-research/vision_transformer/blob/master/vit_jax/input_pipeline.py#L132
inception_unnormalize = transforms.Compose(
[UnNormalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])]
)
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/transforms/utils.py |
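A short sketch of undoing the normalization for visualization; the random tensor is only a stand-in for a real normalized image.
import torch
from vlmo.transforms.utils import inception_unnormalize

normalized = torch.randn(3, 224, 224)                 # stand-in for a normalized image tensor
restored = inception_unnormalize(normalized.clone())  # maps real inputs back toward [0, 1]; clone() avoids in-place edits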
import cv2
import numpy as np
## aug functions
def identity_func(img):
return img
def autocontrast_func(img, cutoff=0):
'''
same output as PIL.ImageOps.autocontrast
'''
n_bins = 256
def tune_channel(ch):
n = ch.size
cut = cutoff * n // 100
if cut == 0:
high, low = ch.max(), ch.min()
else:
hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
low = np.argwhere(np.cumsum(hist) > cut)
low = 0 if low.shape[0] == 0 else low[0]
high = np.argwhere(np.cumsum(hist[::-1]) > cut)
high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0]
if high <= low:
table = np.arange(n_bins)
else:
scale = (n_bins - 1) / (high - low)
offset = -low * scale
table = np.arange(n_bins) * scale + offset
table[table < 0] = 0
table[table > n_bins - 1] = n_bins - 1
table = table.clip(0, 255).astype(np.uint8)
return table[ch]
channels = [tune_channel(ch) for ch in cv2.split(img)]
out = cv2.merge(channels)
return out
def equalize_func(img):
'''
same output as PIL.ImageOps.equalize
PIL's implementation is different from cv2.equalizeHist
'''
n_bins = 256
def tune_channel(ch):
hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
non_zero_hist = hist[hist != 0].reshape(-1)
step = np.sum(non_zero_hist[:-1]) // (n_bins - 1)
if step == 0: return ch
n = np.empty_like(hist)
n[0] = step // 2
n[1:] = hist[:-1]
table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8)
return table[ch]
channels = [tune_channel(ch) for ch in cv2.split(img)]
out = cv2.merge(channels)
return out
def rotate_func(img, degree, fill=(0, 0, 0)):
'''
like PIL, rotate by degree, not radians
'''
H, W = img.shape[0], img.shape[1]
center = W / 2, H / 2
M = cv2.getRotationMatrix2D(center, degree, 1)
out = cv2.warpAffine(img, M, (W, H), borderValue=fill)
return out
def solarize_func(img, thresh=128):
'''
same output as PIL.ImageOps.solarize
'''
table = np.array([el if el < thresh else 255 - el for el in range(256)])
table = table.clip(0, 255).astype(np.uint8)
out = table[img]
return out
def color_func(img, factor):
'''
same output as PIL.ImageEnhance.Color
'''
## implementation according to PIL definition, quite slow
# degenerate = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[:, :, np.newaxis]
# out = blend(degenerate, img, factor)
# M = (
# np.eye(3) * factor
# + np.float32([0.114, 0.587, 0.299]).reshape(3, 1) * (1. - factor)
# )[np.newaxis, np.newaxis, :]
M = (
np.float32([
[0.886, -0.114, -0.114],
[-0.587, 0.413, -0.587],
[-0.299, -0.299, 0.701]]) * factor
+ np.float32([[0.114], [0.587], [0.299]])
)
out = np.matmul(img, M).clip(0, 255).astype(np.uint8)
return out
def contrast_func(img, factor):
"""
same output as PIL.ImageEnhance.Contrast
"""
mean = np.sum(np.mean(img, axis=(0, 1)) * np.array([0.114, 0.587, 0.299]))
table = np.array([(
el - mean) * factor + mean
for el in range(256)
]).clip(0, 255).astype(np.uint8)
out = table[img]
return out
def brightness_func(img, factor):
'''
same output as PIL.ImageEnhance.Brightness
'''
table = (np.arange(256, dtype=np.float32) * factor).clip(0, 255).astype(np.uint8)
out = table[img]
return out
def sharpness_func(img, factor):
'''
The differences between this result and PIL are all on the 4 boundaries; the center
areas are the same
'''
kernel = np.ones((3, 3), dtype=np.float32)
kernel[1][1] = 5
kernel /= 13
degenerate = cv2.filter2D(img, -1, kernel)
if factor == 0.0:
out = degenerate
elif factor == 1.0:
out = img
else:
out = img.astype(np.float32)
degenerate = degenerate.astype(np.float32)[1:-1, 1:-1, :]
out[1:-1, 1:-1, :] = degenerate + factor * (out[1:-1, 1:-1, :] - degenerate)
out = out.astype(np.uint8)
return out
def shear_x_func(img, factor, fill=(0, 0, 0)):
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, factor, 0], [0, 1, 0]])
out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
return out
def translate_x_func(img, offset, fill=(0, 0, 0)):
'''
same output as PIL.Image.transform
'''
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, -offset], [0, 1, 0]])
out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
return out
def translate_y_func(img, offset, fill=(0, 0, 0)):
'''
same output as PIL.Image.transform
'''
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, 0], [0, 1, -offset]])
out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
return out
def posterize_func(img, bits):
'''
same output as PIL.ImageOps.posterize
'''
out = np.bitwise_and(img, np.uint8(255 << (8 - bits)))
return out
def shear_y_func(img, factor, fill=(0, 0, 0)):
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, 0], [factor, 1, 0]])
out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
return out
def cutout_func(img, pad_size, replace=(0, 0, 0)):
replace = np.array(replace, dtype=np.uint8)
H, W = img.shape[0], img.shape[1]
rh, rw = np.random.random(2)
pad_size = pad_size // 2
ch, cw = int(rh * H), int(rw * W)
x1, x2 = max(ch - pad_size, 0), min(ch + pad_size, H)
y1, y2 = max(cw - pad_size, 0), min(cw + pad_size, W)
out = img.copy()
out[x1:x2, y1:y2, :] = replace
return out
### level to args
def enhance_level_to_args(MAX_LEVEL):
def level_to_args(level):
return ((level / MAX_LEVEL) * 1.8 + 0.1,)
return level_to_args
def shear_level_to_args(MAX_LEVEL, replace_value):
def level_to_args(level):
level = (level / MAX_LEVEL) * 0.3
if np.random.random() > 0.5: level = -level
return (level, replace_value)
return level_to_args
def translate_level_to_args(translate_const, MAX_LEVEL, replace_value):
def level_to_args(level):
level = (level / MAX_LEVEL) * float(translate_const)
if np.random.random() > 0.5: level = -level
return (level, replace_value)
return level_to_args
def cutout_level_to_args(cutout_const, MAX_LEVEL, replace_value):
def level_to_args(level):
level = int((level / MAX_LEVEL) * cutout_const)
return (level, replace_value)
return level_to_args
def solarize_level_to_args(MAX_LEVEL):
def level_to_args(level):
level = int((level / MAX_LEVEL) * 256)
return (level, )
return level_to_args
def none_level_to_args(level):
return ()
def posterize_level_to_args(MAX_LEVEL):
def level_to_args(level):
level = int((level / MAX_LEVEL) * 4)
return (level, )
return level_to_args
def rotate_level_to_args(MAX_LEVEL, replace_value):
def level_to_args(level):
level = (level / MAX_LEVEL) * 30
if np.random.random() < 0.5:
level = -level
return (level, replace_value)
return level_to_args
func_dict = {
'Identity': identity_func,
'AutoContrast': autocontrast_func,
'Equalize': equalize_func,
'Rotate': rotate_func,
'Solarize': solarize_func,
'Color': color_func,
'Contrast': contrast_func,
'Brightness': brightness_func,
'Sharpness': sharpness_func,
'ShearX': shear_x_func,
'TranslateX': translate_x_func,
'TranslateY': translate_y_func,
'Posterize': posterize_func,
'ShearY': shear_y_func,
}
translate_const = 10
MAX_LEVEL = 10
replace_value = (128, 128, 128)
arg_dict = {
'Identity': none_level_to_args,
'AutoContrast': none_level_to_args,
'Equalize': none_level_to_args,
'Rotate': rotate_level_to_args(MAX_LEVEL, replace_value),
'Solarize': solarize_level_to_args(MAX_LEVEL),
'Color': enhance_level_to_args(MAX_LEVEL),
'Contrast': enhance_level_to_args(MAX_LEVEL),
'Brightness': enhance_level_to_args(MAX_LEVEL),
'Sharpness': enhance_level_to_args(MAX_LEVEL),
'ShearX': shear_level_to_args(MAX_LEVEL, replace_value),
'TranslateX': translate_level_to_args(
translate_const, MAX_LEVEL, replace_value
),
'TranslateY': translate_level_to_args(
translate_const, MAX_LEVEL, replace_value
),
'Posterize': posterize_level_to_args(MAX_LEVEL),
'ShearY': shear_level_to_args(MAX_LEVEL, replace_value),
}
class RandomAugment(object):
def __init__(self, N=2, M=10, isPIL=False, augs=[]):
self.N = N
self.M = M
self.isPIL = isPIL
if augs:
self.augs = augs
else:
self.augs = list(arg_dict.keys())
def get_random_ops(self):
sampled_ops = np.random.choice(self.augs, self.N)
return [(op, 0.5, self.M) for op in sampled_ops]
def __call__(self, img):
if self.isPIL:
img = np.array(img)
ops = self.get_random_ops()
for name, prob, level in ops:
if np.random.random() > prob:
continue
args = arg_dict[name](level)
img = func_dict[name](img, *args)
return img
if __name__ == '__main__':
a = RandomAugment()
img = np.random.randn(32, 32, 3)
a(img) | EXA-1-master | exa/models/unilm-master/vlmo/vlmo/transforms/randaugment.py |
# code in this file is adapted from the ALBEF repo (https://github.com/salesforce/ALBEF)
from .utils import (
inception_normalize,
)
from torchvision import transforms
from .randaugment import RandomAugment
from PIL import Image
def square_transform(size=224):
return transforms.Compose(
[
transforms.Resize((size, size), interpolation=Image.BICUBIC),
transforms.ToTensor(),
inception_normalize,
]
)
def square_transform_randaug(size=224):
return transforms.Compose(
[
transforms.RandomResizedCrop(size, scale=(0.5, 1.0), interpolation=Image.BICUBIC),
transforms.RandomHorizontalFlip(),
RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',
'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']),
transforms.ToTensor(),
inception_normalize,
]
) | EXA-1-master | exa/models/unilm-master/vlmo/vlmo/transforms/square_transform.py |
from .vlmo_module import VLMo
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/modules/__init__.py |
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import pytorch_lightning as pl
import numpy as np
import vlmo.modules.multiway_transformer
from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings
from vlmo.modules import heads, objectives, vlmo_utils
from pytorch_lightning.utilities.distributed import rank_zero_info
from scipy import interpolate
from timm.models import create_model
def convert_to_textpt_ckpt(state_dict, module):
new_state_dict = {}
# Merge relative_position_bias_table from all layer into one tensor,
# so we can use one op for gather the relative position bias for speed up
relative_position_bias_tables = {}
for key in state_dict:
value = state_dict[key]
if "relative_position_bias_table" in key:
# transformer.blocks.0.attn.relative_position_bias_table
layer_idx = int(key.split(".attn.")[0].split('.')[-1])
relative_position_bias_tables[layer_idx] = value
continue
if "mlp" in key:
key_imag = "transformer." + key.replace("mlp", "mlp_imag")
new_state_dict[key_imag] = value
elif "norm2" in key:
key_imag = "transformer." + key.replace("norm2", "norm2_imag")
new_state_dict[key_imag] = value
else:
new_key = "transformer." + key
new_state_dict[new_key] = value
if len(relative_position_bias_tables) > 0:
tensor_list = []
for layer_idx in sorted(relative_position_bias_tables.keys()):
tensor_list.append(relative_position_bias_tables[layer_idx])
relative_position_bias_table = torch.cat(tensor_list, dim=1)
num_distance, _ = relative_position_bias_table.shape
all_relative_position_bias_table = module.relative_position_bias_table.data.clone()
all_relative_position_bias_table[:num_distance, :] = relative_position_bias_table
new_state_dict["relative_position_bias_table"] = all_relative_position_bias_table
return new_state_dict
def interpolate_pos_embed(pos_embed_checkpoint, visual_encoder):
# interpolate position embedding
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = visual_encoder.patch_embed.num_patches
num_extra_tokens = visual_encoder.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
if orig_size!=new_size:
# class_token and dist_token are kept unchanged
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
print('reshape position embedding from %d to %d'%(orig_size ** 2,new_size ** 2))
return new_pos_embed
else:
return pos_embed_checkpoint
def convert_deepspeed_ckpt(state_dict):
new_state_dict = {}
for key in state_dict:
if key.startswith("module."):
new_key = key[len("module."):]
value = state_dict[key]
new_state_dict[new_key] = value
else:
new_state_dict[key] = state_dict[key]
return new_state_dict
class VLMo(pl.LightningModule):
def __init__(self, config):
super().__init__()
self.save_hyperparameters()
# backbone & patch projection
self.img_size = config["image_size"]
self.transformer = create_model(
config["model_arch"],
img_size=self.img_size,
pretrained=False,
drop_rate=0,
drop_path_rate=config["drop_path_rate"],
attn_drop_rate=0,
drop_block_rate=None,
config=self.hparams.config,
)
self.patch_size = self.transformer.patch_size
self.vlffn_start_layer_index = self.transformer.vlffn_start_layer_index
self.num_layers = len(self.transformer.blocks)
self.num_features = self.transformer.num_features
self.build_relative_position_embed(config)
# language embedding
bert_config = BertConfig(
vocab_size=config["vocab_size"],
hidden_size=self.num_features,
max_position_embeddings=config["max_text_len"],
hidden_dropout_prob=config["drop_path_rate"],
position_embedding_type="rel_pos" if self.transformer.need_relative_position_embed else "absolute",
)
self.text_embeddings = BertEmbeddings(bert_config)
self.text_embeddings.apply(objectives.init_weights)
self.token_type_embeddings = nn.Embedding(2, self.num_features)
self.token_type_embeddings.apply(objectives.init_weights)
# task layers
self.pooler = heads.Pooler(self.num_features)
self.pooler.apply(objectives.init_weights)
## language modeling
if config["loss_names"]["mlm"] > 0 or config["loss_names"]["textmlm"] > 0:
self.mlm_score = heads.MLMHead(bert_config)
self.mlm_score.apply(objectives.init_weights)
## image-text matching (global hard negative)
if config["loss_names"]["itm"] > 0:
self.itm_score = heads.ITMHead(self.num_features)
self.itm_score.apply(objectives.init_weights)
## contrastive loss (or sampling for global hard negative)
if config["loss_names"]["itc"] > 0:
self.itc_text_proj = heads.ITCHead(self.num_features)
self.itc_image_proj = heads.ITCHead(self.num_features)
self.itc_text_proj.apply(objectives.init_weights)
self.itc_image_proj.apply(objectives.init_weights)
self.itc_vl_text_proj = heads.ITCHead(self.num_features)
self.itc_vl_image_proj = heads.ITCHead(self.num_features)
self.itc_vl_text_proj.apply(objectives.init_weights)
self.itc_vl_image_proj.apply(objectives.init_weights)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.logit_vl_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
## retrieval task ft
if config["loss_names"]["irtr"] > 0:
self.itc_text_proj = heads.ITCHead(self.num_features)
self.itc_image_proj = heads.ITCHead(self.num_features)
self.itc_text_proj.apply(objectives.init_weights)
self.itc_image_proj.apply(objectives.init_weights)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.load_pretrained_weight()
# ===================== Downstream ===================== #
## VQAv2
if self.hparams.config["loss_names"]["vqa"] > 0:
vs = self.hparams.config["vqav2_label_size"]
self.vqa_classifier = nn.Sequential(
nn.Linear(self.num_features, self.num_features * 2),
nn.LayerNorm(self.num_features * 2),
nn.GELU(),
nn.Linear(self.num_features * 2, vs),
)
self.vqa_classifier.apply(objectives.init_weights)
## NLVR2 (Visual reasoning)
if self.hparams.config["loss_names"]["nlvr2"] > 0:
self.nlvr2_classifier = nn.Sequential(
nn.Linear(self.num_features * 2, self.num_features * 2),
nn.LayerNorm(self.num_features * 2),
nn.GELU(),
nn.Linear(self.num_features * 2, 2),
)
self.nlvr2_classifier.apply(objectives.init_weights)
emb_data = self.token_type_embeddings.weight.data
self.token_type_embeddings = nn.Embedding(3, self.num_features)
self.token_type_embeddings.apply(objectives.init_weights)
self.token_type_embeddings.weight.data[0, :] = emb_data[0, :]
self.token_type_embeddings.weight.data[1, :] = emb_data[1, :]
self.token_type_embeddings.weight.data[2, :] = emb_data[1, :]
vlmo_utils.set_metrics(self)
self.current_tasks = list()
# ===================== load downstream (test_only) ======================
if self.hparams.config["load_path"] != "" and self.hparams.config["test_only"]:
rank_zero_info("Load ckpt from: {}".format(self.hparams.config["load_path"]))
ckpt = torch.load(self.hparams.config["load_path"], map_location="cpu")
state_dict = None
for state_dict_key in ("state_dict", "module", "model"):
if state_dict_key in ckpt:
rank_zero_info("Read state dict from ckpt[%s]. " % state_dict_key)
state_dict = ckpt[state_dict_key]
break
if state_dict_key == "module":
state_dict = convert_deepspeed_ckpt(state_dict)
if state_dict is None:
rank_zero_info("Read state dict from ckpt. ")
state_dict = ckpt
missing_keys, unexpected_keys = self.load_state_dict(state_dict, strict=False)
rank_zero_info("missing_keys: {}".format(missing_keys))
rank_zero_info("unexpected_keys: {}".format(unexpected_keys))
def load_pretrained_weight(self):
if self.hparams.config["load_path"] != "" and not self.hparams.config["test_only"]:
config = self.hparams.config
ckpt = torch.load(self.hparams.config["load_path"], map_location="cpu")
rank_zero_info("Load ckpt from: {}".format(self.hparams.config["load_path"]))
state_dict = None
for state_dict_key in ("state_dict", "module", "model"):
if state_dict_key in ckpt:
rank_zero_info("Read state dict from ckpt[%s]. " % state_dict_key)
state_dict = ckpt[state_dict_key]
break
if state_dict_key == "module":
state_dict = convert_deepspeed_ckpt(state_dict)
if state_dict is None:
rank_zero_info("Read state dict from ckpt. ")
state_dict = ckpt
for key in state_dict:
var = state_dict[key]
rank_zero_info("%s = %s" % (key, str(var.size())))
rank_zero_info(config["loss_names"])
if config["loss_names"]["textmlm"] > 0:
rank_zero_info("convert to textpt")
state_dict = convert_to_textpt_ckpt(state_dict, self)
max_text_len = config["max_text_len"]
if "text_embeddings.position_embeddings.weight" in state_dict and state_dict["text_embeddings.position_embeddings.weight"].size(0) != max_text_len:
state_dict["text_embeddings.position_embeddings.weight"].data = state_dict["text_embeddings.position_embeddings.weight"].data[:max_text_len, :]
state_dict["text_embeddings.position_ids"].data = state_dict["text_embeddings.position_ids"].data[:, :max_text_len]
rank_zero_info("text position_embeddings size: {}".format(state_dict["text_embeddings.position_embeddings.weight"].size()))
for check_key in ("relative_position_index", "text_relative_position_index", "text_imag_relative_position_index"):
if check_key in state_dict:
state_dict.pop(check_key)
if "transformer.pos_embed" in state_dict:
pos_embed_reshaped = interpolate_pos_embed(state_dict['transformer.pos_embed'], self.transformer)
state_dict['transformer.pos_embed'] = pos_embed_reshaped
if "relative_position_bias_table" in state_dict:
rel_pos_bias = state_dict["relative_position_bias_table"]
src_num_pos, num_attn_heads = rel_pos_bias.size()
dst_num_pos, _ = self.relative_position_bias_table.size()
dst_patch_shape = self.transformer.patch_embed.patch_shape
if dst_patch_shape[0] != dst_patch_shape[1]:
raise NotImplementedError()
num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)
src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)
if src_size != dst_size:
state_dict.pop("relative_position_index")
state_dict.pop("text_relative_position_index")
state_dict.pop("text_imag_relative_position_index")
rank_zero_info("Position interpolate from %dx%d to %dx%d" % (
src_size, src_size, dst_size, dst_size))
extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
def geometric_progression(a, r, n):
return a * (1.0 - r ** n) / (1.0 - r)
left, right = 1.01, 1.5
while right - left > 1e-6:
q = (left + right) / 2.0
gp = geometric_progression(1, q, src_size // 2)
if gp > dst_size // 2:
right = q
else:
left = q
# if q > 1.090307:
# q = 1.090307
dis = []
cur = 1
for i in range(src_size // 2):
dis.append(cur)
cur += q ** (i + 1)
r_ids = [-_ for _ in reversed(dis)]
x = r_ids + [0] + dis
y = r_ids + [0] + dis
t = dst_size // 2.0
dx = np.arange(-t, t + 0.1, 1.0)
dy = np.arange(-t, t + 0.1, 1.0)
rank_zero_info("Original positions = %s" % str(x))
rank_zero_info("Target positions = %s" % str(dx))
all_rel_pos_bias = []
for i in range(num_attn_heads):
z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
f = interpolate.interp2d(x, y, z, kind='cubic')
all_rel_pos_bias.append(
torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))
rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)
new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
state_dict["relative_position_bias_table"] = new_rel_pos_bias
missing_keys, unexpected_keys = self.load_state_dict(state_dict, strict=False)
rank_zero_info("missing_keys: {}".format(missing_keys))
rank_zero_info("unexpected_keys: {}".format(unexpected_keys))
def get_rel_pos_bias(self, relative_position_index):
if self.relative_position_embed:
relative_position_bias = F.embedding(relative_position_index.long().to(self.relative_position_bias_table.device),
self.relative_position_bias_table)
all_relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, x, y
relative_position_bias_list = torch.chunk(all_relative_position_bias, self.num_layers, dim=0)
return relative_position_bias_list
else:
return [None] * self.num_layers
def build_relative_position_embed(self, config):
if not self.transformer.need_relative_position_embed:
self.relative_position_embed = False
self.text_imag_relative_position_index = None
self.text_relative_position_index = None
self.relative_position_index = None
return
self.relative_position_embed = True
window_size = (int(self.img_size / self.patch_size), int(self.img_size / self.patch_size)) #(14, 14)
rank_zero_info("window_size: {}".format(window_size))
num_heads = self.transformer.num_heads
max_text_len_of_initckpt = config["max_text_len_of_initckpt"] #196
max_text_len = config["max_text_len"] #40
max_imag_len = window_size[0] * window_size[1] + 1 #197
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.text_num_relative_distance = 2 * max_text_len_of_initckpt
self.all_num_relative_distance = self.num_relative_distance + self.text_num_relative_distance + 2
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.all_num_relative_distance, num_heads * self.num_layers))
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.relative_position_index = relative_position_index
text_position_ids = torch.arange(max_text_len-1)
text_rel_pos_mat = text_position_ids.unsqueeze(-2) - text_position_ids.unsqueeze(-1)
min_distance = int(2-max_text_len_of_initckpt) #-194
# rank_zero_info("min_distance: {}".format(min_distance))
text_rel_pos_mat = text_rel_pos_mat - min_distance
text_rel_pos_mat += (self.num_relative_distance + 2)
text_relative_position_index = \
torch.zeros(size=(max_text_len, ) * 2, dtype=relative_coords.dtype)
text_relative_position_index[1:, 1:] = text_rel_pos_mat
text_relative_position_index[0, 0:] = self.all_num_relative_distance - 3
text_relative_position_index[0:, 0] = self.all_num_relative_distance - 2
text_relative_position_index[0, 0] = self.all_num_relative_distance - 1
self.text_relative_position_index = text_relative_position_index
text2imag_relative_position_index = torch.ones(max_text_len, max_imag_len) * (self.num_relative_distance)
imag2text_relative_position_index = torch.ones(max_imag_len, max_text_len) * (self.num_relative_distance + 1)
text_row_relative_position_index = torch.cat((text_relative_position_index, text2imag_relative_position_index), 1)
imag_row_relative_position_index = torch.cat((imag2text_relative_position_index, relative_position_index), 1)
text_imag_relative_position_index = torch.cat((text_row_relative_position_index, imag_row_relative_position_index), 0)
self.text_imag_relative_position_index = text_imag_relative_position_index
def infer(
self,
batch,
mask_text=False,
mask_image=False,
image_token_type_idx=1,
image_embeds=None,
image_masks=None,
):
if f"image_{image_token_type_idx - 1}" in batch:
imgkey = f"image_{image_token_type_idx - 1}"
else:
imgkey = "image"
do_mlm = "_mlm" if mask_text else ""
text_ids = batch[f"text_ids{do_mlm}"]
text_labels = batch[f"text_labels{do_mlm}"]
text_masks = batch[f"text_masks"]
text_embeds = self.text_embeddings(text_ids)
img = batch[imgkey][0]
image_embeds, image_masks = self.transformer.visual_embed(img)
image_masks = image_masks.long().to(device=img.get_device())
text_embeds, image_embeds = (
text_embeds + self.token_type_embeddings(torch.zeros_like(text_masks)),
image_embeds
+ self.token_type_embeddings(
torch.full_like(image_masks, image_token_type_idx)
),
)
co_embeds = torch.cat([text_embeds, image_embeds], dim=1)
co_masks = torch.cat([text_masks, image_masks], dim=1)
x = co_embeds
relative_position_bias_list = self.get_rel_pos_bias(self.text_imag_relative_position_index)
for i, blk in enumerate(self.transformer.blocks):
x = blk(x, mask=co_masks, modality_type="vl", relative_position_bias=relative_position_bias_list[i])
x = self.transformer.norm(x)
text_feats, image_feats = (
x[:, : text_embeds.shape[1]],
x[:, text_embeds.shape[1] :],
)
cls_feats = self.pooler(x)
ret = {
"text_feats": text_feats,
"image_feats": image_feats,
"cls_feats": cls_feats,
"raw_cls_feats": x[:, 0],
"image": img,
"text_labels": text_labels,
"text_ids": text_ids,
"text_masks": text_masks,
}
return ret
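    # Text-only forward pass used during pre-training: the text expert is applied in
    # every block, and the output of the last layer before the VL-FFN stage is
    # additionally run through the remaining blocks with the "vl" expert to produce
    # cls_vlffn_feats for the contrastive objective.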
def infer_text(
self,
batch,
mask_text=False,
):
do_mlm = "_mlm" if mask_text else ""
text_ids = batch[f"text_ids{do_mlm}"]
text_labels = batch[f"text_labels{do_mlm}"]
text_masks = batch[f"text_masks"]
text_embeds = self.text_embeddings(text_ids)
text_embeds = text_embeds + self.token_type_embeddings(torch.zeros_like(text_masks))
co_embeds = text_embeds
co_masks = text_masks
x = co_embeds
all_hidden_states = []
relative_position_bias_list = self.get_rel_pos_bias(self.text_relative_position_index)
for i, blk in enumerate(self.transformer.blocks):
x = blk(x, mask=co_masks, modality_type="text", relative_position_bias=relative_position_bias_list[i])
all_hidden_states.append(x)
vlffn_hiddens = all_hidden_states[self.vlffn_start_layer_index-1]
for vlffn_index in range(self.vlffn_start_layer_index, self.num_layers):
vlffn_hiddens = self.transformer.blocks[vlffn_index](vlffn_hiddens, mask=co_masks, modality_type="vl", relative_position_bias=relative_position_bias_list[vlffn_index])
lffn_hiddens = all_hidden_states[-1]
lffn_hiddens = self.transformer.norm(lffn_hiddens)
text_feats, image_feats = (
lffn_hiddens,
None,
)
cls_feats = self.itc_text_proj(lffn_hiddens[:, 0])
cls_feats = cls_feats / cls_feats.norm(dim=-1, keepdim=True)
vlffn_hiddens = self.transformer.norm(vlffn_hiddens)
cls_vlffn_feats = self.itc_vl_text_proj(vlffn_hiddens[:, 0])
cls_vlffn_feats = cls_vlffn_feats / cls_vlffn_feats.norm(dim=-1, keepdim=True)
ret = {
"text_feats": text_feats,
"image_feats": image_feats,
"cls_feats": cls_feats,
"cls_vlffn_feats": cls_vlffn_feats,
"raw_cls_feats": x[:, 0],
"image_masks": None,
"text_labels": text_labels,
"text_ids": text_ids,
"text_masks": text_masks,
}
return ret
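    # Text-only forward pass for retrieval fine-tuning: same as infer_text but
    # without the extra VL-FFN branch (cls_vlffn_feats is returned as None).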
def infer_text_ft(
self,
batch,
mask_text=False,
):
do_mlm = "_mlm" if mask_text else ""
text_ids = batch[f"text_ids{do_mlm}"]
text_labels = batch[f"text_labels{do_mlm}"]
text_masks = batch[f"text_masks"]
text_embeds = self.text_embeddings(text_ids)
text_embeds = text_embeds + self.token_type_embeddings(torch.zeros_like(text_masks))
co_embeds = text_embeds
co_masks = text_masks
x = co_embeds
all_hidden_states = []
relative_position_bias_list = self.get_rel_pos_bias(self.text_relative_position_index)
for i, blk in enumerate(self.transformer.blocks):
x = blk(x, mask=co_masks, modality_type="text", relative_position_bias=relative_position_bias_list[i])
all_hidden_states.append(x)
lffn_hiddens = all_hidden_states[-1]
lffn_hiddens = self.transformer.norm(lffn_hiddens)
text_feats, image_feats = (
lffn_hiddens,
None,
)
cls_feats = self.itc_text_proj(lffn_hiddens[:, 0])
cls_feats = cls_feats / cls_feats.norm(dim=-1, keepdim=True)
ret = {
"text_feats": text_feats,
"image_feats": image_feats,
"cls_feats": cls_feats,
"cls_vlffn_feats": None,
"raw_cls_feats": x[:, 0],
"image_masks": None,
"text_labels": text_labels,
"text_ids": text_ids,
"text_masks": text_masks,
}
return ret
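    # Text-only forward pass for masked language modeling; only per-token text
    # features are needed, so no projected cls features are produced.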
def infer_text_mlm(
self,
batch,
mask_text=False,
):
do_mlm = "_mlm" if mask_text else ""
text_ids = batch[f"text_ids{do_mlm}"]
text_labels = batch[f"text_labels{do_mlm}"]
text_masks = batch[f"text_masks"]
text_embeds = self.text_embeddings(text_ids)
text_embeds = text_embeds + self.token_type_embeddings(torch.zeros_like(text_masks))
co_embeds = text_embeds
co_masks = text_masks
x = co_embeds
all_hidden_states = []
relative_position_bias_list = self.get_rel_pos_bias(self.text_relative_position_index)
for i, blk in enumerate(self.transformer.blocks):
x = blk(x, mask=co_masks, modality_type="text", relative_position_bias=relative_position_bias_list[i])
all_hidden_states.append(x)
lffn_hiddens = all_hidden_states[-1]
lffn_hiddens = self.transformer.norm(lffn_hiddens)
text_feats, image_feats = (
lffn_hiddens,
None,
)
ret = {
"text_feats": text_feats,
"image_feats": image_feats,
"cls_feats": None,
"cls_vlffn_feats": None,
"raw_cls_feats": x[:, 0],
"image_masks": None,
"text_labels": text_labels,
"text_ids": text_ids,
"text_masks": text_masks,
}
return ret
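    # Image-only forward pass used during pre-training: mirrors infer_text with the
    # image expert, plus the VL-FFN branch on the intermediate hiddens for
    # cls_vlffn_feats.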
def infer_image(
self,
batch,
mask_image=False,
image_token_type_idx=1,
image_embeds=None,
image_masks=None,
):
if f"image_{image_token_type_idx - 1}" in batch:
imgkey = f"image_{image_token_type_idx - 1}"
else:
imgkey = "image"
img = batch[imgkey][0]
image_embeds, image_masks = self.transformer.visual_embed(img)
        image_masks = image_masks.long().to(device=img.device)
image_embeds = image_embeds + self.token_type_embeddings(
torch.full_like(image_masks, image_token_type_idx)
)
co_embeds = image_embeds
co_masks = image_masks
x = co_embeds
all_hidden_states = []
relative_position_bias_list = self.get_rel_pos_bias(self.relative_position_index)
for i, blk in enumerate(self.transformer.blocks):
x = blk(x, mask=co_masks, modality_type="image", relative_position_bias=relative_position_bias_list[i])
all_hidden_states.append(x)
vlffn_hiddens = all_hidden_states[self.vlffn_start_layer_index-1]
for vlffn_index in range(self.vlffn_start_layer_index, self.num_layers):
vlffn_hiddens = self.transformer.blocks[vlffn_index](vlffn_hiddens, mask=co_masks, modality_type="vl", relative_position_bias=relative_position_bias_list[vlffn_index])
vffn_hiddens = all_hidden_states[-1]
vffn_hiddens = self.transformer.norm(vffn_hiddens)
text_feats, image_feats = (
None,
vffn_hiddens,
)
cls_feats = self.itc_image_proj(vffn_hiddens[:, 0])
cls_feats = cls_feats / cls_feats.norm(dim=-1, keepdim=True)
vlffn_hiddens = self.transformer.norm(vlffn_hiddens)
cls_vlffn_feats = self.itc_vl_image_proj(vlffn_hiddens[:, 0])
cls_vlffn_feats = cls_vlffn_feats / cls_vlffn_feats.norm(dim=-1, keepdim=True)
ret = {
"text_feats": text_feats,
"image_feats": image_feats,
"cls_feats": cls_feats,
"cls_vlffn_feats": cls_vlffn_feats,
"raw_cls_feats": x[:, 0],
"image_masks": image_masks,
"text_labels": None,
"text_ids": None,
"text_masks": None,
}
return ret
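    # Image-only forward pass for retrieval fine-tuning (no VL-FFN branch).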
def infer_image_ft(
self,
batch,
mask_image=False,
image_token_type_idx=1,
image_embeds=None,
image_masks=None,
):
if f"image_{image_token_type_idx - 1}" in batch:
imgkey = f"image_{image_token_type_idx - 1}"
else:
imgkey = "image"
img = batch[imgkey][0]
image_embeds, image_masks = self.transformer.visual_embed(img)
        image_masks = image_masks.long().to(device=img.device)
image_embeds = image_embeds + self.token_type_embeddings(
torch.full_like(image_masks, image_token_type_idx)
)
co_embeds = image_embeds
co_masks = image_masks
x = co_embeds
all_hidden_states = []
relative_position_bias_list = self.get_rel_pos_bias(self.relative_position_index)
for i, blk in enumerate(self.transformer.blocks):
x = blk(x, mask=co_masks, modality_type="image", relative_position_bias=relative_position_bias_list[i])
all_hidden_states.append(x)
vffn_hiddens = all_hidden_states[-1]
vffn_hiddens = self.transformer.norm(vffn_hiddens)
text_feats, image_feats = (
None,
vffn_hiddens,
)
cls_feats = self.itc_image_proj(vffn_hiddens[:, 0])
cls_feats = cls_feats / cls_feats.norm(dim=-1, keepdim=True)
ret = {
"text_feats": text_feats,
"image_feats": image_feats,
"cls_feats": cls_feats,
"cls_vlffn_feats": None,
"raw_cls_feats": x[:, 0],
"image_masks": image_masks,
"text_labels": None,
"text_ids": None,
"text_masks": None,
}
return ret
def forward(self, batch):
ret = dict()
if len(self.current_tasks) == 0:
ret.update(self.infer(batch))
return ret
# Masked Language Modeling
if "mlm" in self.current_tasks:
ret.update(objectives.compute_mlm(self, batch))
        # Text-only Masked Language Modeling
if "textmlm" in self.current_tasks:
ret.update(objectives.compute_textonly_mlm(self, batch))
# Contrastive loss for pretraining
if "itc" in self.current_tasks:
ret.update(objectives.compute_itc(self, batch))
# Contrastive loss for finetuning
if "irtr" in self.current_tasks:
ret.update(objectives.compute_irtr(self, batch))
        # Image-Text Matching with global hard negatives; must be used together with itc (consumes its similarity logits)
if "itm" in self.current_tasks:
ret.update(objectives.compute_itm_hardneg(self, batch, ret["itc_i2t_logits"], ret["itc_t2i_logits"]))
# Visual Question Answering
if "vqa" in self.current_tasks:
ret.update(objectives.compute_vqa(self, batch))
# Natural Language for Visual Reasoning 2
if "nlvr2" in self.current_tasks:
ret.update(objectives.compute_nlvr2(self, batch))
return ret
def training_step(self, batch, batch_idx):
vlmo_utils.set_task(self)
output = self(batch)
total_loss = sum([v for k, v in output.items() if "loss" in k])
return total_loss
def training_epoch_end(self, outs):
vlmo_utils.epoch_wrapup(self)
def validation_step(self, batch, batch_idx):
vlmo_utils.set_task(self)
output = self(batch)
def validation_epoch_end(self, outs):
vlmo_utils.epoch_wrapup(self)
def test_step(self, batch, batch_idx):
vlmo_utils.set_task(self)
output = self(batch)
ret = dict()
if self.hparams.config["loss_names"]["vqa"] > 0:
ret.update(objectives.vqa_test_step(self, batch, output))
return ret
def test_epoch_end(self, outs):
model_name = self.hparams.config["load_path"].split("/")[-1][:-5]
if self.hparams.config["loss_names"]["vqa"] > 0:
objectives.vqa_test_wrapup(outs, model_name, self.hparams.config["log_dir"])
vlmo_utils.epoch_wrapup(self)
def configure_optimizers(self):
return vlmo_utils.set_schedule(self)
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/modules/vlmo_module.py |
import torch
import random
import json
from transformers.optimization import AdamW
from transformers import (
get_polynomial_decay_schedule_with_warmup,
get_cosine_schedule_with_warmup,
)
from vlmo.modules.dist_utils import all_gather
from vlmo.modules.objectives import compute_irtr_recall, compute_irtr_recall_with_rerank
from vlmo.gadgets.my_metrics import Accuracy, VQAScore, Scalar
from pytorch_lightning.utilities.distributed import rank_zero_info
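# set_metrics registers one meter per (split, task) pair on the LightningModule:
# Accuracy/VQAScore objects for scores and Scalar meters for losses; nlvr2 keeps
# separate dev and test meters during validation.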
def set_metrics(pl_module):
for split in ["train", "val"]:
for k, v in pl_module.hparams.config["loss_names"].items():
if v < 1:
continue
if k == "vqa":
setattr(pl_module, f"{split}_vqa_score", VQAScore())
setattr(pl_module, f"{split}_{k}_loss", Scalar())
elif k == "nlvr2":
if split == "train":
setattr(pl_module, f"train_{k}_accuracy", Accuracy())
setattr(pl_module, f"train_{k}_loss", Scalar())
else:
setattr(pl_module, f"dev_{k}_accuracy", Accuracy())
setattr(pl_module, f"dev_{k}_loss", Scalar())
setattr(pl_module, f"test_{k}_accuracy", Accuracy())
setattr(pl_module, f"test_{k}_loss", Scalar())
elif k == "irtr":
setattr(pl_module, f"{split}_{k}_i2t_accuracy", Accuracy())
setattr(pl_module, f"{split}_{k}_t2i_accuracy", Accuracy())
setattr(pl_module, f"{split}_{k}_loss", Scalar())
setattr(pl_module, f"{split}_{k}_logit_scale", Scalar())
elif k == "itm":
setattr(pl_module, f"{split}_{k}_accuracy", Accuracy())
setattr(pl_module, f"{split}_{k}_loss", Scalar())
elif k == "itc":
setattr(pl_module, f"{split}_{k}_i2t_accuracy", Accuracy())
setattr(pl_module, f"{split}_{k}_t2i_accuracy", Accuracy())
setattr(pl_module, f"{split}_{k}_loss", Scalar())
setattr(pl_module, f"{split}_{k}_logit_scale", Scalar())
setattr(pl_module, f"{split}_{k}_vl_i2t_accuracy", Accuracy())
setattr(pl_module, f"{split}_{k}_vl_t2i_accuracy", Accuracy())
setattr(pl_module, f"{split}_{k}_vl_logit_scale", Scalar())
else:
setattr(pl_module, f"{split}_{k}_accuracy", Accuracy())
setattr(pl_module, f"{split}_{k}_loss", Scalar())
def epoch_wrapup(pl_module):
phase = "train" if pl_module.training else "val"
the_metric = 0
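    # the_metric sums the epoch-level score of every enabled task (plus the retrieval
    # recall average when get_recall_metric is set) and is logged as "{phase}/the_metric".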
if pl_module.hparams.config["get_recall_metric"] and not pl_module.training:
(val_ir_r1, val_ir_r5, val_ir_r10, val_tr_r1, val_tr_r5, val_tr_r10) = compute_irtr_recall(pl_module, split="val")
val_avg = (val_ir_r1.item() + val_ir_r5.item() + val_ir_r10.item() + val_tr_r1.item() + val_tr_r5.item() + val_tr_r10.item()) / 6.0
pl_module.logger.experiment.add_scalar(
"recalls/val_avg", val_avg, pl_module.global_step
)
(ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10) = compute_irtr_recall(pl_module, split="test")
test_avg = (ir_r1.item() + ir_r5.item() + ir_r10.item() + tr_r1.item() + tr_r5.item() + tr_r10.item()) / 6.0
pl_module.logger.experiment.add_scalar(
"recalls/test_avg", test_avg, pl_module.global_step
)
print("val_avg:{}, test_avg:{}".format(val_avg, test_avg))
print("test ir_r1:{}, ir_r5:{}, ir_r10:{}, tr_r1:{}, tr_r5:{}, tr_r10:{}".format(ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10))
pl_module.logger.experiment.add_scalar(
"recalls/ir_r1", ir_r1, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/ir_r5", ir_r5, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/ir_r10", ir_r10, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/tr_r1", tr_r1, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/tr_r5", tr_r5, pl_module.global_step
)
pl_module.logger.experiment.add_scalar(
"recalls/tr_r10", tr_r10, pl_module.global_step
)
the_metric += val_avg
for loss_name, v in pl_module.hparams.config["loss_names"].items():
if v < 1:
continue
value = 0
if loss_name == "vqa":
value = getattr(pl_module, f"{phase}_{loss_name}_score").compute()
pl_module.log(f"{loss_name}/{phase}/score_epoch", value)
getattr(pl_module, f"{phase}_{loss_name}_score").reset()
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
elif loss_name == "nlvr2":
if phase == "train":
value = getattr(pl_module, f"train_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/train/accuracy_epoch", value)
getattr(pl_module, f"train_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/train/loss_epoch",
getattr(pl_module, f"train_{loss_name}_loss").compute(),
)
getattr(pl_module, f"train_{loss_name}_loss").reset()
else:
value_dev = getattr(pl_module, f"dev_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/dev/accuracy_epoch", value_dev)
getattr(pl_module, f"dev_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/dev/loss_epoch",
getattr(pl_module, f"dev_{loss_name}_loss").compute(),
)
getattr(pl_module, f"dev_{loss_name}_loss").reset()
value_test = getattr(pl_module, f"test_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/test/accuracy_epoch", value_test)
getattr(pl_module, f"test_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/test/loss_epoch",
getattr(pl_module, f"test_{loss_name}_loss").compute(),
)
getattr(pl_module, f"test_{loss_name}_loss").reset()
value = value_dev
elif loss_name == "irtr":
value_i2t = getattr(pl_module, f"{phase}_{loss_name}_i2t_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/i2t_accuracy_epoch", value_i2t)
getattr(pl_module, f"{phase}_{loss_name}_i2t_accuracy").reset()
value_t2i = getattr(pl_module, f"{phase}_{loss_name}_t2i_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/t2i_accuracy_epoch", value_t2i)
getattr(pl_module, f"{phase}_{loss_name}_t2i_accuracy").reset()
value = value_i2t + value_t2i
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
elif loss_name == "itm":
value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value)
getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
elif loss_name == "itc":
value_i2t = getattr(pl_module, f"{phase}_{loss_name}_i2t_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/i2t_accuracy_epoch", value_i2t)
getattr(pl_module, f"{phase}_{loss_name}_i2t_accuracy").reset()
value_t2i = getattr(pl_module, f"{phase}_{loss_name}_t2i_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/t2i_accuracy_epoch", value_t2i)
getattr(pl_module, f"{phase}_{loss_name}_t2i_accuracy").reset()
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
value_vl_i2t = getattr(pl_module, f"{phase}_{loss_name}_vl_i2t_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/vl_i2t_accuracy_epoch", value_vl_i2t)
getattr(pl_module, f"{phase}_{loss_name}_vl_i2t_accuracy").reset()
value_vl_t2i = getattr(pl_module, f"{phase}_{loss_name}_vl_t2i_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/vl_t2i_accuracy_epoch", value_vl_t2i)
getattr(pl_module, f"{phase}_{loss_name}_vl_t2i_accuracy").reset()
value = value_i2t + value_t2i
else:
value = getattr(pl_module, f"{phase}_{loss_name}_accuracy").compute()
pl_module.log(f"{loss_name}/{phase}/accuracy_epoch", value)
getattr(pl_module, f"{phase}_{loss_name}_accuracy").reset()
pl_module.log(
f"{loss_name}/{phase}/loss_epoch",
getattr(pl_module, f"{phase}_{loss_name}_loss").compute(),
)
getattr(pl_module, f"{phase}_{loss_name}_loss").reset()
the_metric += value
pl_module.log(f"{phase}/the_metric", the_metric)
def check_non_acc_grad(pl_module):
if pl_module.token_type_embeddings.weight.grad is None:
return True
else:
grad = pl_module.token_type_embeddings.weight.grad
return (grad.sum() == 0).item()
def set_task(pl_module):
pl_module.current_tasks = [
k for k, v in pl_module.hparams.config["loss_names"].items() if v >= 1
]
return
def set_schedule(pl_module):
lr = pl_module.hparams.config["learning_rate"]
wd = pl_module.hparams.config["weight_decay"]
no_decay = [
"bias",
"LayerNorm.bias",
"LayerNorm.weight",
"norm.bias",
"norm.weight",
"norm1.bias",
"norm1.weight",
"norm2.bias",
"norm2.weight",
]
head_names = ["vqa_classifier", "nlvr2_classifier"]
lr_mult = pl_module.hparams.config["lr_mult"]
end_lr = pl_module.hparams.config["end_lr"]
decay_power = pl_module.hparams.config["decay_power"]
optim_type = pl_module.hparams.config["optim_type"]
names = [n for n, p in pl_module.named_parameters()]
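    # Parameters are split into four groups: {backbone, task head} x {decay, no decay}.
    # Task-head parameters (vqa_classifier, nlvr2_classifier) use lr * lr_mult.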
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in pl_module.named_parameters()
if not any(nd in n for nd in no_decay)
and not any(bb in n for bb in head_names)
],
"weight_decay": wd,
"lr": lr,
},
{
"params": [
p
for n, p in pl_module.named_parameters()
if any(nd in n for nd in no_decay)
and not any(bb in n for bb in head_names)
],
"weight_decay": 0.0,
"lr": lr,
},
{
"params": [
p
for n, p in pl_module.named_parameters()
if not any(nd in n for nd in no_decay)
and any(bb in n for bb in head_names)
],
"weight_decay": wd,
"lr": lr * lr_mult,
},
{
"params": [
p
for n, p in pl_module.named_parameters()
if any(nd in n for nd in no_decay) and any(bb in n for bb in head_names)
],
"weight_decay": 0.0,
"lr": lr * lr_mult,
},
]
if optim_type == "adamw":
optimizer = AdamW(
optimizer_grouped_parameters, lr=lr, eps=1e-8, betas=(0.9, 0.98)
)
elif optim_type == "adam":
optimizer = torch.optim.Adam(optimizer_grouped_parameters, lr=lr)
elif optim_type == "sgd":
optimizer = torch.optim.SGD(optimizer_grouped_parameters, lr=lr, momentum=0.9)
if pl_module.trainer.max_steps is None or pl_module.trainer.max_steps==-1:
max_steps = (
len(pl_module.trainer.datamodule.train_dataloader())
* pl_module.trainer.max_epochs
// pl_module.trainer.accumulate_grad_batches
)
else:
max_steps = pl_module.trainer.max_steps
warmup_steps = pl_module.hparams.config["warmup_steps"]
if isinstance(pl_module.hparams.config["warmup_steps"], float):
warmup_steps = int(max_steps * warmup_steps)
rank_zero_info("Warmup_steps:{} \t Max_steps:{}".format(warmup_steps, max_steps))
if decay_power == "cosine":
scheduler = get_cosine_schedule_with_warmup(
optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=max_steps,
)
else:
scheduler = get_polynomial_decay_schedule_with_warmup(
optimizer,
num_warmup_steps=warmup_steps,
num_training_steps=max_steps,
lr_end=end_lr,
power=decay_power,
)
sched = {"scheduler": scheduler, "interval": "step"}
return (
[optimizer],
[sched],
)
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/modules/vlmo_utils.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
import glob
import json
import tqdm
import functools
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from einops import rearrange
from pytorch_lightning.utilities.distributed import rank_zero_info
from vlmo.modules.dist_utils import all_gather
def compute_mlm(pl_module, batch):
infer = pl_module.infer(batch, mask_text=True, mask_image=False)
mlm_logits = pl_module.mlm_score(infer["text_feats"])
mlm_labels = infer["text_labels"]
mlm_loss = F.cross_entropy(
mlm_logits.view(-1, pl_module.hparams.config["vocab_size"]),
mlm_labels.view(-1),
ignore_index=-100,
)
ret = {
"mlm_loss": mlm_loss * 0.25,
"mlm_logits": mlm_logits,
"mlm_labels": mlm_labels,
"mlm_ids": infer["text_ids"],
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_mlm_loss")(ret["mlm_loss"])
acc = getattr(pl_module, f"{phase}_mlm_accuracy")(
ret["mlm_logits"], ret["mlm_labels"]
)
pl_module.log(f"mlm/{phase}/loss", loss)
pl_module.log(f"mlm/{phase}/accuracy", acc)
return ret
def compute_textonly_mlm(pl_module, batch):
infer = pl_module.infer_text_mlm(batch, mask_text=True)
mlm_logits = pl_module.mlm_score(infer["text_feats"])
mlm_labels = infer["text_labels"]
mlm_loss = F.cross_entropy(
mlm_logits.view(-1, pl_module.hparams.config["vocab_size"]),
mlm_labels.view(-1),
ignore_index=-100,
)
ret = {
"mlm_loss": mlm_loss,
"mlm_logits": mlm_logits,
"mlm_labels": mlm_labels,
"mlm_ids": infer["text_ids"],
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_textmlm_loss")(ret["mlm_loss"])
acc = getattr(pl_module, f"{phase}_textmlm_accuracy")(
ret["mlm_logits"], ret["mlm_labels"]
)
pl_module.log(f"textmlm/{phase}/loss", loss)
pl_module.log(f"textmlm/{phase}/accuracy", acc)
return ret
def compute_itm_hardneg(pl_module, batch, sim_i2t, sim_t2i):
pos_len = batch["text_ids"].size(0)
neg_len = batch["text_ids"].size(0)
bsz = batch["text_ids"].size(0)
itm_labels = torch.cat([torch.ones(pos_len), torch.zeros(neg_len), torch.zeros(neg_len)]).to(
pl_module.device
)
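    # Label layout: the first bsz examples are matched pairs, followed by bsz
    # image-negatives and bsz text-negatives.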
batch = {k: v for k, v in batch.items()}
infer_pos = pl_module.infer(batch, mask_text=False, mask_image=False)
batch_text_ids = infer_pos["text_ids"]
batch_text_masks = infer_pos["text_masks"]
batch_image = infer_pos["image"]
with torch.no_grad():
world_size = dist.get_world_size()
rank = dist.get_rank()
# We gather tensors from all gpus to get more hard negative candidates.
gathered_text_ids = [
torch.zeros_like(batch_text_ids) for _ in range(world_size)
]
gathered_text_masks = [
torch.zeros_like(batch_text_masks) for _ in range(world_size)
]
gathered_image = [
torch.zeros_like(batch_image) for _ in range(world_size)
]
dist.all_gather(gathered_text_ids, batch_text_ids)
dist.all_gather(gathered_text_masks, batch_text_masks)
dist.all_gather(gathered_image, batch_image)
all_text_ids = torch.cat(
[batch_text_ids]
+ gathered_text_ids[:rank]
+ gathered_text_ids[rank + 1 :]
)
all_text_masks = torch.cat(
[batch_text_masks]
+ gathered_text_masks[:rank]
+ gathered_text_masks[rank + 1 :]
)
all_image = torch.cat(
[batch_image]
+ gathered_image[:rank]
+ gathered_image[rank + 1 :]
)
with torch.no_grad():
weights_i2t = F.softmax(sim_i2t[:bsz, :].float(), dim=1)
weights_t2i = F.softmax(sim_t2i[:bsz, :].float(), dim=1)
weights_i2t.fill_diagonal_(0)
weights_t2i.fill_diagonal_(0)
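    # select a hard negative image for each text (sampled from the text-to-image similarity distribution)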
images_neg = []
for b in range(bsz):
neg_idx = torch.multinomial(weights_t2i[b], 1).item()
images_neg.append(all_image[neg_idx])
images_neg = torch.stack(images_neg, dim=0)
# select a negative text for each image
text_ids_neg = []
text_masks_neg = []
for b in range(bsz):
neg_idx = torch.multinomial(weights_i2t[b], 1).item()
text_ids_neg.append(all_text_ids[neg_idx])
text_masks_neg.append(all_text_masks[neg_idx])
text_ids_neg = torch.stack(text_ids_neg, dim=0)
text_masks_neg = torch.stack(text_masks_neg, dim=0)
# text_labels is not used in ITM loss
batch_imgs_neg = {"image":[images_neg], "text_ids":batch["text_ids"], "text_labels":batch["text_labels"], "text_masks":batch["text_masks"]}
infer_imags_neg = pl_module.infer(batch_imgs_neg, mask_text=False, mask_image=False)
batch_text_neg = {"image":batch["image"], "text_ids":text_ids_neg, "text_labels":batch["text_labels"], "text_masks":text_masks_neg}
infer_text_neg = pl_module.infer(batch_text_neg, mask_text=False, mask_image=False)
all_cls_feats = torch.cat([infer_pos["cls_feats"], infer_imags_neg["cls_feats"], infer_text_neg["cls_feats"]], dim=0)
itm_logits = pl_module.itm_score(all_cls_feats)
itm_loss = F.cross_entropy(itm_logits, itm_labels.long())
ret = {
"itm_loss": itm_loss,
"itm_logits": itm_logits,
"itm_labels": itm_labels,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_itm_loss")(ret["itm_loss"])
acc = getattr(pl_module, f"{phase}_itm_accuracy")(
ret["itm_logits"], ret["itm_labels"]
)
pl_module.log(f"itm/{phase}/loss", loss)
pl_module.log(f"itm/{phase}/accuracy", acc)
return ret
# The image-text contrastive loss implementation follows open_clip (https://github.com/mlfoundations/open_clip)
def compute_itc(pl_module, batch, aggregate=True):
# pl_module.logit_scale.data = torch.clamp(pl_module.logit_scale.data, 0, 4.6052)
infer_imag = pl_module.infer_image(batch, mask_image=False)
infer_text = pl_module.infer_text(batch, mask_text=False)
image_features = infer_imag["cls_feats"]
text_features = infer_text["cls_feats"]
logit_scale = pl_module.logit_scale.exp().mean()
image_vlffn_features = infer_imag["cls_vlffn_feats"]
text_vlffn_features = infer_text["cls_vlffn_feats"]
logit_vl_scale = pl_module.logit_vl_scale.exp().mean()
if aggregate:
world_size = dist.get_world_size()
rank = dist.get_rank()
# We gather tensors from all gpus to get more negatives to contrast with.
gathered_image_features = [
torch.zeros_like(image_features) for _ in range(world_size)
]
gathered_text_features = [
torch.zeros_like(text_features) for _ in range(world_size)
]
dist.all_gather(gathered_image_features, image_features)
dist.all_gather(gathered_text_features, text_features)
all_image_features = torch.cat(
[image_features]
+ gathered_image_features[:rank]
+ gathered_image_features[rank + 1 :]
)
all_text_features = torch.cat(
[text_features]
+ gathered_text_features[:rank]
+ gathered_text_features[rank + 1 :]
)
# this is needed to send gradients back everywhere.
logits_per_image = logit_scale * all_image_features @ all_text_features.t()
logits_per_text = logits_per_image.t()
gathered_image_vlffn_features = [
torch.zeros_like(image_vlffn_features) for _ in range(world_size)
]
gathered_text_vlffn_features = [
torch.zeros_like(text_vlffn_features) for _ in range(world_size)
]
dist.all_gather(gathered_image_vlffn_features, image_vlffn_features)
dist.all_gather(gathered_text_vlffn_features, text_vlffn_features)
all_image_vlffn_features = torch.cat(
[image_vlffn_features]
+ gathered_image_vlffn_features[:rank]
+ gathered_image_vlffn_features[rank + 1 :]
)
all_text_vlffn_features = torch.cat(
[text_vlffn_features]
+ gathered_text_vlffn_features[:rank]
+ gathered_text_vlffn_features[rank + 1 :]
)
# this is needed to send gradients back everywhere.
logits_per_vlffn_image = logit_vl_scale * all_image_vlffn_features @ all_text_vlffn_features.t()
logits_per_vlffn_text = logits_per_vlffn_image.t()
    else:
        logits_per_image = logit_scale * image_features @ text_features.t()
        logits_per_text = logit_scale * text_features @ image_features.t()
        # Compute the VL-FFN logits locally as well so the vlffn loss below is
        # defined in both branches.
        logits_per_vlffn_image = logit_vl_scale * image_vlffn_features @ text_vlffn_features.t()
        logits_per_vlffn_text = logit_vl_scale * text_vlffn_features @ image_vlffn_features.t()
    ground_truth = torch.arange(len(logits_per_image)).long().to(device=logits_per_image.device)
itc_loss = (
F.cross_entropy(logits_per_image.float(), ground_truth)
+ F.cross_entropy(logits_per_text.float(), ground_truth)
) / 2
itc_vlffn_loss = (
F.cross_entropy(logits_per_vlffn_image.float(), ground_truth)
+ F.cross_entropy(logits_per_vlffn_text.float(), ground_truth)
) / 2
itc_total_loss = (itc_loss + itc_vlffn_loss) * 0.5
ret = {
"itc_loss": itc_total_loss,
"itc_i2t_logits": logits_per_image,
"itc_t2i_logits": logits_per_text,
"itc_labels": ground_truth,
"itc_logit_scale": logit_scale,
"itc_logit_vl_scale": logit_vl_scale,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_itc_loss")(ret["itc_loss"])
scale = getattr(pl_module, f"{phase}_itc_logit_scale")(ret["itc_logit_scale"])
i2t_acc = getattr(pl_module, f"{phase}_itc_i2t_accuracy")(
ret["itc_i2t_logits"], ret["itc_labels"]
)
t2i_acc = getattr(pl_module, f"{phase}_itc_t2i_accuracy")(
ret["itc_t2i_logits"], ret["itc_labels"]
)
pl_module.log(f"itc/{phase}/loss", loss)
pl_module.log(f"itc/{phase}/logit_scale", scale)
pl_module.log(f"itc/{phase}/i2t_accuracy", i2t_acc)
pl_module.log(f"itc/{phase}/t2i_accuracy", t2i_acc)
vl_scale = getattr(pl_module, f"{phase}_itc_vl_logit_scale")(ret["itc_logit_vl_scale"])
vl_i2t_acc = getattr(pl_module, f"{phase}_itc_vl_i2t_accuracy")(
logits_per_vlffn_image, ret["itc_labels"]
)
vl_t2i_acc = getattr(pl_module, f"{phase}_itc_vl_t2i_accuracy")(
logits_per_vlffn_text, ret["itc_labels"]
)
pl_module.log(f"itc/{phase}/vl_logit_scale", vl_scale)
pl_module.log(f"itc/{phase}/vl_i2t_accuracy", vl_i2t_acc)
pl_module.log(f"itc/{phase}/vl_t2i_accuracy", vl_t2i_acc)
return ret
def compute_irtr(pl_module, batch, aggregate=True):
# pl_module.logit_scale.data = torch.clamp(pl_module.logit_scale.data, 0, 4.6052)
infer_imag = pl_module.infer_image_ft(batch, mask_image=False)
infer_text = pl_module.infer_text_ft(batch, mask_text=False)
image_features = infer_imag["cls_feats"]
text_features = infer_text["cls_feats"]
logit_scale = pl_module.logit_scale.exp().mean()
if aggregate:
world_size = dist.get_world_size()
rank = dist.get_rank()
# We gather tensors from all gpus to get more negatives to contrast with.
gathered_image_features = [
torch.zeros_like(image_features) for _ in range(world_size)
]
gathered_text_features = [
torch.zeros_like(text_features) for _ in range(world_size)
]
dist.all_gather(gathered_image_features, image_features)
dist.all_gather(gathered_text_features, text_features)
all_image_features = torch.cat(
[image_features]
+ gathered_image_features[:rank]
+ gathered_image_features[rank + 1 :]
)
all_text_features = torch.cat(
[text_features]
+ gathered_text_features[:rank]
+ gathered_text_features[rank + 1 :]
)
# this is needed to send gradients back everywhere.
logits_per_image = logit_scale * all_image_features @ all_text_features.t()
logits_per_text = logits_per_image.t()
else:
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logit_scale * text_features @ image_features.t()
    ground_truth = torch.arange(len(logits_per_image)).long().to(device=logits_per_image.device)
irtr_loss = (
F.cross_entropy(logits_per_image.float(), ground_truth)
+ F.cross_entropy(logits_per_text.float(), ground_truth)
) / 2
ret = {
"irtr_loss": irtr_loss,
"irtr_i2t_logits": logits_per_image,
"irtr_t2i_logits": logits_per_text,
"irtr_labels": ground_truth,
"irtr_logit_scale": logit_scale,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_irtr_loss")(ret["irtr_loss"])
scale = getattr(pl_module, f"{phase}_irtr_logit_scale")(ret["irtr_logit_scale"])
i2t_acc = getattr(pl_module, f"{phase}_irtr_i2t_accuracy")(
ret["irtr_i2t_logits"], ret["irtr_labels"]
)
t2i_acc = getattr(pl_module, f"{phase}_irtr_t2i_accuracy")(
ret["irtr_t2i_logits"], ret["irtr_labels"]
)
pl_module.log(f"irtr/{phase}/loss", loss)
pl_module.log(f"irtr/{phase}/logit_scale", scale)
pl_module.log(f"irtr/{phase}/i2t_accuracy", i2t_acc)
pl_module.log(f"irtr/{phase}/t2i_accuracy", t2i_acc)
return ret
def compute_vqa(pl_module, batch):
infer = pl_module.infer(batch, mask_text=False, mask_image=False)
vqa_logits = pl_module.vqa_classifier(infer["cls_feats"])
vqa_targets = torch.zeros(
len(vqa_logits), pl_module.hparams.config["vqav2_label_size"]
).to(pl_module.device)
vqa_labels = batch["vqa_labels"]
vqa_scores = batch["vqa_scores"]
for i, (_label, _score) in enumerate(zip(vqa_labels, vqa_scores)):
for l, s in zip(_label, _score):
vqa_targets[i, l] = s
vqa_loss = (
F.binary_cross_entropy_with_logits(vqa_logits, vqa_targets)
* vqa_targets.shape[1]
) # https://github.com/jnhwkim/ban-vqa/blob/master/train.py#L19
ret = {
"vqa_loss": vqa_loss,
"vqa_logits": vqa_logits,
"vqa_targets": vqa_targets,
"vqa_labels": vqa_labels,
"vqa_scores": vqa_scores,
}
phase = "train" if pl_module.training else "val"
loss = getattr(pl_module, f"{phase}_vqa_loss")(ret["vqa_loss"])
score = getattr(pl_module, f"{phase}_vqa_score")(
ret["vqa_logits"], ret["vqa_targets"]
)
pl_module.log(f"vqa/{phase}/loss", loss)
pl_module.log(f"vqa/{phase}/score", score)
return ret
def compute_nlvr2(pl_module, batch):
infer1 = pl_module.infer(
batch, mask_text=False, mask_image=False, image_token_type_idx=1
)
infer2 = pl_module.infer(
batch, mask_text=False, mask_image=False, image_token_type_idx=2
)
cls_feats = torch.cat([infer1["cls_feats"], infer2["cls_feats"]], dim=-1)
nlvr2_logits = pl_module.nlvr2_classifier(cls_feats)
nlvr2_labels = batch["answers"]
nlvr2_labels = torch.tensor(nlvr2_labels).to(pl_module.device).long()
nlvr2_loss = F.cross_entropy(nlvr2_logits, nlvr2_labels)
ret = {
"nlvr2_loss": nlvr2_loss,
"nlvr2_logits": nlvr2_logits,
"nlvr2_labels": nlvr2_labels,
}
phase = "train" if pl_module.training else "val"
if phase == "train":
loss = getattr(pl_module, f"{phase}_nlvr2_loss")(ret["nlvr2_loss"])
acc = getattr(pl_module, f"{phase}_nlvr2_accuracy")(
ret["nlvr2_logits"], ret["nlvr2_labels"]
)
pl_module.log(f"nlvr2/{phase}/loss", loss)
pl_module.log(f"nlvr2/{phase}/accuracy", acc)
else:
dev_batches = [i for i, n in enumerate(batch["table_name"]) if "dev" in n]
test_batches = [i for i, n in enumerate(batch["table_name"]) if "test" in n]
if dev_batches:
dev_loss = getattr(pl_module, f"dev_nlvr2_loss")(
F.cross_entropy(
ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches]
)
)
dev_acc = getattr(pl_module, f"dev_nlvr2_accuracy")(
ret["nlvr2_logits"][dev_batches], ret["nlvr2_labels"][dev_batches]
)
pl_module.log(f"nlvr2/dev/loss", dev_loss)
pl_module.log(f"nlvr2/dev/accuracy", dev_acc)
if test_batches:
test_loss = getattr(pl_module, f"test_nlvr2_loss")(
F.cross_entropy(
ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
)
)
test_acc = getattr(pl_module, f"test_nlvr2_accuracy")(
ret["nlvr2_logits"][test_batches], ret["nlvr2_labels"][test_batches]
)
pl_module.log(f"nlvr2/test/loss", test_loss)
pl_module.log(f"nlvr2/test/accuracy", test_acc)
return ret
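# Dual-encoder retrieval evaluation: all captions and all images are encoded
# separately with infer_text_ft / infer_image_ft, similarity is the dot product of
# the projected cls features, and R@1/5/10 is reported for image retrieval (ir,
# text->image) and text retrieval (tr, image->text).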
@torch.no_grad()
def compute_irtr_recall(pl_module, split="test"):
world_size = dist.get_world_size()
rank = dist.get_rank()
if split == "val":
rank_zero_info("Use val set...")
text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset()
else:
rank_zero_info("Use test set...")
text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_test_dset()
text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
text_loader = torch.utils.data.DataLoader(
text_dset,
batch_size=32,
num_workers=2, #pl_module.hparams.config["num_workers"],
pin_memory=True,
collate_fn=functools.partial(
text_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
)
if split == "val":
image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset(
image_only=True
)
else:
image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_test_dset(
image_only=True
)
image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
image_loader = torch.utils.data.DataLoader(
image_dset,
batch_size=32,
num_workers=2, #pl_module.hparams.config["num_workers"],
pin_memory=True,
collate_fn=functools.partial(
image_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
)
text_preload = list()
for _b in tqdm.tqdm(text_loader, desc="text prefetch loop"):
text_preload.append(
{
"text_ids": _b["text_ids"].to(pl_module.device),
"text_masks": _b["text_masks"].to(pl_module.device),
"text_labels": _b["text_labels"].to(pl_module.device),
"img_index": _b["img_index"],
}
)
tiids = list()
for pre in text_preload:
tiids += pre["img_index"]
tiids = torch.tensor(tiids)
rank_zero_info("len(tiids): {}".format(len(tiids)))
image_preload = list()
for _b in tqdm.tqdm(image_loader, desc="image prefetch loop"):
image_preload.append(
{
"image": [_b["image"][0].to(pl_module.device)],
"img_index": _b["img_index"],
}
)
iids = list()
for pre in image_preload:
iids += pre["img_index"]
iids = torch.tensor(iids)
rank_zero_info("len(iids): {}".format(len(iids)))
txt_cls_feats = list()
for txt_batch in text_preload:
with torch.cuda.amp.autocast():
cls_feats = pl_module.infer_text_ft(
{
"text_ids": txt_batch["text_ids"],
"text_masks": txt_batch["text_masks"],
"text_labels": txt_batch["text_labels"],
}
)["cls_feats"]
txt_cls_feats.append(cls_feats)
img_cls_feats = list()
for img_batch in image_preload:
with torch.cuda.amp.autocast():
cls_feats = pl_module.infer_image_ft(
{
"image": img_batch["image"],
}
)["cls_feats"]
img_cls_feats.append(cls_feats)
txt_cls_feats = torch.cat(txt_cls_feats)
img_cls_feats = torch.cat(img_cls_feats)
rank_zero_info("txt_cls_feats.size(): {}\t{}".format(txt_cls_feats.size(), split))
rank_zero_info("img_cls_feats.size(): {}\t{}".format(img_cls_feats.size(), split))
scores = img_cls_feats @ txt_cls_feats.t()
rank_zero_info("scores.size(): {}".format(scores.size(), split))
topk10 = scores.topk(10, dim=1)
topk5 = scores.topk(5, dim=1)
topk1 = scores.topk(1, dim=1)
topk10_iids = tiids[topk10.indices.to(tiids.device)]
topk5_iids = tiids[topk5.indices.to(tiids.device)]
topk1_iids = tiids[topk1.indices.to(tiids.device)]
tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
topk10 = scores.topk(10, dim=0)
topk5 = scores.topk(5, dim=0)
topk1 = scores.topk(1, dim=0)
topk10_iids = iids[topk10.indices.to(iids.device)]
topk5_iids = iids[topk5.indices.to(iids.device)]
topk1_iids = iids[topk1.indices.to(iids.device)]
ir_r10 = (tiids.unsqueeze(0) == topk10_iids).float().max(dim=0)[0].mean()
ir_r5 = (tiids.unsqueeze(0) == topk5_iids).float().max(dim=0)[0].mean()
ir_r1 = (tiids.unsqueeze(0) == topk1_iids).float().max(dim=0)[0].mean()
return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10)
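# Two-stage retrieval evaluation: dual-encoder similarities produce candidates, the
# top-k_test candidates per query are re-scored with the ITM head, and the two score
# matrices are summed before computing recall.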
@torch.no_grad()
def compute_irtr_recall_with_rerank(pl_module, split="test"):
world_size = dist.get_world_size()
rank = dist.get_rank()
if split == "val":
rank_zero_info("Use val set...")
text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset()
else:
rank_zero_info("Use test set...")
text_dset = pl_module.trainer.datamodule.dms[0].make_no_false_test_dset()
text_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
text_loader = torch.utils.data.DataLoader(
text_dset,
batch_size=32,
num_workers=2, #pl_module.hparams.config["num_workers"],
pin_memory=True,
collate_fn=functools.partial(
text_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
)
if split == "val":
image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_val_dset(
image_only=True
)
else:
image_dset = pl_module.trainer.datamodule.dms[0].make_no_false_test_dset(
image_only=True
)
image_dset.tokenizer = pl_module.trainer.datamodule.dms[0].tokenizer
image_loader = torch.utils.data.DataLoader(
image_dset,
batch_size=32,
num_workers=2, #pl_module.hparams.config["num_workers"],
pin_memory=True,
collate_fn=functools.partial(
image_dset.collate,
mlm_collator=pl_module.trainer.datamodule.dms[0].mlm_collator,
),
)
text_preload = list()
for _b in tqdm.tqdm(text_loader, desc="text prefetch loop"):
text_preload.append(
{
"text_ids": _b["text_ids"].to(pl_module.device),
"text_masks": _b["text_masks"].to(pl_module.device),
"text_labels": _b["text_labels"].to(pl_module.device),
"img_index": _b["img_index"],
}
)
tiids = list()
text_ids_list = list()
text_masks_list = list()
text_labels_list = list()
for pre in text_preload:
tiids += pre["img_index"]
text_ids_list.append(pre["text_ids"])
text_masks_list.append(pre["text_masks"])
text_labels_list.append(pre["text_labels"])
tiids = torch.tensor(tiids)
all_text_ids = torch.cat(text_ids_list)
all_text_masks = torch.cat(text_masks_list)
all_text_labels = torch.cat(text_labels_list)
image_preload = list()
for _b in tqdm.tqdm(image_loader, desc="image prefetch loop"):
image_preload.append(
{
"image": [_b["image"][0].to(pl_module.device)],
"img_index": _b["img_index"],
}
)
iids = list()
image_list = list()
for pre in image_preload:
iids += pre["img_index"]
image_list.append(pre["image"][0])
iids = torch.tensor(iids)
all_image = torch.cat(image_list)
txt_cls_feats = list()
for txt_batch in text_preload:
with torch.cuda.amp.autocast():
cls_feats = pl_module.infer_text_ft(
{
"text_ids": txt_batch["text_ids"],
"text_masks": txt_batch["text_masks"],
"text_labels": txt_batch["text_labels"],
}
)["cls_feats"]
txt_cls_feats.append(cls_feats)
img_cls_feats = list()
for img_batch in image_preload:
with torch.cuda.amp.autocast():
cls_feats = pl_module.infer_image_ft(
{
"image": img_batch["image"],
}
)["cls_feats"]
img_cls_feats.append(cls_feats)
txt_cls_feats = torch.cat(txt_cls_feats)
img_cls_feats = torch.cat(img_cls_feats)
scores = img_cls_feats @ txt_cls_feats.t()
rank_zero_info("scores.size(): {}".format(scores.size(), split))
scores_i2t = torch.full((len(iids), len(tiids)), -100.0).to(pl_module.device)
k_test = pl_module.hparams.config["k_test"]
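    # The rerank workload is sharded across ranks: each rank rescores its slice of
    # query rows with the ITM head, and the partial score matrices are merged below
    # via all_reduce.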
num_tasks = world_size
step = scores.size(0) // num_tasks + 1
start = rank * step
end = min(scores.size(0), start+step)
for i, sims in enumerate(scores[start:end]):
if i%100 == 0:
rank_zero_info("TR Rerank: {}".format(i))
topk_sim, topk_idx = sims.topk(k=k_test, dim=0)
cur_images = all_image[start+i].repeat(k_test, 1, 1, 1)
cur_text_ids = all_text_ids[topk_idx]
cur_text_masks = all_text_masks[topk_idx]
cur_text_labels = all_text_labels[topk_idx]
cur_rerank_batch = {"image":[cur_images], "text_ids":cur_text_ids, "text_labels":cur_text_labels, "text_masks":cur_text_masks}
infer_rerank = pl_module.infer(cur_rerank_batch, mask_text=False, mask_image=False)
itm_logits = pl_module.itm_score(infer_rerank["cls_feats"])
itm_scores = itm_logits[:,1]
scores_i2t[start+i,topk_idx] = itm_scores
scores = scores.t()
scores_t2i = torch.full((len(tiids), len(iids)), -100.0).to(pl_module.device)
step = scores.size(0) // num_tasks + 1
start = rank * step
end = min(scores.size(0), start+step)
for i,sims in enumerate(scores[start:end]):
topk_sim, topk_idx = sims.topk(k=k_test, dim=0)
cur_images = all_image[topk_idx]
cur_text_ids = all_text_ids[start+i].repeat(k_test, 1)
cur_text_masks = all_text_masks[start+i].repeat(k_test, 1)
cur_text_labels = all_text_labels[start+i].repeat(k_test, 1)
cur_rerank_batch = {"image":[cur_images], "text_ids":cur_text_ids, "text_labels":cur_text_labels, "text_masks":cur_text_masks}
infer_rerank = pl_module.infer(cur_rerank_batch, mask_text=False, mask_image=False)
itm_logits = pl_module.itm_score(infer_rerank["cls_feats"])
itm_scores = itm_logits[:,1]
scores_t2i[start+i, topk_idx] = itm_scores
dist.barrier()
torch.distributed.all_reduce(scores_i2t, op=torch.distributed.ReduceOp.SUM)
torch.distributed.all_reduce(scores_t2i, op=torch.distributed.ReduceOp.SUM)
scores_t2i = scores_t2i + scores
scores_i2t = scores_i2t + scores.t()
topk10 = scores_i2t.topk(10, dim=1)
topk5 = scores_i2t.topk(5, dim=1)
topk1 = scores_i2t.topk(1, dim=1)
topk10_iids = tiids[topk10.indices]
topk5_iids = tiids[topk5.indices]
topk1_iids = tiids[topk1.indices]
tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
topk10 = scores_t2i.topk(10, dim=1)
topk5 = scores_t2i.topk(5, dim=1)
topk1 = scores_t2i.topk(1, dim=1)
topk10_iids = iids[topk10.indices]
topk5_iids = iids[topk5.indices]
topk1_iids = iids[topk1.indices]
ir_r10 = (tiids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
ir_r5 = (tiids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
ir_r1 = (tiids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
return (ir_r1, ir_r5, ir_r10, tr_r1, tr_r5, tr_r10)
def init_weights(module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def vqa_test_step(pl_module, batch, output):
id2answer = (
pl_module.trainer.datamodule.dm_dicts["vqa_trainval"].id2answer
if "vqa_trainval" in pl_module.trainer.datamodule.dm_dicts
else pl_module.trainer.datamodule.dm_dicts["vqa"].id2answer
)
vqa_logits = output["vqa_logits"]
vqa_preds = vqa_logits.argmax(dim=-1)
vqa_preds = [id2answer[pred.item()] for pred in vqa_preds]
questions = batch["text"]
qids = batch["qid"]
return {"qids": qids, "preds": vqa_preds}
def vqa_test_wrapup(outs, model_name, output_dir):
rank = torch.distributed.get_rank()
qids, preds = list(), list()
for out in outs:
qids += out["qids"]
preds += out["preds"]
rets = list()
for qid, pred in zip(qids, preds):
rets.append({"question_id": qid, "answer": pred})
with open(f"vqa_submit_{rank}.json", "w") as fp:
json.dump(rets, fp, indent=4)
torch.distributed.barrier()
if rank == 0:
jsons = list()
paths = list(glob.glob("vqa_submit_*.json"))
for path in paths:
with open(path, "r") as fp:
jsons += json.load(fp)
# os.makedirs("result", exist_ok=True)
os.makedirs(output_dir, exist_ok=True)
with open(f"{output_dir}/vqa_submit_{model_name}.json", "w") as fp:
json.dump(jsons, fp, indent=4)
torch.distributed.barrier()
os.remove(f"vqa_submit_{rank}.json")
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/modules/objectives.py |
""" Vision Transformer (ViT) in PyTorch
A PyTorch implementation of Vision Transformers as described in
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929
The official jax code is released and available at https://github.com/google-research/vision_transformer
Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out
for some einops/einsum fun
* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert
DeiT model defs and weights from https://github.com/facebookresearch/deit,
paper `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877
Hacked together by / Copyright 2020 Ross Wightman
"""
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
from pytorch_lightning.utilities.distributed import rank_zero_info
class Mlp(nn.Module):
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
drop=0.0,
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_scale=None,
attn_drop=0.0,
proj_drop=0.0,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(dim))
self.v_bias = nn.Parameter(torch.zeros(dim))
else:
self.q_bias = None
self.v_bias = None
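        # BEiT-style attention bias: learnable biases for q and v only; the k bias is
        # kept at zero and assembled on the fly in forward().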
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, mask=None, relative_position_bias=None):
B, N, C = x.shape
qkv_bias = None
if self.q_bias is not None:
qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = (
qkv[0],
qkv[1],
qkv[2],
) # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q.float() @ k.float().transpose(-2, -1))
if relative_position_bias is not None:
attn = attn + relative_position_bias.unsqueeze(0)
if mask is not None:
mask = mask.bool()
attn = attn.masked_fill(~mask[:, None, None, :], float("-inf"))
attn = attn.softmax(dim=-1).type_as(x)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.0,
qkv_bias=False,
qk_scale=None,
drop=0.0,
attn_drop=0.0,
drop_path=0.0,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
with_vlffn=False,
layer_scale_init_values=0.1,
max_text_len=40,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
attn_drop=attn_drop,
proj_drop=drop,
)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
self.norm2_text = norm_layer(dim)
self.norm2_imag = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp_text = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
)
self.mlp_imag = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
)
self.mlp_vl = None
if with_vlffn:
self.mlp_vl = Mlp(
in_features=dim,
hidden_features=mlp_hidden_dim,
act_layer=act_layer,
drop=drop,
)
self.norm2_vl = norm_layer(dim)
self.gamma_1 = \
nn.Parameter(layer_scale_init_values * torch.ones((dim)),requires_grad=True) \
if layer_scale_init_values is not None else 1.0
self.gamma_2 = \
nn.Parameter(layer_scale_init_values * torch.ones((dim)),requires_grad=True) \
if layer_scale_init_values is not None else 1.0
self.max_text_len = max_text_len
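    # Self-attention is shared across modalities; the feed-forward expert is selected
    # by modality_type ("image", "text", or "vl"). For "vl" inputs in layers without a
    # dedicated VL-FFN, the text and image segments are routed to their own experts.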
def forward(self, x, mask=None, modality_type=None, relative_position_bias=None):
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), mask=mask, relative_position_bias=relative_position_bias))
if modality_type == "image":
x = x + self.drop_path(self.gamma_2 * self.mlp_imag(self.norm2_imag(x)))
elif modality_type == "text":
x = x + self.drop_path(self.gamma_2 * self.mlp_text(self.norm2_text(x)))
else:
if self.mlp_vl is None:
x_text = x[:, : self.max_text_len]
x_imag = x[:, self.max_text_len :]
x_text = x_text + self.drop_path(self.gamma_2 * self.mlp_text(self.norm2_text(x_text)))
x_imag = x_imag + self.drop_path(self.gamma_2 * self.mlp_imag(self.norm2_imag(x_imag)))
x = torch.cat([x_text, x_imag], dim=1)
else:
x = x + self.drop_path(self.gamma_2 * self.mlp_vl(self.norm2_vl(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
no_patch_embed_bias=False,
):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(
in_chans,
embed_dim,
kernel_size=patch_size,
stride=patch_size,
bias=False if no_patch_embed_bias else True,
)
def forward(self, x):
B, C, H, W = x.shape
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
# FIXME look at relaxing size constraints
x = self.proj(x)
return x
class MultiWayTransformer(nn.Module):
""" Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale` -
https://arxiv.org/abs/2010.11929
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.0,
norm_layer=None,
need_relative_position_embed=True,
use_abs_pos_emb=False,
layer_scale_init_values=0.1,
vlffn_start_layer_index=10,
config=None,
):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_scale (float): override default qk scale of head_dim ** -0.5 if set
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
norm_layer: (nn.Module): normalization layer
need_relative_position_embed (bool): enable relative position bias on self-attention
use_abs_pos_emb (bool): enable abs pos emb
layer_scale_init_values (float or None): layer scale init values, set None to disable
vlffn_start_layer_index (int): vl-ffn start index
            config (dict): experiment config from PyTorch Lightning; effectively required, since drop_path_rate, loss_names and max_text_len are read from it
"""
super().__init__()
drop_path_rate = drop_path_rate if config is None else config["drop_path_rate"]
rank_zero_info("drop path rate: {}".format(drop_path_rate))
self.use_abs_pos_emb = use_abs_pos_emb
self.need_relative_position_embed = need_relative_position_embed
self.num_features = (
self.embed_dim
) = embed_dim # num_features for consistency with other models
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
self.patch_size = patch_size
self.num_heads = num_heads
self.vlffn_start_layer_index = vlffn_start_layer_index
if config["loss_names"]["textmlm"] > 0:
self.vlffn_start_layer_index = depth
rank_zero_info("Set vlffn_start_layer_index={} for text-only pretraining".format(self.vlffn_start_layer_index))
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) if self.use_abs_pos_emb else None
self.pos_drop = nn.Dropout(p=drop_rate)
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, depth)
] # stochastic depth decay rule
self.blocks = nn.ModuleList(
[
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
with_vlffn=(i >= self.vlffn_start_layer_index),
layer_scale_init_values=layer_scale_init_values,
max_text_len=config["max_text_len"],
)
for i in range(depth)
]
)
self.norm = norm_layer(embed_dim)
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=0.02)
trunc_normal_(self.cls_token, std=0.02)
self.apply(self._init_weights)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=0.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
return {"pos_embed", "cls_token"}
def visual_embed(self, _x):
x = self.patch_embed(_x)
x = x.flatten(2).transpose(1, 2)
B, L, _ = x.shape
cls_tokens = self.cls_token.expand(B, -1, -1)
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
x_mask = torch.ones(x.shape[0], x.shape[1])
return x, x_mask
# VLMo base/p16
@register_model
def vlmo_base_patch16(pretrained=False, **kwargs):
img_size = kwargs.pop("img_size", 224)
model = MultiWayTransformer(
img_size=img_size, patch_size=16, embed_dim=768, depth=12, num_heads=12,
mlp_ratio=4, qkv_bias=True, vlffn_start_layer_index=10,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
# VLMo large/p16
@register_model
def vlmo_large_patch16(pretrained=False, **kwargs):
img_size = kwargs.pop("img_size", 224)
model = MultiWayTransformer(
img_size=img_size, patch_size=16, embed_dim=1024, depth=24, num_heads=16,
mlp_ratio=4, qkv_bias=True, vlffn_start_layer_index=21,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
# VLMo base+/p16
@register_model
def vlmo_base_plus_patch16(pretrained=False, **kwargs):
img_size = kwargs.pop("img_size", 224)
model = MultiWayTransformer(
img_size=img_size, patch_size=16, embed_dim=544, depth=24, num_heads=16,
mlp_ratio=4, qkv_bias=True, vlffn_start_layer_index=21,
use_abs_pos_emb=True, need_relative_position_embed=False,
layer_scale_init_values=None, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
return model
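# Illustrative usage sketch (not part of the original release): the config dict below is a
# placeholder covering only the keys read in __init__ above ("drop_path_rate", "loss_names",
# "max_text_len"); the values are arbitrary, not VLMo's released settings.
def _vlmo_usage_example():
    cfg = {"drop_path_rate": 0.1, "loss_names": {"textmlm": 0}, "max_text_len": 40}
    model = vlmo_base_patch16(config=cfg)
    image = torch.randn(2, 3, 224, 224)
    feats, mask = model.visual_embed(image)  # feats: (2, 197, 768) = [CLS] + 14*14 patch tokens
    return feats.shape, mask.shape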
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/modules/multiway_transformer.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
"""
A torch process group which only includes processes that are on the same machine as the current process.
This variable is set when processes are spawned by `launch()` in "engine/launch.py".
"""
def get_world_size() -> int:
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank() -> int:
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def get_local_rank() -> int:
"""
Returns:
The rank of the current process within the local (per-machine) process group.
"""
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
assert _LOCAL_PROCESS_GROUP is not None
return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
def get_local_size() -> int:
"""
Returns:
The size of the per-machine process group,
i.e. the number of processes per machine.
"""
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
def is_main_process() -> bool:
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
@functools.lru_cache()
def _get_global_gloo_group():
"""
Return a process group based on gloo backend, containing all the ranks
The result is cached.
"""
if dist.get_backend() == "nccl":
return dist.new_group(backend="gloo")
else:
return dist.group.WORLD
def _serialize_to_tensor(data, group):
backend = dist.get_backend(group)
assert backend in ["gloo", "nccl"]
device = torch.device("cpu" if backend == "gloo" else "cuda")
buffer = pickle.dumps(data)
if len(buffer) > 1024 ** 3:
logger = logging.getLogger(__name__)
logger.warning(
"Rank {} trying to all-gather {:.2f} GB of data on device {}".format(
get_rank(), len(buffer) / (1024 ** 3), device
)
)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to(device=device)
return tensor
def _pad_to_largest_tensor(tensor, group):
"""
Returns:
list[int]: size of the tensor, on each rank
Tensor: padded tensor that has the max size
"""
world_size = dist.get_world_size(group=group)
assert (
world_size >= 1
), "comm.gather/all_gather must be called from ranks within the given group!"
local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device)
size_list = [
torch.zeros([1], dtype=torch.int64, device=tensor.device)
for _ in range(world_size)
]
dist.all_gather(size_list, local_size, group=group)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
if local_size != max_size:
padding = torch.zeros(
(max_size - local_size,), dtype=torch.uint8, device=tensor.device
)
tensor = torch.cat((tensor, padding), dim=0)
return size_list, tensor
def all_gather(data, group=None):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: list of data gathered from each rank
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group) == 1:
return [data]
tensor = _serialize_to_tensor(data, group)
size_list, tensor = _pad_to_largest_tensor(tensor, group)
max_size = max(size_list)
# receiving Tensor from all ranks
tensor_list = [
torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
for _ in size_list
]
dist.all_gather(tensor_list, tensor, group=group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
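# Illustrative sketch (the argument name is hypothetical): a common use of all_gather is to
# collect per-rank evaluation results before computing a global metric on every process.
def _all_gather_example(local_results):
    merged = {}
    for part in all_gather(local_results):  # list with one entry per rank
        merged.update(part)
    return merged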
def gather(data, dst=0, group=None):
"""
Run gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
dst (int): destination rank
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: on dst, a list of data gathered from each rank. Otherwise,
an empty list.
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group=group) == 1:
return [data]
rank = dist.get_rank(group=group)
tensor = _serialize_to_tensor(data, group)
size_list, tensor = _pad_to_largest_tensor(tensor, group)
# receiving Tensor from all ranks
if rank == dst:
max_size = max(size_list)
tensor_list = [
torch.empty((max_size,), dtype=torch.uint8, device=tensor.device)
for _ in size_list
]
dist.gather(tensor, tensor_list, dst=dst, group=group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
else:
dist.gather(tensor, [], dst=dst, group=group)
return []
def shared_random_seed():
"""
Returns:
int: a random number that is the same across all workers.
If workers need a shared RNG, they can use this shared seed to
create one.
All workers must call this function, otherwise it will deadlock.
"""
ints = np.random.randint(2 ** 31)
all_ints = all_gather(ints)
return all_ints[0]
def reduce_dict(input_dict, average=True):
"""
Reduce the values in the dictionary from all processes so that process with rank
0 has the reduced results.
Args:
input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
average (bool): whether to do average or sum
Returns:
a dict with the same keys as input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
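# Illustrative sketch (loss names/values are hypothetical): reduce_dict is typically used to
# average per-rank scalar losses so that only rank 0 logs the reduced values.
def _reduce_dict_example(loss_dict):
    reduced = reduce_dict(loss_dict, average=True)
    if is_main_process():
        return {k: v.item() for k, v in reduced.items()}
    return {}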
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/modules/dist_utils.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformers.models.bert.modeling_bert import BertPredictionHeadTransform
class Pooler(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class ITMHead(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.fc = nn.Linear(hidden_size, 2)
def forward(self, x):
x = self.fc(x)
return x
class ITCHead(nn.Module):
def __init__(self, hidden_size):
super().__init__()
self.fc = nn.Linear(hidden_size, hidden_size, bias=False)
def forward(self, x):
x = self.fc(x)
return x
class MLMHead(nn.Module):
def __init__(self, config, weight=None):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
if weight is not None:
self.decoder.weight = weight
def forward(self, x):
x = self.transform(x)
x = self.decoder(x) + self.bias
return x
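# Illustrative shape check (hidden/vocab sizes are placeholders, not VLMo's settings).
def _heads_shape_example():
    from transformers import BertConfig
    cfg = BertConfig(hidden_size=768, vocab_size=30522)
    hidden = torch.randn(2, 16, 768)       # (batch, seq_len, hidden)
    pooled = Pooler(768)(hidden)           # (2, 768), taken from the first ([CLS]) token
    itm_logits = ITMHead(768)(pooled)      # (2, 2) image-text matching logits
    mlm_logits = MLMHead(cfg)(hidden)      # (2, 16, 30522) masked-LM logits
    return pooled.shape, itm_logits.shape, mlm_logits.shape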
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/modules/heads.py |
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/gadgets/__init__.py
import torch
from torchmetrics import Metric
class Accuracy(Metric):
def __init__(self, dist_sync_on_step=False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.add_state("correct", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")
def update(self, logits, target):
logits, target = (
logits.detach().to(self.correct.device),
target.detach().to(self.correct.device),
)
preds = logits.argmax(dim=-1)
preds = preds[target != -100]
target = target[target != -100]
if target.numel() == 0:
return 1
assert preds.shape == target.shape
self.correct += torch.sum(preds == target)
self.total += target.numel()
def compute(self):
return self.correct / self.total
class Scalar(Metric):
def __init__(self, dist_sync_on_step=False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.add_state("scalar", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")
def update(self, scalar):
if isinstance(scalar, torch.Tensor):
scalar = scalar.detach().to(self.scalar.device)
else:
scalar = torch.tensor(scalar).float().to(self.scalar.device)
self.scalar += scalar
self.total += 1
def compute(self):
return self.scalar / self.total
class VQAScore(Metric):
def __init__(self, dist_sync_on_step=False):
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.add_state("score", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("total", default=torch.tensor(0.0), dist_reduce_fx="sum")
def update(self, logits, target):
logits, target = (
logits.detach().float().to(self.score.device),
target.detach().float().to(self.score.device),
)
logits = torch.max(logits, 1)[1]
one_hots = torch.zeros(*target.size()).to(target)
one_hots.scatter_(1, logits.view(-1, 1), 1)
scores = one_hots * target
self.score += scores.sum()
self.total += len(logits)
def compute(self):
return self.score / self.total
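# Illustrative sketch: the metrics above follow the torchmetrics update()/compute() protocol;
# the tensors below are made up, and targets equal to -100 are ignored by Accuracy.
def _accuracy_example():
    acc = Accuracy()
    acc.update(torch.tensor([[2.0, 0.1], [0.3, 1.5]]), torch.tensor([0, -100]))
    return acc.compute()  # tensor(1.) -- only the first example is counted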
| EXA-1-master | exa/models/unilm-master/vlmo/vlmo/gadgets/my_metrics.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning multi-lingual models on XNLI (Bert, DistilBERT, XLM, MiniLM).
Adapted from `examples/run_glue.py`"""
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForSequenceClassification,
BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMTokenizer,
get_linear_schedule_with_warmup,
XLMRobertaTokenizer,
)
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import xnli_compute_metrics as compute_metrics
from transformers import xnli_output_modes as output_modes
from transformers import xnli_processors as processors
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, DistilBertConfig, XLMConfig)), ()
)
MODEL_CLASSES = {
"bert": (BertConfig, BertForSequenceClassification, BertTokenizer),
"minilm": (BertConfig, BertForSequenceClassification, XLMRobertaTokenizer),
"xlm": (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
"distilbert": (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
try:
# set global_step to global_step of last saved checkpoint from model path
checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
)
set_seed(args)  # Added here for reproducibility
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert"] else None
) # XLM and DistilBERT don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer)
for key, value in results.items():
tb_writer.add_scalar("eval_{}".format(key), value, global_step)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, prefix=""):
eval_task_names = (args.task_name,)
eval_outputs_dirs = (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert"] else None
) # XLM and DistilBERT don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
else:
raise ValueError("No other `output_mode` for XNLI.")
result = compute_metrics(eval_task, preds, out_label_ids)
results.update(result)
output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
return results
def load_and_cache_examples(args, task, tokenizer, evaluate=False):
if args.local_rank not in [-1, 0] and not evaluate:
torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset, and the others will use the cache
processor = processors[task](language=args.language, train_language=args.train_language)
output_mode = output_modes[task]
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}_{}_{}".format(
"test" if evaluate else "train",
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
str(task),
str(args.train_language if (not evaluate and args.train_language is not None) else args.language),
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
examples = (
processor.get_test_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
)
features = convert_examples_to_features(
examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
output_mode=output_mode,
pad_on_left=False,
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=0,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and not evaluate:
torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
else:
raise ValueError("No other `output_mode` for XNLI.")
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS),
)
parser.add_argument(
"--language",
default=None,
type=str,
required=True,
help="Evaluation language. Also train language if `train_language` is set to None.",
)
parser.add_argument(
"--train_language", default=None, type=str, help="Train language if is different of the evaluation language."
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Other parameters
parser.add_argument(
"--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the test set.")
parser.add_argument(
"--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step."
)
parser.add_argument(
"--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model."
)
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.")
parser.add_argument(
"--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation."
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
parser.add_argument(
"--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform."
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available")
parser.add_argument(
"--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory"
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.")
parser.add_argument("--server_port", type=str, default="", help="For distant debugging.")
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
# Prepare XNLI task
args.task_name = "xnli"
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name](language=args.language, train_language=args.train_language)
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use default names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(args.output_dir)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
if __name__ == "__main__":
main()
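# Illustrative invocation (paths and the checkpoint name are placeholders, not released artifacts):
#   python run_xnli.py \
#       --model_type minilm \
#       --model_name_or_path /path/to/minilm-checkpoint \
#       --train_language en --language de \
#       --do_train --do_eval \
#       --data_dir /path/to/XNLI \
#       --max_seq_length 128 --per_gpu_train_batch_size 32 \
#       --learning_rate 5e-5 --num_train_epochs 2 \
#       --output_dir /tmp/xnli-out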
| EXA-1-master | exa/models/unilm-master/minilm/examples/run_xnli.py |
# --------------------------------------------------------
# WavLM: Large-Scale Self-Supervised Pre-training for Full Stack Speech Processing (https://arxiv.org/abs/2110.13900.pdf)
# Github source: https://github.com/microsoft/unilm/tree/master/wavlm
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq code bases
# https://github.com/pytorch/fairseq
# --------------------------------------------------------
import math
import warnings
from typing import Dict, Optional, Tuple
import torch
from torch import Tensor, nn
from torch.nn import Parameter
import torch.nn.functional as F
class TransposeLast(nn.Module):
def __init__(self, deconstruct_idx=None):
super().__init__()
self.deconstruct_idx = deconstruct_idx
def forward(self, x):
if self.deconstruct_idx is not None:
x = x[self.deconstruct_idx]
return x.transpose(-2, -1)
class Fp32LayerNorm(nn.LayerNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.layer_norm(
input.float(),
self.normalized_shape,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
class Fp32GroupNorm(nn.GroupNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.group_norm(
input.float(),
self.num_groups,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
class GradMultiply(torch.autograd.Function):
@staticmethod
def forward(ctx, x, scale):
ctx.scale = scale
res = x.new(x)
return res
@staticmethod
def backward(ctx, grad):
return grad * ctx.scale, None
class SamePad(nn.Module):
def __init__(self, kernel_size, causal=False):
super().__init__()
if causal:
self.remove = kernel_size - 1
else:
self.remove = 1 if kernel_size % 2 == 0 else 0
def forward(self, x):
if self.remove > 0:
x = x[:, :, : -self.remove]
return x
class Swish(nn.Module):
"""Swish function
"""
def __init__(self):
"""Construct an MultiHeadedAttention object."""
super(Swish, self).__init__()
self.act = torch.nn.Sigmoid()
def forward(self, x):
return x * self.act(x)
class GLU_Linear(nn.Module):
def __init__(self, input_dim, output_dim, glu_type="sigmoid", bias_in_glu=True):
super(GLU_Linear, self).__init__()
self.glu_type = glu_type
self.output_dim = output_dim
if glu_type == "sigmoid":
self.glu_act = torch.nn.Sigmoid()
elif glu_type == "swish":
self.glu_act = Swish()
elif glu_type == "relu":
self.glu_act = torch.nn.ReLU()
elif glu_type == "gelu":
self.glu_act = torch.nn.GELU()
if bias_in_glu:
self.linear = nn.Linear(input_dim, output_dim * 2, True)
else:
self.linear = nn.Linear(input_dim, output_dim * 2, False)
def forward(self, x):
# to be consistent with GLU_Linear, we assume the input always has the #channel (#dim) in the last dimension of the tensor, so need to switch the dimension first for 1D-Conv case
x = self.linear(x)
if self.glu_type == "bilinear":
x = (x[:, :, 0:self.output_dim] * x[:, :, self.output_dim:self.output_dim * 2])
else:
x = (x[:, :, 0:self.output_dim] * self.glu_act(x[:, :, self.output_dim:self.output_dim * 2]))
return x
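# Illustrative sketch (sizes are arbitrary): the gated linear unit doubles the projection width
# and multiplies the value half by an activated gate computed from the other half.
def _glu_linear_example():
    glu = GLU_Linear(input_dim=256, output_dim=1024, glu_type="swish")
    return glu(torch.randn(4, 50, 256)).shape  # torch.Size([4, 50, 1024])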
def gelu_accurate(x):
if not hasattr(gelu_accurate, "_a"):
gelu_accurate._a = math.sqrt(2 / math.pi)
return (
0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3))))
)
def gelu(x: torch.Tensor) -> torch.Tensor:
return torch.nn.functional.gelu(x.float()).type_as(x)
def get_activation_fn(activation: str):
"""Returns the activation function corresponding to `activation`"""
if activation == "relu":
return F.relu
elif activation == "gelu":
return gelu
elif activation == "gelu_fast":
warnings.warn(
"--activation-fn=gelu_fast has been renamed to gelu_accurate"
)
return gelu_accurate
elif activation == "gelu_accurate":
return gelu_accurate
elif activation == "tanh":
return torch.tanh
elif activation == "linear":
return lambda x: x
elif activation == "glu":
return lambda x: x
else:
raise RuntimeError("--activation-fn {} not supported".format(activation))
def init_bert_params(module):
"""
Initialize the weights specific to the BERT Model.
This overrides the default initializations depending on the specified arguments.
1. If normal_init_linear_weights is set then weights of linear
layer will be initialized using the normal distribution and
bias will be set to the specified value.
2. If normal_init_embed_weights is set then weights of embedding
layer will be initialized using the normal distribution.
3. If normal_init_proj_weights is set then weights of
in_project_weight for MultiHeadAttention initialized using
the normal distribution (to be validated).
"""
def normal_(data):
# with FSDP, module params will be on CUDA, so we cast them back to CPU
# so that the RNG is consistent with and without FSDP
data.copy_(
data.cpu().normal_(mean=0.0, std=0.02).to(data.device)
)
if isinstance(module, nn.Linear):
normal_(module.weight.data)
if module.bias is not None:
module.bias.data.zero_()
if isinstance(module, nn.Embedding):
normal_(module.weight.data)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
if isinstance(module, MultiheadAttention):
normal_(module.q_proj.weight.data)
normal_(module.k_proj.weight.data)
normal_(module.v_proj.weight.data)
def quant_noise(module, p, block_size):
"""
Wraps modules and applies quantization noise to the weights for
subsequent quantization with Iterative Product Quantization as
described in "Training with Quantization Noise for Extreme Model Compression"
Args:
- module: nn.Module
- p: amount of Quantization Noise
- block_size: size of the blocks for subsequent quantization with iPQ
Remarks:
- Module weights must have the right sizes wrt the block size
- Only Linear, Embedding and Conv2d modules are supported for the moment
- For more detail on how to quantize by blocks with convolutional weights,
see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks"
- We implement the simplest form of noise here as stated in the paper
which consists in randomly dropping blocks
"""
# if no quantization noise, don't register hook
if p <= 0:
return module
# supported modules
assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))
# test whether module.weight has the right sizes wrt block_size
is_conv = module.weight.ndim == 4
# 2D matrix
if not is_conv:
assert (
module.weight.size(1) % block_size == 0
), "Input features must be a multiple of block sizes"
# 4D matrix
else:
# 1x1 convolutions
if module.kernel_size == (1, 1):
assert (
module.in_channels % block_size == 0
), "Input channels must be a multiple of block sizes"
# regular convolutions
else:
k = module.kernel_size[0] * module.kernel_size[1]
assert k % block_size == 0, "Kernel size must be a multiple of block size"
def _forward_pre_hook(mod, input):
# no noise for evaluation
if mod.training:
if not is_conv:
# gather weight and sizes
weight = mod.weight
in_features = weight.size(1)
out_features = weight.size(0)
# split weight matrix into blocks and randomly drop selected blocks
mask = torch.zeros(
in_features // block_size * out_features, device=weight.device
)
mask.bernoulli_(p)
mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)
else:
# gather weight and sizes
weight = mod.weight
in_channels = mod.in_channels
out_channels = mod.out_channels
# split weight matrix into blocks and randomly drop selected blocks
if mod.kernel_size == (1, 1):
mask = torch.zeros(
int(in_channels // block_size * out_channels),
device=weight.device,
)
mask.bernoulli_(p)
mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)
else:
mask = torch.zeros(
weight.size(0), weight.size(1), device=weight.device
)
mask.bernoulli_(p)
mask = (
mask.unsqueeze(2)
.unsqueeze(3)
.repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])
)
# scale weights and apply mask
mask = mask.to(
torch.bool
) # x.bool() is not currently supported in TorchScript
s = 1 / (1 - p)
mod.weight.data = s * weight.masked_fill(mask, 0)
module.register_forward_pre_hook(_forward_pre_hook)
return module
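# Illustrative sketch (sizes are arbitrary): wrapping a layer with quantization noise.
# As asserted above, the input feature dimension must be a multiple of block_size; blocks are
# only dropped (and the remainder rescaled by 1/(1-p)) while the module is in training mode.
def _quant_noise_example():
    layer = quant_noise(nn.Linear(64, 128), p=0.1, block_size=8)
    return layer(torch.randn(4, 64))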
class MultiheadAttention(nn.Module):
"""Multi-headed attention.
See "Attention Is All You Need" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
has_relative_attention_bias=False,
num_buckets=32,
max_distance=128,
gru_rel_pos=False,
rescale_init=False,
):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout_module = nn.Dropout(dropout)
self.has_relative_attention_bias = has_relative_attention_bias
self.num_buckets = num_buckets
self.max_distance = max_distance
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(num_buckets, num_heads)
self.head_dim = embed_dim // num_heads
self.q_head_dim = self.head_dim
self.k_head_dim = self.head_dim
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
k_bias = True
if rescale_init:
k_bias = False
k_embed_dim = embed_dim
q_embed_dim = embed_dim
self.k_proj = quant_noise(
nn.Linear(self.kdim, k_embed_dim, bias=k_bias), q_noise, qn_block_size
)
self.v_proj = quant_noise(
nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
)
self.q_proj = quant_noise(
nn.Linear(embed_dim, q_embed_dim, bias=bias), q_noise, qn_block_size
)
self.out_proj = quant_noise(
nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.gru_rel_pos = gru_rel_pos
if self.gru_rel_pos:
self.grep_linear = nn.Linear(self.q_head_dim, 8)
self.grep_a = nn.Parameter(torch.ones(1, num_heads, 1, 1))
self.reset_parameters()
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
if self.has_relative_attention_bias:
nn.init.xavier_normal_(self.relative_attention_bias.weight)
def _relative_positions_bucket(self, relative_positions, bidirectional=True):
num_buckets = self.num_buckets
max_distance = self.max_distance
relative_buckets = 0
if bidirectional:
num_buckets = num_buckets // 2
relative_buckets += (relative_positions > 0).to(torch.long) * num_buckets
relative_positions = torch.abs(relative_positions)
else:
relative_positions = -torch.min(relative_positions, torch.zeros_like(relative_positions))
max_exact = num_buckets // 2
is_small = relative_positions < max_exact
relative_position_if_large = max_exact + (
torch.log(relative_positions.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
relative_position_if_large = torch.min(
relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
)
relative_buckets += torch.where(is_small, relative_positions, relative_position_if_large)
return relative_buckets
def compute_bias(self, query_length, key_length):
context_position = torch.arange(query_length, dtype=torch.long)[:, None]
memory_position = torch.arange(key_length, dtype=torch.long)[None, :]
relative_position = memory_position - context_position
relative_position_bucket = self._relative_positions_bucket(
relative_position,
bidirectional=True
)
relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device)
values = self.relative_attention_bias(relative_position_bucket)
values = values.permute([2, 0, 1])
return values
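# The bias returned above is a (num_heads, query_length, key_length) tensor of learned scalars:
# each relative offset (key position minus query position) is mapped to one of num_buckets
# buckets -- exact for small offsets, logarithmically spaced beyond max_exact -- and then
# looked up in the relative_attention_bias embedding.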
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
position_bias: Optional[Tensor] = None
) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
averaged over heads (default: False).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
is_tpu = query.device.type == "xla"
tgt_len, bsz, embed_dim = query.size()
src_len = tgt_len
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if key is not None:
src_len, key_bsz, _ = key.size()
if not torch.jit.is_scripting():
assert key_bsz == bsz
assert value is not None
assert (src_len, bsz) == value.shape[:2]
if self.has_relative_attention_bias and position_bias is None:
position_bias = self.compute_bias(tgt_len, src_len)
position_bias = position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, src_len)
if (
not is_tpu # don't use PyTorch version on TPUs
and incremental_state is None
and not static_kv
# A workaround for quantization to work. Otherwise JIT compilation
# treats bias in linear module as method.
and not torch.jit.is_scripting()
and self.q_head_dim == self.head_dim
):
assert key is not None and value is not None
assert attn_mask is None
attn_mask_rel_pos = None
if position_bias is not None:
attn_mask_rel_pos = position_bias
if self.gru_rel_pos:
query_layer = query.transpose(0, 1)
new_x_shape = query_layer.size()[:-1] + (self.num_heads, -1)
query_layer = query_layer.view(*new_x_shape)
query_layer = query_layer.permute(0, 2, 1, 3)
_B, _H, _L, __ = query_layer.size()
gate_a, gate_b = torch.sigmoid(self.grep_linear(query_layer).view(
_B, _H, _L, 2, 4).sum(-1, keepdim=False)).chunk(2, dim=-1)
gate_a_1 = gate_a * (gate_b * self.grep_a - 1.0) + 2.0
attn_mask_rel_pos = gate_a_1.view(bsz * self.num_heads, -1, 1) * position_bias
attn_mask_rel_pos = attn_mask_rel_pos.view((-1, tgt_len, tgt_len))
k_proj_bias = self.k_proj.bias
if k_proj_bias is None:
k_proj_bias = torch.zeros_like(self.q_proj.bias)
x, attn = F.multi_head_attention_forward(
query,
key,
value,
self.embed_dim,
self.num_heads,
torch.empty([0]),
torch.cat((self.q_proj.bias, k_proj_bias, self.v_proj.bias)),
self.bias_k,
self.bias_v,
self.add_zero_attn,
self.dropout_module.p,
self.out_proj.weight,
self.out_proj.bias,
self.training,
# self.training or self.dropout_module.apply_during_inference,
key_padding_mask,
need_weights,
attn_mask_rel_pos,
use_separate_proj_weight=True,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
)
return x, attn, position_bias
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k = self.k_proj(query)
v = self.v_proj(query)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads, self.q_head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads, self.k_head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
src_len = k.size(1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
assert k.size(1) == src_len
# This is part of a workaround to get around fork/join parallelism
# not supporting Optional types.
if key_padding_mask is not None and key_padding_mask.dim() == 0:
key_padding_mask = None
if key_padding_mask is not None:
assert key_padding_mask.size(0) == bsz
assert key_padding_mask.size(1) == src_len
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
torch.zeros(key_padding_mask.size(0), 1).type_as(
key_padding_mask
),
],
dim=1,
)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
attn_weights += attn_mask
if key_padding_mask is not None:
# don't attend to padding symbols
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
if not is_tpu:
attn_weights = attn_weights.masked_fill(
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),
float("-inf"),
)
else:
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf"))
attn_weights = attn_weights.transpose(0, 2)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if before_softmax:
return attn_weights, v, position_bias
if position_bias is not None:
if self.gru_rel_pos == 1:
query_layer = q.view(bsz, self.num_heads, tgt_len, self.q_head_dim)
_B, _H, _L, __ = query_layer.size()
gate_a, gate_b = torch.sigmoid(self.grep_linear(query_layer).view(
_B, _H, _L, 2, 4).sum(-1, keepdim=False)).chunk(2, dim=-1)
gate_a_1 = gate_a * (gate_b * self.grep_a - 1.0) + 2.0
position_bias = gate_a_1.view(bsz * self.num_heads, -1, 1) * position_bias
position_bias = position_bias.view(attn_weights.size())
attn_weights = attn_weights + position_bias
attn_weights_float = F.softmax(
attn_weights, dim=-1
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = self.dropout_module(attn_weights)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights, position_bias
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
if src_len > prev_key_padding_mask.size(1):
filler = torch.zeros(
(batch_size, src_len - prev_key_padding_mask.size(1)),
device=prev_key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask.float()
elif key_padding_mask is not None:
if src_len > key_padding_mask.size(1):
filler = torch.zeros(
(batch_size, src_len - key_padding_mask.size(1)),
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = key_padding_mask.float()
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int):
return attn_weights
| EXA-1-master | exa/models/unilm-master/wavlm/modules.py |
# --------------------------------------------------------
# WavLM: Large-Scale Self-Supervised Pre-training for Full Stack Speech Processing (https://arxiv.org/abs/2110.13900.pdf)
# Github source: https://github.com/microsoft/unilm/tree/master/wavlm
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq code bases
# https://github.com/pytorch/fairseq
# --------------------------------------------------------
import math
import logging
from typing import List, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import LayerNorm
from modules import (
Fp32GroupNorm,
Fp32LayerNorm,
GradMultiply,
MultiheadAttention,
SamePad,
init_bert_params,
get_activation_fn,
TransposeLast,
GLU_Linear,
)
logger = logging.getLogger(__name__)
def compute_mask_indices(
shape: Tuple[int, int],
padding_mask: Optional[torch.Tensor],
mask_prob: float,
mask_length: int,
mask_type: str = "static",
mask_other: float = 0.0,
min_masks: int = 0,
no_overlap: bool = False,
min_space: int = 0,
) -> np.ndarray:
"""
Computes random mask spans for a given shape
Args:
shape: the shape for which to compute masks.
should be of size 2 where first element is batch size and 2nd is timesteps
padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements
mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by
number of timesteps divided by length of mask span to mask approximately this percentage of all elements.
however due to overlaps, the actual number will be smaller (unless no_overlap is True)
mask_type: how to compute mask lengths
static = fixed size
uniform = sample from uniform distribution [mask_other, mask_length*2]
normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element
poisson = sample from Poisson distribution with lambda = mask_length
min_masks: minimum number of masked spans
no_overlap: if true, will switch to an alternative recursive algorithm that prevents spans from overlapping
min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
"""
bsz, all_sz = shape
mask = np.full((bsz, all_sz), False)
all_num_mask = int(
# add a random number for probabilistic rounding
mask_prob * all_sz / float(mask_length)
+ np.random.rand()
)
all_num_mask = max(min_masks, all_num_mask)
mask_idcs = []
for i in range(bsz):
if padding_mask is not None:
sz = all_sz - padding_mask[i].long().sum().item()
num_mask = int(
# add a random number for probabilistic rounding
mask_prob * sz / float(mask_length)
+ np.random.rand()
)
num_mask = max(min_masks, num_mask)
else:
sz = all_sz
num_mask = all_num_mask
if mask_type == "static":
lengths = np.full(num_mask, mask_length)
elif mask_type == "uniform":
lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask)
elif mask_type == "normal":
lengths = np.random.normal(mask_length, mask_other, size=num_mask)
lengths = [max(1, int(round(x))) for x in lengths]
elif mask_type == "poisson":
lengths = np.random.poisson(mask_length, size=num_mask)
lengths = [int(round(x)) for x in lengths]
else:
raise Exception("unknown mask selection " + mask_type)
if sum(lengths) == 0:
lengths[0] = min(mask_length, sz - 1)
if no_overlap:
mask_idc = []
def arrange(s, e, length, keep_length):
span_start = np.random.randint(s, e - length)
mask_idc.extend(span_start + i for i in range(length))
new_parts = []
if span_start - s - min_space >= keep_length:
new_parts.append((s, span_start - min_space + 1))
if e - span_start - keep_length - min_space > keep_length:
new_parts.append((span_start + length + min_space, e))
return new_parts
parts = [(0, sz)]
min_length = min(lengths)
for length in sorted(lengths, reverse=True):
lens = np.fromiter(
(e - s if e - s >= length + min_space else 0 for s, e in parts),
int,  # np.int was removed in recent NumPy releases; the builtin int behaves identically here
)
l_sum = np.sum(lens)
if l_sum == 0:
break
probs = lens / np.sum(lens)
c = np.random.choice(len(parts), p=probs)
s, e = parts.pop(c)
parts.extend(arrange(s, e, length, min_length))
mask_idc = np.asarray(mask_idc)
else:
min_len = min(lengths)
if sz - min_len <= num_mask:
min_len = sz - num_mask - 1
mask_idc = np.random.choice(sz - min_len, num_mask, replace=False)
mask_idc = np.asarray(
[
mask_idc[j] + offset
for j in range(len(mask_idc))
for offset in range(lengths[j])
]
)
mask_idcs.append(np.unique(mask_idc[mask_idc < sz]))
min_len = min([len(m) for m in mask_idcs])
for i, mask_idc in enumerate(mask_idcs):
if len(mask_idc) > min_len:
mask_idc = np.random.choice(mask_idc, min_len, replace=False)
mask[i, mask_idc] = True
return mask
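# A minimal usage sketch (illustrative only, not part of the original source):
# compute_mask_indices is typically called with a (batch, frames) shape, and the
# boolean result is used to overwrite the masked frames with a learned embedding.
# `features` and `mask_emb` below are assumed tensors, not names from this file:
#
#   mask = compute_mask_indices((4, 400), None, mask_prob=0.65, mask_length=10)
#   mask = torch.from_numpy(mask)        # (4, 400) bool
#   features[mask] = mask_emb            # replace masked frames with the mask embedding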
class WavLMConfig:
def __init__(self, cfg=None):
self.extractor_mode: str = "default" # mode for feature extractor. default has a single group norm with d groups in the first conv block, whereas layer_norm has layer norms in every block (meant to be used with normalize=True)
self.encoder_layers: int = 12 # num encoder layers in the transformer
self.encoder_embed_dim: int = 768 # encoder embedding dimension
self.encoder_ffn_embed_dim: int = 3072 # encoder embedding dimension for FFN
self.encoder_attention_heads: int = 12 # num encoder attention heads
self.activation_fn: str = "gelu" # activation function to use
self.layer_norm_first: bool = False # apply layernorm first in the transformer
self.conv_feature_layers: str = "[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2" # string describing convolutional feature extraction layers in form of a python list that contains [(dim, kernel_size, stride), ...]
self.conv_bias: bool = False # include bias in conv encoder
self.feature_grad_mult: float = 1.0 # multiply feature extractor var grads by this
self.normalize: bool = False # normalize input to have 0 mean and unit variance during training
# dropouts
self.dropout: float = 0.1 # dropout probability for the transformer
self.attention_dropout: float = 0.1 # dropout probability for attention weights
self.activation_dropout: float = 0.0 # dropout probability after activation in FFN
self.encoder_layerdrop: float = 0.0 # probability of dropping a transformer layer
self.dropout_input: float = 0.0 # dropout to apply to the input (after feat extr)
self.dropout_features: float = 0.0 # dropout to apply to the features (after feat extr)
# masking
self.mask_length: int = 10 # mask length
self.mask_prob: float = 0.65 # probability of replacing a token with mask
self.mask_selection: str = "static" # how to choose mask length
self.mask_other: float = 0 # secondary mask argument (used for more complex distributions), see help in compute_mask_indices
self.no_mask_overlap: bool = False # whether to allow masks to overlap
self.mask_min_space: int = 1 # min space between spans (if no overlap is enabled)
# channel masking
self.mask_channel_length: int = 10 # length of the mask for features (channels)
self.mask_channel_prob: float = 0.0 # probability of replacing a feature with 0
self.mask_channel_selection: str = "static" # how to choose mask length for channel masking
self.mask_channel_other: float = 0 # secondary mask argument (used for more complex distributions), see help in compute_mask_indices
self.no_mask_channel_overlap: bool = False # whether to allow channel masks to overlap
self.mask_channel_min_space: int = 1 # min space between spans (if no overlap is enabled)
# positional embeddings
self.conv_pos: int = 128 # number of filters for convolutional positional embeddings
self.conv_pos_groups: int = 16 # number of groups for convolutional positional embedding
# relative position embedding
self.relative_position_embedding: bool = False # apply relative position embedding
self.num_buckets: int = 320 # number of buckets for relative position embedding
self.max_distance: int = 1280 # maximum distance for relative position embedding
self.gru_rel_pos: bool = False # apply gated relative position embedding
if cfg is not None:
self.update(cfg)
def update(self, cfg: dict):
self.__dict__.update(cfg)
class WavLM(nn.Module):
def __init__(
self,
cfg: WavLMConfig,
) -> None:
super().__init__()
logger.info(f"WavLM Config: {cfg.__dict__}")
self.cfg = cfg
feature_enc_layers = eval(cfg.conv_feature_layers)
self.embed = feature_enc_layers[-1][0]
self.feature_extractor = ConvFeatureExtractionModel(
conv_layers=feature_enc_layers,
dropout=0.0,
mode=cfg.extractor_mode,
conv_bias=cfg.conv_bias,
)
self.post_extract_proj = (
nn.Linear(self.embed, cfg.encoder_embed_dim)
if self.embed != cfg.encoder_embed_dim
else None
)
self.mask_prob = cfg.mask_prob
self.mask_selection = cfg.mask_selection
self.mask_other = cfg.mask_other
self.mask_length = cfg.mask_length
self.no_mask_overlap = cfg.no_mask_overlap
self.mask_min_space = cfg.mask_min_space
self.mask_channel_prob = cfg.mask_channel_prob
self.mask_channel_selection = cfg.mask_channel_selection
self.mask_channel_other = cfg.mask_channel_other
self.mask_channel_length = cfg.mask_channel_length
self.no_mask_channel_overlap = cfg.no_mask_channel_overlap
self.mask_channel_min_space = cfg.mask_channel_min_space
self.dropout_input = nn.Dropout(cfg.dropout_input)
self.dropout_features = nn.Dropout(cfg.dropout_features)
self.feature_grad_mult = cfg.feature_grad_mult
self.mask_emb = nn.Parameter(
torch.FloatTensor(cfg.encoder_embed_dim).uniform_()
)
self.encoder = TransformerEncoder(cfg)
self.layer_norm = LayerNorm(self.embed)
def apply_mask(self, x, padding_mask):
B, T, C = x.shape
if self.mask_prob > 0:
mask_indices = compute_mask_indices(
(B, T),
padding_mask,
self.mask_prob,
self.mask_length,
self.mask_selection,
self.mask_other,
min_masks=2,
no_overlap=self.no_mask_overlap,
min_space=self.mask_min_space,
)
mask_indices = torch.from_numpy(mask_indices).to(x.device)
x[mask_indices] = self.mask_emb
else:
mask_indices = None
if self.mask_channel_prob > 0:
mask_channel_indices = compute_mask_indices(
(B, C),
None,
self.mask_channel_prob,
self.mask_channel_length,
self.mask_channel_selection,
self.mask_channel_other,
no_overlap=self.no_mask_channel_overlap,
min_space=self.mask_channel_min_space,
)
mask_channel_indices = (
torch.from_numpy(mask_channel_indices)
.to(x.device)
.unsqueeze(1)
.expand(-1, T, -1)
)
x[mask_channel_indices] = 0
return x, mask_indices
def forward_padding_mask(
self, features: torch.Tensor, padding_mask: torch.Tensor,
) -> torch.Tensor:
extra = padding_mask.size(1) % features.size(1)
if extra > 0:
padding_mask = padding_mask[:, :-extra]
padding_mask = padding_mask.view(
padding_mask.size(0), features.size(1), -1
)
padding_mask = padding_mask.all(-1)
return padding_mask
def extract_features(
self,
source: torch.Tensor,
padding_mask: Optional[torch.Tensor] = None,
mask: bool = False,
ret_conv: bool = False,
output_layer: Optional[int] = None,
ret_layer_results: bool = False,
):
if self.feature_grad_mult > 0:
features = self.feature_extractor(source)
if self.feature_grad_mult != 1.0:
features = GradMultiply.apply(features, self.feature_grad_mult)
else:
with torch.no_grad():
features = self.feature_extractor(source)
features = features.transpose(1, 2)
features = self.layer_norm(features)
if padding_mask is not None:
padding_mask = self.forward_padding_mask(features, padding_mask)
if self.post_extract_proj is not None:
features = self.post_extract_proj(features)
features = self.dropout_input(features)
if mask:
x, mask_indices = self.apply_mask(
features, padding_mask
)
else:
x = features
# feature: (B, T, D), float
# target: (B, T), long
# x: (B, T, D), float
# padding_mask: (B, T), bool
# mask_indices: (B, T), bool
x, layer_results = self.encoder(
x,
padding_mask=padding_mask,
layer=None if output_layer is None else output_layer - 1
)
res = {"x": x, "padding_mask": padding_mask, "features": features, "layer_results": layer_results}
feature = res["features"] if ret_conv else res["x"]
if ret_layer_results:
feature = (feature, res["layer_results"])
return feature, res["padding_mask"]
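# Example usage (a sketch; the checkpoint path is hypothetical and should point to a
# released WavLM checkpoint such as WavLM-Base or WavLM-Large):
#
#   checkpoint = torch.load('/path/to/WavLM-Large.pt')
#   cfg = WavLMConfig(checkpoint['cfg'])
#   model = WavLM(cfg)
#   model.load_state_dict(checkpoint['model'])
#   model.eval()
#
#   wav_input_16khz = torch.randn(1, 10000)   # (batch, samples) waveform at 16 kHz
#   if cfg.normalize:
#       wav_input_16khz = torch.nn.functional.layer_norm(wav_input_16khz, wav_input_16khz.shape)
#   rep, padding_mask = model.extract_features(wav_input_16khz)   # rep: (batch, frames, dim)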
class ConvFeatureExtractionModel(nn.Module):
def __init__(
self,
conv_layers: List[Tuple[int, int, int]],
dropout: float = 0.0,
mode: str = "default",
conv_bias: bool = False,
conv_type: str = "default"
):
super().__init__()
assert mode in {"default", "layer_norm"}
def block(
n_in,
n_out,
k,
stride,
is_layer_norm=False,
is_group_norm=False,
conv_bias=False,
):
def make_conv():
conv = nn.Conv1d(n_in, n_out, k, stride=stride, bias=conv_bias)
nn.init.kaiming_normal_(conv.weight)
return conv
assert not (is_layer_norm and is_group_norm), "layer norm and group norm are exclusive"
if is_layer_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
nn.Sequential(
TransposeLast(),
Fp32LayerNorm(n_out, elementwise_affine=True),  # use the block's own output dim instead of the enclosing loop variable
TransposeLast(),
),
nn.GELU(),
)
elif is_group_norm:
return nn.Sequential(
make_conv(),
nn.Dropout(p=dropout),
Fp32GroupNorm(n_out, n_out, affine=True),
nn.GELU(),
)
else:
return nn.Sequential(make_conv(), nn.Dropout(p=dropout), nn.GELU())
self.conv_type = conv_type
if self.conv_type == "default":
in_d = 1
self.conv_layers = nn.ModuleList()
for i, cl in enumerate(conv_layers):
assert len(cl) == 3, "invalid conv definition: " + str(cl)
(dim, k, stride) = cl
self.conv_layers.append(
block(
in_d,
dim,
k,
stride,
is_layer_norm=mode == "layer_norm",
is_group_norm=mode == "default" and i == 0,
conv_bias=conv_bias,
)
)
in_d = dim
elif self.conv_type == "conv2d":
in_d = 1
self.conv_layers = nn.ModuleList()
for i, cl in enumerate(conv_layers):
assert len(cl) == 3
(dim, k, stride) = cl
self.conv_layers.append(
torch.nn.Conv2d(in_d, dim, k, stride)
)
self.conv_layers.append(torch.nn.ReLU())
in_d = dim
elif self.conv_type == "custom":
in_d = 1
idim = 80
self.conv_layers = nn.ModuleList()
for i, cl in enumerate(conv_layers):
assert len(cl) == 3
(dim, k, stride) = cl
self.conv_layers.append(
torch.nn.Conv2d(in_d, dim, k, stride, padding=1)
)
self.conv_layers.append(
torch.nn.LayerNorm([dim, idim])
)
self.conv_layers.append(torch.nn.ReLU())
in_d = dim
if (i + 1) % 2 == 0:
self.conv_layers.append(
torch.nn.MaxPool2d(2, stride=2, ceil_mode=True)
)
idim = int(math.ceil(idim / 2))
else:
pass
def forward(self, x, mask=None):
# BxT -> BxCxT
x = x.unsqueeze(1)
if self.conv_type == "custom":
for conv in self.conv_layers:
if isinstance(conv, nn.LayerNorm):
x = x.transpose(1, 2)
x = conv(x).transpose(1, 2)
else:
x = conv(x)
x = x.transpose(2, 3).contiguous()
x = x.view(x.size(0), -1, x.size(-1))
else:
for conv in self.conv_layers:
x = conv(x)
if self.conv_type == "conv2d":
b, c, t, f = x.size()
x = x.transpose(2, 3).contiguous().view(b, c * f, t)
return x
class TransformerEncoder(nn.Module):
def __init__(self, args):
super().__init__()
self.dropout = args.dropout
self.embedding_dim = args.encoder_embed_dim
self.pos_conv = nn.Conv1d(
self.embedding_dim,
self.embedding_dim,
kernel_size=args.conv_pos,
padding=args.conv_pos // 2,
groups=args.conv_pos_groups,
)
dropout = 0
std = math.sqrt((4 * (1.0 - dropout)) / (args.conv_pos * self.embedding_dim))
nn.init.normal_(self.pos_conv.weight, mean=0, std=std)
nn.init.constant_(self.pos_conv.bias, 0)
self.pos_conv = nn.utils.weight_norm(self.pos_conv, name="weight", dim=2)
self.pos_conv = nn.Sequential(self.pos_conv, SamePad(args.conv_pos), nn.GELU())
if hasattr(args, "relative_position_embedding"):
self.relative_position_embedding = args.relative_position_embedding
self.num_buckets = args.num_buckets
self.max_distance = args.max_distance
else:
self.relative_position_embedding = False
self.num_buckets = 0
self.max_distance = 0
self.layers = nn.ModuleList(
[
TransformerSentenceEncoderLayer(
embedding_dim=self.embedding_dim,
ffn_embedding_dim=args.encoder_ffn_embed_dim,
num_attention_heads=args.encoder_attention_heads,
dropout=self.dropout,
attention_dropout=args.attention_dropout,
activation_dropout=args.activation_dropout,
activation_fn=args.activation_fn,
layer_norm_first=args.layer_norm_first,
has_relative_attention_bias=(self.relative_position_embedding and i == 0),
num_buckets=self.num_buckets,
max_distance=self.max_distance,
gru_rel_pos=args.gru_rel_pos,
)
for i in range(args.encoder_layers)
]
)
self.layer_norm_first = args.layer_norm_first
self.layer_norm = LayerNorm(self.embedding_dim)
self.layerdrop = args.encoder_layerdrop
self.apply(init_bert_params)
def forward(self, x, padding_mask=None, streaming_mask=None, layer=None):
x, layer_results = self.extract_features(x, padding_mask, streaming_mask, layer)
if self.layer_norm_first and layer is None:
x = self.layer_norm(x)
return x, layer_results
def extract_features(self, x, padding_mask=None, streaming_mask=None, tgt_layer=None):
if padding_mask is not None:
x[padding_mask] = 0
x_conv = self.pos_conv(x.transpose(1, 2))
x_conv = x_conv.transpose(1, 2)
x = x + x_conv
if not self.layer_norm_first:
x = self.layer_norm(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
layer_results = []
z = None
if tgt_layer is not None:
layer_results.append((x, z))
r = None
pos_bias = None
for i, layer in enumerate(self.layers):
dropout_probability = np.random.random()
if not self.training or (dropout_probability > self.layerdrop):
x, z, pos_bias = layer(x, self_attn_padding_mask=padding_mask, need_weights=False,
self_attn_mask=streaming_mask, pos_bias=pos_bias)
if tgt_layer is not None:
layer_results.append((x, z))
if i == tgt_layer:
r = x
break
if r is not None:
x = r
# T x B x C -> B x T x C
x = x.transpose(0, 1)
return x, layer_results
class TransformerSentenceEncoderLayer(nn.Module):
"""
Implements a Transformer Encoder Layer used in BERT/XLM style pre-trained
models.
"""
def __init__(
self,
embedding_dim: float = 768,
ffn_embedding_dim: float = 3072,
num_attention_heads: float = 8,
dropout: float = 0.1,
attention_dropout: float = 0.1,
activation_dropout: float = 0.1,
activation_fn: str = "relu",
layer_norm_first: bool = False,
has_relative_attention_bias: bool = False,
num_buckets: int = 0,
max_distance: int = 0,
rescale_init: bool = False,
gru_rel_pos: bool = False,
) -> None:
super().__init__()
# Initialize parameters
self.embedding_dim = embedding_dim
self.dropout = dropout
self.activation_dropout = activation_dropout
# Initialize blocks
self.activation_name = activation_fn
self.activation_fn = get_activation_fn(activation_fn)
self.self_attn = MultiheadAttention(
self.embedding_dim,
num_attention_heads,
dropout=attention_dropout,
self_attention=True,
has_relative_attention_bias=has_relative_attention_bias,
num_buckets=num_buckets,
max_distance=max_distance,
rescale_init=rescale_init,
gru_rel_pos=gru_rel_pos,
)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(self.activation_dropout)
self.dropout3 = nn.Dropout(dropout)
self.layer_norm_first = layer_norm_first
# layer norm associated with the self attention layer
self.self_attn_layer_norm = LayerNorm(self.embedding_dim)
if self.activation_name == "glu":
self.fc1 = GLU_Linear(self.embedding_dim, ffn_embedding_dim, "swish")
else:
self.fc1 = nn.Linear(self.embedding_dim, ffn_embedding_dim)
self.fc2 = nn.Linear(ffn_embedding_dim, self.embedding_dim)
# layer norm associated with the position wise feed-forward NN
self.final_layer_norm = LayerNorm(self.embedding_dim)
def forward(
self,
x: torch.Tensor,
self_attn_mask: torch.Tensor = None,
self_attn_padding_mask: torch.Tensor = None,
need_weights: bool = False,
pos_bias=None
):
"""
LayerNorm is applied either before or after the self-attention/ffn
modules, similar to the original Transformer implementation.
"""
residual = x
if self.layer_norm_first:
x = self.self_attn_layer_norm(x)
x, attn, pos_bias = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
need_weights=False,
attn_mask=self_attn_mask,
position_bias=pos_bias
)
x = self.dropout1(x)
x = residual + x
residual = x
x = self.final_layer_norm(x)
if self.activation_name == "glu":
x = self.fc1(x)
else:
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
else:
x, attn, pos_bias = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=self_attn_padding_mask,
need_weights=need_weights,
attn_mask=self_attn_mask,
position_bias=pos_bias
)
x = self.dropout1(x)
x = residual + x
x = self.self_attn_layer_norm(x)
residual = x
if self.activation_name == "glu":
x = self.fc1(x)
else:
x = self.activation_fn(self.fc1(x))
x = self.dropout2(x)
x = self.fc2(x)
x = self.dropout3(x)
x = residual + x
x = self.final_layer_norm(x)
return x, attn, pos_bias
| EXA-1-master | exa/models/unilm-master/wavlm/WavLM.py |
import argparse
import os
import torch
from fairseq.data import (FairseqDataset, PrependTokenDataset,
TokenBlockDataset, TruncateDataset, data_utils, StripTokenDataset, ConcatDataset)
from fairseq.data.indexed_dataset import make_builder
from tqdm import tqdm
from transformers import AutoTokenizer
from infoxlm.data.tlm_dataset import TLMDataset
class IndexDataset(FairseqDataset):
def __init__(self, indices):
self.indices = indices
self._sizes = [len(i) for i in indices]
@property
def sizes(self):
return self._sizes
def size(self, index):
item = self.__getitem__(index)
return len(item)
def __getitem__(self, index):
item = self.indices[index]
item = torch.LongTensor(item)
return item
def __len__(self):
return len(self.indices)
def collater(self, samples):
raise NotImplementedError
def build_tokenizer(args):
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
return tokenizer
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, default="CZWin32768/xlm-align")
parser.add_argument("--input_src", type=str, default="")
parser.add_argument("--input_trg", type=str, default="")
parser.add_argument("--output", type=str, default="")
parser.add_argument("--max_pos", type=int, default=256)
args = parser.parse_args()
return args
def save_items(items, prefix, vocab_size):
bin_fn = "%s.bin" % prefix
idx_fn = "%s.idx" % prefix
builder = make_builder(bin_fn, "mmap", vocab_size=vocab_size)
print("builder: " + str(builder))
for item in items: builder.add_item(item)
builder.finalize(idx_fn)
def get_indices(input_fn, tokenizer):
indices = []
with open(input_fn) as fp:
for lid, line in tqdm(enumerate(fp)):
# DEBUG
# if lid > 500: break
line = line.strip()
indices.append(tokenizer.encode(line))
print("tokenize finished.")
return indices
def main(args):
tokenizer = build_tokenizer(args)
src_indices = get_indices(args.input_src, tokenizer)
trg_indices = get_indices(args.input_trg, tokenizer)
src_dataset = IndexDataset(src_indices)
trg_dataset = IndexDataset(trg_indices)
eos = tokenizer.sep_token_id
bos = tokenizer.cls_token_id
max_pos = args.max_pos
datasets = []
src_dataset = TruncateDataset(
StripTokenDataset(src_dataset, eos), max_pos - 2,)
trg_dataset = TruncateDataset(
StripTokenDataset(trg_dataset, eos), max_pos - 2,)
datasets.append(
TLMDataset(src_dataset, trg_dataset, bos, eos))
datasets.append(
TLMDataset(trg_dataset, src_dataset, bos, eos))
dataset = ConcatDataset(datasets)
print("| get all items ...")
items = [i for i in tqdm(dataset)]
print("| writing binary file ...")
prefix = os.path.join(args.output, "train.0")
save_items(items, prefix, len(tokenizer))
if __name__ == "__main__":
args = get_args()
main(args)
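# Example invocation (paths are placeholders):
#   python para2bin.py --model_name CZWin32768/xlm-align \
#       --input_src train.en --input_trg train.fr \
#       --output /path/to/output_dir --max_pos 256
# This tokenizes both sides with the HuggingFace tokenizer and writes
# train.0.bin / train.0.idx (fairseq mmap format) containing src->trg and
# trg->src TLM pairs.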
| EXA-1-master | exa/models/unilm-master/infoxlm/tools/para2bin.py |
import argparse
import os
import torch
from fairseq.data import (FairseqDataset, PrependTokenDataset,
TokenBlockDataset, TruncateDataset, data_utils, StripTokenDataset, ConcatDataset, PrependTokenDataset, AppendTokenDataset)
from fairseq.data.indexed_dataset import make_builder
from tqdm import tqdm
from transformers import AutoTokenizer
from infoxlm.data.tlm_dataset import TLMDataset
class IndexDataset(FairseqDataset):
def __init__(self, indices):
self.indices = indices
self._sizes = [len(i) for i in indices]
@property
def sizes(self):
return self._sizes
def size(self, index):
item = self.__getitem__(index)
return len(item)
def __getitem__(self, index):
item = self.indices[index]
item = torch.LongTensor(item)
return item
def __len__(self):
return len(self.indices)
def collater(self, samples):
raise NotImplementedError
def build_tokenizer(args):
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
return tokenizer
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, default="CZWin32768/xlm-align")
parser.add_argument("--input_src", type=str, default="")
parser.add_argument("--input_trg", type=str, default="")
parser.add_argument("--output", type=str, default="")
parser.add_argument("--max_pos", type=int, default=256)
args = parser.parse_args()
return args
def save_items(items, prefix, vocab_size):
bin_fn = "%s.bin" % prefix
idx_fn = "%s.idx" % prefix
builder = make_builder(bin_fn, "mmap", vocab_size=vocab_size)
print("builder: " + str(builder))
for item in items: builder.add_item(item)
builder.finalize(idx_fn)
def get_indices(input_fn, tokenizer):
indices = []
with open(input_fn) as fp:
for lid, line in tqdm(enumerate(fp)):
# DEBUG
# if lid > 500: break
line = line.strip()
indices.append(tokenizer.encode(line))
print("tokenize finished.")
return indices
def main(args):
tokenizer = build_tokenizer(args)
src_indices = get_indices(args.input_src, tokenizer)
trg_indices = get_indices(args.input_trg, tokenizer)
src_dataset = IndexDataset(src_indices)
trg_dataset = IndexDataset(trg_indices)
eos = tokenizer.sep_token_id
bos = tokenizer.cls_token_id
max_pos = args.max_pos
datasets = []
src_dataset = TruncateDataset(
StripTokenDataset(src_dataset, eos), max_pos - 2,)
trg_dataset = TruncateDataset(
StripTokenDataset(trg_dataset, eos), max_pos - 2,)
src_dataset = PrependTokenDataset(src_dataset, bos)
trg_dataset = PrependTokenDataset(trg_dataset, bos)
src_dataset = AppendTokenDataset(src_dataset, eos)
trg_dataset = AppendTokenDataset(trg_dataset, eos)
print("| get all items ...")
# items = [i for i in tqdm(dataset)]
items = []
for t1, t2 in tqdm(zip(src_dataset, trg_dataset)):
items.append(t1)
items.append(t2)
print("| writing binary file ...")
prefix = os.path.join(args.output, "train.0")
save_items(items, prefix, len(tokenizer))
if __name__ == "__main__":
args = get_args()
main(args)
| EXA-1-master | exa/models/unilm-master/infoxlm/tools/para2bin4xlco.py |
import argparse
import os
import torch
from fairseq.data import (FairseqDataset, PrependTokenDataset,
TokenBlockDataset, TruncateDataset, data_utils)
from fairseq.data.indexed_dataset import make_builder
from tqdm import tqdm
from transformers import AutoTokenizer
class IndexDataset(FairseqDataset):
def __init__(self, indices):
self.indices = indices
self._sizes = [len(i) for i in indices]
@property
def sizes(self):
return self._sizes
def size(self, index):
item = self.__getitem__(index)
return len(item)
def __getitem__(self, index):
item = self.indices[index]
item = torch.LongTensor(item)
return item
def __len__(self):
return len(self.indices)
def collater(self, samples):
raise NotImplementedError
def build_tokenizer(args):
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
return tokenizer
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--model_name", type=str, default="CZWin32768/xlm-align")
parser.add_argument("--input", type=str, default="")
parser.add_argument("--output", type=str, default="")
parser.add_argument('--sample-break-mode', default='complete',
choices=['none', 'complete', 'complete_doc', 'eos'],
help='If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
'of sentence, but may include multiple sentences per sample. '
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.')
parser.add_argument('--tokens-per-sample', default=510, type=int,
help='max number of total tokens over all segments per sample')
parser.add_argument('--dataset_impl', default="mmap", type=str)
args = parser.parse_args()
return args
def save_items(items, prefix, vocab_size):
bin_fn = "%s.bin" % prefix
idx_fn = "%s.idx" % prefix
builder = make_builder(bin_fn, "mmap", vocab_size=vocab_size)
print("builder: " + str(builder))
for item in items: builder.add_item(item)
builder.finalize(idx_fn)
def main(args):
tokenizer = build_tokenizer(args)
indices = []
with open(args.input) as fp:
for line in tqdm(fp):
line = line.strip()
indices.append(tokenizer.encode(line))
print("tokenize finished.")
for i in range(5):
print("example[%d]:" % i)
input_ids = indices[i]
print(input_ids)
tokens = tokenizer.convert_ids_to_tokens(input_ids)
print(tokens)
dataset = IndexDataset(indices)
dataset = TruncateDataset(dataset, args.tokens_per_sample - 1)
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
args.tokens_per_sample - 1, # one less for <s>
pad=tokenizer.pad_token_id,
eos=tokenizer.sep_token_id,
break_mode=args.sample_break_mode,
)
print('| loaded {} blocks from: {}'.format(len(dataset), args.input), flush=True)
dataset = PrependTokenDataset(dataset, tokenizer.cls_token_id)
print("| get all items ...")
items = [i for i in tqdm(dataset)]
print("| writing binary file ...")
prefix = os.path.join(args.output, "train.0")
save_items(items, prefix, len(tokenizer))
if __name__ == "__main__":
args = get_args()
main(args)
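# Example invocation (paths are placeholders):
#   python txt2bin.py --model_name CZWin32768/xlm-align \
#       --input corpus.txt --output /path/to/output_dir \
#       --sample-break-mode complete --tokens-per-sample 510
# Each input line is tokenized, packed into token blocks, prepended with the
# CLS token, and written as train.0.bin / train.0.idx in the fairseq mmap format.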
| EXA-1-master | exa/models/unilm-master/infoxlm/tools/txt2bin.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data pre-processing: build vocabularies and binarize training data.
"""
from collections import Counter
from itertools import zip_longest
from fairseq import options, tasks, utils
from fairseq.data import indexed_dataset
from fairseq.binarizer import Binarizer
from multiprocessing import Pool
import os
import shutil
def main(args):
utils.import_user_module(args)
print(args)
os.makedirs(args.destdir, exist_ok=True)
target = not args.only_source
task = tasks.get_task(args.task)
def train_path(lang):
return "{}{}".format(args.trainpref, ("." + lang) if lang else "")
def file_name(prefix, lang):
fname = prefix
if lang is not None:
fname += ".{lang}".format(lang=lang)
return fname
def dest_path(prefix, lang):
return os.path.join(args.destdir, file_name(prefix, lang))
def dict_path(lang):
return dest_path("dict", lang) + ".txt"
def build_dictionary(filenames, src=False, tgt=False):
assert src ^ tgt
return task.build_dictionary(
filenames,
workers=args.workers,
threshold=args.thresholdsrc if src else args.thresholdtgt,
nwords=args.nwordssrc if src else args.nwordstgt,
padding_factor=args.padding_factor,
)
if not args.srcdict and os.path.exists(dict_path(args.source_lang)):
raise FileExistsError(dict_path(args.source_lang))
if target and not args.tgtdict and os.path.exists(dict_path(args.target_lang)):
raise FileExistsError(dict_path(args.target_lang))
if args.joined_dictionary:
assert not args.srcdict or not args.tgtdict, \
"cannot use both --srcdict and --tgtdict with --joined-dictionary"
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
elif args.tgtdict:
src_dict = task.load_dictionary(args.tgtdict)
else:
assert args.trainpref, "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary(
{train_path(lang) for lang in [args.source_lang, args.target_lang]}, src=True
)
tgt_dict = src_dict
else:
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
else:
assert args.trainpref, "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary([train_path(args.source_lang)], src=True)
if target:
if args.tgtdict:
tgt_dict = task.load_dictionary(args.tgtdict)
else:
assert args.trainpref, "--trainpref must be set if --tgtdict is not specified"
tgt_dict = build_dictionary([train_path(args.target_lang)], tgt=True)
else:
tgt_dict = None
src_dict.save(dict_path(args.source_lang))
if target and tgt_dict is not None:
tgt_dict.save(dict_path(args.target_lang))
def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers):
print("| [{}] Dictionary: {} types".format(lang, len(vocab) - 1))
n_seq_tok = [0, 0]
replaced = Counter()
def merge_result(worker_result):
replaced.update(worker_result["replaced"])
n_seq_tok[0] += worker_result["nseq"]
n_seq_tok[1] += worker_result["ntok"]
input_file = "{}{}".format(
input_prefix, ("." + lang) if lang is not None else ""
)
offsets = Binarizer.find_offsets(input_file, num_workers)
pool = None
if num_workers > 1:
pool = Pool(processes=num_workers - 1)
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
pool.apply_async(
binarize,
(
args,
input_file,
vocab,
prefix,
lang,
offsets[worker_id],
offsets[worker_id + 1]
),
callback=merge_result
)
pool.close()
ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, lang, "bin"),
impl=args.dataset_impl, vocab_size=len(vocab))
merge_result(
Binarizer.binarize(
input_file, vocab, lambda t: ds.add_item(t),
offset=0, end=offsets[1]
)
)
if num_workers > 1:
pool.join()
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, lang)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
print(
"| [{}] {}: {} sents, {} tokens, {:.3}% replaced by {}".format(
lang,
input_file,
n_seq_tok[0],
n_seq_tok[1],
100 * sum(replaced.values()) / n_seq_tok[1],
vocab.unk_word,
)
)
def make_binary_alignment_dataset(input_prefix, output_prefix, num_workers):
nseq = [0]
def merge_result(worker_result):
nseq[0] += worker_result['nseq']
input_file = input_prefix
offsets = Binarizer.find_offsets(input_file, num_workers)
pool = None
if num_workers > 1:
pool = Pool(processes=num_workers - 1)
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
pool.apply_async(
binarize_alignments,
(
args,
input_file,
utils.parse_alignment,
prefix,
offsets[worker_id],
offsets[worker_id + 1]
),
callback=merge_result
)
pool.close()
ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, None, "bin"),
impl=args.dataset_impl)
merge_result(
Binarizer.binarize_alignments(
input_file, utils.parse_alignment, lambda t: ds.add_item(t),
offset=0, end=offsets[1]
)
)
if num_workers > 1:
pool.join()
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, None)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, None, "idx"))
print(
"| [alignments] {}: parsed {} alignments".format(
input_file,
nseq[0]
)
)
def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1):
if args.dataset_impl == "raw":
# Copy original text file to destination folder
output_text_file = dest_path(
output_prefix + ".{}-{}".format(args.source_lang, args.target_lang),
lang,
)
shutil.copyfile(file_name(input_prefix, lang), output_text_file)
else:
make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers)
def make_all(lang, vocab):
if args.trainpref:
make_dataset(vocab, args.trainpref, "train", lang, num_workers=args.workers)
if args.validpref:
for k, validpref in enumerate(args.validpref.split(",")):
outprefix = "valid{}".format(k) if k > 0 else "valid"
make_dataset(vocab, validpref, outprefix, lang, num_workers=args.workers)
if args.testpref:
for k, testpref in enumerate(args.testpref.split(",")):
outprefix = "test{}".format(k) if k > 0 else "test"
make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers)
def make_all_alignments():
if args.trainpref and os.path.exists(args.trainpref + "." + args.align_suffix):
make_binary_alignment_dataset(args.trainpref + "." + args.align_suffix, "train.align", num_workers=args.workers)
if args.validpref and os.path.exists(args.validpref + "." + args.align_suffix):
make_binary_alignment_dataset(args.validpref + "." + args.align_suffix, "valid.align", num_workers=args.workers)
if args.testpref and os.path.exists(args.testpref + "." + args.align_suffix):
make_binary_alignment_dataset(args.testpref + "." + args.align_suffix, "test.align", num_workers=args.workers)
make_all(args.source_lang, src_dict)
if target:
make_all(args.target_lang, tgt_dict)
if args.align_suffix:
make_all_alignments()
print("| Wrote preprocessed data to {}".format(args.destdir))
if args.alignfile:
assert args.trainpref, "--trainpref must be set if --alignfile is specified"
src_file_name = train_path(args.source_lang)
tgt_file_name = train_path(args.target_lang)
freq_map = {}
with open(args.alignfile, "r", encoding='utf-8') as align_file:
with open(src_file_name, "r", encoding='utf-8') as src_file:
with open(tgt_file_name, "r", encoding='utf-8') as tgt_file:
for a, s, t in zip_longest(align_file, src_file, tgt_file):
si = src_dict.encode_line(s, add_if_not_exist=False)
ti = tgt_dict.encode_line(t, add_if_not_exist=False)
ai = list(map(lambda x: tuple(x.split("-")), a.split()))
for sai, tai in ai:
srcidx = si[int(sai)]
tgtidx = ti[int(tai)]
if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():
assert srcidx != src_dict.pad()
assert srcidx != src_dict.eos()
assert tgtidx != tgt_dict.pad()
assert tgtidx != tgt_dict.eos()
if srcidx not in freq_map:
freq_map[srcidx] = {}
if tgtidx not in freq_map[srcidx]:
freq_map[srcidx][tgtidx] = 1
else:
freq_map[srcidx][tgtidx] += 1
align_dict = {}
for srcidx in freq_map.keys():
align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
with open(
os.path.join(
args.destdir,
"alignment.{}-{}.txt".format(args.source_lang, args.target_lang),
),
"w", encoding='utf-8'
) as f:
for k, v in align_dict.items():
print("{} {}".format(src_dict[k], tgt_dict[v]), file=f)
def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=True):
ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, lang, "bin"),
impl=args.dataset_impl, vocab_size=len(vocab))
def consumer(tensor):
ds.add_item(tensor)
res = Binarizer.binarize(filename, vocab, consumer, append_eos=append_eos,
offset=offset, end=end)
ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
return res
def binarize_alignments(args, filename, parse_alignment, output_prefix, offset, end):
ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, None, "bin"),
impl=args.dataset_impl, vocab_size=None)
def consumer(tensor):
ds.add_item(tensor)
res = Binarizer.binarize_alignments(filename, parse_alignment, consumer, offset=offset,
end=end)
ds.finalize(dataset_dest_file(args, output_prefix, None, "idx"))
return res
def dataset_dest_prefix(args, output_prefix, lang):
base = "{}/{}".format(args.destdir, output_prefix)
if lang is not None:
lang_part = ".{}-{}.{}".format(args.source_lang, args.target_lang, lang)
elif args.only_source:
lang_part = ""
else:
lang_part = ".{}-{}".format(args.source_lang, args.target_lang)
return "{}{}".format(base, lang_part)
def dataset_dest_file(args, output_prefix, lang, extension):
base = dataset_dest_prefix(args, output_prefix, lang)
return "{}.{}".format(base, extension)
def get_offsets(input_file, num_workers):
return Binarizer.find_offsets(input_file, num_workers)
def cli_main():
parser = options.get_preprocessing_parser()
args = parser.parse_args()
main(args)
if __name__ == "__main__":
cli_main()
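# A typical binarization run looks like the following (a sketch; data paths are
# placeholders):
#   python preprocess.py --source-lang de --target-lang en \
#       --trainpref data/train --validpref data/valid --testpref data/test \
#       --destdir data-bin/iwslt14.de-en --workers 8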
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/preprocess.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate pre-processed data with a trained model.
"""
import torch
from fairseq import bleu, checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.meters import StopwatchMeter, TimeMeter
def main(args):
assert args.path is not None, '--path required for generation!'
assert not args.sampling or args.nbest == args.beam, \
'--sampling requires --nbest to be equal to --beam'
assert args.replace_unk is None or args.raw_text, \
'--replace-unk requires a raw text dataset (--raw-text)'
utils.import_user_module(args)
if args.max_tokens is None and args.max_sentences is None:
args.max_tokens = 12000
print(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Load dataset splits
task = tasks.setup_task(args)
task.load_dataset(args.gen_subset)
# Set dictionaries
try:
src_dict = getattr(task, 'source_dictionary', None)
except NotImplementedError:
src_dict = None
tgt_dict = task.target_dictionary
# Load ensemble
print('| loading model(s) from {}'.format(args.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
args.path.split(':'),
arg_overrides=eval(args.model_overrides),
task=task,
)
# Optimize ensemble for generation
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(args.gen_subset),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
# Initialize generator
gen_timer = StopwatchMeter()
generator = task.build_generator(args)
# Generate and compute BLEU score
if args.sacrebleu:
scorer = bleu.SacrebleuScorer()
else:
scorer = bleu.Scorer(tgt_dict.pad(), tgt_dict.eos(), tgt_dict.unk())
num_sentences = 0
has_target = True
with progress_bar.build_progress_bar(args, itr) as t:
wps_meter = TimeMeter()
for sample in t:
sample = utils.move_to_cuda(sample) if use_cuda else sample
if 'net_input' not in sample:
continue
prefix_tokens = None
if args.prefix_size > 0:
prefix_tokens = sample['target'][:, :args.prefix_size]
gen_timer.start()
hypos = task.inference_step(generator, models, sample, prefix_tokens)
num_generated_tokens = sum(len(h[0]['tokens']) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample['id'].tolist()):
has_target = sample['target'] is not None
# Remove padding
src_tokens = utils.strip_pad(sample['net_input']['src_tokens'][i, :], tgt_dict.pad())
target_tokens = None
if has_target:
target_tokens = utils.strip_pad(sample['target'][i, :], tgt_dict.pad()).int().cpu()
# Either retrieve the original sentences or regenerate them from tokens.
if align_dict is not None:
src_str = task.dataset(args.gen_subset).src.get_original_text(sample_id)
target_str = task.dataset(args.gen_subset).tgt.get_original_text(sample_id)
else:
if src_dict is not None:
src_str = src_dict.string(src_tokens, args.remove_bpe)
else:
src_str = ""
if has_target:
target_str = tgt_dict.string(target_tokens, args.remove_bpe, escape_unk=True)
if not args.quiet:
if src_dict is not None:
print('S-{}\t{}'.format(sample_id, src_str))
if has_target:
print('T-{}\t{}'.format(sample_id, target_str))
# Process top predictions
for j, hypo in enumerate(hypos[i][:args.nbest]):
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'],
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=args.remove_bpe,
)
if not args.quiet:
print('H-{}\t{}\t{}'.format(sample_id, hypo['score'], hypo_str))
print('P-{}\t{}'.format(
sample_id,
' '.join(map(
lambda x: '{:.4f}'.format(x),
hypo['positional_scores'].tolist(),
))
))
if args.print_alignment:
print('A-{}\t{}'.format(
sample_id,
' '.join(['{}-{}'.format(src_idx, tgt_idx) for src_idx, tgt_idx in alignment])
))
if args.print_step:
print('I-{}\t{}'.format(sample_id, hypo['steps']))
if getattr(args, 'retain_iter_history', False):
print("\n".join([
'E-{}_{}\t{}'.format(
sample_id, step,
utils.post_process_prediction(
h['tokens'].int().cpu(),
src_str, None, None, tgt_dict, None)[1])
for step, h in enumerate(hypo['history'])]))
# Score only the top hypothesis
if has_target and j == 0:
if align_dict is not None or args.remove_bpe is not None:
# Convert back to tokens for evaluation with unk replacement and/or without BPE
target_tokens = tgt_dict.encode_line(target_str, add_if_not_exist=True)
if hasattr(scorer, 'add_string'):
scorer.add_string(target_str, hypo_str)
else:
scorer.add(target_tokens, hypo_tokens)
wps_meter.update(num_generated_tokens)
t.log({'wps': round(wps_meter.avg)})
num_sentences += sample['nsentences']
print('| Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)'.format(
num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1. / gen_timer.avg))
if has_target:
print('| Generate {} with beam={}: {}'.format(args.gen_subset, args.beam, scorer.result_string()))
return scorer
def cli_main():
parser = options.get_generation_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == '__main__':
cli_main()
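# Example invocation (a sketch; paths are placeholders and assume a binarized
# dataset plus a trained checkpoint):
#   python generate.py data-bin/iwslt14.de-en \
#       --path checkpoints/checkpoint_best.pt \
#       --batch-size 128 --beam 5 --remove-bpe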
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/generate.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq import checkpoint_utils, options, progress_bar, utils
def main(args, override_args=None):
utils.import_user_module(args)
use_fp16 = args.fp16
use_cuda = torch.cuda.is_available() and not args.cpu
if override_args is not None:
overrides = vars(override_args)
overrides.update(eval(getattr(override_args, 'model_overrides', '{}')))
else:
overrides = None
# Load ensemble
print('| loading model(s) from {}'.format(args.path))
models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
[args.path],
arg_overrides=overrides,
)
model = models[0]
# Move models to GPU
for model in models:
if use_fp16:
model.half()
if use_cuda:
model.cuda()
# Print args
print(model_args)
# Build criterion
criterion = task.build_criterion(model_args)
criterion.eval()
# Load valid dataset (we load training data below, based on the latest checkpoint)
for subset in args.valid_subset.split(','):
try:
task.load_dataset(subset, combine=False, epoch=0)
dataset = task.dataset(subset)
except KeyError:
raise Exception('Cannot find dataset: ' + subset)
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=dataset,
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(
task.max_positions(),
*[m.max_positions() for m in models],
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
seed=args.seed,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
progress = progress_bar.build_progress_bar(
args, itr,
prefix='valid on \'{}\' subset'.format(subset),
no_progress_bar='simple'
)
log_outputs = []
for i, sample in enumerate(progress):
sample = utils.move_to_cuda(sample) if use_cuda else sample
_loss, _sample_size, log_output = task.valid_step(sample, model, criterion)
progress.log(log_output, step=i)
log_outputs.append(log_output)
log_output = task.aggregate_logging_outputs(log_outputs, criterion)
progress.print(log_output, tag=subset, step=i)
def cli_main():
parser = options.get_validation_parser()
args = options.parse_args_and_arch(parser)
# only override args that are explicitly given on the command line
override_parser = options.get_validation_parser()
override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True)
main(args, override_args)
if __name__ == '__main__':
cli_main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/validate.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from setuptools import setup, find_packages, Extension
import sys
if sys.version_info < (3, 5):
sys.exit('Sorry, Python >=3.5 is required for fairseq.')
with open('README.md') as f:
readme = f.read()
if sys.platform == 'darwin':
extra_compile_args = ['-stdlib=libc++', '-O3']
else:
extra_compile_args = ['-std=c++11', '-O3']
class NumpyExtension(Extension):
"""Source: https://stackoverflow.com/a/54128391"""
def __init__(self, *args, **kwargs):
self.__include_dirs = []
super().__init__(*args, **kwargs)
@property
def include_dirs(self):
import numpy
return self.__include_dirs + [numpy.get_include()]
@include_dirs.setter
def include_dirs(self, dirs):
self.__include_dirs = dirs
extensions = [
Extension(
'fairseq.libbleu',
sources=[
'fairseq/clib/libbleu/libbleu.cpp',
'fairseq/clib/libbleu/module.cpp',
],
extra_compile_args=extra_compile_args,
),
NumpyExtension(
'fairseq.data.data_utils_fast',
sources=['fairseq/data/data_utils_fast.pyx'],
language='c++',
extra_compile_args=extra_compile_args,
),
NumpyExtension(
'fairseq.data.token_block_utils_fast',
sources=['fairseq/data/token_block_utils_fast.pyx'],
language='c++',
extra_compile_args=extra_compile_args,
),
]
cmdclass = {}
try:
# torch is not available when generating docs
from torch.utils import cpp_extension
extensions.extend([
cpp_extension.CppExtension(
'fairseq.libnat',
sources=[
'fairseq/clib/libnat/edit_dist.cpp',
],
),
])
cmdclass['build_ext'] = cpp_extension.BuildExtension
except ImportError:
pass
if 'READTHEDOCS' in os.environ:
# don't build extensions when generating docs
extensions = []
if 'build_ext' in cmdclass:
del cmdclass['build_ext']
# use CPU build of PyTorch
dependency_links = [
'https://download.pytorch.org/whl/cpu/torch-1.3.0%2Bcpu-cp36-cp36m-linux_x86_64.whl'
]
else:
dependency_links = []
if 'clean' in sys.argv[1:]:
# Source: https://bit.ly/2NLVsgE
print("deleting Cython files...")
import subprocess
subprocess.run(['rm -f fairseq/*.so fairseq/**/*.so'], shell=True)
if 'test' in sys.argv[1:]:
try:
import fairseq.data.token_block_utils_fast
except (ImportError, ModuleNotFoundError):
raise Exception(
'Please install Cython components with `python setup.py build_ext --inplace`'
'before running unit tests.'
)
setup(
name='fairseq',
version='0.9.0',
description='Facebook AI Research Sequence-to-Sequence Toolkit',
url='https://github.com/pytorch/fairseq',
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
long_description=readme,
long_description_content_type='text/markdown',
setup_requires=[
'cython',
'numpy',
'setuptools>=18.0',
],
install_requires=[
'cffi',
'cython',
'numpy',
'regex',
'sacrebleu',
'torch',
'tqdm',
],
dependency_links=dependency_links,
packages=find_packages(exclude=['scripts', 'tests']),
ext_modules=extensions,
test_suite='tests',
entry_points={
'console_scripts': [
'fairseq-eval-lm = fairseq_cli.eval_lm:cli_main',
'fairseq-generate = fairseq_cli.generate:cli_main',
'fairseq-interactive = fairseq_cli.interactive:cli_main',
'fairseq-preprocess = fairseq_cli.preprocess:cli_main',
'fairseq-score = fairseq_cli.score:main',
'fairseq-train = fairseq_cli.train:cli_main',
'fairseq-validate = fairseq_cli.validate:cli_main',
],
},
cmdclass=cmdclass,
zip_safe=False,
)
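# Typical developer install (run from the repository root):
#   pip install --editable .
# If the Cython extensions need to be rebuilt in place (e.g. before running the
# unit tests), use:
#   python setup.py build_ext --inplace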
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/setup.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate raw text with a trained model. Batches data on-the-fly.
"""
from collections import namedtuple
import fileinput
import torch
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.data import encoders
Batch = namedtuple('Batch', 'ids src_tokens src_lengths')
Translation = namedtuple('Translation', 'src_str hypos pos_scores alignments')
def buffered_read(input, buffer_size):
buffer = []
with fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")) as h:
for src_str in h:
buffer.append(src_str.strip())
if len(buffer) >= buffer_size:
yield buffer
buffer = []
if len(buffer) > 0:
yield buffer
def make_batches(lines, args, task, max_positions, encode_fn):
tokens = [
task.source_dictionary.encode_line(
encode_fn(src_str), add_if_not_exist=False
).long()
for src_str in lines
]
lengths = torch.LongTensor([t.numel() for t in tokens])
itr = task.get_batch_iterator(
dataset=task.build_dataset_for_inference(tokens, lengths),
max_tokens=args.max_tokens,
max_sentences=args.max_sentences,
max_positions=max_positions,
).next_epoch_itr(shuffle=False)
for batch in itr:
yield Batch(
ids=batch['id'],
src_tokens=batch['net_input']['src_tokens'], src_lengths=batch['net_input']['src_lengths'],
)
def main(args):
utils.import_user_module(args)
if args.buffer_size < 1:
args.buffer_size = 1
if args.max_tokens is None and args.max_sentences is None:
args.max_sentences = 1
assert not args.sampling or args.nbest == args.beam, \
'--sampling requires --nbest to be equal to --beam'
assert not args.max_sentences or args.max_sentences <= args.buffer_size, \
'--max-sentences/--batch-size cannot be larger than --buffer-size'
print(args)
use_cuda = torch.cuda.is_available() and not args.cpu
# Setup task, e.g., translation
task = tasks.setup_task(args)
# Load ensemble
print('| loading model(s) from {}'.format(args.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
args.path.split(':'),
arg_overrides=eval(args.model_overrides),
task=task,
)
# Set dictionaries
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
# Optimize ensemble for generation
for model in models:
model.make_generation_fast_(
beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
need_attn=args.print_alignment,
)
if args.fp16:
model.half()
if use_cuda:
model.cuda()
# Initialize generator
generator = task.build_generator(args)
# Handle tokenization and BPE
tokenizer = encoders.build_tokenizer(args)
bpe = encoders.build_bpe(args)
def encode_fn(x):
if tokenizer is not None:
x = tokenizer.encode(x)
if bpe is not None:
x = bpe.encode(x)
return x
def decode_fn(x):
if bpe is not None:
x = bpe.decode(x)
if tokenizer is not None:
x = tokenizer.decode(x)
return x
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(args.replace_unk)
max_positions = utils.resolve_max_positions(
task.max_positions(),
*[model.max_positions() for model in models]
)
if args.buffer_size > 1:
print('| Sentence buffer size:', args.buffer_size)
print('| Type the input sentence and press return:')
start_id = 0
for inputs in buffered_read(args.input, args.buffer_size):
results = []
for batch in make_batches(inputs, args, task, max_positions, encode_fn):
src_tokens = batch.src_tokens
src_lengths = batch.src_lengths
if use_cuda:
src_tokens = src_tokens.cuda()
src_lengths = src_lengths.cuda()
sample = {
'net_input': {
'src_tokens': src_tokens,
'src_lengths': src_lengths,
},
}
translations = task.inference_step(generator, models, sample)
for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)):
src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad())
results.append((start_id + id, src_tokens_i, hypos))
# sort output to match input order
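        # Output line prefixes: S- source, H- hypothesis (score, text),
        # P- per-token positional scores, A- source-target alignment pairs.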
for id, src_tokens, hypos in sorted(results, key=lambda x: x[0]):
if src_dict is not None:
src_str = src_dict.string(src_tokens, args.remove_bpe)
print('S-{}\t{}'.format(id, src_str))
# Process top predictions
for hypo in hypos[:min(len(hypos), args.nbest)]:
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo['tokens'].int().cpu(),
src_str=src_str,
alignment=hypo['alignment'],
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=args.remove_bpe,
)
hypo_str = decode_fn(hypo_str)
print('H-{}\t{}\t{}'.format(id, hypo['score'], hypo_str))
print('P-{}\t{}'.format(
id,
' '.join(map(lambda x: '{:.4f}'.format(x), hypo['positional_scores'].tolist()))
))
if args.print_alignment:
alignment_str = " ".join(["{}-{}".format(src, tgt) for src, tgt in alignment])
print('A-{}\t{}'.format(
id,
alignment_str
))
# update running id counter
start_id += len(inputs)
def cli_main():
parser = options.get_generation_parser(interactive=True)
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == '__main__':
cli_main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/interactive.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a new model on one or across multiple GPUs.
"""
import collections
import math
import random
import numpy as np
import torch
from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils
from fairseq.data import iterators
from fairseq.trainer import Trainer
from fairseq.meters import AverageMeter, StopwatchMeter
def main(args, init_distributed=False):
utils.import_user_module(args)
assert args.max_tokens is not None or args.max_sentences is not None, \
'Must specify batch size either with --max-tokens or --max-sentences'
# Initialize CUDA and distributed training
if torch.cuda.is_available() and not args.cpu:
torch.cuda.set_device(args.device_id)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if init_distributed:
args.distributed_rank = distributed_utils.distributed_init(args)
if distributed_utils.is_master(args):
checkpoint_utils.verify_checkpoint_directory(args.save_dir)
# Print args
print(args)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(args)
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in args.valid_subset.split(','):
task.load_dataset(valid_sub_split, combine=False, epoch=0)
# Build model and criterion
model = task.build_model(args)
criterion = task.build_criterion(args)
print(model)
print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
print('| num. model params: {} (num. trained: {})'.format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
))
# Build trainer
trainer = Trainer(args, task, model, criterion)
print('| training on {} GPUs'.format(args.distributed_world_size))
print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(
args.max_tokens,
args.max_sentences,
))
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)
# Prepare train
task.prepare_train(model, criterion)
# Train until the learning rate gets too small
max_epoch = args.max_epoch or math.inf
max_update = args.max_update or math.inf
lr = trainer.get_lr()
train_meter = StopwatchMeter()
train_meter.start()
valid_subsets = args.valid_subset.split(',')
while (
lr > args.min_lr
and (epoch_itr.epoch < max_epoch or (epoch_itr.epoch == max_epoch
and epoch_itr._next_epoch_itr is not None))
and trainer.get_num_updates() < max_update
):
# train for one epoch
train(args, trainer, task, epoch_itr)
if not args.disable_validation and epoch_itr.epoch % args.validate_interval == 0:
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
else:
valid_losses = [None]
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
# save checkpoint
if epoch_itr.epoch % args.save_interval == 0:
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
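        # Reload the training data for the next epoch when the data argument is
        # sharded (':'-separated paths) or args.reload_dataset_per_epoch is set.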
reload_dataset = ':' in getattr(args, 'data', '')
reload_dataset = reload_dataset or args.reload_dataset_per_epoch
# sharded data: get train iterator for next epoch
epoch_itr = trainer.get_train_iterator(epoch_itr.epoch, load_dataset=reload_dataset)
train_meter.stop()
print('| done training in {:.1f} seconds'.format(train_meter.sum))
def train(args, trainer, task, epoch_itr):
"""Train the model for one epoch."""
# Update parameters every N batches
print("| Start train.train ..." , flush=True)
update_freq = args.update_freq[epoch_itr.epoch - 1] \
if epoch_itr.epoch <= len(args.update_freq) else args.update_freq[-1]
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=args.fix_batches_to_gpus,
shuffle=(epoch_itr.epoch >= args.curriculum),
)
print("| Itr init (1) ...", flush=True)
itr = iterators.GroupedIterator(itr, update_freq)
progress = progress_bar.build_progress_bar(
args, itr, epoch_itr.epoch, no_progress_bar='simple',
)
print("| Itr init (2) ...", flush=True)
extra_meters = collections.defaultdict(lambda: AverageMeter())
valid_subsets = args.valid_subset.split(',')
max_update = args.max_update or math.inf
# ##################### DEBUG #####################
# debug_samples = []
# print("Fetch debug examples ...")
# for i in range(1000):
# debug_samples.append(next(itr))
# progress = progress_bar.build_progress_bar(
# args, iter(debug_samples), epoch_itr.epoch, no_progress_bar='simple',
# )
# ##################### DEBUG #####################
for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):
log_output = trainer.train_step(samples)
if log_output is None:
continue
# log mid-epoch stats
stats = get_training_stats(trainer)
for k, v in log_output.items():
if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
continue # these are already logged above
if 'loss' in k or k == 'accuracy':
extra_meters[k].update(v, log_output['sample_size'])
else:
extra_meters[k].update(v)
stats[k] = extra_meters[k].val
progress.log(stats, tag='train', step=stats['num_updates'])
# ignore the first mini-batch in words-per-second and updates-per-second calculation
if i == 0:
trainer.get_meter('wps').reset()
trainer.get_meter('ups').reset()
num_updates = trainer.get_num_updates()
if (
not args.disable_validation
and args.save_interval_updates > 0
and num_updates % args.save_interval_updates == 0
and num_updates > 0
):
valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
elif (args.save_interval_updates > 0
and num_updates % args.save_interval_updates == 0
and num_updates > 0):
checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, None)
if num_updates >= max_update:
break
# log end-of-epoch stats
stats = get_training_stats(trainer)
for k, meter in extra_meters.items():
stats[k] = meter.val
progress.print(stats, tag='train', step=stats['num_updates'])
# reset training meters
for k in [
'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip',
]:
meter = trainer.get_meter(k)
if meter is not None:
meter.reset()
def get_training_stats(trainer):
stats = collections.OrderedDict()
stats['loss'] = trainer.get_meter('train_loss')
if trainer.get_meter('train_nll_loss').count > 0:
nll_loss = trainer.get_meter('train_nll_loss')
stats['nll_loss'] = nll_loss
else:
nll_loss = trainer.get_meter('train_loss')
stats['ppl'] = utils.get_perplexity(nll_loss.avg)
stats['wps'] = trainer.get_meter('wps')
stats['ups'] = trainer.get_meter('ups')
stats['wpb'] = trainer.get_meter('wpb')
stats['bsz'] = trainer.get_meter('bsz')
stats['num_updates'] = trainer.get_num_updates()
stats['lr'] = trainer.get_lr()
stats['gnorm'] = trainer.get_meter('gnorm')
stats['clip'] = trainer.get_meter('clip')
stats['oom'] = trainer.get_meter('oom')
if trainer.get_meter('loss_scale') is not None:
stats['loss_scale'] = trainer.get_meter('loss_scale')
stats['wall'] = round(trainer.get_meter('wall').elapsed_time)
stats['train_wall'] = trainer.get_meter('train_wall')
return stats
def validate(args, trainer, task, epoch_itr, subsets):
"""Evaluate the model on the validation set(s) and return the losses."""
if args.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(args.fixed_validation_seed)
valid_losses = []
for subset in subsets:
# Initialize data iterator
itr = task.get_batch_iterator(
dataset=task.dataset(subset),
max_tokens=args.max_tokens_valid,
max_sentences=args.max_sentences_valid,
max_positions=utils.resolve_max_positions(
task.max_positions(),
trainer.get_model().max_positions(),
),
ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=args.required_batch_size_multiple,
seed=args.seed,
num_shards=args.distributed_world_size,
shard_id=args.distributed_rank,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
progress = progress_bar.build_progress_bar(
args, itr, epoch_itr.epoch,
prefix='valid on \'{}\' subset'.format(subset),
no_progress_bar='simple'
)
# reset validation loss meters
for k in ['valid_loss', 'valid_nll_loss']:
meter = trainer.get_meter(k)
if meter is not None:
meter.reset()
extra_meters = collections.defaultdict(lambda: AverageMeter())
for sample in progress:
log_output = trainer.valid_step(sample)
for k, v in log_output.items():
if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
continue
extra_meters[k].update(v)
# log validation stats
stats = get_valid_stats(trainer, args, extra_meters)
for k, meter in extra_meters.items():
stats[k] = meter.avg
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(
stats[args.best_checkpoint_metric].avg
if args.best_checkpoint_metric == 'loss'
else stats[args.best_checkpoint_metric]
)
return valid_losses
def get_valid_stats(trainer, args, extra_meters=None):
stats = collections.OrderedDict()
stats['loss'] = trainer.get_meter('valid_loss')
if trainer.get_meter('valid_nll_loss').count > 0:
nll_loss = trainer.get_meter('valid_nll_loss')
stats['nll_loss'] = nll_loss
else:
nll_loss = stats['loss']
stats['ppl'] = utils.get_perplexity(nll_loss.avg)
stats['num_updates'] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, 'best'):
key = 'best_{0}'.format(args.best_checkpoint_metric)
best_function = max if args.maximize_best_checkpoint_metric else min
current_metric = None
if args.best_checkpoint_metric == 'loss':
current_metric = stats['loss'].avg
elif args.best_checkpoint_metric in extra_meters:
current_metric = extra_meters[args.best_checkpoint_metric].avg
elif args.best_checkpoint_metric in stats:
current_metric = stats[args.best_checkpoint_metric]
else:
raise ValueError("best_checkpoint_metric not found in logs")
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best,
current_metric,
)
return stats
def distributed_main(i, args, start_rank=0):
args.device_id = i
if args.distributed_rank is None: # torch.multiprocessing.spawn
args.distributed_rank = start_rank + i
main(args, init_distributed=True)
def cli_main():
parser = options.get_training_parser()
args = options.parse_args_and_arch(parser)
if args.distributed_init_method is None:
distributed_utils.infer_init_method(args)
if args.distributed_init_method is not None:
# distributed training
if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:
start_rank = args.distributed_rank
args.distributed_rank = None # assign automatically
torch.multiprocessing.spawn(
fn=distributed_main,
args=(args, start_rank),
nprocs=torch.cuda.device_count(),
)
else:
distributed_main(args.device_id, args)
elif args.distributed_world_size > 1:
# fallback for single node with multiple GPUs
assert args.distributed_world_size <= torch.cuda.device_count()
port = random.randint(10000, 20000)
args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
args.distributed_rank = None # set based on device id
if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d':
print('| NOTE: you may get better performance with: --ddp-backend=no_c10d')
torch.multiprocessing.spawn(
fn=distributed_main,
args=(args, ),
nprocs=args.distributed_world_size,
)
else:
# single GPU training
main(args)
if __name__ == '__main__':
cli_main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
from fairseq.hub_utils import BPEHubInterface as bpe # noqa
from fairseq.hub_utils import TokenizerHubInterface as tokenizer # noqa
from fairseq.models import MODEL_REGISTRY
dependencies = [
'numpy',
'regex',
'requests',
'torch',
]
# torch.hub doesn't build Cython components, so if they are not found then try
# to build them here
try:
import fairseq.data.token_block_utils_fast
except (ImportError, ModuleNotFoundError):
try:
import cython
import os
from setuptools import sandbox
sandbox.run_setup(
os.path.join(os.path.dirname(__file__), 'setup.py'),
['build_ext', '--inplace'],
)
except (ImportError, ModuleNotFoundError):
print(
'Unable to build Cython components. Please make sure Cython is '
'installed if the torch.hub model you are loading depends on it.'
)
for _model_type, _cls in MODEL_REGISTRY.items():
for model_name in _cls.hub_models().keys():
globals()[model_name] = functools.partial(
_cls.from_pretrained,
model_name,
)
# to simplify the interface we only expose named models
# globals()[_model_type] = _cls.from_pretrained
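# Example usage (illustrative; mirrors the upstream pytorch/fairseq hub entry and
# requires network access):
#   en2de = torch.hub.load('pytorch/fairseq', 'transformer.wmt16.en-de',
#                          tokenizer='moses', bpe='subword_nmt')
#   en2de.translate('Hello world!')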
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/hubconf.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Evaluate the perplexity of a trained language model.
"""
import numpy as np
import torch
from fairseq import checkpoint_utils, options, progress_bar, tasks, utils
from fairseq.data import LMContextWindowDataset
from fairseq.meters import StopwatchMeter, TimeMeter
from fairseq.sequence_scorer import SequenceScorer
class WordStat(object):
def __init__(self, word, is_bpe):
self.word = word
self.is_bpe = is_bpe
self.log_prob = 0
self.next_word_prob = 0
self.count = 0
self.missing_next_words = 0
def add(self, log_prob, next_word_prob):
""" increments counters for the sum of log probs of current word and next
word (given context ending at current word). Since the next word might be at the end of the example,
or it might be not counted because it is not an ending subword unit,
also keeps track of how many of those we have seen """
if next_word_prob is not None:
self.next_word_prob += next_word_prob
else:
self.missing_next_words += 1
self.log_prob += log_prob
self.count += 1
def __str__(self):
return '{}\t{}\t{}\t{}\t{}\t{}'.format(self.word, self.count, self.log_prob, self.is_bpe,
self.next_word_prob, self.count - self.missing_next_words)
def main(parsed_args):
assert parsed_args.path is not None, '--path required for evaluation!'
utils.import_user_module(parsed_args)
print(parsed_args)
use_cuda = torch.cuda.is_available() and not parsed_args.cpu
task = tasks.setup_task(parsed_args)
# Load ensemble
print('| loading model(s) from {}'.format(parsed_args.path))
models, args = checkpoint_utils.load_model_ensemble(
parsed_args.path.split(':'),
arg_overrides=eval(parsed_args.model_overrides),
task=task,
)
for arg in vars(parsed_args).keys():
if arg not in {
'self_target', 'future_target', 'past_target', 'tokens_per_sample',
'output_size_dictionary', 'add_bos_token',
}:
setattr(args, arg, getattr(parsed_args, arg))
# reduce tokens per sample by the required context window size
args.tokens_per_sample -= args.context_window
task = tasks.setup_task(args)
# Load dataset splits
task.load_dataset(args.gen_subset)
dataset = task.dataset(args.gen_subset)
if args.context_window > 0:
dataset = LMContextWindowDataset(
dataset=dataset,
tokens_per_sample=args.tokens_per_sample,
context_window=args.context_window,
pad_idx=task.source_dictionary.pad(),
)
print('| {} {} {} examples'.format(args.data, args.gen_subset, len(dataset)))
# Optimize ensemble for generation and set the source and dest dicts on the model (required by scorer)
for model in models:
model.make_generation_fast_()
if args.fp16:
model.half()
if use_cuda:
model.cuda()
assert len(models) > 0
print('num. model params: {}'.format(sum(p.numel() for p in models[0].parameters())))
itr = task.get_batch_iterator(
dataset=dataset,
max_tokens=args.max_tokens or 36000,
max_sentences=args.max_sentences,
max_positions=utils.resolve_max_positions(*[
model.max_positions() for model in models
]),
ignore_invalid_inputs=True,
num_shards=args.num_shards,
shard_id=args.shard_id,
num_workers=args.num_workers,
).next_epoch_itr(shuffle=False)
gen_timer = StopwatchMeter()
scorer = SequenceScorer(task.target_dictionary, args.softmax_batch)
score_sum = 0.
count = 0
if args.remove_bpe is not None:
if args.remove_bpe == 'sentencepiece':
raise NotImplementedError
else:
bpe_cont = args.remove_bpe.rstrip()
bpe_toks = set(
i
for i in range(len(task.source_dictionary))
if task.source_dictionary[i].endswith(bpe_cont)
)
bpe_len = len(bpe_cont)
else:
bpe_toks = None
bpe_len = 0
word_stats = dict()
with progress_bar.build_progress_bar(args, itr) as t:
wps_meter = TimeMeter()
for sample in t:
if 'net_input' not in sample:
continue
sample = utils.move_to_cuda(sample) if use_cuda else sample
gen_timer.start()
hypos = scorer.generate(models, sample)
gen_timer.stop(sample['ntokens'])
for i, hypos_i in enumerate(hypos):
hypo = hypos_i[0]
sample_id = sample['id'][i]
tokens = hypo['tokens']
tgt_len = tokens.numel()
pos_scores = hypo['positional_scores'].float()
if args.add_bos_token:
assert hypo['tokens'][0].item() == task.target_dictionary.bos()
tokens = tokens[1:]
pos_scores = pos_scores[1:]
skipped_toks = 0
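                # Fold the score of each BPE continuation token into the token that
                # follows it, so that word-level probabilities (and the token count
                # used for perplexity) stay consistent after BPE removal.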
if bpe_toks is not None:
for i in range(tgt_len - 1):
if tokens[i].item() in bpe_toks:
skipped_toks += 1
pos_scores[i + 1] += pos_scores[i]
pos_scores[i] = 0
inf_scores = pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf'))
if inf_scores.any():
print('| Skipping tokens with inf scores:',
task.target_dictionary.string(tokens[inf_scores.nonzero()]))
pos_scores = pos_scores[(~inf_scores).nonzero()]
score_sum += pos_scores.sum().cpu()
count += pos_scores.numel() - skipped_toks
if args.output_word_probs or args.output_word_stats:
w = ''
word_prob = []
is_bpe = False
for i in range(len(tokens)):
w_ind = tokens[i].item()
w += task.source_dictionary[w_ind]
if bpe_toks is not None and w_ind in bpe_toks:
w = w[:-bpe_len]
is_bpe = True
else:
word_prob.append((w, pos_scores[i].item()))
next_prob = None
ind = i + 1
while ind < len(tokens):
if pos_scores[ind].item() != 0:
next_prob = pos_scores[ind]
break
ind += 1
word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[i].item(), next_prob)
is_bpe = False
w = ''
if args.output_word_probs:
print(
str(int(sample_id)) + " "
+ ('\t'.join('{} [{:2f}]'.format(x[0], x[1]) for x in word_prob))
)
wps_meter.update(sample['ntokens'])
t.log({'wps': round(wps_meter.avg)})
avg_nll_loss = -score_sum / count
print('| Evaluated {} tokens in {:.1f}s ({:.2f} tokens/s)'.format(gen_timer.n, gen_timer.sum, 1. / gen_timer.avg))
print('| Loss: {:.4f}, Perplexity: {:.2f}'.format(avg_nll_loss, np.exp(avg_nll_loss)))
if args.output_word_stats:
for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True):
print(ws)
def cli_main():
parser = options.get_eval_lm_parser()
args = options.parse_args_and_arch(parser)
main(args)
if __name__ == '__main__':
cli_main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/eval_lm.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
BLEU scoring of generated translations against reference translations.
"""
import argparse
import os
import sys
from fairseq import bleu
from fairseq.data import dictionary
def get_parser():
parser = argparse.ArgumentParser(description='Command-line script for BLEU scoring.')
# fmt: off
parser.add_argument('-s', '--sys', default='-', help='system output')
parser.add_argument('-r', '--ref', required=True, help='references')
parser.add_argument('-o', '--order', default=4, metavar='N',
type=int, help='consider ngrams up to this order')
parser.add_argument('--ignore-case', action='store_true',
help='case-insensitive scoring')
parser.add_argument('--sacrebleu', action='store_true',
help='score with sacrebleu')
parser.add_argument('--sentence-bleu', action='store_true',
help='report sentence-level BLEUs (i.e., with +1 smoothing)')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
print(args)
assert args.sys == '-' or os.path.exists(args.sys), \
"System output file {} does not exist".format(args.sys)
assert os.path.exists(args.ref), \
"Reference file {} does not exist".format(args.ref)
dict = dictionary.Dictionary()
def readlines(fd):
for line in fd.readlines():
if args.ignore_case:
yield line.lower()
else:
yield line
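    # Three scoring modes: corpus BLEU via sacrebleu, per-sentence BLEU with
    # +1 smoothing, or corpus BLEU with fairseq's internal scorer (default).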
if args.sacrebleu:
import sacrebleu
def score(fdsys):
with open(args.ref) as fdref:
print(sacrebleu.corpus_bleu(fdsys, [fdref]))
elif args.sentence_bleu:
def score(fdsys):
with open(args.ref) as fdref:
scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk())
for i, (sys_tok, ref_tok) in enumerate(zip(readlines(fdsys), readlines(fdref))):
scorer.reset(one_init=True)
sys_tok = dict.encode_line(sys_tok)
ref_tok = dict.encode_line(ref_tok)
scorer.add(ref_tok, sys_tok)
print(i, scorer.result_string(args.order))
else:
def score(fdsys):
with open(args.ref) as fdref:
scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk())
for sys_tok, ref_tok in zip(readlines(fdsys), readlines(fdref)):
sys_tok = dict.encode_line(sys_tok)
ref_tok = dict.encode_line(ref_tok)
scorer.add(ref_tok, sys_tok)
print(scorer.result_string(args.order))
if args.sys == '-':
score(sys.stdin)
else:
with open(args.sys, 'r') as f:
score(f)
if __name__ == '__main__':
main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/score.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
import unittest
import torch
from fairseq.data import Dictionary
class TestDictionary(unittest.TestCase):
def test_finalize(self):
txt = [
'A B C D',
'B C D',
'C D',
'D',
]
ref_ids1 = list(map(torch.IntTensor, [
[4, 5, 6, 7, 2],
[5, 6, 7, 2],
[6, 7, 2],
[7, 2],
]))
ref_ids2 = list(map(torch.IntTensor, [
[7, 6, 5, 4, 2],
[6, 5, 4, 2],
[5, 4, 2],
[4, 2],
]))
# build dictionary
d = Dictionary()
for line in txt:
d.encode_line(line, add_if_not_exist=True)
def get_ids(dictionary):
ids = []
for line in txt:
ids.append(dictionary.encode_line(line, add_if_not_exist=False))
return ids
def assertMatch(ids, ref_ids):
for toks, ref_toks in zip(ids, ref_ids):
self.assertEqual(toks.size(), ref_toks.size())
self.assertEqual(0, (toks != ref_toks).sum().item())
ids = get_ids(d)
assertMatch(ids, ref_ids1)
# check finalized dictionary
d.finalize()
finalized_ids = get_ids(d)
assertMatch(finalized_ids, ref_ids2)
# write to disk and reload
with tempfile.NamedTemporaryFile(mode='w') as tmp_dict:
d.save(tmp_dict.name)
d = Dictionary.load(tmp_dict.name)
reload_ids = get_ids(d)
assertMatch(reload_ids, ref_ids2)
assertMatch(finalized_ids, reload_ids)
if __name__ == '__main__':
unittest.main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_dictionary.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from multiprocessing import Manager
import random
import unittest
import torch
import torch.nn as nn
from fairseq import distributed_utils, optim
class Model(nn.Module):
def __init__(self, input_size, output_size):
super(Model, self).__init__()
self.fc = nn.Linear(input_size, output_size)
def forward(self, input):
output = self.fc(input)
return output
def setup_model_loss_criterion(args, rank, is_cuda):
"""
setup model, criterion and optimizer based on input args
"""
args.distributed_rank = rank
distributed_utils.distributed_init(args)
torch.manual_seed(1)
model = Model(args.input_size, args.nb_classes)
loss_fn = nn.CrossEntropyLoss()
if is_cuda:
model = model.cuda()
loss_fn = loss_fn.cuda()
optimizer = optim.sgd.SGD(args, model.parameters())
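    # Wrap the base SGD optimizer with BMUF (Blockwise Model-Update Filtering),
    # which periodically synchronizes and averages parameters across workers.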
optimizer = optim.FairseqBMUF(args, optimizer)
return model, loss_fn, optimizer
def train_step(input, target, model, loss_fn, optimizer):
"""Do forward, backward and parameter update."""
model.train()
output = model(input)
loss = loss_fn(output, target)
optimizer.backward(loss)
optimizer.step()
def single_gpu_training(args, rank, iterations, shared_results):
is_cuda = torch.cuda.is_available()
if is_cuda:
torch.cuda.set_device(rank)
model, loss_fn, optimizer = setup_model_loss_criterion(args, rank, is_cuda)
for _ in range(iterations):
input = torch.randn(1, args.input_size)
target = torch.empty(args.batch_size, dtype=torch.long).random_(args.nb_classes)
if is_cuda:
input = input.cuda()
target = target.cuda()
train_step(input, target, model, loss_fn, optimizer)
results = []
for param in model.parameters():
if len(results) == 0:
results = param.flatten().cpu().data
else:
results = torch.cat((results, param.flatten().cpu().data), 0)
shared_results[rank] = results
def setup_args():
args = argparse.Namespace()
args.global_sync_iter = 20
args.block_momentum = 0.875
args.block_lr = 0.5
args.input_size = 5
args.nb_classes = 2
args.batch_size = 1
args.lr = [1e-3]
args.momentum = 0
args.weight_decay = 0
args.warmup_iterations = 0
args.use_nbm = True
args.average_sync = True
args.global_sync_iter = 1
args.distributed_backend = "gloo"
args.distributed_world_size = 2
port = random.randint(10000, 20000)
args.distributed_init_method = "tcp://localhost:{port}".format(port=port)
args.distributed_init_host = "localhost"
args.distributed_port = port + 1
args.local_world_size = args.distributed_world_size
return args
class TestBMUF(unittest.TestCase):
def bmuf_process(self, args, iterations):
processes = []
results = Manager().dict()
ctx = torch.multiprocessing.get_context("spawn")
for rank in range(args.distributed_world_size):
p = ctx.Process(
target=single_gpu_training, args=(args, rank, iterations, results)
)
p.start()
processes.append(p)
for p in processes:
p.join()
# Make sure params in both machines are same
assert len(results) == 2
self.assertAlmostEqual(results[0], results[1])
def test_bmuf_sync(self):
# Train model for 1 iteration and do bmuf sync without doing warmup
args = setup_args()
iterations = 1
self.bmuf_process(args, iterations)
def test_warmup_sync(self):
# Train model for 20 iteration and do warmup sync without doing bmuf sync
args = setup_args()
args.warmup_iterations = 20
iterations = 20
self.bmuf_process(args, iterations)
def test_warmup_sync_bmuf_sync(self):
# Train model for 25 iteration and do warmup sync after 20 iteration
# and bmuf sync after 25 iteration
args = setup_args()
args.warmup_iterations = 20
args.global_sync_iter = 5
iterations = 25
self.bmuf_process(args, iterations)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
if __name__ == '__main__':
unittest.main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_bmuf.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq import utils
class TestUtils(unittest.TestCase):
def test_convert_padding_direction(self):
pad = 1
left_pad = torch.LongTensor([
[2, 3, 4, 5, 6],
[1, 7, 8, 9, 10],
[1, 1, 1, 11, 12],
])
right_pad = torch.LongTensor([
[2, 3, 4, 5, 6],
[7, 8, 9, 10, 1],
[11, 12, 1, 1, 1],
])
self.assertAlmostEqual(
right_pad,
utils.convert_padding_direction(
left_pad,
pad,
left_to_right=True,
),
)
self.assertAlmostEqual(
left_pad,
utils.convert_padding_direction(
right_pad,
pad,
right_to_left=True,
),
)
def test_make_positions(self):
pad = 1
left_pad_input = torch.LongTensor([
[9, 9, 9, 9, 9],
[1, 9, 9, 9, 9],
[1, 1, 1, 9, 9],
])
left_pad_output = torch.LongTensor([
[2, 3, 4, 5, 6],
[1, 2, 3, 4, 5],
[1, 1, 1, 2, 3],
])
right_pad_input = torch.LongTensor([
[9, 9, 9, 9, 9],
[9, 9, 9, 9, 1],
[9, 9, 1, 1, 1],
])
right_pad_output = torch.LongTensor([
[2, 3, 4, 5, 6],
[2, 3, 4, 5, 1],
[2, 3, 1, 1, 1],
])
self.assertAlmostEqual(
left_pad_output,
utils.make_positions(left_pad_input, pad),
)
self.assertAlmostEqual(
right_pad_output,
utils.make_positions(right_pad_input, pad),
)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess(utils.item((t1 - t2).abs().max()), 1e-4)
if __name__ == '__main__':
unittest.main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from fairseq.data import iterators
class TestIterators(unittest.TestCase):
def test_counting_iterator(self):
x = list(range(10))
itr = iterators.CountingIterator(x)
self.assertTrue(itr.has_next())
self.assertEqual(next(itr), 0)
self.assertEqual(next(itr), 1)
itr.skip(3)
self.assertEqual(next(itr), 5)
itr.skip(3)
self.assertEqual(next(itr), 9)
self.assertFalse(itr.has_next())
if __name__ == '__main__':
unittest.main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_iterators.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from typing import Dict, List
import tests.utils as test_utils
import torch
from fairseq import utils
from fairseq.data import (
Dictionary,
LanguagePairDataset,
TransformEosDataset,
data_utils,
noising,
)
class TestDataNoising(unittest.TestCase):
def _get_test_data_with_bpe_cont_marker(self, append_eos=True):
"""
Args:
append_eos: if True, each input sentence in the source tokens tensor
will have an EOS appended to the end.
Returns:
vocabs: BPE vocab with continuation markers as suffixes to denote
non-end of word tokens. This is the standard BPE format used in
fairseq's preprocessing.
x: input tensor containing numberized source tokens, with EOS at the
end if append_eos is true
src_lengths: and source lengths.
"""
vocab = Dictionary()
vocab.add_symbol("he@@")
vocab.add_symbol("llo")
vocab.add_symbol("how")
vocab.add_symbol("are")
vocab.add_symbol("y@@")
vocab.add_symbol("ou")
vocab.add_symbol("n@@")
vocab.add_symbol("ew")
vocab.add_symbol("or@@")
vocab.add_symbol("k")
src_tokens = [
["he@@", "llo", "n@@", "ew", "y@@", "or@@", "k"],
["how", "are", "y@@", "ou"],
]
        x, src_lengths = self._convert_src_tokens_to_tensor(
vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
)
return vocab, x, src_lengths
def _get_test_data_with_bpe_end_marker(self, append_eos=True):
"""
Args:
append_eos: if True, each input sentence in the source tokens tensor
will have an EOS appended to the end.
Returns:
vocabs: BPE vocab with end-of-word markers as suffixes to denote
tokens at the end of a word. This is an alternative to fairseq's
standard preprocessing framework and is not generally supported
within fairseq.
x: input tensor containing numberized source tokens, with EOS at the
end if append_eos is true
src_lengths: and source lengths.
"""
vocab = Dictionary()
vocab.add_symbol("he")
vocab.add_symbol("llo_EOW")
vocab.add_symbol("how_EOW")
vocab.add_symbol("are_EOW")
vocab.add_symbol("y")
vocab.add_symbol("ou_EOW")
vocab.add_symbol("n")
vocab.add_symbol("ew_EOW")
vocab.add_symbol("or")
vocab.add_symbol("k_EOW")
src_tokens = [
["he", "llo_EOW", "n", "ew_EOW", "y", "or", "k_EOW"],
["how_EOW", "are_EOW", "y", "ou_EOW"],
]
        x, src_lengths = self._convert_src_tokens_to_tensor(
vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
)
return vocab, x, src_lengths
def _get_test_data_with_word_vocab(self, append_eos=True):
"""
Args:
append_eos: if True, each input sentence in the source tokens tensor
will have an EOS appended to the end.
Returns:
vocabs: word vocab
x: input tensor containing numberized source tokens, with EOS at the
end if append_eos is true
src_lengths: and source lengths.
"""
vocab = Dictionary()
vocab.add_symbol("hello")
vocab.add_symbol("how")
vocab.add_symbol("are")
vocab.add_symbol("you")
vocab.add_symbol("new")
vocab.add_symbol("york")
src_tokens = [
["hello", "new", "york", "you"],
["how", "are", "you", "new", "york"],
]
x, src_lengths = self._convert_src_tokens_to_tensor(
vocab=vocab, src_tokens=src_tokens, append_eos=append_eos
)
return vocab, x, src_lengths
def _convert_src_tokens_to_tensor(
self, vocab: Dictionary, src_tokens: List[List[str]], append_eos: bool
):
src_len = [len(x) for x in src_tokens]
# If we have to append EOS, we include EOS in counting src length
if append_eos:
src_len = [length + 1 for length in src_len]
x = torch.LongTensor(len(src_tokens), max(src_len)).fill_(vocab.pad())
for i in range(len(src_tokens)):
for j in range(len(src_tokens[i])):
x[i][j] = vocab.index(src_tokens[i][j])
if append_eos:
x[i][j + 1] = vocab.eos()
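        # Transpose to time-major (T x B) layout, which the noising modules expect.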
x = x.transpose(1, 0)
return x, torch.LongTensor(src_len)
def assert_eos_at_end(self, x, x_len, eos):
"""Asserts last token of every sentence in x is EOS """
for i in range(len(x_len)):
self.assertEqual(
x[x_len[i] - 1][i],
eos,
(
"Expected eos (token id {eos}) at the end of sentence {i} "
"but got {other} instead"
).format(i=i, eos=eos, other=x[i][-1]),
)
def assert_word_dropout_correct(self, x, x_noised, x_len, l_noised):
# Expect only the first word (2 bpe tokens) of the first example
# was dropped out
self.assertEqual(x_len[0] - 2, l_noised[0])
for i in range(l_noised[0]):
self.assertEqual(x_noised[i][0], x[i + 2][0])
def test_word_dropout_with_eos(self):
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2)
self.assert_word_dropout_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised
)
self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def assert_word_blanking_correct(self, x, x_noised, x_len, l_noised, unk):
# Expect only the first word (2 bpe tokens) of the first example
# was blanked out
self.assertEqual(x_len[0], l_noised[0])
for i in range(l_noised[0]):
if i < 2:
self.assertEqual(x_noised[i][0], unk)
else:
self.assertEqual(x_noised[i][0], x[i][0])
def test_word_blank_with_eos(self):
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk())
self.assert_word_blanking_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
)
self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def generate_unchanged_shuffle_map(self, length):
return {i: i for i in range(length)}
def assert_word_shuffle_matches_expected(
self,
x,
x_len,
max_shuffle_distance: int,
vocab: Dictionary,
expected_shufle_maps: List[Dict[int, int]],
expect_eos_at_end: bool,
bpe_end_marker=None,
):
"""
This verifies that with a given x, x_len, max_shuffle_distance, and
vocab, we get the expected shuffle result.
Args:
x: Tensor of shape (T x B) = (sequence_length, batch_size)
x_len: Tensor of length B = batch_size
max_shuffle_distance: arg to pass to noising
expected_shuffle_maps: List[mapping] where mapping is a
Dict[old_index, new_index], mapping x's elements from their
old positions in x to their new positions in x.
expect_eos_at_end: if True, check the output to make sure there is
an EOS at the end.
bpe_end_marker: str denoting the BPE end token. If this is not None, we
set the BPE cont token to None in the noising classes.
"""
bpe_cont_marker = None
if bpe_end_marker is None:
bpe_cont_marker = "@@"
with data_utils.numpy_seed(1234):
word_shuffle = noising.WordShuffle(
vocab, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker
)
x_noised, l_noised = word_shuffle.noising(
x, x_len, max_shuffle_distance=max_shuffle_distance
)
# For every example, we have a different expected shuffle map. We check
# that each example is shuffled as expected according to each
# corresponding shuffle map.
for i in range(len(expected_shufle_maps)):
shuffle_map = expected_shufle_maps[i]
for k, v in shuffle_map.items():
self.assertEqual(x[k][i], x_noised[v][i])
# Shuffling should not affect the length of each example
for pre_shuffle_length, post_shuffle_length in zip(x_len, l_noised):
self.assertEqual(pre_shuffle_length, post_shuffle_length)
if expect_eos_at_end:
self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def test_word_shuffle_with_eos(self):
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=True,
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(x_len[0]),
{0: 0, 1: 3, 2: 1, 3: 2},
],
expect_eos_at_end=True,
)
def test_word_shuffle_with_eos_nonbpe(self):
"""The purpose of this is to test shuffling logic with word vocabs"""
vocab, x, x_len = self._get_test_data_with_word_vocab(append_eos=True)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=True,
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
{0: 0, 1: 1, 2: 3, 3: 2},
{0: 0, 1: 2, 2: 1, 3: 3, 4: 4},
],
expect_eos_at_end=True,
)
def test_word_shuffle_without_eos(self):
"""Same result as word shuffle with eos except no EOS at end"""
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=False,
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(x_len[0]),
{0: 0, 1: 3, 2: 1, 3: 2},
],
expect_eos_at_end=False,
)
def test_word_shuffle_without_eos_with_bpe_end_marker(self):
"""Same result as word shuffle without eos except using BPE end token"""
vocab, x, x_len = self._get_test_data_with_bpe_end_marker(append_eos=False)
# Assert word shuffle with max shuffle distance 0 causes input to be
# unchanged
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
max_shuffle_distance=0,
vocab=vocab,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(example_len)
for example_len in x_len
],
expect_eos_at_end=False,
bpe_end_marker="_EOW",
)
# Assert word shuffle with max shuffle distance 3 matches our expected
# shuffle order
self.assert_word_shuffle_matches_expected(
x=x,
x_len=x_len,
vocab=vocab,
max_shuffle_distance=3,
expected_shufle_maps=[
self.generate_unchanged_shuffle_map(x_len[0]),
{0: 0, 1: 3, 2: 1, 3: 2},
],
expect_eos_at_end=False,
bpe_end_marker="_EOW",
)
def assert_no_eos_at_end(self, x, x_len, eos):
"""Asserts that the last token of each sentence in x is not EOS """
for i in range(len(x_len)):
self.assertNotEqual(
x[x_len[i] - 1][i],
eos,
"Expected no eos (token id {eos}) at the end of sentence {i}.".format(
eos=eos, i=i
),
)
def test_word_dropout_without_eos(self):
"""Same result as word dropout with eos except no EOS at end"""
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2)
self.assert_word_dropout_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised
)
self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def test_word_blank_without_eos(self):
"""Same result as word blank with eos except no EOS at end"""
vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False)
with data_utils.numpy_seed(1234):
noising_gen = noising.WordDropout(vocab)
x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk())
self.assert_word_blanking_correct(
x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk()
)
self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos())
def _get_noising_dataset_batch(
self, src_tokens_no_pad, src_dict, append_eos_to_tgt=False,
):
"""
Constructs a NoisingDataset and the corresponding
``LanguagePairDataset(NoisingDataset(src), src)``. If
*append_eos_to_tgt* is True, wrap the source dataset in
:class:`TransformEosDataset` to append EOS to the clean source when
using it as the target.
"""
src_dataset = test_utils.TestDataset(data=src_tokens_no_pad)
noising_dataset = noising.NoisingDataset(
src_dataset=src_dataset,
src_dict=src_dict,
seed=1234,
max_word_shuffle_distance=3,
word_dropout_prob=0.2,
word_blanking_prob=0.2,
noising_class=noising.UnsupervisedMTNoising,
)
tgt = src_dataset
language_pair_dataset = LanguagePairDataset(
src=noising_dataset, tgt=tgt, src_sizes=None, src_dict=src_dict
)
language_pair_dataset = TransformEosDataset(
language_pair_dataset, src_dict.eos(),
append_eos_to_tgt=append_eos_to_tgt,
)
dataloader = torch.utils.data.DataLoader(
dataset=language_pair_dataset,
batch_size=2,
collate_fn=language_pair_dataset.collater,
)
denoising_batch_result = next(iter(dataloader))
return denoising_batch_result
def test_noising_dataset_with_eos(self):
src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker(
append_eos=True
)
# Format data for src_dataset
src_tokens = torch.t(src_tokens)
src_tokens_no_pad = []
for src_sentence in src_tokens:
src_tokens_no_pad.append(
utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())
)
denoising_batch_result = self._get_noising_dataset_batch(
src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict
)
eos, pad = src_dict.eos(), src_dict.pad()
# Generated noisy source as source
expected_src = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13, eos], [pad, pad, pad, 6, 8, 9, 7, eos]]
)
# Original clean source as target (right-padded)
expected_tgt = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
)
generated_src = denoising_batch_result["net_input"]["src_tokens"]
tgt_tokens = denoising_batch_result["target"]
self.assertTensorEqual(expected_src, generated_src)
self.assertTensorEqual(expected_tgt, tgt_tokens)
def test_noising_dataset_without_eos(self):
"""
Similar to test noising dataset with eos except that we have to set
*append_eos_to_tgt* to ``True``.
"""
src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker(
append_eos=False
)
# Format data for src_dataset
src_tokens = torch.t(src_tokens)
src_tokens_no_pad = []
for src_sentence in src_tokens:
src_tokens_no_pad.append(
utils.strip_pad(tensor=src_sentence, pad=src_dict.pad())
)
denoising_batch_result = self._get_noising_dataset_batch(
src_tokens_no_pad=src_tokens_no_pad,
src_dict=src_dict,
append_eos_to_tgt=True,
)
eos, pad = src_dict.eos(), src_dict.pad()
# Generated noisy source as source
expected_src = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13], [pad, pad, pad, 6, 8, 9, 7]]
)
# Original clean source as target (right-padded)
expected_tgt = torch.LongTensor(
[[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]]
)
generated_src = denoising_batch_result["net_input"]["src_tokens"]
tgt_tokens = denoising_batch_result["target"]
self.assertTensorEqual(expected_src, generated_src)
self.assertTensorEqual(expected_tgt, tgt_tokens)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_noising.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import unittest
from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention
class TestSparseMultiheadAttention(unittest.TestCase):
def test_sparse_multihead_attention(self):
attn_weights = torch.randn(1, 8, 8)
bidirectional_sparse_mask = torch.tensor([
[0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0],
[0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0],
[0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0],
[0, 0, 0, 0, 0, float('-inf'), float('-inf'), 0],
[float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0],
[float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0],
[float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0],
[float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0]
])
bidirectional_attention = SparseMultiheadAttention(16, 1, stride=4, expressivity=1, is_bidirectional=True)
bidirectional_attention_sparse_mask = bidirectional_attention.buffered_sparse_mask(attn_weights, 8, 8)
        self.assertTrue(torch.all(torch.eq(bidirectional_attention_sparse_mask, bidirectional_sparse_mask)))
sparse_mask = torch.tensor([
[0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf'),
float('-inf'), float('-inf')],
[0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf')],
[0, 0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf'), float('-inf')],
[0, 0, 0, 0, float('-inf'), float('-inf'), float('-inf'), float('-inf')],
[0, 0, 0, 0, 0, float('-inf'), float('-inf'), float('-inf')],
[float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, float('-inf'), float('-inf')],
[float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, float('-inf')],
[float('-inf'), float('-inf'), float('-inf'), 0, 0, 0, 0, 0],
])
attention = SparseMultiheadAttention(16, 1, stride=4, expressivity=1, is_bidirectional=False)
attention_sparse_mask = attention.buffered_sparse_mask(attn_weights, 8, 8)
        self.assertTrue(torch.all(torch.eq(attention_sparse_mask, sparse_mask)))
if __name__ == '__main__':
unittest.main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_sparse_multihead_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
from io import StringIO
import unittest
from unittest.mock import MagicMock, patch
import torch
from fairseq import data, checkpoint_utils
def mock_trainer(epoch, num_updates, iterations_in_epoch):
trainer = MagicMock()
trainer.load_checkpoint.return_value = {
'train_iterator': {
'epoch': epoch,
'iterations_in_epoch': iterations_in_epoch,
'shuffle': False,
},
}
trainer.get_num_updates.return_value = num_updates
return trainer
def mock_dict():
d = MagicMock()
d.pad.return_value = 1
d.eos.return_value = 2
d.unk.return_value = 3
return d
def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations_in_epoch):
tokens = torch.LongTensor(list(range(epoch_size))).view(1, -1)
tokens_ds = data.TokenBlockDataset(
tokens, sizes=[tokens.size(-1)], block_size=1, pad=0, eos=1, include_targets=False,
)
trainer = mock_trainer(epoch, num_updates, iterations_in_epoch)
dataset = data.LanguagePairDataset(tokens_ds, tokens_ds.sizes, mock_dict(), shuffle=False)
epoch_itr = data.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=[[i] for i in range(epoch_size)],
)
return trainer, epoch_itr
class TestLoadCheckpoint(unittest.TestCase):
def setUp(self):
self.args_mock = MagicMock()
self.args_mock.optimizer_overrides = '{}'
self.args_mock.reset_dataloader = False
self.args_mock.reset_meters = False
self.args_mock.reset_optimizer = False
self.patches = {
'os.makedirs': MagicMock(),
'os.path.join': MagicMock(),
'os.path.isfile': MagicMock(return_value=True),
'os.path.isabs': MagicMock(return_value=False),
}
self.applied_patches = [patch(p, d) for p, d in self.patches.items()]
[p.start() for p in self.applied_patches]
def test_load_partial_checkpoint(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 200, 50)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
_, epoch_itr = checkpoint_utils.load_checkpoint(self.args_mock, trainer)
self.assertEqual(epoch_itr.epoch, 2)
self.assertEqual(epoch_itr.iterations_in_epoch, 50)
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertEqual(epoch_itr.epoch, 2)
self.assertEqual(epoch_itr.iterations_in_epoch, 50)
self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 50)
self.assertEqual(epoch_itr.iterations_in_epoch, 51)
for _ in range(150 - 52):
next(itr)
self.assertEqual(epoch_itr.iterations_in_epoch, 149)
self.assertTrue(itr.has_next())
next(itr)
self.assertFalse(itr.has_next())
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertTrue(itr.has_next())
self.assertEqual(epoch_itr.epoch, 3)
self.assertEqual(epoch_itr.iterations_in_epoch, 0)
def test_load_full_checkpoint(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 300, 150)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
_, epoch_itr = checkpoint_utils.load_checkpoint(self.args_mock, trainer)
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertEqual(epoch_itr.epoch, 3)
self.assertEqual(epoch_itr.iterations_in_epoch, 0)
self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 0)
def test_load_no_checkpoint(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(0, 150, 0, 0)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
self.patches['os.path.isfile'].return_value = False
_, epoch_itr = checkpoint_utils.load_checkpoint(self.args_mock, trainer)
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertEqual(epoch_itr.epoch, 1)
self.assertEqual(epoch_itr.iterations_in_epoch, 0)
self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 0)
def tearDown(self):
patch.stopall()
if __name__ == '__main__':
unittest.main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import unittest
import torch
from fairseq.sequence_scorer import SequenceScorer
import tests.utils as test_utils
class TestSequenceScorer(unittest.TestCase):
def test_sequence_scorer(self):
# construct dummy dictionary
d = test_utils.dummy_dictionary(vocab_size=2)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
eos = d.eos()
w1 = 4
w2 = 5
# construct dataloader
data = [
{
'source': torch.LongTensor([w1, w2, eos]),
'target': torch.LongTensor([w1, w2, w1, eos]),
},
{
'source': torch.LongTensor([w2, eos]),
'target': torch.LongTensor([w2, w1, eos]),
},
{
'source': torch.LongTensor([w2, eos]),
'target': torch.LongTensor([w2, eos]),
},
]
data_itr = test_utils.dummy_dataloader(data)
# specify expected output probabilities
args = argparse.Namespace()
unk = 0.
args.beam_probs = [
# step 0:
torch.FloatTensor([
# eos w1 w2
[0.0, unk, 0.6, 0.4], # sentence 1
[0.0, unk, 0.4, 0.6], # sentence 2
[0.0, unk, 0.7, 0.3], # sentence 3
]),
# step 1:
torch.FloatTensor([
# eos w1 w2
[0.0, unk, 0.2, 0.7], # sentence 1
[0.0, unk, 0.8, 0.2], # sentence 2
[0.7, unk, 0.1, 0.2], # sentence 3
]),
# step 2:
torch.FloatTensor([
# eos w1 w2
[0.10, unk, 0.50, 0.4], # sentence 1
[0.15, unk, 0.15, 0.7], # sentence 2
[0.00, unk, 0.00, 0.0], # sentence 3
]),
# step 3:
torch.FloatTensor([
# eos w1 w2
[0.9, unk, 0.05, 0.05], # sentence 1
[0.0, unk, 0.00, 0.0], # sentence 2
[0.0, unk, 0.00, 0.0], # sentence 3
]),
]
expected_scores = [
[0.6, 0.7, 0.5, 0.9], # sentence 1
[0.6, 0.8, 0.15], # sentence 2
[0.3, 0.7], # sentence 3
]
task = test_utils.TestTranslationTask.setup_task(args, d, d)
model = task.build_model(args)
scorer = SequenceScorer(task.target_dictionary)
for sample in data_itr:
hypos = task.inference_step(scorer, [model], sample)
for id, hypos_id in zip(sample['id'].tolist(), hypos):
self.assertHypoTokens(hypos_id[0], data[id]['target'])
self.assertHypoScore(hypos_id[0], expected_scores[id])
def assertHypoTokens(self, hypo, tokens):
self.assertTensorEqual(hypo['tokens'], torch.LongTensor(tokens))
def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertAlmostEqual(hypo['positional_scores'], pos_scores)
self.assertEqual(pos_scores.numel(), hypo['tokens'].numel())
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel()**lenpen
self.assertLess(abs(score - hypo['score']), 1e-6)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == '__main__':
unittest.main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_sequence_scorer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import unittest
from fairseq.modules.multihead_attention import MultiheadAttention
class TestMultiheadAttention(unittest.TestCase):
def test_append_prev_key_padding_mask(self):
bsz = 1
src_len = 4
cases = [
# no padding mask
(None, None, None),
# current padding mask only
(
torch.tensor([[1]]).bool(),
None,
torch.tensor([[0, 0, 0, 1]]).bool(),
),
# previous padding mask only
(
None,
torch.tensor([[0, 1, 0]]).bool(),
torch.tensor([[0, 1, 0, 0]]).bool(),
),
# both padding masks
(
torch.tensor([[1]]).bool(),
torch.tensor([[0, 1, 0]]).bool(),
torch.tensor([[0, 1, 0, 1]]).bool(),
),
]
for c in cases:
key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(
c[0],
c[1],
batch_size=bsz,
src_len=src_len,
static_kv=False,
)
if key_padding_mask is not None:
self.assertTrue(
torch.all(torch.eq(key_padding_mask, c[2])),
f'Unexpected resultant key padding mask: {key_padding_mask}'
f' given current: {c[0]} and previous: {c[1]}',
)
self.assertEqual(key_padding_mask.size(0), bsz)
self.assertEqual(key_padding_mask.size(1), src_len)
else:
self.assertIsNone(c[2])
if __name__ == '__main__':
unittest.main()
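# Illustrative sketch (an assumption for clarity, not the fairseq implementation):
# the behaviour exercised above can be reproduced by padding the gap between the
# previous and current key padding masks with False and concatenating up to
# src_len, so cached (previous) keys keep their mask during incremental decoding.
import torch

def combine_padding_masks(prev_mask, cur_mask, batch_size, src_len):
    """Return a (batch_size, src_len) bool mask covering previous + current keys."""
    if prev_mask is None and cur_mask is None:
        return None
    prev = prev_mask if prev_mask is not None else torch.zeros(batch_size, 0, dtype=torch.bool)
    cur = cur_mask if cur_mask is not None else torch.zeros(batch_size, 0, dtype=torch.bool)
    gap = torch.zeros(batch_size, src_len - prev.size(1) - cur.size(1), dtype=torch.bool)
    return torch.cat([prev, gap, cur], dim=1)

# combine_padding_masks(torch.tensor([[0, 1, 0]]).bool(), torch.tensor([[1]]).bool(), 1, 4)
# -> tensor([[False, True, False, True]]), matching the last test case above.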
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_multihead_attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import unittest
import numpy as np
from fairseq.data import ListDataset, ResamplingDataset
class TestResamplingDataset(unittest.TestCase):
def setUp(self):
self.strings = ["ab", "c", "def", "ghij"]
self.weights = [4.0, 2.0, 7.0, 1.5]
self.size_ratio = 2
self.dataset = ListDataset(
self.strings, np.array([len(s) for s in self.strings])
)
def _test_common(self, resampling_dataset, iters):
assert len(self.dataset) == len(self.strings) == len(self.weights)
assert len(resampling_dataset) == self.size_ratio * len(self.strings)
results = {"ordered_by_size": True, "max_distribution_diff": 0.0}
totalfreqs = 0
freqs = collections.defaultdict(int)
for epoch_num in range(iters):
resampling_dataset.set_epoch(epoch_num)
indices = resampling_dataset.ordered_indices()
assert len(indices) == len(resampling_dataset)
prev_size = -1
for i in indices:
cur_size = resampling_dataset.size(i)
# Make sure indices map to same sequences within an epoch
assert resampling_dataset[i] == resampling_dataset[i]
# Make sure length of sequence is correct
assert cur_size == len(resampling_dataset[i])
freqs[resampling_dataset[i]] += 1
totalfreqs += 1
if prev_size > cur_size:
results["ordered_by_size"] = False
prev_size = cur_size
assert set(freqs.keys()) == set(self.strings)
for s, weight in zip(self.strings, self.weights):
freq = freqs[s] / totalfreqs
expected_freq = weight / sum(self.weights)
results["max_distribution_diff"] = max(
results["max_distribution_diff"], abs(expected_freq - freq)
)
return results
def test_resampling_dataset_batch_by_size_false(self):
resampling_dataset = ResamplingDataset(
self.dataset,
self.weights,
size_ratio=self.size_ratio,
batch_by_size=False,
seed=0,
)
results = self._test_common(resampling_dataset, iters=1000)
# For batch_by_size = False, the batches should be returned in
# arbitrary order of size.
assert not results["ordered_by_size"]
# Allow tolerance in distribution error of 2%.
assert results["max_distribution_diff"] < 0.02
def test_resampling_dataset_batch_by_size_true(self):
resampling_dataset = ResamplingDataset(
self.dataset,
self.weights,
size_ratio=self.size_ratio,
batch_by_size=True,
seed=0,
)
results = self._test_common(resampling_dataset, iters=1000)
# For batch_by_size = True, the batches should be returned in
# increasing order of size.
assert results["ordered_by_size"]
# Allow tolerance in distribution error of 2%.
assert results["max_distribution_diff"] < 0.02
if __name__ == "__main__":
unittest.main()
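# Illustrative note (added for clarity, not part of the test): the distribution
# check above compares empirical sampling frequencies against weight / sum(weights);
# size_ratio only scales how many samples are drawn per epoch.
def expected_frequencies(weights):
    """Target sampling frequency of each dataset element under ResamplingDataset."""
    total = float(sum(weights))
    return [w / total for w in weights]

# For weights [4.0, 2.0, 7.0, 1.5] the targets are roughly
# [0.276, 0.138, 0.483, 0.103], and with size_ratio=2 each epoch draws
# 2 * len(dataset) = 8 samples from that distribution.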
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_resampling_dataset.py |
EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.data import (
BacktranslationDataset,
LanguagePairDataset,
TransformEosDataset,
)
from fairseq.sequence_generator import SequenceGenerator
import tests.utils as test_utils
class TestBacktranslationDataset(unittest.TestCase):
def setUp(self):
self.tgt_dict, self.w1, self.w2, self.src_tokens, self.src_lengths, self.model = (
test_utils.sequence_generator_setup()
)
dummy_src_samples = self.src_tokens
self.tgt_dataset = test_utils.TestDataset(data=dummy_src_samples)
self.cuda = torch.cuda.is_available()
def _backtranslation_dataset_helper(
self, remove_eos_from_input_src, remove_eos_from_output_src,
):
tgt_dataset = LanguagePairDataset(
src=self.tgt_dataset,
src_sizes=self.tgt_dataset.sizes,
src_dict=self.tgt_dict,
tgt=None,
tgt_sizes=None,
tgt_dict=None,
)
generator = SequenceGenerator(
tgt_dict=self.tgt_dict,
max_len_a=0,
max_len_b=200,
beam_size=2,
unk_penalty=0,
sampling=False,
)
backtranslation_dataset = BacktranslationDataset(
tgt_dataset=TransformEosDataset(
dataset=tgt_dataset,
eos=self.tgt_dict.eos(),
# remove eos from the input src
remove_eos_from_src=remove_eos_from_input_src,
),
src_dict=self.tgt_dict,
backtranslation_fn=(
lambda sample: generator.generate([self.model], sample)
),
output_collater=TransformEosDataset(
dataset=tgt_dataset,
eos=self.tgt_dict.eos(),
# if we remove eos from the input src, then we need to add it
# back to the output tgt
append_eos_to_tgt=remove_eos_from_input_src,
remove_eos_from_src=remove_eos_from_output_src,
).collater,
cuda=self.cuda,
)
dataloader = torch.utils.data.DataLoader(
backtranslation_dataset,
batch_size=2,
collate_fn=backtranslation_dataset.collater,
)
backtranslation_batch_result = next(iter(dataloader))
eos, pad, w1, w2 = self.tgt_dict.eos(), self.tgt_dict.pad(), self.w1, self.w2
# Note that we sort by src_lengths and add left padding, so actually
# ids will look like: [1, 0]
expected_src = torch.LongTensor([[w1, w2, w1, eos], [pad, pad, w1, eos]])
if remove_eos_from_output_src:
expected_src = expected_src[:, :-1]
expected_tgt = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]])
generated_src = backtranslation_batch_result["net_input"]["src_tokens"]
tgt_tokens = backtranslation_batch_result["target"]
self.assertTensorEqual(expected_src, generated_src)
self.assertTensorEqual(expected_tgt, tgt_tokens)
def test_backtranslation_dataset_no_eos_in_output_src(self):
self._backtranslation_dataset_helper(
remove_eos_from_input_src=False, remove_eos_from_output_src=True,
)
def test_backtranslation_dataset_with_eos_in_output_src(self):
self._backtranslation_dataset_helper(
remove_eos_from_input_src=False, remove_eos_from_output_src=False,
)
def test_backtranslation_dataset_no_eos_in_input_src(self):
self._backtranslation_dataset_helper(
remove_eos_from_input_src=True, remove_eos_from_output_src=False,
)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
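# Illustrative sketch (an assumption for clarity, not the fairseq API): the data
# flow tested above pairs every monolingual target sentence with a source
# sentence synthesised by a generator, so a collated batch carries the generated
# tokens under net_input["src_tokens"] and the original sentence as the target.
def make_backtranslated_pairs(monolingual_targets, backtranslate):
    """Pair each original target sentence with a generated (synthetic) source."""
    return [
        {"source": backtranslate(tgt), "target": tgt}
        for tgt in monolingual_targets
    ]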
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_backtranslation_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
from io import StringIO
import os
import random
import sys
import tempfile
import unittest
import torch
from fairseq import options
import preprocess
import train
import generate
import interactive
import eval_lm
import validate
class TestTranslation(unittest.TestCase):
def test_fconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fconv') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'fconv_iwslt_de_en')
generate_main(data_dir)
def test_raw(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fconv_raw') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ['--dataset-impl', 'raw'])
train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--dataset-impl', 'raw'])
generate_main(data_dir, ['--dataset-impl', 'raw'])
def test_fp16(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fp16') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--fp16'])
generate_main(data_dir)
def test_memory_efficient_fp16(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_memory_efficient_fp16') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--memory-efficient-fp16'])
generate_main(data_dir)
def test_update_freq(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_update_freq') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--update-freq', '3'])
generate_main(data_dir)
def test_max_positions(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_max_positions') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
with self.assertRaises(Exception) as context:
train_translation_model(
data_dir, 'fconv_iwslt_de_en', ['--max-target-positions', '5'],
)
self.assertTrue(
'skip this example with --skip-invalid-size-inputs-valid-test' in str(context.exception)
)
train_translation_model(
data_dir, 'fconv_iwslt_de_en',
['--max-target-positions', '5', '--skip-invalid-size-inputs-valid-test'],
)
with self.assertRaises(Exception) as context:
generate_main(data_dir)
generate_main(data_dir, ['--skip-invalid-size-inputs-valid-test'])
def test_generation(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_sampling') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'fconv_iwslt_de_en')
generate_main(data_dir, [
'--sampling',
'--temperature', '2',
'--beam', '2',
'--nbest', '2',
])
generate_main(data_dir, [
'--sampling',
'--sampling-topk', '3',
'--beam', '2',
'--nbest', '2',
])
generate_main(data_dir, [
'--sampling',
'--sampling-topp', '0.2',
'--beam', '2',
'--nbest', '2',
])
generate_main(data_dir, ['--prefix-size', '2'])
def test_lstm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lstm') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'lstm_wiseman_iwslt_de_en', [
'--encoder-layers', '2',
'--decoder-layers', '2',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
'--decoder-out-embed-dim', '8',
])
generate_main(data_dir)
def test_lstm_bidirectional(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lstm_bidirectional') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'lstm', [
'--encoder-layers', '2',
'--encoder-bidirectional',
'--encoder-hidden-size', '16',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
'--decoder-out-embed-dim', '8',
'--decoder-layers', '2',
])
generate_main(data_dir)
def test_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_transformer') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'transformer_iwslt_de_en', [
'--encoder-layers', '2',
'--decoder-layers', '2',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
], run_validation=True)
generate_main(data_dir)
def test_transformer_cross_self_attention(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_transformer_cross_self_attention') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'transformer_iwslt_de_en', [
'--encoder-layers', '2',
'--decoder-layers', '2',
'--encoder-embed-dim', '8',
                    '--decoder-embed-dim', '8',
'--no-cross-attention',
'--cross-self-attention',
'--layer-wise-attention',
], run_validation=True)
generate_main(data_dir, extra_flags=[])
def test_lightconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lightconv') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'lightconv_iwslt_de_en', [
'--encoder-conv-type', 'lightweight',
'--decoder-conv-type', 'lightweight',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
])
generate_main(data_dir)
def test_dynamicconv(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_dynamicconv') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'lightconv_iwslt_de_en', [
'--encoder-conv-type', 'dynamic',
'--decoder-conv-type', 'dynamic',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
])
generate_main(data_dir)
def test_cmlm_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_cmlm_transformer') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ['--joined-dictionary'])
train_translation_model(data_dir, 'cmlm_transformer', [
'--apply-bert-init',
'--criterion', 'nat_loss',
'--noise', 'full_mask',
'--pred-length-offset',
'--length-loss-factor', '0.1'
], task='translation_lev')
generate_main(data_dir, [
'--task', 'translation_lev',
'--iter-decode-max-iter', '9',
'--iter-decode-eos-penalty', '0',
'--print-step',
])
def test_levenshtein_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_levenshtein_transformer') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ['--joined-dictionary'])
train_translation_model(data_dir, 'levenshtein_transformer', [
'--apply-bert-init', '--early-exit', '6,6,6',
'--criterion', 'nat_loss'
], task='translation_lev')
generate_main(data_dir, [
'--task', 'translation_lev',
'--iter-decode-max-iter', '9',
'--iter-decode-eos-penalty', '0',
'--print-step',
])
def test_nonautoregressive_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_nonautoregressive_transformer') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ['--joined-dictionary'])
train_translation_model(data_dir, 'nonautoregressive_transformer', [
'--apply-bert-init', '--src-embedding-copy', '--criterion',
'nat_loss', '--noise', 'full_mask', '--pred-length-offset',
'--length-loss-factor', '0.1'
], task='translation_lev')
generate_main(data_dir, [
'--task', 'translation_lev',
'--iter-decode-max-iter', '9',
'--iter-decode-eos-penalty', '0',
'--print-step',
])
def test_iterative_nonautoregressive_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_iterative_nonautoregressive_transformer') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ['--joined-dictionary'])
train_translation_model(data_dir, 'iterative_nonautoregressive_transformer', [
'--apply-bert-init', '--src-embedding-copy', '--criterion',
'nat_loss', '--noise', 'full_mask', '--stochastic-approx',
'--dae-ratio', '0.5', '--train-step', '3'
], task='translation_lev')
generate_main(data_dir, [
'--task', 'translation_lev',
'--iter-decode-max-iter', '9',
'--iter-decode-eos-penalty', '0',
'--print-step',
])
def test_insertion_transformer(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_insertion_transformer') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir, ['--joined-dictionary'])
train_translation_model(data_dir, 'insertion_transformer', [
'--apply-bert-init', '--criterion', 'nat_loss', '--noise',
'random_mask'
], task='translation_lev')
generate_main(data_dir, [
'--task', 'translation_lev',
'--iter-decode-max-iter', '9',
'--iter-decode-eos-penalty', '0',
'--print-step',
])
def test_mixture_of_experts(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_moe') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
train_translation_model(data_dir, 'transformer_iwslt_de_en', [
'--task', 'translation_moe',
'--method', 'hMoElp',
'--mean-pool-gating-network',
'--num-experts', '3',
'--encoder-layers', '2',
'--decoder-layers', '2',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
])
generate_main(data_dir, [
'--task', 'translation_moe',
'--method', 'hMoElp',
'--mean-pool-gating-network',
'--num-experts', '3',
'--gen-expert', '0'
])
def test_alignment(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_alignment') as data_dir:
create_dummy_data(data_dir, alignment=True)
preprocess_translation_data(data_dir, ['--align-suffix', 'align'])
train_translation_model(
data_dir,
'transformer_align',
[
'--encoder-layers', '2',
'--decoder-layers', '2',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
'--load-alignments',
'--alignment-layer', '1',
'--criterion', 'label_smoothed_cross_entropy_with_alignment'
],
run_validation=True,
)
generate_main(data_dir)
class TestStories(unittest.TestCase):
def test_fconv_self_att_wp(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fconv_self_att_wp') as data_dir:
create_dummy_data(data_dir)
preprocess_translation_data(data_dir)
config = [
'--encoder-layers', '[(128, 3)] * 2',
'--decoder-layers', '[(128, 3)] * 2',
'--decoder-attention', 'True',
'--encoder-attention', 'False',
'--gated-attention', 'True',
'--self-attention', 'True',
'--project-input', 'True',
'--encoder-embed-dim', '8',
'--decoder-embed-dim', '8',
'--decoder-out-embed-dim', '8',
'--multihead-self-attention-nheads', '2'
]
train_translation_model(data_dir, 'fconv_self_att_wp', config)
generate_main(data_dir)
# fusion model
os.rename(os.path.join(data_dir, 'checkpoint_last.pt'), os.path.join(data_dir, 'pretrained.pt'))
config.extend([
'--pretrained', 'True',
'--pretrained-checkpoint', os.path.join(data_dir, 'pretrained.pt'),
'--save-dir', os.path.join(data_dir, 'fusion_model'),
])
train_translation_model(data_dir, 'fconv_self_att_wp', config)
class TestLanguageModeling(unittest.TestCase):
def test_fconv_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_fconv_lm') as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(data_dir, 'fconv_lm', [
'--decoder-layers', '[(850, 3)] * 2 + [(1024,4)]',
'--decoder-embed-dim', '280',
'--optimizer', 'nag',
'--lr', '0.1',
])
eval_lm_main(data_dir)
def test_transformer_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_transformer_lm') as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir, 'transformer_lm', ['--add-bos-token'], run_validation=True,
)
eval_lm_main(data_dir)
generate_main(data_dir, [
'--task', 'language_modeling',
'--sample-break-mode', 'eos',
'--tokens-per-sample', '500',
])
def test_lightconv_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_lightconv_lm') as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_language_model(
data_dir, 'lightconv_lm', ['--add-bos-token'], run_validation=True,
)
eval_lm_main(data_dir)
generate_main(data_dir, [
'--task', 'language_modeling',
'--sample-break-mode', 'eos',
'--tokens-per-sample', '500',
])
class TestMaskedLanguageModel(unittest.TestCase):
def test_legacy_masked_lm(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_legacy_mlm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_legacy_masked_language_model(data_dir, "masked_lm")
def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, encoder_only):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory("test_mlm") as data_dir:
create_dummy_data(data_dir)
preprocess_lm_data(data_dir)
train_legacy_masked_language_model(
data_dir,
arch="masked_lm",
extra_args=('--encoder-learned-pos',) if learned_pos_emb else ()
)
with tempfile.TemporaryDirectory(
"test_mlm_translation"
) as translation_dir:
create_dummy_data(translation_dir)
preprocess_translation_data(
translation_dir, extra_flags=["--joined-dictionary"]
)
# Train transformer with data_dir/checkpoint_last.pt
train_translation_model(
translation_dir,
arch="transformer_from_pretrained_xlm",
extra_flags=[
"--decoder-layers",
"1",
"--decoder-embed-dim",
"32",
"--decoder-attention-heads",
"1",
"--decoder-ffn-embed-dim",
"32",
"--encoder-layers",
"1",
"--encoder-embed-dim",
"32",
"--encoder-attention-heads",
"1",
"--encoder-ffn-embed-dim",
"32",
"--pretrained-xlm-checkpoint",
"{}/checkpoint_last.pt".format(data_dir),
"--activation-fn",
"gelu",
"--max-source-positions",
"500",
"--max-target-positions",
"500",
] + (
["--encoder-learned-pos", "--decoder-learned-pos"]
if learned_pos_emb else []
) + (['--init-encoder-only'] if encoder_only else []),
task="translation_from_pretrained_xlm",
)
def test_pretrained_masked_lm_for_translation_learned_pos_emb(self):
self._test_pretrained_masked_lm_for_translation(True, False)
def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self):
self._test_pretrained_masked_lm_for_translation(False, False)
def test_pretrained_masked_lm_for_translation_encoder_only(self):
self._test_pretrained_masked_lm_for_translation(True, True)
def train_legacy_masked_language_model(data_dir, arch, extra_args=()):
train_parser = options.get_training_parser()
# TODO: langs should be in and out right?
train_args = options.parse_args_and_arch(
train_parser,
[
"--task",
"cross_lingual_lm",
data_dir,
"--arch",
arch,
# Optimizer args
"--optimizer",
"adam",
"--lr-scheduler",
"reduce_lr_on_plateau",
"--lr-shrink",
"0.5",
"--lr",
"0.0001",
"--min-lr",
"1e-09",
# dropout, attention args
"--dropout",
"0.1",
"--attention-dropout",
"0.1",
# MLM args
"--criterion",
"legacy_masked_lm_loss",
"--masked-lm-only",
"--monolingual-langs",
"in,out",
"--num-segment",
"5",
# Transformer args: use a small transformer model for fast training
"--encoder-layers",
"1",
"--encoder-embed-dim",
"32",
"--encoder-attention-heads",
"1",
"--encoder-ffn-embed-dim",
"32",
# Other training args
"--max-tokens",
"500",
"--tokens-per-sample",
"500",
"--save-dir",
data_dir,
"--max-epoch",
"1",
"--no-progress-bar",
"--distributed-world-size",
"1",
"--dataset-impl",
"raw",
] + list(extra_args),
)
train.main(train_args)
class TestCommonOptions(unittest.TestCase):
def test_optimizers(self):
with contextlib.redirect_stdout(StringIO()):
with tempfile.TemporaryDirectory('test_optimizers') as data_dir:
# Use just a bit of data and tiny model to keep this test runtime reasonable
create_dummy_data(data_dir, num_examples=10, maxlen=5)
preprocess_translation_data(data_dir)
optimizers = ['adafactor', 'adam', 'nag', 'adagrad', 'sgd', 'adadelta']
last_checkpoint = os.path.join(data_dir, 'checkpoint_last.pt')
for optimizer in optimizers:
if os.path.exists(last_checkpoint):
os.remove(last_checkpoint)
train_translation_model(data_dir, 'lstm', [
'--required-batch-size-multiple', '1',
'--encoder-layers', '1',
'--encoder-hidden-size', '32',
'--decoder-layers', '1',
'--optimizer', optimizer,
])
generate_main(data_dir)
def create_dummy_data(data_dir, num_examples=1000, maxlen=20, alignment=False):
def _create_dummy_data(filename):
data = torch.rand(num_examples * maxlen)
data = 97 + torch.floor(26 * data).int()
with open(os.path.join(data_dir, filename), 'w') as h:
offset = 0
for _ in range(num_examples):
ex_len = random.randint(1, maxlen)
ex_str = ' '.join(map(chr, data[offset:offset+ex_len]))
print(ex_str, file=h)
offset += ex_len
def _create_dummy_alignment_data(filename_src, filename_tgt, filename):
with open(os.path.join(data_dir, filename_src), 'r') as src_f, \
open(os.path.join(data_dir, filename_tgt), 'r') as tgt_f, \
open(os.path.join(data_dir, filename), 'w') as h:
for src, tgt in zip(src_f, tgt_f):
src_len = len(src.split())
tgt_len = len(tgt.split())
avg_len = (src_len + tgt_len) // 2
num_alignments = random.randint(avg_len // 2, 2 * avg_len)
src_indices = torch.floor(torch.rand(num_alignments) * src_len).int()
tgt_indices = torch.floor(torch.rand(num_alignments) * tgt_len).int()
ex_str = ' '.join(["{}-{}".format(src, tgt) for src, tgt in zip(src_indices, tgt_indices)])
print(ex_str, file=h)
_create_dummy_data('train.in')
_create_dummy_data('train.out')
_create_dummy_data('valid.in')
_create_dummy_data('valid.out')
_create_dummy_data('test.in')
_create_dummy_data('test.out')
if alignment:
_create_dummy_alignment_data('train.in', 'train.out', 'train.align')
_create_dummy_alignment_data('valid.in', 'valid.out', 'valid.align')
_create_dummy_alignment_data('test.in', 'test.out', 'test.align')
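# Illustrative note (added for clarity): the dummy corpora above are random
# lowercase characters. 97 is the ASCII code of 'a', so 97 + floor(26 * U[0,1))
# lies in 97..122, i.e. 'a'..'z', and each line is a space-separated sequence of
# 1..maxlen such characters.
def _char_from_unit_interval(x):
    """Map a float in [0, 1) to one of the 26 lowercase ASCII letters."""
    return chr(97 + int(26 * x))

# _char_from_unit_interval(0.0) == 'a' and _char_from_unit_interval(0.999) == 'z'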
def preprocess_translation_data(data_dir, extra_flags=None):
preprocess_parser = options.get_preprocessing_parser()
preprocess_args = preprocess_parser.parse_args(
[
'--source-lang', 'in',
'--target-lang', 'out',
'--trainpref', os.path.join(data_dir, 'train'),
'--validpref', os.path.join(data_dir, 'valid'),
'--testpref', os.path.join(data_dir, 'test'),
'--thresholdtgt', '0',
'--thresholdsrc', '0',
'--destdir', data_dir,
] + (extra_flags or []),
)
preprocess.main(preprocess_args)
def train_translation_model(data_dir, arch, extra_flags=None, task='translation', run_validation=False):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
'--task', task,
data_dir,
'--save-dir', data_dir,
'--arch', arch,
'--lr', '0.05',
'--max-tokens', '500',
'--max-epoch', '1',
'--no-progress-bar',
'--distributed-world-size', '1',
'--source-lang', 'in',
'--target-lang', 'out',
] + (extra_flags or []),
)
train.main(train_args)
if run_validation:
# test validation
validate_parser = options.get_validation_parser()
validate_args = options.parse_args_and_arch(
validate_parser,
[
'--task', task,
data_dir,
'--path', os.path.join(data_dir, 'checkpoint_last.pt'),
'--valid-subset', 'valid',
'--max-tokens', '500',
'--no-progress-bar',
]
)
validate.main(validate_args)
def generate_main(data_dir, extra_flags=None):
if extra_flags is None:
extra_flags = [
'--print-alignment',
]
generate_parser = options.get_generation_parser()
generate_args = options.parse_args_and_arch(
generate_parser,
[
data_dir,
'--path', os.path.join(data_dir, 'checkpoint_last.pt'),
'--beam', '3',
'--batch-size', '64',
'--max-len-b', '5',
'--gen-subset', 'valid',
'--no-progress-bar',
] + (extra_flags or []),
)
# evaluate model in batch mode
generate.main(generate_args)
# evaluate model interactively
generate_args.buffer_size = 0
generate_args.input = '-'
generate_args.max_sentences = None
orig_stdin = sys.stdin
sys.stdin = StringIO('h e l l o\n')
interactive.main(generate_args)
sys.stdin = orig_stdin
def preprocess_lm_data(data_dir):
preprocess_parser = options.get_preprocessing_parser()
preprocess_args = preprocess_parser.parse_args([
'--only-source',
'--trainpref', os.path.join(data_dir, 'train.out'),
'--validpref', os.path.join(data_dir, 'valid.out'),
'--testpref', os.path.join(data_dir, 'test.out'),
'--destdir', data_dir,
])
preprocess.main(preprocess_args)
def train_language_model(data_dir, arch, extra_flags=None, run_validation=False):
train_parser = options.get_training_parser()
train_args = options.parse_args_and_arch(
train_parser,
[
'--task', 'language_modeling',
data_dir,
'--arch', arch,
'--optimizer', 'adam',
'--lr', '0.0001',
'--criterion', 'adaptive_loss',
'--adaptive-softmax-cutoff', '5,10,15',
'--max-tokens', '500',
'--tokens-per-sample', '500',
'--save-dir', data_dir,
'--max-epoch', '1',
'--no-progress-bar',
'--distributed-world-size', '1',
'--ddp-backend', 'no_c10d',
] + (extra_flags or []),
)
train.main(train_args)
if run_validation:
# test validation
validate_parser = options.get_validation_parser()
validate_args = options.parse_args_and_arch(
validate_parser,
[
'--task', 'language_modeling',
data_dir,
'--path', os.path.join(data_dir, 'checkpoint_last.pt'),
'--valid-subset', 'valid',
'--max-tokens', '500',
'--no-progress-bar',
]
)
validate.main(validate_args)
def eval_lm_main(data_dir):
eval_lm_parser = options.get_eval_lm_parser()
eval_lm_args = options.parse_args_and_arch(
eval_lm_parser,
[
data_dir,
'--path', os.path.join(data_dir, 'checkpoint_last.pt'),
'--no-progress-bar',
],
)
eval_lm.main(eval_lm_args)
if __name__ == '__main__':
unittest.main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_binaries.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import unittest
import torch
from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.criterions.label_smoothed_cross_entropy import LabelSmoothedCrossEntropyCriterion
import tests.utils as test_utils
class TestLabelSmoothing(unittest.TestCase):
def setUp(self):
# build dictionary
self.d = test_utils.dummy_dictionary(3)
vocab = len(self.d)
self.assertEqual(vocab, 4 + 3) # 4 special + 3 tokens
self.assertEqual(self.d.pad(), 1)
self.assertEqual(self.d.eos(), 2)
self.assertEqual(self.d.unk(), 3)
pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6 # noqa: F841
# build dataset
self.data = [
# the first batch item has padding
{'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, eos])},
{'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, w1, eos])},
]
self.sample = next(test_utils.dummy_dataloader(self.data))
# build model
self.args = argparse.Namespace()
self.args.sentence_avg = False
self.args.probs = torch.FloatTensor([
            # bos   pad   eos   unk   w1    w2    w3
[0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05],
[0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10],
[0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15],
]).unsqueeze(0).expand(2, 3, 7) # add batch dimension
self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d)
self.model = self.task.build_model(self.args)
def test_nll_loss(self):
self.args.label_smoothing = 0.1
nll_crit = CrossEntropyCriterion(self.args, self.task)
smooth_crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample)
smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample)
self.assertLess(abs(nll_loss - nll_logging_output['loss']), 1e-6)
self.assertLess(abs(nll_loss - smooth_logging_output['nll_loss']), 1e-6)
def test_padding(self):
self.args.label_smoothing = 0.1
crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
loss, _, logging_output = crit(self.model, self.sample)
def get_one_no_padding(idx):
# create a new sample with just a single batch item so that there's
# no padding
sample1 = next(test_utils.dummy_dataloader([self.data[idx]]))
args1 = copy.copy(self.args)
args1.probs = args1.probs[idx, :, :].unsqueeze(0)
model1 = self.task.build_model(args1)
loss1, _, _ = crit(model1, sample1)
return loss1
loss1 = get_one_no_padding(0)
loss2 = get_one_no_padding(1)
self.assertAlmostEqual(loss, loss1 + loss2)
def test_reduction(self):
self.args.label_smoothing = 0.1
crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
loss, _, logging_output = crit(self.model, self.sample, reduce=True)
unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False)
self.assertAlmostEqual(loss, unreduced_loss.sum())
def test_zero_eps(self):
self.args.label_smoothing = 0.0
nll_crit = CrossEntropyCriterion(self.args, self.task)
smooth_crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample)
smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample)
self.assertAlmostEqual(nll_loss, smooth_loss)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-6)
if __name__ == '__main__':
unittest.main()
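# Illustrative sketch (a simplified formulation, not the exact fairseq criterion):
# label smoothing mixes the negative log-likelihood of the gold token with the
# average negative log-probability over the vocabulary. With eps = 0 it reduces
# to plain NLL, which is what test_zero_eps verifies above.
def smoothed_loss(log_probs, target_idx, eps):
    """log_probs: log-probabilities over the vocab for a single position."""
    nll = -log_probs[target_idx]
    smooth = -sum(log_probs) / len(log_probs)
    return (1.0 - eps) * nll + eps * smooth

# smoothed_loss(lp, t, 0.0) == -lp[t], i.e. the ordinary cross-entropy term.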
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_label_smoothing.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torch
from fairseq import utils
from fairseq.data import Dictionary
from fairseq.data.language_pair_dataset import collate
from fairseq.models import (
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
)
from fairseq.tasks import FairseqTask
def dummy_dictionary(vocab_size, prefix='token_'):
d = Dictionary()
for i in range(vocab_size):
token = prefix + str(i)
d.add_symbol(token)
d.finalize(padding_factor=1) # don't add extra padding symbols
return d
def dummy_dataloader(
samples,
padding_idx=1,
eos_idx=2,
batch_size=None,
):
if batch_size is None:
batch_size = len(samples)
# add any missing data to samples
for i, sample in enumerate(samples):
if 'id' not in sample:
sample['id'] = i
# create dataloader
dataset = TestDataset(samples)
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
collate_fn=(lambda samples: collate(samples, padding_idx, eos_idx)),
)
return iter(dataloader)
def sequence_generator_setup():
# construct dummy dictionary
d = dummy_dictionary(vocab_size=2)
eos = d.eos()
w1 = 4
w2 = 5
# construct source data
src_tokens = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]])
src_lengths = torch.LongTensor([2, 2])
args = argparse.Namespace()
unk = 0.
args.beam_probs = [
# step 0:
torch.FloatTensor([
# eos w1 w2
# sentence 1:
[0.0, unk, 0.9, 0.1], # beam 1
[0.0, unk, 0.9, 0.1], # beam 2
# sentence 2:
[0.0, unk, 0.7, 0.3],
[0.0, unk, 0.7, 0.3],
]),
# step 1:
torch.FloatTensor([
# eos w1 w2 prefix
# sentence 1:
[1.0, unk, 0.0, 0.0], # w1: 0.9 (emit: w1 <eos>: 0.9*1.0)
[0.0, unk, 0.9, 0.1], # w2: 0.1
# sentence 2:
[0.25, unk, 0.35, 0.4], # w1: 0.7 (don't emit: w1 <eos>: 0.7*0.25)
[0.00, unk, 0.10, 0.9], # w2: 0.3
]),
# step 2:
torch.FloatTensor([
# eos w1 w2 prefix
# sentence 1:
[0.0, unk, 0.1, 0.9], # w2 w1: 0.1*0.9
[0.6, unk, 0.2, 0.2], # w2 w2: 0.1*0.1 (emit: w2 w2 <eos>: 0.1*0.1*0.6)
# sentence 2:
[0.60, unk, 0.4, 0.00], # w1 w2: 0.7*0.4 (emit: w1 w2 <eos>: 0.7*0.4*0.6)
[0.01, unk, 0.0, 0.99], # w2 w2: 0.3*0.9
]),
# step 3:
torch.FloatTensor([
# eos w1 w2 prefix
# sentence 1:
[1.0, unk, 0.0, 0.0], # w2 w1 w2: 0.1*0.9*0.9 (emit: w2 w1 w2 <eos>: 0.1*0.9*0.9*1.0)
[1.0, unk, 0.0, 0.0], # w2 w1 w1: 0.1*0.9*0.1 (emit: w2 w1 w1 <eos>: 0.1*0.9*0.1*1.0)
# sentence 2:
[0.1, unk, 0.5, 0.4], # w2 w2 w2: 0.3*0.9*0.99 (emit: w2 w2 w2 <eos>: 0.3*0.9*0.99*0.1)
[1.0, unk, 0.0, 0.0], # w1 w2 w1: 0.7*0.4*0.4 (emit: w1 w2 w1 <eos>: 0.7*0.4*0.4*1.0)
]),
]
task = TestTranslationTask.setup_task(args, d, d)
model = task.build_model(args)
tgt_dict = task.target_dictionary
return tgt_dict, w1, w2, src_tokens, src_lengths, model
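# Illustrative note (added for clarity, not used by the tests): the hypothesis
# probabilities in the comments above are chain products of per-step token
# probabilities, e.g. "w1 w2 <eos>" for sentence 2 has probability 0.7 * 0.4 * 0.6;
# the generator scores a hypothesis with the corresponding sum of log-probs,
# optionally normalized by length.
def hypothesis_probability(step_probs):
    """Product of per-step token probabilities along one beam hypothesis."""
    prob = 1.0
    for p in step_probs:
        prob *= p
    return prob

# hypothesis_probability([0.7, 0.4, 0.6]) == 0.168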
class TestDataset(torch.utils.data.Dataset):
def __init__(self, data):
super().__init__()
self.data = data
self.sizes = None
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
class TestTranslationTask(FairseqTask):
def __init__(self, args, src_dict, tgt_dict, model):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
self.model = model
@classmethod
def setup_task(cls, args, src_dict=None, tgt_dict=None, model=None):
return cls(args, src_dict, tgt_dict, model)
def build_model(self, args):
return TestModel.build_model(args, self)
@property
def source_dictionary(self):
return self.src_dict
@property
def target_dictionary(self):
return self.tgt_dict
class TestModel(FairseqEncoderDecoderModel):
def __init__(self, encoder, decoder):
super().__init__(encoder, decoder)
@classmethod
def build_model(cls, args, task):
encoder = TestEncoder(args, task.source_dictionary)
decoder = TestIncrementalDecoder(args, task.target_dictionary)
return cls(encoder, decoder)
class TestEncoder(FairseqEncoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
self.args = args
def forward(self, src_tokens, src_lengths=None, **kwargs):
return src_tokens
def reorder_encoder_out(self, encoder_out, new_order):
return encoder_out.index_select(0, new_order)
class TestIncrementalDecoder(FairseqIncrementalDecoder):
def __init__(self, args, dictionary):
super().__init__(dictionary)
assert hasattr(args, 'beam_probs') or hasattr(args, 'probs')
args.max_decoder_positions = getattr(args, 'max_decoder_positions', 100)
self.args = args
def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None):
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
bbsz = prev_output_tokens.size(0)
vocab = len(self.dictionary)
src_len = encoder_out.size(1)
tgt_len = prev_output_tokens.size(1)
# determine number of steps
if incremental_state is not None:
# cache step number
step = utils.get_incremental_state(self, incremental_state, 'step')
if step is None:
step = 0
utils.set_incremental_state(self, incremental_state, 'step', step + 1)
steps = [step]
else:
steps = list(range(tgt_len))
# define output in terms of raw probs
if hasattr(self.args, 'probs'):
assert self.args.probs.dim() == 3, \
'expected probs to have size bsz*steps*vocab'
probs = self.args.probs.index_select(1, torch.LongTensor(steps))
else:
probs = torch.FloatTensor(bbsz, len(steps), vocab).zero_()
for i, step in enumerate(steps):
# args.beam_probs gives the probability for every vocab element,
# starting with eos, then unknown, and then the rest of the vocab
if step < len(self.args.beam_probs):
probs[:, i, self.dictionary.eos():] = self.args.beam_probs[step]
else:
probs[:, i, self.dictionary.eos()] = 1.0
# random attention
attn = torch.rand(bbsz, tgt_len, src_len)
dev = prev_output_tokens.device
return probs.to(dev), attn.to(dev)
def get_normalized_probs(self, net_output, log_probs, _):
# the decoder returns probabilities directly
probs = net_output[0]
if log_probs:
return probs.log()
else:
return probs
def max_positions(self):
return self.args.max_decoder_positions
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import unittest
from fairseq.modules import ConvTBC
import torch.nn as nn
class TestConvTBC(unittest.TestCase):
def test_convtbc(self):
        # ConvTBC weight layout: (kernel_size, in_channels, out_channels)
conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1)
        # nn.Conv1d weight layout: (out_channels, in_channels, kernel_size)
conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)
conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2))
conv_tbc.bias.data.copy_(conv1d.bias.data)
input_tbc = torch.randn(7, 2, 4, requires_grad=True)
input1d = input_tbc.data.transpose(0, 1).transpose(1, 2)
input1d.requires_grad = True
output_tbc = conv_tbc(input_tbc)
output1d = conv1d(input1d)
self.assertAlmostEqual(output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data)
grad_tbc = torch.randn(output_tbc.size())
grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous()
output_tbc.backward(grad_tbc)
output1d.backward(grad1d)
self.assertAlmostEqual(conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data)
self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data)
self.assertAlmostEqual(input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
if __name__ == '__main__':
unittest.main()
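# Illustrative sketch (added for clarity): ConvTBC consumes activations in
# (time, batch, channel) order, whereas nn.Conv1d expects (batch, channel, time);
# the transposes in the test above implement exactly this relabelling.
import torch

def tbc_to_bct(x_tbc):
    """Convert a (T, B, C) tensor to the (B, C, T) layout used by nn.Conv1d."""
    return x_tbc.transpose(0, 1).transpose(1, 2)

def bct_to_tbc(x_bct):
    """Inverse conversion from (B, C, T) back to (T, B, C)."""
    return x_bct.transpose(1, 2).transpose(0, 1)

# tbc_to_bct(torch.zeros(7, 2, 4)).shape == torch.Size([2, 4, 7])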
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_convtbc.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import unittest
import torch
from fairseq.optim.adam import FairseqAdam
from fairseq.optim.fp16_optimizer import MemoryEfficientFP16Optimizer
class TestMemoryEfficientFP16(unittest.TestCase):
def test_load_state_dict(self):
# define simple FP16 model
model = torch.nn.Linear(5, 5).cuda().half()
params = list(model.parameters())
# initialize memory efficient FP16 optimizer
optimizer = FairseqAdam(
argparse.Namespace(
lr=[0.00001],
adam_betas='(0.9, 0.999)',
adam_eps=1e-8,
weight_decay=0.0,
),
params,
)
me_optimizer = MemoryEfficientFP16Optimizer(
argparse.Namespace(
fp16_init_scale=1,
fp16_scale_window=1,
fp16_scale_tolerance=1,
threshold_loss_scale=1,
min_loss_scale=1e-4,
),
params,
optimizer,
)
# optimizer state is created in the first step
loss = model(torch.rand(5).cuda().half()).sum()
me_optimizer.backward(loss)
me_optimizer.step()
# reload state
state = me_optimizer.state_dict()
me_optimizer.load_state_dict(state)
for k, v in me_optimizer.optimizer.state.items():
self.assertTrue(k.dtype == torch.float16)
for v_i in v.values():
if torch.is_tensor(v_i):
self.assertTrue(v_i.dtype == torch.float32)
if __name__ == '__main__':
unittest.main()
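# Illustrative sketch (a generic loss-scaling recipe, not the fairseq optimizer):
# memory-efficient FP16 training scales the loss before backward so that small
# gradients survive in half precision, then unscales the gradients before the
# parameter update.
def scaled_backward_step(loss, params, step_fn, loss_scale=1.0):
    """Backward on loss * loss_scale, unscale grads in place, then step."""
    (loss * loss_scale).backward()
    for p in params:
        if p.grad is not None:
            p.grad.data.div_(loss_scale)
    step_fn()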
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_memory_efficient_fp16.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.data import TokenBlockDataset
import tests.utils as test_utils
class TestTokenBlockDataset(unittest.TestCase):
def _build_dataset(self, data, **kwargs):
sizes = [len(x) for x in data]
underlying_ds = test_utils.TestDataset(data)
return TokenBlockDataset(underlying_ds, sizes, **kwargs)
def test_eos_break_mode(self):
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
]
ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode='eos')
self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [1])
self.assertEqual(ds[2].tolist(), [8, 7, 6, 1])
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
torch.tensor([1], dtype=torch.long),
]
ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode='eos')
self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [8, 7, 6, 1])
self.assertEqual(ds[2].tolist(), [1])
def test_block_break_mode(self):
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
torch.tensor([9, 1], dtype=torch.long),
]
ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode='none')
self.assertEqual(ds[0].tolist(), [5, 4, 3])
self.assertEqual(ds[1].tolist(), [2, 1, 8])
self.assertEqual(ds[2].tolist(), [7, 6, 1])
self.assertEqual(ds[3].tolist(), [9, 1])
def test_complete_break_mode(self):
data = [
torch.tensor([5, 4, 3, 2, 1], dtype=torch.long),
torch.tensor([8, 7, 6, 1], dtype=torch.long),
torch.tensor([9, 1], dtype=torch.long),
]
ds = self._build_dataset(data, block_size=6, pad=0, eos=1, break_mode='complete')
self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [8, 7, 6, 1, 9, 1])
data = [
torch.tensor([4, 3, 2, 1], dtype=torch.long),
torch.tensor([5, 1], dtype=torch.long),
torch.tensor([1], dtype=torch.long),
torch.tensor([6, 1], dtype=torch.long),
]
ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode='complete')
self.assertEqual(ds[0].tolist(), [4, 3, 2, 1])
self.assertEqual(ds[1].tolist(), [5, 1, 1])
self.assertEqual(ds[2].tolist(), [6, 1])
if __name__ == "__main__":
unittest.main()
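# Illustrative sketch (not the fairseq implementation): with break_mode='none' the
# dataset concatenates every sentence into one stream and slices fixed-size blocks
# from it, which is why the expected blocks above straddle sentence boundaries;
# 'eos' keeps one sentence per block and 'complete' packs whole sentences up to
# block_size.
def slice_blocks(stream, block_size):
    """Chop a flat token stream into consecutive blocks of at most block_size."""
    return [stream[i:i + block_size] for i in range(0, len(stream), block_size)]

# slice_blocks([5, 4, 3, 2, 1, 8, 7, 6, 1, 9, 1], 3)
# -> [[5, 4, 3], [2, 1, 8], [7, 6, 1], [9, 1]]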
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_token_block_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import unittest
import torch
from fairseq.sequence_generator import SequenceGenerator
import tests.utils as test_utils
class TestSequenceGeneratorBase(unittest.TestCase):
def assertHypoTokens(self, hypo, tokens):
self.assertTensorEqual(hypo['tokens'], torch.LongTensor(tokens))
def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.):
pos_scores = torch.FloatTensor(pos_probs).log()
self.assertAlmostEqual(hypo['positional_scores'], pos_scores)
self.assertEqual(pos_scores.numel(), hypo['tokens'].numel())
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel()**lenpen
self.assertLess(abs(score - hypo['score']), 1e-6)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
class TestSequenceGenerator(TestSequenceGeneratorBase):
def setUp(self):
self.tgt_dict, self.w1, self.w2, src_tokens, src_lengths, self.model = (
test_utils.sequence_generator_setup()
)
self.sample = {
'net_input': {
'src_tokens': src_tokens, 'src_lengths': src_lengths,
},
}
def test_with_normalization(self):
generator = SequenceGenerator(self.tgt_dict, beam_size=2)
hypos = generator.generate([self.model], self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6])
def test_without_normalization(self):
# Sentence 1: unchanged from the normalized case
# Sentence 2: beams swap order
generator = SequenceGenerator(self.tgt_dict, beam_size=2, normalize_scores=False)
hypos = generator.generate([self.model], self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False)
def test_with_lenpen_favoring_short_hypos(self):
lenpen = 0.6
generator = SequenceGenerator(self.tgt_dict, beam_size=2, len_penalty=lenpen)
hypos = generator.generate([self.model], self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
def test_with_lenpen_favoring_long_hypos(self):
lenpen = 5.0
generator = SequenceGenerator(self.tgt_dict, beam_size=2, len_penalty=lenpen)
hypos = generator.generate([self.model], self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos])
self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, eos])
self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen)
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen)
def test_maxlen(self):
generator = SequenceGenerator(self.tgt_dict, beam_size=2, max_len_b=2)
hypos = generator.generate([self.model], self.sample)
eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w2, w2, eos])
self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w2, w2, eos])
self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01])
class TestDiverseBeamSearch(TestSequenceGeneratorBase):
def setUp(self):
# construct dummy dictionary
d = test_utils.dummy_dictionary(vocab_size=2)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
self.eos = d.eos()
self.w1 = 4
self.w2 = 5
# construct source data
self.src_tokens = torch.LongTensor([
[self.w1, self.w2, self.eos],
[self.w1, self.w2, self.eos],
])
self.src_lengths = torch.LongTensor([2, 2])
args = argparse.Namespace()
unk = 0.
args.beam_probs = [
# step 0:
torch.FloatTensor([
# eos w1 w2
# sentence 1:
[0.0, unk, 0.9, 0.1], # beam 1
[0.0, unk, 0.9, 0.1], # beam 2
# sentence 2:
[0.0, unk, 0.7, 0.3],
[0.0, unk, 0.7, 0.3],
]),
# step 1:
torch.FloatTensor([
# eos w1 w2
# sentence 1:
[0.0, unk, 0.6, 0.4],
[0.0, unk, 0.6, 0.4],
# sentence 2:
[0.25, unk, 0.35, 0.4],
[0.25, unk, 0.35, 0.4],
]),
# step 2:
torch.FloatTensor([
# eos w1 w2
# sentence 1:
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
# sentence 2:
[0.9, unk, 0.1, 0.0],
[0.9, unk, 0.1, 0.0],
]),
]
task = test_utils.TestTranslationTask.setup_task(args, d, d)
self.model = task.build_model(args)
self.tgt_dict = task.target_dictionary
def test_diverse_beam_search(self):
generator = SequenceGenerator(
self.tgt_dict, beam_size=2, diverse_beam_groups=2, diverse_beam_strength=0.,
)
sample = {'net_input': {'src_tokens': self.src_tokens, 'src_lengths': self.src_lengths}}
hypos = generator.generate([self.model], sample)
eos, w1, w2 = self.eos, self.w1, self.w2
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
self.assertHypoScore(hypos[0][1], [0.9, 0.6, 1.0])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.9])
class TestTopPSamplingSearch(TestSequenceGeneratorBase):
def setUp(self):
# construct dummy dictionary
d = test_utils.dummy_dictionary(vocab_size=2)
self.assertEqual(d.pad(), 1)
self.assertEqual(d.eos(), 2)
self.assertEqual(d.unk(), 3)
self.eos = d.eos()
self.w1 = 4
self.w2 = 5
# construct source data
self.src_tokens = torch.LongTensor([
[self.w1, self.w2, self.eos],
[self.w1, self.w2, self.eos],
])
self.src_lengths = torch.LongTensor([2, 2])
args = argparse.Namespace()
unk = 0.
        # Minimum cumulative probability of the two most likely tokens.
self.min_top2_prob = 0.75
        # Minimum probability of the single most likely token.
self.min_top1_prob = 0.4
w1_prob = self.min_top1_prob
w2_prob = self.min_top2_prob - self.min_top1_prob
eos_prob = 1 - self.min_top2_prob
args.beam_probs = [
# step 0:
torch.FloatTensor([
# eos w1 w2
[0.0, unk, 1.0, 0.0],
[0.0, unk, 1.0, 0.0],
[0.0, unk, 1.0, 0.0],
[0.0, unk, 1.0, 0.0],
]),
# step 1:
torch.FloatTensor([
# eos w1 w2
[eos_prob, unk, w1_prob, w2_prob],
[eos_prob, unk, w1_prob, w2_prob],
[eos_prob, unk, w1_prob, w2_prob],
[eos_prob, unk, w1_prob, w2_prob],
]),
# step 2:
torch.FloatTensor([
# eos w1 w2
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
[1.0, unk, 0.0, 0.0],
]),
]
task = test_utils.TestTranslationTask.setup_task(args, d, d)
self.model = task.build_model(args)
self.tgt_dict = task.target_dictionary
def test_topp_sampling_search_low_prob(self):
        # With a sampling_topp threshold below the top-1 probability, top-p
        # sampling can only ever pick the single most likely token, so every
        # run produces the same output.
low_sampling_topp = self.min_top1_prob/2.0
generator = SequenceGenerator(
self.tgt_dict, beam_size=2, sampling=True,
sampling_topp=low_sampling_topp
)
sample = {
'net_input': {
'src_tokens': self.src_tokens,
'src_lengths': self.src_lengths
}
}
hypos = generator.generate([self.model], sample)
eos, w1 = self.eos, self.w1
# sentence 1, beam 1
self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
self.assertHypoScore(hypos[0][0], [1.0, 0.4, 1.0])
# sentence 1, beam 2
self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
self.assertHypoScore(hypos[0][1], [1.0, 0.4, 1.0])
# sentence 2, beam 1
self.assertHypoTokens(hypos[1][0], [w1, w1, eos])
self.assertHypoScore(hypos[1][0], [1.0, 0.4, 1.0])
# sentence 2, beam 2
self.assertHypoTokens(hypos[1][1], [w1, w1, eos])
self.assertHypoScore(hypos[1][1], [1.0, 0.4, 1.0])
def test_topp_sampling_search_high_prob(self):
        # With a sampling_topp threshold between the top-1 and top-2 cumulative
        # probabilities, either of the two most likely tokens may be sampled,
        # so different runs can produce different outputs.
high_sampling_topp = (self.min_top1_prob+self.min_top2_prob)/2.0
generator = SequenceGenerator(
self.tgt_dict, beam_size=2, sampling=True,
sampling_topp=high_sampling_topp
)
sample = {
'net_input': {
'src_tokens': self.src_tokens,
'src_lengths': self.src_lengths
}
}
hypos = generator.generate([self.model], sample)
eos, w1, w2 = self.eos, self.w1, self.w2
# sentence 1, beam 1
self.assertTrue(self.hypoTokens(hypos[0][0], [w1, w1, eos]) or
self.hypoTokens(hypos[0][0], [w1, w2, eos]))
self.assertTrue(self.hypoScore(hypos[0][0], [1.0, 0.4, 1.0]) or
self.hypoScore(hypos[0][0], [1.0, 0.35, 1.0]))
# sentence 1, beam 2
self.assertTrue(self.hypoTokens(hypos[0][1], [w1, w1, eos]) or
self.hypoTokens(hypos[0][1], [w1, w2, eos]))
self.assertTrue(self.hypoScore(hypos[0][1], [1.0, 0.4, 1.0]) or
self.hypoScore(hypos[0][1], [1.0, 0.35, 1.0]))
# sentence 2, beam 1
self.assertTrue(self.hypoTokens(hypos[1][0], [w1, w1, eos]) or
self.hypoTokens(hypos[1][0], [w1, w2, eos]))
self.assertTrue(self.hypoScore(hypos[1][0], [1.0, 0.4, 1.0]) or
self.hypoScore(hypos[1][0], [1.0, 0.35, 1.0]))
# sentence 2, beam 2
self.assertTrue(self.hypoTokens(hypos[1][1], [w1, w1, eos]) or
self.hypoTokens(hypos[1][1], [w1, w2, eos]))
self.assertTrue(self.hypoScore(hypos[1][1], [1.0, 0.4, 1.0]) or
self.hypoScore(hypos[1][1], [1.0, 0.35, 1.0]))
def hypoTokens(self, hypo, tokens):
return self.tensorEqual(hypo['tokens'], torch.LongTensor(tokens))
def hypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.):
pos_scores = torch.FloatTensor(pos_probs).log()
if not self.almostEqual(hypo['positional_scores'], pos_scores):
return False
if pos_scores.numel() != hypo['tokens'].numel():
return False
score = pos_scores.sum()
if normalized:
score /= pos_scores.numel() ** lenpen
return abs(score - hypo['score']) < 1e-6
def almostEqual(self, t1, t2):
return t1.size() == t2.size() and (t1 - t2).abs().max() < 1e-4
def tensorEqual(self, t1, t2):
return t1.size() == t2.size() and t1.ne(t2).long().sum() == 0
if __name__ == '__main__':
unittest.main()
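# Illustrative sketch (a simplified reference, not the fairseq sampler): top-p
# ("nucleus") sampling keeps the smallest set of highest-probability tokens whose
# cumulative mass reaches sampling_topp, renormalizes, and samples from it. With
# the step-1 probabilities above, a threshold below 0.4 keeps only w1, while a
# threshold between 0.4 and 0.75 keeps {w1, w2}, which is why the high-threshold
# test accepts either continuation.
def nucleus(probs, p):
    """Return the (index, prob) pairs kept by top-p truncation."""
    order = sorted(range(len(probs)), key=lambda i: probs[i], reverse=True)
    kept, cum = [], 0.0
    for i in order:
        kept.append((i, probs[i]))
        cum += probs[i]
        if cum >= p:
            break
    return kept

# nucleus([0.25, 0.0, 0.4, 0.35], 0.2)   -> [(2, 0.4)]
# nucleus([0.25, 0.0, 0.4, 0.35], 0.575) -> [(2, 0.4), (3, 0.35)]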
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_sequence_generator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import torch
from fairseq.data import LanguagePairDataset, TokenBlockDataset
from fairseq.data.concat_dataset import ConcatDataset
from tests.test_train import mock_dict
class TestConcatDataset(unittest.TestCase):
def setUp(self):
d = mock_dict()
tokens_1 = torch.LongTensor([1]).view(1, -1)
tokens_ds1 = TokenBlockDataset(
tokens_1,
sizes=[tokens_1.size(-1)],
block_size=1,
pad=0,
eos=1,
include_targets=False,
)
self.dataset_1 = LanguagePairDataset(
tokens_ds1, tokens_ds1.sizes, d, shuffle=False
)
tokens_2 = torch.LongTensor([2]).view(1, -1)
tokens_ds2 = TokenBlockDataset(
tokens_2,
sizes=[tokens_2.size(-1)],
block_size=1,
pad=0,
eos=1,
include_targets=False,
)
self.dataset_2 = LanguagePairDataset(
tokens_ds2, tokens_ds2.sizes, d, shuffle=False
)
def test_concat_dataset_basics(self):
d = ConcatDataset(
[self.dataset_1, self.dataset_2]
)
assert(len(d) == 2)
assert(d[0]['source'][0] == 1)
assert(d[1]['source'][0] == 2)
d = ConcatDataset(
[self.dataset_1, self.dataset_2], sample_ratios=[1, 2]
)
assert(len(d) == 3)
assert(d[0]['source'][0] == 1)
assert(d[1]['source'][0] == 2)
assert(d[2]['source'][0] == 2)
d = ConcatDataset(
[self.dataset_1, self.dataset_2], sample_ratios=[2, 1]
)
assert(len(d) == 3)
assert(d[0]['source'][0] == 1)
assert(d[1]['source'][0] == 1)
assert(d[2]['source'][0] == 2)
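# Illustrative sketch (an assumption about the ordering, added for clarity):
# sample_ratios repeat each constituent dataset, so ratios [1, 2] over two
# single-item datasets yield the index layout [ds1[0], ds2[0], ds2[0]] that the
# asserts above check.
def concat_layout(lengths, sample_ratios):
    """Return (dataset_index, item_index) pairs in ConcatDataset order."""
    layout = []
    for ds_idx, (n, r) in enumerate(zip(lengths, sample_ratios)):
        layout.extend((ds_idx, i % n) for i in range(n * r))
    return layout

# concat_layout([1, 1], [1, 2]) -> [(0, 0), (1, 0), (1, 0)]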
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_concat_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
from io import StringIO
import json
import os
import tempfile
import unittest
from . import test_binaries
class TestReproducibility(unittest.TestCase):
def _test_reproducibility(self, name, extra_flags=None):
if extra_flags is None:
extra_flags = []
with tempfile.TemporaryDirectory(name) as data_dir:
with contextlib.redirect_stdout(StringIO()):
test_binaries.create_dummy_data(data_dir)
test_binaries.preprocess_translation_data(data_dir)
            # train epochs 1 to 3 together
stdout = StringIO()
with contextlib.redirect_stdout(stdout):
test_binaries.train_translation_model(
data_dir, 'fconv_iwslt_de_en', [
'--dropout', '0.0',
'--log-format', 'json',
'--log-interval', '1',
'--max-epoch', '3',
] + extra_flags,
)
stdout = stdout.getvalue()
train_log, valid_log = map(json.loads, stdout.split('\n')[-5:-3])
            # train epochs 2 and 3, resuming from the checkpoint saved at the end of epoch 1
os.rename(
os.path.join(data_dir, 'checkpoint1.pt'),
os.path.join(data_dir, 'checkpoint_last.pt'),
)
stdout = StringIO()
with contextlib.redirect_stdout(stdout):
test_binaries.train_translation_model(
data_dir, 'fconv_iwslt_de_en', [
'--dropout', '0.0',
'--log-format', 'json',
'--log-interval', '1',
'--max-epoch', '3',
] + extra_flags,
)
stdout = stdout.getvalue()
train_res_log, valid_res_log = map(json.loads, stdout.split('\n')[-5:-3])
def cast(s):
return round(float(s), 3)
for k in ['train_loss', 'train_ppl', 'train_num_updates', 'train_gnorm']:
self.assertEqual(cast(train_log[k]), cast(train_res_log[k]))
for k in ['valid_loss', 'valid_ppl', 'valid_num_updates', 'valid_best_loss']:
self.assertEqual(cast(valid_log[k]), cast(valid_res_log[k]))
def test_reproducibility(self):
self._test_reproducibility('test_reproducibility')
def test_reproducibility_fp16(self):
self._test_reproducibility('test_reproducibility_fp16', [
'--fp16',
'--fp16-init-scale', '4096',
])
def test_reproducibility_memory_efficient_fp16(self):
self._test_reproducibility('test_reproducibility_memory_efficient_fp16', [
'--memory-efficient-fp16',
'--fp16-init-scale', '4096',
])
if __name__ == '__main__':
unittest.main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_reproducibility.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import unittest
from fairseq.data import Dictionary
from fairseq.modules import CharacterTokenEmbedder
class TestCharacterTokenEmbedder(unittest.TestCase):
def test_character_token_embedder(self):
vocab = Dictionary()
vocab.add_symbol('hello')
vocab.add_symbol('there')
embedder = CharacterTokenEmbedder(vocab, [(2, 16), (4, 32), (8, 64), (16, 2)], 64, 5, 2)
test_sents = [['hello', 'unk', 'there'], ['there'], ['hello', 'there']]
max_len = max(len(s) for s in test_sents)
input = torch.LongTensor(len(test_sents), max_len + 2).fill_(vocab.pad())
for i in range(len(test_sents)):
input[i][0] = vocab.eos()
for j in range(len(test_sents[i])):
input[i][j + 1] = vocab.index(test_sents[i][j])
input[i][j + 2] = vocab.eos()
embs = embedder(input)
assert embs.size() == (len(test_sents), max_len + 2, 5)
self.assertAlmostEqual(embs[0][0], embs[1][0])
self.assertAlmostEqual(embs[0][0], embs[0][-1])
self.assertAlmostEqual(embs[0][1], embs[2][1])
self.assertAlmostEqual(embs[0][3], embs[1][1])
embs.sum().backward()
assert embedder.char_embeddings.weight.grad is not None
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-6)
if __name__ == '__main__':
unittest.main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_character_token_embedder.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import os
import tempfile
import unittest
import shutil
import numpy as np
import torch
from torch import nn
from scripts.average_checkpoints import average_checkpoints
class ModelWithSharedParameter(nn.Module):
def __init__(self):
super(ModelWithSharedParameter, self).__init__()
self.embedding = nn.Embedding(1000, 200)
self.FC1 = nn.Linear(200, 200)
self.FC2 = nn.Linear(200, 200)
# tie weight in FC2 to FC1
self.FC2.weight = nn.Parameter(self.FC1.weight)
self.FC2.bias = nn.Parameter(self.FC1.bias)
self.relu = nn.ReLU()
def forward(self, input):
        return self.FC2(self.relu(self.FC1(input))) + self.FC1(input)
class TestAverageCheckpoints(unittest.TestCase):
def test_average_checkpoints(self):
params_0 = collections.OrderedDict(
[
('a', torch.DoubleTensor([100.0])),
('b', torch.FloatTensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])),
('c', torch.IntTensor([7, 8, 9])),
]
)
params_1 = collections.OrderedDict(
[
('a', torch.DoubleTensor([1.0])),
('b', torch.FloatTensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])),
('c', torch.IntTensor([2, 2, 2])),
]
)
params_avg = collections.OrderedDict(
[
('a', torch.DoubleTensor([50.5])),
('b', torch.FloatTensor([[1.0, 1.5, 2.0], [2.5, 3.0, 3.5]])),
# We expect truncation for integer division
('c', torch.IntTensor([4, 5, 5])),
]
)
fd_0, path_0 = tempfile.mkstemp()
fd_1, path_1 = tempfile.mkstemp()
torch.save(collections.OrderedDict([('model', params_0)]), path_0)
torch.save(collections.OrderedDict([('model', params_1)]), path_1)
output = average_checkpoints([path_0, path_1])['model']
os.close(fd_0)
os.remove(path_0)
os.close(fd_1)
os.remove(path_1)
for (k_expected, v_expected), (k_out, v_out) in zip(
params_avg.items(), output.items()):
self.assertEqual(
k_expected, k_out, 'Key mismatch - expected {} but found {}. '
'(Expected list of keys: {} vs actual list of keys: {})'.format(
k_expected, k_out, params_avg.keys(), output.keys()
)
)
np.testing.assert_allclose(
v_expected.numpy(),
v_out.numpy(),
err_msg='Tensor value mismatch for key {}'.format(k_expected)
)
def test_average_checkpoints_with_shared_parameters(self):
def _construct_model_with_shared_parameters(path, value):
m = ModelWithSharedParameter()
nn.init.constant_(m.FC1.weight, value)
torch.save(
{'model': m.state_dict()},
path
)
return m
tmpdir = tempfile.mkdtemp()
paths = []
path = os.path.join(tmpdir, "m1.pt")
m1 = _construct_model_with_shared_parameters(path, 1.0)
paths.append(path)
path = os.path.join(tmpdir, "m2.pt")
m2 = _construct_model_with_shared_parameters(path, 2.0)
paths.append(path)
path = os.path.join(tmpdir, "m3.pt")
m3 = _construct_model_with_shared_parameters(path, 3.0)
paths.append(path)
new_model = average_checkpoints(paths)
self.assertTrue(
torch.equal(
new_model['model']['embedding.weight'],
(m1.embedding.weight +
m2.embedding.weight +
m3.embedding.weight) / 3.0
)
)
self.assertTrue(
torch.equal(
new_model['model']['FC1.weight'],
(m1.FC1.weight +
m2.FC1.weight +
m3.FC1.weight) / 3.0
)
)
self.assertTrue(
torch.equal(
new_model['model']['FC2.weight'],
(m1.FC2.weight +
m2.FC2.weight +
m3.FC2.weight) / 3.0
)
)
shutil.rmtree(tmpdir)
if __name__ == '__main__':
unittest.main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_average_checkpoints.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from collections import OrderedDict
import numpy as np
import torch
from fairseq.data import LanguagePairDataset, TokenBlockDataset
from fairseq.data.multi_corpus_sampled_dataset import MultiCorpusSampledDataset
from tests.test_train import mock_dict
class TestMultiCorpusSampledDataset(unittest.TestCase):
def setUp(self):
d = mock_dict()
tokens_1 = torch.LongTensor([1]).view(1, -1)
tokens_ds1 = TokenBlockDataset(
tokens_1,
sizes=[tokens_1.size(-1)],
block_size=1,
pad=0,
eos=1,
include_targets=False,
)
self.dataset_1 = LanguagePairDataset(
tokens_ds1, tokens_ds1.sizes, d, shuffle=False
)
tokens_2 = torch.LongTensor([2]).view(1, -1)
tokens_ds2 = TokenBlockDataset(
tokens_2,
sizes=[tokens_2.size(-1)],
block_size=1,
pad=0,
eos=1,
include_targets=False,
)
self.dataset_2 = LanguagePairDataset(
tokens_ds2, tokens_ds2.sizes, d, shuffle=False
)
def _test_sample_helper(
self,
expected_sample_from_first_ds_percentage,
num_samples=1000,
sampling_func=None,
):
# To make sure test is not flaky
np.random.seed(0)
if sampling_func is None:
m = MultiCorpusSampledDataset(
OrderedDict({0: self.dataset_1, 1: self.dataset_2}),
)
else:
m = MultiCorpusSampledDataset(
OrderedDict({0: self.dataset_1, 1: self.dataset_2}),
sampling_func=sampling_func,
)
m.ordered_indices()
count_sample_from_first_dataset = 0
for _ in range(num_samples):
if m.collater([m[0], m[1]])["net_input"]["src_tokens"][0] == 1:
count_sample_from_first_dataset += 1
sample_from_first_ds_percentage = (
1.0 * count_sample_from_first_dataset / num_samples
)
self.assertLess(
abs(
sample_from_first_ds_percentage
- expected_sample_from_first_ds_percentage
),
0.01,
)
def test_multi_corpus_sampled_dataset_uniform_sample(self):
self._test_sample_helper(expected_sample_from_first_ds_percentage=0.5)
def test_multi_corpus_sampled_dataset_weighted_sample(self):
def naive_weighted_sample(weights):
def f(l):
v = np.random.random()
agg = 0
for i, weight in enumerate(weights):
agg += weight
if agg > v:
return i
return f
self._test_sample_helper(
expected_sample_from_first_ds_percentage=0.9,
sampling_func=naive_weighted_sample(weights=[0.9, 0.1]),
)
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/test_multi_corpus_sampled_dataset.py |
#!/usr/bin/env python3
import argparse
import os
import unittest
from inspect import currentframe, getframeinfo
import numpy as np
import torch
from fairseq.data import data_utils as fairseq_data_utils
from fairseq.data.dictionary import Dictionary
from fairseq.models import (
BaseFairseqModel,
FairseqDecoder,
FairseqEncoder,
FairseqEncoderDecoderModel,
FairseqEncoderModel,
FairseqModel,
)
from fairseq.tasks.fairseq_task import FairseqTask
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask
DEFAULT_TEST_VOCAB_SIZE = 100
# ///////////////////////////////////////////////////////////////////////////
# utility function to setup dummy dict/task/input
# ///////////////////////////////////////////////////////////////////////////
def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
dummy_dict = Dictionary()
# add dummy symbol to satisfy vocab size
for id, _ in enumerate(range(vocab_size)):
dummy_dict.add_symbol("{}".format(id), 1000)
return dummy_dict
class DummyTask(FairseqTask):
def __init__(self, args):
super().__init__(args)
self.dictionary = get_dummy_dictionary()
if getattr(self.args, "ctc", False):
self.dictionary.add_symbol("<ctc_blank>")
self.tgt_dict = self.dictionary
@property
def target_dictionary(self):
return self.dictionary
def get_dummy_task_and_parser():
"""
    to build a fairseq model, we need a dummy parser and task. This function
    is used to create them to facilitate model/criterion tests.
    Note: we use DummyTask as the dummy task here. You may want to use
    another task by providing a different setup function.
"""
parser = argparse.ArgumentParser(
description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
)
DummyTask.add_args(parser)
args = parser.parse_args([])
task = DummyTask.setup_task(args)
return task, parser
def get_dummy_input(T=100, D=80, B=5, K=100):
forward_input = {}
# T max sequence length
# D feature vector dimension
# B batch size
# K target dimension size
feature = torch.randn(B, T, D)
    # this (B, T, D) layout is just a convention; you can override it by
    # writing your own _prepare_forward_input function
src_lengths = torch.from_numpy(
np.random.randint(low=1, high=T, size=B).astype(np.int64)
)
src_lengths[0] = T # make sure the maximum length matches
prev_output_tokens = []
for b in range(B):
token_length = np.random.randint(low=1, high=src_lengths[b].item() + 1)
tokens = np.random.randint(low=0, high=K, size=token_length)
prev_output_tokens.append(torch.from_numpy(tokens))
prev_output_tokens = fairseq_data_utils.collate_tokens(
prev_output_tokens,
pad_idx=1,
eos_idx=2,
left_pad=False,
move_eos_to_beginning=False,
)
src_lengths, sorted_order = src_lengths.sort(descending=True)
forward_input["src_tokens"] = feature.index_select(0, sorted_order)
forward_input["src_lengths"] = src_lengths
forward_input["prev_output_tokens"] = prev_output_tokens
return forward_input
def get_dummy_encoder_output(encoder_out_shape=(100, 80, 5)):
"""
This only provides an example to generate dummy encoder output
"""
(T, B, D) = encoder_out_shape
encoder_out = {}
encoder_out["encoder_out"] = torch.from_numpy(
np.random.randn(*encoder_out_shape).astype(np.float32)
)
seq_lengths = torch.from_numpy(np.random.randint(low=1, high=T, size=B))
# some dummy mask
encoder_out["encoder_padding_mask"] = torch.arange(T).view(1, T).expand(
B, -1
) >= seq_lengths.view(B, 1).expand(-1, T)
encoder_out["encoder_padding_mask"].t_()
    # encoder_padding_mask is a (T, B) tensor, whose (t, b)-th element indicates
    # whether encoder_out[t, b] is valid (=0) or padding (=1)
return encoder_out
def _current_postion_info():
cf = currentframe()
frameinfo = " (at {}:{})".format(
os.path.basename(getframeinfo(cf).filename), cf.f_back.f_lineno
)
return frameinfo
def check_encoder_output(encoder_output, batch_size=None):
"""we expect encoder_output to be a dict with the following
key/value pairs:
- encoder_out: a Torch.Tensor
- encoder_padding_mask: a binary Torch.Tensor
"""
if not isinstance(encoder_output, dict):
msg = (
"FairseqEncoderModel.forward(...) must be a dict" + _current_postion_info()
)
return False, msg
if "encoder_out" not in encoder_output:
msg = (
"FairseqEncoderModel.forward(...) must contain encoder_out"
+ _current_postion_info()
)
return False, msg
if "encoder_padding_mask" not in encoder_output:
msg = (
"FairseqEncoderModel.forward(...) must contain encoder_padding_mask"
+ _current_postion_info()
)
return False, msg
if not isinstance(encoder_output["encoder_out"], torch.Tensor):
msg = "encoder_out must be a torch.Tensor" + _current_postion_info()
return False, msg
if encoder_output["encoder_out"].dtype != torch.float32:
msg = "encoder_out must have float32 dtype" + _current_postion_info()
return False, msg
mask = encoder_output["encoder_padding_mask"]
if mask is not None:
if not isinstance(mask, torch.Tensor):
msg = (
"encoder_padding_mask must be a torch.Tensor" + _current_postion_info()
)
return False, msg
if (
mask.dtype != torch.uint8
and (not hasattr(torch, 'bool') or mask.dtype != torch.bool)
):
msg = (
"encoder_padding_mask must have dtype of uint8"
+ _current_postion_info()
)
return False, msg
if mask.dim() != 2:
msg = (
"we expect encoder_padding_mask to be a 2-d tensor, in shape (T, B)"
+ _current_postion_info()
)
return False, msg
if batch_size is not None and mask.size(1) != batch_size:
msg = (
"we expect encoder_padding_mask to be a 2-d tensor, with size(1)"
+ " being the batch size"
+ _current_postion_info()
)
return False, msg
return True, None
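# A minimal illustrative example (not part of the original code) of an encoder
# output dict that satisfies check_encoder_output() for T=4 time steps, B=2
# sequences and feature dim 8:
#
#     encoder_output = {
#         "encoder_out": torch.zeros(4, 2, 8, dtype=torch.float32),
#         "encoder_padding_mask": torch.zeros(4, 2, dtype=torch.uint8),
#     }
#     ok, msg = check_encoder_output(encoder_output, batch_size=2)  # ok is True, msg is None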
def check_decoder_output(decoder_output):
"""we expect output from a decoder is a tuple with the following constraint:
- the first element is a torch.Tensor
- the second element can be anything (reserved for future use)
"""
if not isinstance(decoder_output, tuple):
msg = "FariseqDecoder output must be a tuple" + _current_postion_info()
return False, msg
if len(decoder_output) != 2:
msg = "FairseqDecoder output must be 2-elem tuple" + _current_postion_info()
return False, msg
if not isinstance(decoder_output[0], torch.Tensor):
msg = (
"FariseqDecoder output[0] must be a torch.Tensor" + _current_postion_info()
)
return False, msg
return True, None
# ///////////////////////////////////////////////////////////////////////////
# Base Test class
# ///////////////////////////////////////////////////////////////////////////
class TestBaseFairseqModelBase(unittest.TestCase):
"""
This class is used to facilitate writing unittest for any class derived from
`BaseFairseqModel`.
"""
@classmethod
def setUpClass(cls):
if cls is TestBaseFairseqModelBase:
raise unittest.SkipTest("Skipping test case in base")
super().setUpClass()
def setUpModel(self, model):
self.assertTrue(isinstance(model, BaseFairseqModel))
self.model = model
def setupInput(self):
pass
def setUp(self):
self.model = None
self.forward_input = None
pass
class TestFairseqEncoderDecoderModelBase(TestBaseFairseqModelBase):
"""
    base class for testing FairseqEncoderDecoderModel (formerly known as
    `FairseqModel`); test cases must be derived from this base class
"""
@classmethod
def setUpClass(cls):
if cls is TestFairseqEncoderDecoderModelBase:
raise unittest.SkipTest("Skipping test case in base")
super().setUpClass()
def setUpModel(self, model_cls, extra_args_setters=None):
self.assertTrue(
issubclass(model_cls, (FairseqEncoderDecoderModel, FairseqModel)),
msg="This class only tests for FairseqModel subclasses",
)
task, parser = get_dummy_task_and_parser()
model_cls.add_args(parser)
args = parser.parse_args([])
if extra_args_setters is not None:
for args_setter in extra_args_setters:
args_setter(args)
model = model_cls.build_model(args, task)
self.model = model
def setUpInput(self, input=None):
self.forward_input = get_dummy_input() if input is None else input
def setUp(self):
super().setUp()
def test_forward(self):
if self.model and self.forward_input:
forward_output = self.model.forward(**self.forward_input)
# for FairseqEncoderDecoderModel, forward returns a tuple of two
# elements, the first one is a Torch.Tensor
succ, msg = check_decoder_output(forward_output)
if not succ:
self.assertTrue(succ, msg=msg)
self.forward_output = forward_output
def test_get_normalized_probs(self):
if self.model and self.forward_input:
forward_output = self.model.forward(**self.forward_input)
logprob = self.model.get_normalized_probs(forward_output, log_probs=True)
prob = self.model.get_normalized_probs(forward_output, log_probs=False)
# in order for different models/criterion to play with each other
# we need to know whether the logprob or prob output is batch_first
# or not. We assume an additional attribute will be attached to logprob
            # or prob. If you find that your code fails here, simply override
# FairseqModel.get_normalized_probs, see example at
# https://fburl.com/batch_first_example
self.assertTrue(hasattr(logprob, "batch_first"))
self.assertTrue(hasattr(prob, "batch_first"))
self.assertTrue(torch.is_tensor(logprob))
self.assertTrue(torch.is_tensor(prob))
class TestFairseqEncoderModelBase(TestBaseFairseqModelBase):
"""
base class to test FairseqEncoderModel
"""
@classmethod
def setUpClass(cls):
if cls is TestFairseqEncoderModelBase:
raise unittest.SkipTest("Skipping test case in base")
super().setUpClass()
def setUpModel(self, model_cls, extra_args_setters=None):
self.assertTrue(
issubclass(model_cls, FairseqEncoderModel),
msg="This class is only used for testing FairseqEncoderModel",
)
task, parser = get_dummy_task_and_parser()
model_cls.add_args(parser)
args = parser.parse_args([])
if extra_args_setters is not None:
for args_setter in extra_args_setters:
args_setter(args)
model = model_cls.build_model(args, task)
self.model = model
def setUpInput(self, input=None):
self.forward_input = get_dummy_input() if input is None else input
# get_dummy_input() is originally for s2s, here we delete extra dict
# items, so it can be used for EncoderModel / Encoder as well
self.forward_input.pop("prev_output_tokens", None)
def setUp(self):
super().setUp()
def test_forward(self):
if self.forward_input and self.model:
bsz = self.forward_input["src_tokens"].size(0)
forward_output = self.model.forward(**self.forward_input)
# we expect forward_output to be a dict with the following
# key/value pairs:
# - encoder_out: a Torch.Tensor
# - encoder_padding_mask: a binary Torch.Tensor
succ, msg = check_encoder_output(forward_output, batch_size=bsz)
if not succ:
self.assertTrue(succ, msg=msg)
self.forward_output = forward_output
def test_get_normalized_probs(self):
if self.model and self.forward_input:
forward_output = self.model.forward(**self.forward_input)
logprob = self.model.get_normalized_probs(forward_output, log_probs=True)
prob = self.model.get_normalized_probs(forward_output, log_probs=False)
# in order for different models/criterion to play with each other
# we need to know whether the logprob or prob output is batch_first
# or not. We assume an additional attribute will be attached to logprob
            # or prob. If you find that your code fails here, simply override
# FairseqModel.get_normalized_probs, see example at
# https://fburl.com/batch_first_example
self.assertTrue(hasattr(logprob, "batch_first"))
self.assertTrue(hasattr(prob, "batch_first"))
self.assertTrue(torch.is_tensor(logprob))
self.assertTrue(torch.is_tensor(prob))
class TestFairseqEncoderBase(unittest.TestCase):
"""
base class to test FairseqEncoder
"""
@classmethod
def setUpClass(cls):
if cls is TestFairseqEncoderBase:
raise unittest.SkipTest("Skipping test case in base")
super().setUpClass()
def setUpEncoder(self, encoder):
self.assertTrue(
isinstance(encoder, FairseqEncoder),
msg="This class is only used for test FairseqEncoder",
)
self.encoder = encoder
def setUpInput(self, input=None):
self.forward_input = get_dummy_input() if input is None else input
# get_dummy_input() is originally for s2s, here we delete extra dict
# items, so it can be used for EncoderModel / Encoder as well
self.forward_input.pop("prev_output_tokens", None)
def setUp(self):
self.encoder = None
self.forward_input = None
def test_forward(self):
if self.encoder and self.forward_input:
bsz = self.forward_input["src_tokens"].size(0)
forward_output = self.encoder.forward(**self.forward_input)
succ, msg = check_encoder_output(forward_output, batch_size=bsz)
if not succ:
self.assertTrue(succ, msg=msg)
self.forward_output = forward_output
class TestFairseqDecoderBase(unittest.TestCase):
"""
base class to test FairseqDecoder
"""
@classmethod
def setUpClass(cls):
if cls is TestFairseqDecoderBase:
raise unittest.SkipTest("Skipping test case in base")
super().setUpClass()
def setUpDecoder(self, decoder):
self.assertTrue(
isinstance(decoder, FairseqDecoder),
msg="This class is only used for test FairseqDecoder",
)
self.decoder = decoder
def setUpInput(self, input=None):
self.forward_input = get_dummy_encoder_output() if input is None else input
def setUpPrevOutputTokens(self, tokens=None):
if tokens is None:
self.encoder_input = get_dummy_input()
self.prev_output_tokens = self.encoder_input["prev_output_tokens"]
else:
self.prev_output_tokens = tokens
def setUp(self):
self.decoder = None
self.forward_input = None
self.prev_output_tokens = None
def test_forward(self):
if (
self.decoder is not None
and self.forward_input is not None
and self.prev_output_tokens is not None
):
forward_output = self.decoder.forward(
prev_output_tokens=self.prev_output_tokens,
encoder_out=self.forward_input,
)
succ, msg = check_decoder_output(forward_output)
if not succ:
self.assertTrue(succ, msg=msg)
            self.forward_output = forward_output
class DummyEncoderModel(FairseqEncoderModel):
def __init__(self, encoder):
super().__init__(encoder)
@classmethod
def build_model(cls, args, task):
return cls(DummyEncoder())
def get_logits(self, net_output):
# Inverse of sigmoid to use with BinaryCrossEntropyWithLogitsCriterion as
# F.binary_cross_entropy_with_logits combines sigmoid and CE
return torch.log(
torch.div(net_output["encoder_out"], 1 - net_output["encoder_out"])
)
class DummyEncoder(FairseqEncoder):
def __init__(self):
super().__init__(None)
def forward(self, src_tokens, src_lengths):
mask, max_len = lengths_to_encoder_padding_mask(src_lengths)
return {"encoder_out": src_tokens, "encoder_padding_mask": mask}
class CrossEntropyCriterionTestBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
if cls is CrossEntropyCriterionTestBase:
raise unittest.SkipTest("Skipping base class test case")
super().setUpClass()
def setUpArgs(self):
args = argparse.Namespace()
args.sentence_avg = False
args.threshold = 0.1 # to use with BinaryCrossEntropyWithLogitsCriterion
return args
def setUp(self):
args = self.setUpArgs()
self.model = DummyEncoderModel(encoder=DummyEncoder())
self.criterion = self.criterion_cls(args=args, task=DummyTask(args))
def get_src_tokens(self, correct_prediction, aggregate):
"""
correct_prediction: True if the net_output (src_tokens) should
predict the correct target
aggregate: True if the criterion expects net_output (src_tokens)
aggregated across time axis
"""
predicted_idx = 0 if correct_prediction else 1
if aggregate:
src_tokens = torch.zeros((2, 2), dtype=torch.float)
for b in range(2):
src_tokens[b][predicted_idx] = 1.0
else:
src_tokens = torch.zeros((2, 10, 2), dtype=torch.float)
for b in range(2):
for t in range(10):
src_tokens[b][t][predicted_idx] = 1.0
return src_tokens
def get_target(self, soft_target):
if soft_target:
target = torch.zeros((2, 2), dtype=torch.float)
for b in range(2):
target[b][0] = 1.0
else:
target = torch.zeros((2, 10), dtype=torch.long)
return target
def get_test_sample(self, correct, soft_target, aggregate):
src_tokens = self.get_src_tokens(correct, aggregate)
target = self.get_target(soft_target)
L = src_tokens.size(1)
return {
"net_input": {"src_tokens": src_tokens, "src_lengths": torch.tensor([L])},
"target": target,
"ntokens": src_tokens.size(0) * src_tokens.size(1),
}
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/speech_recognition/asr_test_base.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import torch
from examples.speech_recognition.data.collaters import Seq2SeqCollater
class TestSeq2SeqCollator(unittest.TestCase):
def test_collate(self):
eos_idx = 1
pad_idx = 0
collater = Seq2SeqCollater(
feature_index=0, label_index=1, pad_index=pad_idx, eos_index=eos_idx
)
# 2 frames in the first sample and 3 frames in the second one
frames1 = np.array([[7, 8], [9, 10]])
frames2 = np.array([[1, 2], [3, 4], [5, 6]])
target1 = np.array([4, 2, 3, eos_idx])
target2 = np.array([3, 2, eos_idx])
sample1 = {"id": 0, "data": [frames1, target1]}
sample2 = {"id": 1, "data": [frames2, target2]}
batch = collater.collate([sample1, sample2])
        # collate sorts inputs by frame length before creating the batch
self.assertTensorEqual(batch["id"], torch.tensor([1, 0]))
self.assertEqual(batch["ntokens"], 7)
self.assertTensorEqual(
batch["net_input"]["src_tokens"],
torch.tensor(
[[[1, 2], [3, 4], [5, 6]], [[7, 8], [9, 10], [pad_idx, pad_idx]]]
),
)
self.assertTensorEqual(
batch["net_input"]["prev_output_tokens"],
torch.tensor([[eos_idx, 3, 2, pad_idx], [eos_idx, 4, 2, 3]]),
)
self.assertTensorEqual(batch["net_input"]["src_lengths"], torch.tensor([3, 2]))
self.assertTensorEqual(
batch["target"],
torch.tensor([[3, 2, eos_idx, pad_idx], [4, 2, 3, eos_idx]]),
)
self.assertEqual(batch["nsentences"], 2)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/speech_recognition/test_collaters.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from examples.speech_recognition.criterions.cross_entropy_acc import CrossEntropyWithAccCriterion
from .asr_test_base import CrossEntropyCriterionTestBase
class CrossEntropyWithAccCriterionTest(CrossEntropyCriterionTestBase):
def setUp(self):
self.criterion_cls = CrossEntropyWithAccCriterion
super().setUp()
def test_cross_entropy_all_correct(self):
sample = self.get_test_sample(correct=True, soft_target=False, aggregate=False)
loss, sample_size, logging_output = self.criterion(
self.model, sample, "sum", log_probs=True
)
assert logging_output["correct"] == 20
assert logging_output["total"] == 20
assert logging_output["sample_size"] == 20
assert logging_output["ntokens"] == 20
def test_cross_entropy_all_wrong(self):
sample = self.get_test_sample(correct=False, soft_target=False, aggregate=False)
loss, sample_size, logging_output = self.criterion(
self.model, sample, "sum", log_probs=True
)
assert logging_output["correct"] == 0
assert logging_output["total"] == 20
assert logging_output["sample_size"] == 20
assert logging_output["ntokens"] == 20
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/speech_recognition/test_cross_entropy.py |
#!/usr/bin/env python3
# import models/encoder/decoder to be tested
from examples.speech_recognition.models.vggtransformer import (
TransformerDecoder,
VGGTransformerEncoder,
VGGTransformerModel,
vggtransformer_1,
vggtransformer_2,
vggtransformer_base,
)
# import base test class
from .asr_test_base import (
DEFAULT_TEST_VOCAB_SIZE,
TestFairseqDecoderBase,
TestFairseqEncoderBase,
TestFairseqEncoderDecoderModelBase,
get_dummy_dictionary,
get_dummy_encoder_output,
get_dummy_input,
)
class VGGTransformerModelTest_mid(TestFairseqEncoderDecoderModelBase):
def setUp(self):
def override_config(args):
"""
            vggtransformer_1 uses 14 transformer layers, which is too
            expensive for testing purposes. For a fast turn-around test,
            reduce the number of layers to 3.
"""
args.transformer_enc_config = (
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3"
)
super().setUp()
extra_args_setter = [vggtransformer_1, override_config]
self.setUpModel(VGGTransformerModel, extra_args_setter)
self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))
class VGGTransformerModelTest_big(TestFairseqEncoderDecoderModelBase):
def setUp(self):
def override_config(args):
"""
            vggtransformer_2 uses 16 transformer layers, which is too
            expensive for testing purposes. For a fast turn-around test,
            reduce the number of layers to 3.
"""
args.transformer_enc_config = (
"((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 3"
)
super().setUp()
extra_args_setter = [vggtransformer_2, override_config]
self.setUpModel(VGGTransformerModel, extra_args_setter)
self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))
class VGGTransformerModelTest_base(TestFairseqEncoderDecoderModelBase):
def setUp(self):
def override_config(args):
"""
            vggtransformer_base uses 12 transformer layers, which is too
            expensive for testing purposes. For a fast turn-around test,
            reduce the number of layers to 3.
"""
args.transformer_enc_config = (
"((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 3"
)
super().setUp()
extra_args_setter = [vggtransformer_base, override_config]
self.setUpModel(VGGTransformerModel, extra_args_setter)
self.setUpInput(get_dummy_input(T=50, D=80, B=5, K=DEFAULT_TEST_VOCAB_SIZE))
class VGGTransformerEncoderTest(TestFairseqEncoderBase):
def setUp(self):
super().setUp()
self.setUpInput(get_dummy_input(T=50, D=80, B=5))
def test_forward(self):
print("1. test standard vggtransformer")
self.setUpEncoder(VGGTransformerEncoder(input_feat_per_channel=80))
super().test_forward()
print("2. test vggtransformer with limited right context")
self.setUpEncoder(
VGGTransformerEncoder(
input_feat_per_channel=80, transformer_context=(-1, 5)
)
)
super().test_forward()
print("3. test vggtransformer with limited left context")
self.setUpEncoder(
VGGTransformerEncoder(
input_feat_per_channel=80, transformer_context=(5, -1)
)
)
super().test_forward()
print("4. test vggtransformer with limited right context and sampling")
self.setUpEncoder(
VGGTransformerEncoder(
input_feat_per_channel=80,
transformer_context=(-1, 12),
transformer_sampling=(2, 2),
)
)
super().test_forward()
print("5. test vggtransformer with windowed context and sampling")
self.setUpEncoder(
VGGTransformerEncoder(
input_feat_per_channel=80,
transformer_context=(12, 12),
transformer_sampling=(2, 2),
)
        )
        super().test_forward()
class TransformerDecoderTest(TestFairseqDecoderBase):
def setUp(self):
super().setUp()
dict = get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE)
decoder = TransformerDecoder(dict)
dummy_encoder_output = get_dummy_encoder_output(encoder_out_shape=(50, 5, 256))
self.setUpDecoder(decoder)
self.setUpInput(dummy_encoder_output)
self.setUpPrevOutputTokens()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/speech_recognition/test_vggtransformer.py |
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/tests/speech_recognition/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
A modified version of the legacy DistributedDataParallel module that uses c10d
communication primitives. This version is simpler than the latest PyTorch
version and is useful for debugging. Notably it does not overlap gradient
communication with the backward pass, which makes it slower but more robust
than the PyTorch version.
This version also supports the *no_sync* context manager, which allows faster
training with `--update-freq`.
"""
from contextlib import contextmanager
import copy
import torch
from torch import nn
from torch.autograd import Variable
from . import distributed_utils
class LegacyDistributedDataParallel(nn.Module):
"""Implements distributed data parallelism at the module level.
A simplified version of :class:`torch.nn.parallel.DistributedDataParallel`.
This version uses a c10d process group for communication and does not
broadcast buffers.
Args:
module (~torch.nn.Module): module to be parallelized
world_size (int): number of parallel workers
process_group (optional): the c10d process group to be used for
distributed data all-reduction. If None, the default process group
will be used.
buffer_size (int, optional): number of elements to buffer before
performing all-reduce (default: 256M).
"""
def __init__(self, module, world_size, process_group=None, buffer_size=2**28):
super().__init__()
self.module = module
self.world_size = world_size
self.process_group = process_group
# Never use a bigger buffer than the number of model params
self.buffer_size = min(buffer_size, sum(p.numel() for p in module.parameters()))
self.buffer = None
# Flag used by the NCCL backend to make sure we only reduce gradients
# one time in the execution engine
self.need_reduction = False
# We can also forcibly accumulate grads locally and only do the
# all-reduce at some later time
self.accumulate_grads = False
        # For the NCCL backend, since every single NCCL call is asynchronous, we
        # directly enqueue all the NCCL reduction calls to the default CUDA
        # stream without spawning other reduction threads. This achieves the
        # best performance.
self._register_grad_hook()
def __getstate__(self):
attrs = copy.copy(self.__dict__)
return attrs
def __setstate__(self, state):
super().__setstate__(state)
self._register_grad_hook()
@contextmanager
def no_sync(self):
"""A context manager to disable gradient synchronization."""
old_accumulate_grads = self.accumulate_grads
self.accumulate_grads = True
yield
self.accumulate_grads = old_accumulate_grads
def forward(self, *inputs, **kwargs):
return self.module(*inputs, **kwargs)
def _register_grad_hook(self):
"""
This function registers the callback all-reduction function for the
NCCL backend. All gradients will be all reduced in one single step.
The NCCL reduction will directly be enqueued into the default CUDA
stream. Therefore, no synchronization is needed.
"""
def all_reduce(params):
buffer = self.buffer
nonzero_buffer = False
if len(params) > 1:
offset = 0
for p in params:
sz = p.numel()
if p.grad is not None:
buffer[offset:offset+sz].copy_(p.grad.data.view(-1))
nonzero_buffer = True
else:
buffer[offset:offset+sz].zero_()
offset += sz
else:
# we only have a single grad to all-reduce
p = params[0]
if p.grad is not None:
buffer = p.grad.data
nonzero_buffer = True
elif p.numel() <= self.buffer.numel():
buffer = buffer[:p.numel()]
buffer.zero_()
else:
buffer = torch.zeros_like(p)
if nonzero_buffer:
buffer.div_(self.world_size)
distributed_utils.all_reduce(buffer, self.process_group)
# copy all-reduced grads back into their original place
offset = 0
for p in params:
sz = p.numel()
if p.grad is not None:
p.grad.data.copy_(buffer[offset:offset+sz].view_as(p))
else:
p.grad = buffer[offset:offset+sz].view_as(p).clone()
offset += sz
def reduction_fn():
# This function only needs to be called once
if not self.need_reduction or self.accumulate_grads:
return
self.need_reduction = False
if self.buffer is None:
self.buffer = next(self.module.parameters()).new(self.buffer_size)
# All-reduce the gradients in buckets
offset = 0
buffered_params = []
for param in self.module.parameters():
if not param.requires_grad:
continue
if param.grad is None:
param.grad = torch.zeros_like(param)
if param.grad.requires_grad:
raise RuntimeError("DistributedDataParallel only works "
"with gradients that don't require "
"grad")
sz = param.numel()
if sz > self.buffer.numel():
# all-reduce big params directly
all_reduce([param])
else:
if offset + sz > self.buffer.numel():
all_reduce(buffered_params)
offset = 0
buffered_params.clear()
buffered_params.append(param)
offset += sz
if len(buffered_params) > 0:
all_reduce(buffered_params)
# Now register the reduction hook on the parameters
for p in self.module.parameters():
def allreduce_hook(*unused):
self.need_reduction = True
Variable._execution_engine.queue_callback(reduction_fn)
if p.requires_grad:
p.register_hook(allreduce_hook)
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/legacy_distributed_data_parallel.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import torch
import sys
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
def get_preprocessing_parser(default_task='translation'):
parser = get_parser('Preprocessing', default_task)
add_preprocess_args(parser)
return parser
def get_training_parser(default_task='translation'):
parser = get_parser('Trainer', default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
return parser
def get_generation_parser(interactive=False, default_task='translation'):
parser = get_parser('Generation', default_task)
add_dataset_args(parser, gen=True)
add_generation_args(parser)
if interactive:
add_interactive_args(parser)
return parser
def get_interactive_generation_parser(default_task='translation'):
return get_generation_parser(interactive=True, default_task=default_task)
def get_eval_lm_parser(default_task='language_modeling'):
parser = get_parser('Evaluate Language Model', default_task)
add_dataset_args(parser, gen=True)
add_eval_lm_args(parser)
return parser
def get_validation_parser(default_task=None):
parser = get_parser('Validation', default_task)
add_dataset_args(parser, train=True)
group = parser.add_argument_group('Evaluation')
add_common_eval_args(group)
return parser
def eval_str_list(x, type=float):
if x is None:
return None
if isinstance(x, str):
x = eval(x)
try:
return list(map(type, x))
except TypeError:
return [type(x)]
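# Behavior sketch for eval_str_list (the values below are worked examples, not
# additional tests): the string is eval'd and then coerced element-wise, so
#     eval_str_list('[0.1, 0.01]')          -> [0.1, 0.01]
#     eval_str_list('1,2,3', type=int)      -> [1, 2, 3]
#     eval_str_list('0.25')                 -> [0.25]   (a scalar is wrapped in a list)
# This is what allows options such as --lr and --update-freq to accept either a
# single value or a comma-separated schedule.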
def eval_bool(x, default=False):
if x is None:
return default
try:
return bool(eval(x))
except TypeError:
return default
def parse_args_and_arch(parser, input_args=None, parse_known=False, suppress_defaults=False):
if suppress_defaults:
# Parse args without any default values. This requires us to parse
# twice, once to identify all the necessary task/model args, and a second
# time with all defaults set to None.
args = parse_args_and_arch(
parser,
input_args=input_args,
parse_known=parse_known,
suppress_defaults=False,
)
suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
args = suppressed_parser.parse_args(input_args)
return argparse.Namespace(**{
k: v
for k, v in vars(args).items()
if v is not None
})
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, 'arch'):
model_specific_group = parser.add_argument_group(
'Model-specific configuration',
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
# Add *-specific args to parser.
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
choice = getattr(args, registry_name, None)
if choice is not None:
cls = REGISTRY['registry'][choice]
if hasattr(cls, 'add_args'):
cls.add_args(parser)
if hasattr(args, 'task'):
from fairseq.tasks import TASK_REGISTRY
TASK_REGISTRY[args.task].add_args(parser)
if getattr(args, 'use_bmuf', False):
# hack to support extra args for block distributed data parallelism
from fairseq.optim.bmuf import FairseqBMUF
FairseqBMUF.add_args(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Post-process args.
if hasattr(args, 'max_sentences_valid') and args.max_sentences_valid is None:
args.max_sentences_valid = args.max_sentences
if hasattr(args, 'max_tokens_valid') and args.max_tokens_valid is None:
args.max_tokens_valid = args.max_tokens
if getattr(args, 'memory_efficient_fp16', False):
args.fp16 = True
# Apply architecture configuration.
if hasattr(args, 'arch'):
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args
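# Typical call pattern (a sketch of how fairseq entry points use this module;
# the surrounding training script, e.g. train.py, is assumed and not shown):
#
#     parser = get_training_parser()
#     args = parse_args_and_arch(parser)
#
# The first parse above only resolves the --task/--arch/registry choices so
# that their class-specific arguments can be registered before the final parse.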
def get_parser(desc, default_task='translation'):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument('--user-dir', default=None)
usr_args, _ = usr_parser.parse_known_args()
utils.import_user_module(usr_args)
parser = argparse.ArgumentParser(allow_abbrev=False)
# fmt: off
parser.add_argument('--no-progress-bar', action='store_true', help='disable progress bar')
parser.add_argument('--log-interval', type=int, default=1000, metavar='N',
help='log progress every N batches (when progress bar is disabled)')
parser.add_argument('--log-format', default=None, help='log format to use',
choices=['json', 'none', 'simple', 'tqdm'])
parser.add_argument('--tensorboard-logdir', metavar='DIR', default='',
help='path to save logs for tensorboard, should match --logdir '
'of running tensorboard (default: no tensorboard logging)')
parser.add_argument('--seed', default=1, type=int, metavar='N',
help='pseudo random number generator seed')
parser.add_argument('--cpu', action='store_true', help='use CPU instead of CUDA')
parser.add_argument('--fp16', action='store_true', help='use FP16')
parser.add_argument('--memory-efficient-fp16', action='store_true',
help='use a memory-efficient version of FP16 training; implies --fp16')
parser.add_argument('--fp16-init-scale', default=2 ** 7, type=int,
help='default FP16 loss scale')
parser.add_argument('--fp16-scale-window', type=int,
help='number of updates before increasing loss scale')
parser.add_argument('--fp16-scale-tolerance', default=0.0, type=float,
help='pct of updates that can overflow before decreasing the loss scale')
parser.add_argument('--min-loss-scale', default=1e-4, type=float, metavar='D',
help='minimum FP16 loss scale, after which training is stopped')
parser.add_argument('--threshold-loss-scale', type=float,
help='threshold FP16 loss scale from below')
parser.add_argument('--user-dir', default=None,
help='path to a python module containing custom extensions (tasks and/or architectures)')
parser.add_argument('--empty-cache-freq', default=0, type=int,
help='how often to clear the PyTorch CUDA cache (0 to disable)')
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
parser.add_argument(
'--' + registry_name.replace('_', '-'),
default=REGISTRY['default'],
choices=REGISTRY['registry'].keys(),
)
# Task definitions can be found under fairseq/tasks/
from fairseq.tasks import TASK_REGISTRY
parser.add_argument('--task', metavar='TASK', default=default_task,
choices=TASK_REGISTRY.keys(),
help='task')
# fmt: on
return parser
def add_preprocess_args(parser):
group = parser.add_argument_group('Preprocessing')
# fmt: off
group.add_argument("-s", "--source-lang", default=None, metavar="SRC",
help="source language")
group.add_argument("-t", "--target-lang", default=None, metavar="TARGET",
help="target language")
group.add_argument("--trainpref", metavar="FP", default=None,
help="train file prefix")
group.add_argument("--validpref", metavar="FP", default=None,
help="comma separated, valid file prefixes")
group.add_argument("--testpref", metavar="FP", default=None,
help="comma separated, test file prefixes")
group.add_argument("--align-suffix", metavar="FP", default=None,
help="alignment file suffix")
group.add_argument("--destdir", metavar="DIR", default="data-bin",
help="destination dir")
group.add_argument("--thresholdtgt", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--thresholdsrc", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--tgtdict", metavar="FP",
help="reuse given target dictionary")
group.add_argument("--srcdict", metavar="FP",
help="reuse given source dictionary")
group.add_argument("--nwordstgt", metavar="N", default=-1, type=int,
help="number of target words to retain")
group.add_argument("--nwordssrc", metavar="N", default=-1, type=int,
help="number of source words to retain")
group.add_argument("--alignfile", metavar="ALIGN", default=None,
help="an alignment file (optional)")
parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',
choices=get_available_dataset_impl(),
help='output dataset implementation')
group.add_argument("--joined-dictionary", action="store_true",
help="Generate joined dictionary")
group.add_argument("--only-source", action="store_true",
help="Only process the source language")
group.add_argument("--padding-factor", metavar="N", default=8, type=int,
help="Pad dictionary size to be multiple of N")
group.add_argument("--workers", metavar="N", default=1, type=int,
help="number of parallel workers")
# fmt: on
return parser
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group('Dataset and data loading')
# fmt: off
group.add_argument('--num-workers', default=1, type=int, metavar='N',
help='how many subprocesses to use for data loading')
group.add_argument('--skip-invalid-size-inputs-valid-test', action='store_true',
help='ignore too long or too short lines in valid and test set')
group.add_argument('--max-tokens', type=int, metavar='N',
help='maximum number of tokens in a batch')
group.add_argument('--max-sentences', '--batch-size', type=int, metavar='N',
help='maximum number of sentences in a batch')
group.add_argument('--required-batch-size-multiple', default=8, type=int, metavar='N',
help='batch size will be a multiplier of this value')
parser.add_argument('--dataset-impl', metavar='FORMAT',
choices=get_available_dataset_impl(),
help='output dataset implementation')
if train:
group.add_argument('--train-subset', default='train', metavar='SPLIT',
choices=['train', 'valid', 'test'],
help='data subset to use for training (train, valid, test)')
group.add_argument('--valid-subset', default='valid', metavar='SPLIT',
help='comma separated list of data subsets to use for validation'
' (train, valid, valid1, test, test1)')
group.add_argument('--validate-interval', type=int, default=1, metavar='N',
help='validate every N epochs')
group.add_argument('--fixed-validation-seed', default=None, type=int, metavar='N',
help='specified random seed for validation')
group.add_argument('--disable-validation', action='store_true',
help='disable validation')
group.add_argument('--max-tokens-valid', type=int, metavar='N',
help='maximum number of tokens in a validation batch'
' (defaults to --max-tokens)')
group.add_argument('--max-sentences-valid', type=int, metavar='N',
help='maximum number of sentences in a validation batch'
' (defaults to --max-sentences)')
group.add_argument('--curriculum', default=0, type=int, metavar='N',
help='don\'t shuffle batches for first N epochs')
group.add_argument('--reload-dataset-per-epoch', action='store_true',
help='reload dataset per epoch')
if gen:
group.add_argument('--gen-subset', default='test', metavar='SPLIT',
help='data subset to generate (train, valid, test)')
group.add_argument('--num-shards', default=1, type=int, metavar='N',
help='shard generation over N shards')
group.add_argument('--shard-id', default=0, type=int, metavar='ID',
help='id of the shard to generate (id < num_shards)')
# fmt: on
return group
def add_distributed_training_args(parser):
group = parser.add_argument_group('Distributed training')
# fmt: off
group.add_argument('--distributed-world-size', type=int, metavar='N',
default=max(1, torch.cuda.device_count()),
help='total number of GPUs across all nodes (default: all visible GPUs)')
group.add_argument('--distributed-rank', default=0, type=int,
help='rank of the current worker')
group.add_argument('--distributed-backend', default='nccl', type=str,
help='distributed backend')
group.add_argument('--distributed-init-method', default=None, type=str,
help='typically tcp://hostname:port that will be used to '
                            'establish initial connection')
group.add_argument('--distributed-port', default=-1, type=int,
help='port number (not required if using --distributed-init-method)')
group.add_argument('--device-id', '--local_rank', default=0, type=int,
help='which GPU to use (usually configured automatically)')
group.add_argument('--distributed-no-spawn', action='store_true',
help='do not spawn multiple processes even if multiple GPUs are visible')
group.add_argument('--ddp-backend', default='c10d', type=str,
choices=['c10d', 'no_c10d'],
help='DistributedDataParallel backend')
group.add_argument('--bucket-cap-mb', default=25, type=int, metavar='MB',
help='bucket size for reduction')
group.add_argument('--fix-batches-to-gpus', action='store_true',
help='don\'t shuffle batches between GPUs; this reduces overall '
'randomness and may affect precision but avoids the cost of '
're-reading the data')
group.add_argument('--find-unused-parameters', default=False, action='store_true',
help='disable unused parameter detection (not applicable to '
                            'no_c10d ddp-backend)')
group.add_argument('--fast-stat-sync', default=False, action='store_true',
help='Enable fast sync of stats between nodes, this hardcodes to '
'sync only some default stats from logging_output.')
# fmt: on
return group
def add_optimization_args(parser):
group = parser.add_argument_group('Optimization')
# fmt: off
group.add_argument('--max-epoch', '--me', default=0, type=int, metavar='N',
help='force stop training at specified epoch')
group.add_argument('--max-update', '--mu', default=0, type=int, metavar='N',
help='force stop training at specified update')
group.add_argument('--clip-norm', default=25, type=float, metavar='NORM',
help='clip threshold of gradients')
group.add_argument('--sentence-avg', action='store_true',
help='normalize gradients by the number of sentences in a batch'
' (default is to normalize by number of tokens)')
group.add_argument('--update-freq', default='1', metavar='N1,N2,...,N_K',
type=lambda uf: eval_str_list(uf, type=int),
help='update parameters every N_i batches, when in epoch i')
group.add_argument('--lr', '--learning-rate', default='0.25', type=eval_str_list,
metavar='LR_1,LR_2,...,LR_N',
help='learning rate for the first N epochs; all epochs >N using LR_N'
' (note: this may be interpreted differently depending on --lr-scheduler)')
group.add_argument('--min-lr', default=-1, type=float, metavar='LR',
help='stop training when the learning rate reaches this minimum')
group.add_argument('--use-bmuf', default=False, action='store_true',
help='specify global optimizer for syncing models on different GPUs/shards')
# fmt: on
return group
def add_checkpoint_args(parser):
group = parser.add_argument_group('Checkpointing')
# fmt: off
group.add_argument('--save-dir', metavar='DIR', default='checkpoints',
help='path to save checkpoints')
group.add_argument('--restore-file', default='checkpoint_last.pt',
help='filename from which to load checkpoint '
                            '(default: <save-dir>/checkpoint_last.pt)')
group.add_argument('--reset-dataloader', action='store_true',
help='if set, does not reload dataloader state from the checkpoint')
group.add_argument('--reset-lr-scheduler', action='store_true',
help='if set, does not load lr scheduler state from the checkpoint')
group.add_argument('--reset-meters', action='store_true',
help='if set, does not load meters from the checkpoint')
group.add_argument('--reset-optimizer', action='store_true',
help='if set, does not load optimizer state from the checkpoint')
group.add_argument('--optimizer-overrides', default="{}", type=str, metavar='DICT',
help='a dictionary used to override optimizer args when loading a checkpoint')
group.add_argument('--save-interval', type=int, default=1, metavar='N',
help='save a checkpoint every N epochs')
group.add_argument('--save-interval-updates', type=int, default=0, metavar='N',
help='save a checkpoint (and validate) every N updates')
group.add_argument('--keep-interval-updates', type=int, default=-1, metavar='N',
help='keep the last N checkpoints saved with --save-interval-updates')
group.add_argument('--keep-last-epochs', type=int, default=-1, metavar='N',
help='keep last N epoch checkpoints')
group.add_argument('--no-save', action='store_true',
help='don\'t save models or checkpoints')
group.add_argument('--no-epoch-checkpoints', action='store_true',
help='only store last and best checkpoints')
group.add_argument('--no-last-checkpoints', action='store_true',
help='don\'t store last checkpoints')
group.add_argument('--no-save-optimizer-state', action='store_true',
help='don\'t save optimizer-state as part of checkpoint')
group.add_argument('--best-checkpoint-metric', type=str, default='loss',
help='metric to use for saving "best" checkpoints')
group.add_argument('--maximize-best-checkpoint-metric', action='store_true',
help='select the largest metric value for saving "best" checkpoints')
# fmt: on
return group
def add_common_eval_args(group):
# fmt: off
group.add_argument('--path', metavar='FILE',
help='path(s) to model file(s), colon separated')
group.add_argument('--remove-bpe', nargs='?', const='@@ ', default=None,
help='remove BPE tokens before scoring (can be set to sentencepiece)')
group.add_argument('--quiet', action='store_true',
help='only print final scores')
group.add_argument('--model-overrides', default="{}", type=str, metavar='DICT',
help='a dictionary used to override model args at generation '
'that were used during model training')
group.add_argument('--results-path', metavar='RESDIR', type=str, default=None,
                       help='path to save eval results (optional)')
# fmt: on
def add_eval_lm_args(parser):
group = parser.add_argument_group('LM Evaluation')
add_common_eval_args(group)
# fmt: off
group.add_argument('--output-word-probs', action='store_true',
help='if set, outputs words and their predicted log probabilities to standard output')
group.add_argument('--output-word-stats', action='store_true',
help='if set, outputs word statistics such as word count, average probability, etc')
group.add_argument('--context-window', default=0, type=int, metavar='N',
help='ensures that every evaluated token has access to a context of at least this size,'
' if possible')
group.add_argument('--softmax-batch', default=sys.maxsize, type=int, metavar='N',
help='if BxT is more than this, will batch the softmax over vocab to this amount of tokens'
' in order to fit into GPU memory')
# fmt: on
def add_generation_args(parser):
group = parser.add_argument_group('Generation')
add_common_eval_args(group)
# fmt: off
group.add_argument('--beam', default=5, type=int, metavar='N',
help='beam size')
group.add_argument('--nbest', default=1, type=int, metavar='N',
help='number of hypotheses to output')
group.add_argument('--max-len-a', default=0, type=float, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
group.add_argument('--max-len-b', default=200, type=int, metavar='N',
help=('generate sequences of maximum length ax + b, '
'where x is the source length'))
group.add_argument('--min-len', default=1, type=float, metavar='N',
help=('minimum generation length'))
group.add_argument('--match-source-len', default=False, action='store_true',
help=('generations should match the source length'))
group.add_argument('--no-early-stop', action='store_true',
help='deprecated')
group.add_argument('--unnormalized', action='store_true',
help='compare unnormalized hypothesis scores')
group.add_argument('--no-beamable-mm', action='store_true',
help='don\'t use BeamableMM in attention layers')
group.add_argument('--lenpen', default=1, type=float,
help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
group.add_argument('--unkpen', default=0, type=float,
help='unknown word penalty: <0 produces more unks, >0 produces fewer')
group.add_argument('--replace-unk', nargs='?', const=True, default=None,
help='perform unknown replacement (optionally with alignment dictionary)')
group.add_argument('--sacrebleu', action='store_true',
help='score with sacrebleu')
group.add_argument('--score-reference', action='store_true',
help='just score the reference translation')
group.add_argument('--prefix-size', default=0, type=int, metavar='PS',
help='initialize generation by target prefix of given length')
group.add_argument('--no-repeat-ngram-size', default=0, type=int, metavar='N',
help='ngram blocking such that this size ngram cannot be repeated in the generation')
group.add_argument('--sampling', action='store_true',
help='sample hypotheses instead of using beam search')
group.add_argument('--sampling-topk', default=-1, type=int, metavar='PS',
help='sample from top K likely next words instead of all words')
group.add_argument('--sampling-topp', default=-1.0, type=float, metavar='PS',
help='sample from the smallest set whose cumulative probability mass exceeds p for next words')
group.add_argument('--temperature', default=1., type=float, metavar='N',
help='temperature for generation')
group.add_argument('--diverse-beam-groups', default=-1, type=int, metavar='N',
help='number of groups for Diverse Beam Search')
group.add_argument('--diverse-beam-strength', default=0.5, type=float, metavar='N',
help='strength of diversity penalty for Diverse Beam Search')
group.add_argument('--print-alignment', action='store_true',
help='if set, uses attention feedback to compute and print alignment to source tokens')
group.add_argument('--print-step', action='store_true')
# arguments for iterative refinement generator
group.add_argument('--iter-decode-eos-penalty', default=0.0, type=float, metavar='N',
                       help='if > 0.0, it penalizes early stopping in decoding.')
group.add_argument('--iter-decode-max-iter', default=10, type=int, metavar='N',
help='maximum iterations for iterative refinement.')
group.add_argument('--iter-decode-force-max-iter', action='store_true',
                       help='if set, run exactly the maximum number of iterations without early stopping')
group.add_argument('--retain-iter-history', action='store_true',
help='if set, decoding returns the whole history of iterative refinement')
# special decoding format for advanced decoding.
group.add_argument('--decoding-format', default=None, type=str, choices=['unigram', 'ensemble', 'vote', 'dp', 'bs'])
# fmt: on
return group
def add_interactive_args(parser):
group = parser.add_argument_group('Interactive')
# fmt: off
group.add_argument('--buffer-size', default=0, type=int, metavar='N',
help='read this many sentences into a buffer before processing them')
group.add_argument('--input', default='-', type=str, metavar='FILE',
help='file to read from; use - for stdin')
# fmt: on
def add_model_args(parser):
group = parser.add_argument_group('Model configuration')
# fmt: off
# Model definitions can be found under fairseq/models/
#
# The model architecture can be specified in several ways.
# In increasing order of priority:
# 1) model defaults (lowest priority)
# 2) --arch argument
# 3) --encoder/decoder-* arguments (highest priority)
from fairseq.models import ARCH_MODEL_REGISTRY
group.add_argument('--arch', '-a', default='fconv', metavar='ARCH', required=True,
choices=ARCH_MODEL_REGISTRY.keys(),
help='Model Architecture')
# fmt: on
return group
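# Editor's note: a hedged sketch of how the argument helpers above are typically combined.
# It relies only on functions defined in this file plus the module-level argparse import;
# the flag values passed to parse_args are made up for illustration.
def _example_build_generation_parser():
    """Build a standalone parser with the generation flags and parse a toy command line."""
    parser = argparse.ArgumentParser()
    add_generation_args(parser)
    args = parser.parse_args(['--beam', '10', '--nbest', '2'])
    return args.beam, args.nbest  # -> (10, 2)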
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/options.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import namedtuple
import torch
from fairseq import utils
DecoderOut = namedtuple('IterativeRefinementDecoderOut', [
'output_tokens',
'output_scores',
'attn',
'step',
'max_step',
'history'
])
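# Editor's note: a minimal, hedged illustration of how the DecoderOut record above is
# threaded through the refinement loop further down; the tensor shapes are placeholders.
def _example_decoder_out_usage():
    """DecoderOut is an immutable record that each refinement step updates via _replace."""
    state = DecoderOut(
        output_tokens=torch.zeros(2, 3, dtype=torch.long),
        output_scores=torch.zeros(2, 3),
        attn=None,
        step=0,
        max_step=10,
        history=None,
    )
    next_state = state._replace(step=state.step + 1)  # returns a new record, original untouched
    return next_state.step  # -> 1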
class IterativeRefinementGenerator(object):
def __init__(
self,
tgt_dict,
models=None,
eos_penalty=0.0,
max_iter=10,
max_ratio=2,
decoding_format=None,
retain_dropout=False,
adaptive=True,
retain_history=False,
):
"""
Generates translations based on iterative refinement.
Args:
tgt_dict: target dictionary
            eos_penalty: if > 0.0, it penalizes early stopping in decoding
max_iter: maximum number of refinement iterations
max_ratio: generate sequences of maximum length ax, where x is the source length
decoding_format: decoding mode in {'unigram', 'ensemble', 'vote', 'dp', 'bs'}
            retain_dropout: retain dropout during inference
            adaptive: stop decoding early once the output stops changing
"""
self.bos = tgt_dict.bos()
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.eos_penalty = eos_penalty
self.max_iter = max_iter
self.max_ratio = max_ratio
self.decoding_format = decoding_format
self.retain_dropout = retain_dropout
self.retain_history = retain_history
self.adaptive = adaptive
self.models = models
def generate_batched_itr(
self,
data_itr,
maxlen_a=None,
maxlen_b=None,
cuda=False,
timer=None,
prefix_size=0,
):
"""Iterate over a batched dataset and yield individual translations.
Args:
maxlen_a/b: generate sequences of maximum length ax + b,
where x is the source sentence length.
cuda: use GPU for generation
timer: StopwatchMeter for timing generations.
"""
for sample in data_itr:
if "net_input" not in sample:
continue
if timer is not None:
timer.start()
with torch.no_grad():
hypos = self.generate(
self.models,
sample,
prefix_tokens=sample["target"][:, :prefix_size]
if prefix_size > 0
else None,
)
if timer is not None:
timer.stop(sample["ntokens"])
for i, id in enumerate(sample["id"]):
# remove padding
src = utils.strip_pad(sample["net_input"]["src_tokens"][i, :], self.pad)
ref = utils.strip_pad(sample["target"][i, :], self.pad)
yield id, src, ref, hypos[i]
@torch.no_grad()
def generate(self, models, sample, prefix_tokens=None):
from fairseq.models.levenshtein_transformer import LevenshteinTransformerModel
from fairseq.models.nonautoregressive_ensembles import EnsembleLevT
if len(models) == 1:
            # Keep this path for NAT models that do not yet have ensemble wrappers; remove it once they all do.
model = models[0]
elif isinstance(models[0], LevenshteinTransformerModel):
model = EnsembleLevT(models)
else:
raise NotImplementedError
if not self.retain_dropout:
model.eval()
# TODO: better encoder inputs?
src_tokens = sample["net_input"]["src_tokens"]
src_lengths = sample["net_input"]["src_lengths"]
bsz, src_len = src_tokens.size()
sent_idxs = torch.arange(bsz)
# encoding
encoder_out = model.forward_encoder([src_tokens, src_lengths])
# initialize buffers (very model specific, with length prediction or not)
prev_decoder_out = model.initialize_output_tokens(encoder_out, src_tokens)
prev_output_tokens = prev_decoder_out.output_tokens.clone()
if self.retain_history:
prev_decoder_out = prev_decoder_out._replace(history=[prev_output_tokens])
finalized = [[] for _ in range(bsz)]
def is_a_loop(x, y, s, a):
b, l_x, l_y = x.size(0), x.size(1), y.size(1)
if l_x > l_y:
y = torch.cat([y, x.new_zeros(b, l_x - l_y).fill_(self.pad)], 1)
s = torch.cat([s, s.new_zeros(b, l_x - l_y)], 1)
if a is not None:
a = torch.cat([a, a.new_zeros(b, l_x - l_y, a.size(2))], 1)
elif l_x < l_y:
x = torch.cat([x, y.new_zeros(b, l_y - l_x).fill_(self.pad)], 1)
return (x == y).all(1), y, s, a
def finalized_hypos(step, prev_out_token, prev_out_score, prev_out_attn):
cutoff = prev_out_token.ne(self.pad)
tokens = prev_out_token[cutoff]
if prev_out_score is None:
scores, score = None, None
else:
scores = prev_out_score[cutoff]
score = scores.mean()
if prev_out_attn is None:
hypo_attn, alignment = None, None
else:
hypo_attn = prev_out_attn[cutoff]
alignment = hypo_attn.max(dim=1)[1]
return {
"steps": step,
"tokens": tokens,
"positional_scores": scores,
"score": score,
"hypo_attn": hypo_attn,
"alignment": alignment,
}
for step in range(self.max_iter + 1):
decoder_options = {
"eos_penalty": self.eos_penalty,
"max_ratio": self.max_ratio,
"decoding_format": self.decoding_format,
}
prev_decoder_out = prev_decoder_out._replace(
step=step,
max_step=self.max_iter + 1,
)
decoder_out = model.forward_decoder(
prev_decoder_out, encoder_out, **decoder_options
)
if self.adaptive:
# terminate if there is a loop
terminated, out_tokens, out_scores, out_attn = is_a_loop(
prev_output_tokens, decoder_out.output_tokens, decoder_out.output_scores, decoder_out.attn
)
decoder_out = decoder_out._replace(
output_tokens=out_tokens,
output_scores=out_scores,
attn=out_attn,
)
else:
terminated = decoder_out.output_tokens.new_zeros(decoder_out.output_tokens.size(0)).bool()
if step == self.max_iter: # reach last iteration, terminate
terminated.fill_(1)
# collect finalized sentences
finalized_idxs = sent_idxs[terminated]
finalized_tokens = decoder_out.output_tokens[terminated]
finalized_scores = decoder_out.output_scores[terminated]
finalized_attn = (
None if decoder_out.attn is None else decoder_out.attn[terminated]
)
if self.retain_history:
finalized_history_tokens = [h[terminated] for h in decoder_out.history]
for i in range(finalized_idxs.size(0)):
finalized[finalized_idxs[i]] = [
finalized_hypos(
step,
finalized_tokens[i],
finalized_scores[i],
None if finalized_attn is None else finalized_attn[i],
)
]
if self.retain_history:
finalized[finalized_idxs[i]][0]['history'] = []
for j in range(len(finalized_history_tokens)):
finalized[finalized_idxs[i]][0]['history'].append(
finalized_hypos(
step,
finalized_history_tokens[j][i],
None, None
)
)
# check if all terminated
if terminated.sum() == terminated.size(0):
break
# for next step
not_terminated = ~terminated
prev_decoder_out = decoder_out._replace(
output_tokens=decoder_out.output_tokens[not_terminated],
output_scores=decoder_out.output_scores[not_terminated],
attn=decoder_out.attn[not_terminated] if decoder_out.attn is not None else None,
history=[h[not_terminated] for h in decoder_out.history] if decoder_out.history is not None else None
)
encoder_out = model.encoder.reorder_encoder_out(encoder_out, not_terminated.nonzero().squeeze())
sent_idxs = sent_idxs[not_terminated]
prev_output_tokens = prev_decoder_out.output_tokens.clone()
return finalized
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/iterative_refinement_generator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import time
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
if self.count <= 0: self.count = 1
self.avg = self.sum / self.count
class TimeMeter(object):
"""Computes the average occurrence of some event per second"""
def __init__(self, init=0):
self.reset(init)
def reset(self, init=0):
self.init = init
self.start = time.time()
self.n = 0
def update(self, val=1):
self.n += val
@property
def avg(self):
return self.n / self.elapsed_time
@property
def elapsed_time(self):
return self.init + (time.time() - self.start)
class StopwatchMeter(object):
"""Computes the sum/avg duration of some event in seconds"""
def __init__(self):
self.reset()
def start(self):
self.start_time = time.time()
def stop(self, n=1):
if self.start_time is not None:
delta = time.time() - self.start_time
self.sum += delta
self.n += n
self.start_time = None
def reset(self):
self.sum = 0
self.n = 0
self.start_time = None
@property
def avg(self):
return self.sum / self.n
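# Editor's note: a small, hedged usage sketch of the meters defined above, the way a
# training loop might use them; the loss values and token count are invented.
def _example_meter_usage():
    """Track a batch-size-weighted average loss and a tokens-per-second rate."""
    loss_meter = AverageMeter()
    for loss, batch_size in [(0.9, 32), (0.7, 32), (0.5, 64)]:
        loss_meter.update(loss, n=batch_size)
    # AverageMeter.avg is weighted by n: (0.9*32 + 0.7*32 + 0.5*64) / 128 = 0.65
    wps_meter = TimeMeter()
    wps_meter.update(1024)  # pretend 1024 tokens were processed since reset()
    return loss_meter.avg, wps_meter.avg  # (average loss, tokens per second)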
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/meters.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
REGISTRIES = {}
def setup_registry(
registry_name: str,
base_class=None,
default=None,
):
assert registry_name.startswith('--')
registry_name = registry_name[2:].replace('-', '_')
REGISTRY = {}
REGISTRY_CLASS_NAMES = set()
# maintain a registry of all registries
if registry_name in REGISTRIES:
return # registry already exists
REGISTRIES[registry_name] = {
'registry': REGISTRY,
'default': default,
}
def build_x(args, *extra_args, **extra_kwargs):
choice = getattr(args, registry_name, None)
if choice is None:
return None
cls = REGISTRY[choice]
if hasattr(cls, 'build_' + registry_name):
builder = getattr(cls, 'build_' + registry_name)
else:
builder = cls
set_defaults(args, cls)
return builder(args, *extra_args, **extra_kwargs)
def register_x(name):
def register_x_cls(cls):
if name in REGISTRY:
raise ValueError('Cannot register duplicate {} ({})'.format(registry_name, name))
if cls.__name__ in REGISTRY_CLASS_NAMES:
raise ValueError(
'Cannot register {} with duplicate class name ({})'.format(
registry_name, cls.__name__,
)
)
if base_class is not None and not issubclass(cls, base_class):
raise ValueError('{} must extend {}'.format(cls.__name__, base_class.__name__))
REGISTRY[name] = cls
REGISTRY_CLASS_NAMES.add(cls.__name__)
return cls
return register_x_cls
return build_x, register_x, REGISTRY
def set_defaults(args, cls):
"""Helper to set default arguments based on *add_args*."""
if not hasattr(cls, 'add_args'):
return
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS, allow_abbrev=False)
cls.add_args(parser)
# copied from argparse.py:
defaults = argparse.Namespace()
for action in parser._actions:
if action.dest is not argparse.SUPPRESS:
if not hasattr(defaults, action.dest):
if action.default is not argparse.SUPPRESS:
setattr(defaults, action.dest, action.default)
for key, default_value in vars(defaults).items():
if not hasattr(args, key):
setattr(args, key, default_value)
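# Editor's note: a hedged sketch of how setup_registry is meant to be used; the
# '--dummy-component' registry name and DummyComponent class are hypothetical and exist
# only to demonstrate the (build, register, registry) triple returned above.
def _example_registry_usage():
    """Create a registry, register one implementation, then build it from parsed args."""
    build_dummy, register_dummy, DUMMY_REGISTRY = setup_registry('--dummy-component', default='basic')

    @register_dummy('basic')
    class DummyComponent(object):
        def __init__(self, args):
            self.args = args

    args = argparse.Namespace(dummy_component='basic')
    component = build_dummy(args)
    return isinstance(component, DummyComponent)  # -> True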
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/registry.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import ctypes
import math
import torch
try:
from fairseq import libbleu
except ImportError as e:
import sys
sys.stderr.write('ERROR: missing libbleu.so. run `pip install --editable .`\n')
raise e
C = ctypes.cdll.LoadLibrary(libbleu.__file__)
class BleuStat(ctypes.Structure):
_fields_ = [
('reflen', ctypes.c_size_t),
('predlen', ctypes.c_size_t),
('match1', ctypes.c_size_t),
('count1', ctypes.c_size_t),
('match2', ctypes.c_size_t),
('count2', ctypes.c_size_t),
('match3', ctypes.c_size_t),
('count3', ctypes.c_size_t),
('match4', ctypes.c_size_t),
('count4', ctypes.c_size_t),
]
class SacrebleuScorer(object):
def __init__(self):
import sacrebleu
self.sacrebleu = sacrebleu
self.reset()
def reset(self, one_init=False):
if one_init:
raise NotImplementedError
self.ref = []
self.sys = []
def add_string(self, ref, pred):
self.ref.append(ref)
self.sys.append(pred)
def score(self, order=4):
return self.result_string(order).score
def result_string(self, order=4):
if order != 4:
raise NotImplementedError
return self.sacrebleu.corpus_bleu(self.sys, [self.ref])
class Scorer(object):
def __init__(self, pad, eos, unk):
self.stat = BleuStat()
self.pad = pad
self.eos = eos
self.unk = unk
self.reset()
def reset(self, one_init=False):
if one_init:
C.bleu_one_init(ctypes.byref(self.stat))
else:
C.bleu_zero_init(ctypes.byref(self.stat))
def add(self, ref, pred):
if not isinstance(ref, torch.IntTensor):
raise TypeError('ref must be a torch.IntTensor (got {})'
.format(type(ref)))
if not isinstance(pred, torch.IntTensor):
            raise TypeError('pred must be a torch.IntTensor (got {})'
.format(type(pred)))
# don't match unknown words
rref = ref.clone()
assert not rref.lt(0).any()
rref[rref.eq(self.unk)] = -999
rref = rref.contiguous().view(-1)
pred = pred.contiguous().view(-1)
C.bleu_add(
ctypes.byref(self.stat),
ctypes.c_size_t(rref.size(0)),
ctypes.c_void_p(rref.data_ptr()),
ctypes.c_size_t(pred.size(0)),
ctypes.c_void_p(pred.data_ptr()),
ctypes.c_int(self.pad),
ctypes.c_int(self.eos))
def score(self, order=4):
psum = sum(math.log(p) if p > 0 else float('-Inf')
for p in self.precision()[:order])
return self.brevity() * math.exp(psum / order) * 100
def precision(self):
def ratio(a, b):
return a / b if b > 0 else 0
return [
ratio(self.stat.match1, self.stat.count1),
ratio(self.stat.match2, self.stat.count2),
ratio(self.stat.match3, self.stat.count3),
ratio(self.stat.match4, self.stat.count4),
]
def brevity(self):
r = self.stat.reflen / self.stat.predlen
return min(1, math.exp(1 - r))
def result_string(self, order=4):
assert order <= 4, "BLEU scores for order > 4 aren't supported"
fmt = 'BLEU{} = {:2.2f}, {:2.1f}'
for _ in range(1, order):
fmt += '/{:2.1f}'
fmt += ' (BP={:.3f}, ratio={:.3f}, syslen={}, reflen={})'
bleup = [p * 100 for p in self.precision()[:order]]
return fmt.format(order, self.score(order=order), *bleup,
self.brevity(), self.stat.predlen/self.stat.reflen,
self.stat.predlen, self.stat.reflen)
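# Editor's note: a hedged usage sketch of SacrebleuScorer defined above. It needs the
# sacrebleu package at call time (imported lazily in __init__) but not the compiled
# libbleu extension; the reference/hypothesis strings are invented.
def _example_sacrebleu_usage():
    """Accumulate detokenized (reference, hypothesis) pairs and compute corpus BLEU."""
    scorer = SacrebleuScorer()
    scorer.add_string('the cat sat on the mat', 'the cat sat on a mat')
    scorer.add_string('hello world', 'hello world')
    return scorer.score()  # corpus-level BLEU on a 0-100 scale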
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/bleu.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__all__ = ['pdb']
__version__ = '0.9.0'
import fairseq.criterions # noqa
import fairseq.models # noqa
import fairseq.modules # noqa
import fairseq.optim # noqa
import fairseq.optim.lr_scheduler # noqa
import fairseq.pdb # noqa
import fairseq.tasks # noqa
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
from fairseq import search, utils
from fairseq.data import data_utils
from fairseq.models import FairseqIncrementalDecoder
class SequenceGenerator(object):
def __init__(
self,
tgt_dict,
beam_size=1,
max_len_a=0,
max_len_b=200,
min_len=1,
normalize_scores=True,
len_penalty=1.,
unk_penalty=0.,
retain_dropout=False,
sampling=False,
sampling_topk=-1,
sampling_topp=-1.0,
temperature=1.,
diverse_beam_groups=-1,
diverse_beam_strength=0.5,
match_source_len=False,
no_repeat_ngram_size=0,
):
"""Generates translations of a given source sentence.
Args:
tgt_dict (~fairseq.data.Dictionary): target dictionary
beam_size (int, optional): beam width (default: 1)
max_len_a/b (int, optional): generate sequences of maximum length
ax + b, where x is the source length
min_len (int, optional): the minimum length of the generated output
(not including end-of-sentence)
normalize_scores (bool, optional): normalize scores by the length
of the output (default: True)
len_penalty (float, optional): length penalty, where <1.0 favors
shorter, >1.0 favors longer sentences (default: 1.0)
unk_penalty (float, optional): unknown word penalty, where <0
produces more unks, >0 produces fewer (default: 0.0)
retain_dropout (bool, optional): use dropout when generating
(default: False)
sampling (bool, optional): sample outputs instead of beam search
(default: False)
sampling_topk (int, optional): only sample among the top-k choices
at each step (default: -1)
sampling_topp (float, optional): only sample among the smallest set
of words whose cumulative probability mass exceeds p
at each step (default: -1.0)
temperature (float, optional): temperature, where values
>1.0 produce more uniform samples and values <1.0 produce
sharper samples (default: 1.0)
diverse_beam_groups/strength (float, optional): parameters for
Diverse Beam Search sampling
match_source_len (bool, optional): outputs should match the source
length (default: False)
"""
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.beam_size = beam_size
# the max beam size is the dictionary size - 1, since we never select pad
self.beam_size = min(beam_size, self.vocab_size - 1)
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.min_len = min_len
self.normalize_scores = normalize_scores
self.len_penalty = len_penalty
self.unk_penalty = unk_penalty
self.retain_dropout = retain_dropout
self.temperature = temperature
self.match_source_len = match_source_len
self.no_repeat_ngram_size = no_repeat_ngram_size
assert sampling_topk < 0 or sampling, '--sampling-topk requires --sampling'
assert sampling_topp < 0 or sampling, '--sampling-topp requires --sampling'
assert temperature > 0, '--temperature must be greater than 0'
if sampling:
self.search = search.Sampling(tgt_dict, sampling_topk, sampling_topp)
elif diverse_beam_groups > 0:
self.search = search.DiverseBeamSearch(tgt_dict, diverse_beam_groups, diverse_beam_strength)
elif match_source_len:
self.search = search.LengthConstrainedBeamSearch(
tgt_dict, min_len_a=1, min_len_b=0, max_len_a=1, max_len_b=0,
)
else:
self.search = search.BeamSearch(tgt_dict)
@torch.no_grad()
def generate(self, models, sample, **kwargs):
"""Generate a batch of translations.
Args:
models (List[~fairseq.models.FairseqModel]): ensemble of models
sample (dict): batch
prefix_tokens (torch.LongTensor, optional): force decoder to begin
with these tokens
bos_token (int, optional): beginning of sentence token
(default: self.eos)
"""
model = EnsembleModel(models)
return self._generate(model, sample, **kwargs)
@torch.no_grad()
def _generate(
self,
model,
sample,
prefix_tokens=None,
bos_token=None,
**kwargs
):
if not self.retain_dropout:
model.eval()
# model.forward normally channels prev_output_tokens into the decoder
# separately, but SequenceGenerator directly calls model.encoder
encoder_input = {
k: v for k, v in sample['net_input'].items()
if k != 'prev_output_tokens'
}
src_tokens = encoder_input['src_tokens']
src_lengths = (src_tokens.ne(self.eos) & src_tokens.ne(self.pad)).long().sum(dim=1)
input_size = src_tokens.size()
# batch dimension goes first followed by source lengths
bsz = input_size[0]
src_len = input_size[1]
beam_size = self.beam_size
if self.match_source_len:
max_len = src_lengths.max().item()
else:
max_len = min(
int(self.max_len_a * src_len + self.max_len_b),
# exclude the EOS marker
model.max_decoder_positions() - 1,
)
# compute the encoder output for each beam
encoder_outs = model.forward_encoder(encoder_input)
new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1)
new_order = new_order.to(src_tokens.device).long()
encoder_outs = model.reorder_encoder_out(encoder_outs, new_order)
# initialize buffers
scores = src_tokens.new(bsz * beam_size, max_len + 1).float().fill_(0)
scores_buf = scores.clone()
tokens = src_tokens.new(bsz * beam_size, max_len + 2).long().fill_(self.pad)
tokens_buf = tokens.clone()
tokens[:, 0] = self.eos if bos_token is None else bos_token
attn, attn_buf = None, None
# The blacklist indicates candidates that should be ignored.
# For example, suppose we're sampling and have already finalized 2/5
# samples. Then the blacklist would mark 2 positions as being ignored,
# so that we only finalize the remaining 3 samples.
blacklist = src_tokens.new_zeros(bsz, beam_size).eq(-1) # forward and backward-compatible False mask
# list of completed sentences
finalized = [[] for i in range(bsz)]
finished = [False for i in range(bsz)]
num_remaining_sent = bsz
# number of candidate hypos per step
cand_size = 2 * beam_size # 2 x beam size in case half are EOS
# offset arrays for converting between different indexing schemes
bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens)
cand_offsets = torch.arange(0, cand_size).type_as(tokens)
# helper function for allocating buffers on the fly
buffers = {}
def buffer(name, type_of=tokens): # noqa
if name not in buffers:
buffers[name] = type_of.new()
return buffers[name]
def is_finished(sent, step, unfin_idx):
"""
Check whether we've finished generation for a given sentence, by
comparing the worst score among finalized hypotheses to the best
possible score among unfinalized hypotheses.
"""
assert len(finalized[sent]) <= beam_size
if len(finalized[sent]) == beam_size or step == max_len:
return True
return False
def finalize_hypos(step, bbsz_idx, eos_scores):
"""
Finalize the given hypotheses at this step, while keeping the total
number of finalized hypotheses per sentence <= beam_size.
Note: the input must be in the desired finalization order, so that
hypotheses that appear earlier in the input are preferred to those
that appear later.
Args:
step: current time step
bbsz_idx: A vector of indices in the range [0, bsz*beam_size),
indicating which hypotheses to finalize
eos_scores: A vector of the same size as bbsz_idx containing
scores for each hypothesis
"""
assert bbsz_idx.numel() == eos_scores.numel()
# clone relevant token and attention tensors
tokens_clone = tokens.index_select(0, bbsz_idx)
tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS
assert not tokens_clone.eq(self.eos).any()
tokens_clone[:, step] = self.eos
attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None
# compute scores per token position
pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1]
pos_scores[:, step] = eos_scores
# convert from cumulative to per-position scores
pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]
# normalize sentence-level scores
if self.normalize_scores:
eos_scores /= (step + 1) ** self.len_penalty
cum_unfin = []
prev = 0
for f in finished:
if f:
prev += 1
else:
cum_unfin.append(prev)
sents_seen = set()
for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())):
unfin_idx = idx // beam_size
sent = unfin_idx + cum_unfin[unfin_idx]
sents_seen.add((sent, unfin_idx))
if self.match_source_len and step > src_lengths[unfin_idx]:
score = -math.inf
def get_hypo():
if attn_clone is not None:
# remove padding tokens from attn scores
hypo_attn = attn_clone[i]
else:
hypo_attn = None
return {
'tokens': tokens_clone[i],
'score': score,
'attention': hypo_attn, # src_len x tgt_len
'alignment': None,
'positional_scores': pos_scores[i],
}
if len(finalized[sent]) < beam_size:
finalized[sent].append(get_hypo())
newly_finished = []
for sent, unfin_idx in sents_seen:
# check termination conditions for this sentence
if not finished[sent] and is_finished(sent, step, unfin_idx):
finished[sent] = True
newly_finished.append(unfin_idx)
return newly_finished
reorder_state = None
batch_idxs = None
for step in range(max_len + 1): # one extra step for EOS marker
# reorder decoder internal states based on the prev choice of beams
if reorder_state is not None:
if batch_idxs is not None:
# update beam indices to take into account removed sentences
corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs)
reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size)
model.reorder_incremental_state(reorder_state)
encoder_outs = model.reorder_encoder_out(encoder_outs, reorder_state)
lprobs, avg_attn_scores = model.forward_decoder(
tokens[:, :step + 1], encoder_outs, temperature=self.temperature,
)
lprobs[:, self.pad] = -math.inf # never select pad
lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty
# handle max length constraint
if step >= max_len:
lprobs[:, :self.eos] = -math.inf
lprobs[:, self.eos + 1:] = -math.inf
# handle prefix tokens (possibly with different lengths)
if prefix_tokens is not None and step < prefix_tokens.size(1) and step < max_len:
prefix_toks = prefix_tokens[:, step].unsqueeze(-1).repeat(1, beam_size).view(-1)
prefix_lprobs = lprobs.gather(-1, prefix_toks.unsqueeze(-1))
prefix_mask = prefix_toks.ne(self.pad)
lprobs[prefix_mask] = -math.inf
lprobs[prefix_mask] = lprobs[prefix_mask].scatter_(
-1, prefix_toks[prefix_mask].unsqueeze(-1), prefix_lprobs[prefix_mask]
)
# if prefix includes eos, then we should make sure tokens and
# scores are the same across all beams
eos_mask = prefix_toks.eq(self.eos)
if eos_mask.any():
# validate that the first beam matches the prefix
first_beam = tokens[eos_mask].view(-1, beam_size, tokens.size(-1))[:, 0, 1:step + 1]
eos_mask_batch_dim = eos_mask.view(-1, beam_size)[:, 0]
target_prefix = prefix_tokens[eos_mask_batch_dim][:, :step]
assert (first_beam == target_prefix).all()
def replicate_first_beam(tensor, mask):
tensor = tensor.view(-1, beam_size, tensor.size(-1))
tensor[mask] = tensor[mask][:, :1, :]
return tensor.view(-1, tensor.size(-1))
# copy tokens, scores and lprobs from the first beam to all beams
tokens = replicate_first_beam(tokens, eos_mask_batch_dim)
scores = replicate_first_beam(scores, eos_mask_batch_dim)
lprobs = replicate_first_beam(lprobs, eos_mask_batch_dim)
elif step < self.min_len:
# minimum length constraint (does not apply if using prefix_tokens)
lprobs[:, self.eos] = -math.inf
if self.no_repeat_ngram_size > 0:
# for each beam and batch sentence, generate a list of previous ngrams
gen_ngrams = [{} for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
gen_tokens = tokens[bbsz_idx].tolist()
for ngram in zip(*[gen_tokens[i:] for i in range(self.no_repeat_ngram_size)]):
gen_ngrams[bbsz_idx][tuple(ngram[:-1])] = \
gen_ngrams[bbsz_idx].get(tuple(ngram[:-1]), []) + [ngram[-1]]
# Record attention scores
if avg_attn_scores is not None:
if attn is None:
attn = scores.new(bsz * beam_size, src_tokens.size(1), max_len + 2)
attn_buf = attn.clone()
attn[:, :, step + 1].copy_(avg_attn_scores)
scores = scores.type_as(lprobs)
scores_buf = scores_buf.type_as(lprobs)
eos_bbsz_idx = buffer('eos_bbsz_idx')
eos_scores = buffer('eos_scores', type_of=scores)
self.search.set_src_lengths(src_lengths)
if self.no_repeat_ngram_size > 0:
def calculate_banned_tokens(bbsz_idx):
# before decoding the next token, prevent decoding of ngrams that have already appeared
ngram_index = tuple(tokens[bbsz_idx, step + 2 - self.no_repeat_ngram_size:step + 1].tolist())
return gen_ngrams[bbsz_idx].get(ngram_index, [])
                if step + 2 - self.no_repeat_ngram_size >= 0:
                    banned_tokens = [calculate_banned_tokens(bbsz_idx) for bbsz_idx in range(bsz * beam_size)]
                else:
                    # nothing is banned yet: fewer than no_repeat_ngram_size tokens have been generated
                    banned_tokens = [[] for bbsz_idx in range(bsz * beam_size)]
for bbsz_idx in range(bsz * beam_size):
lprobs[bbsz_idx, banned_tokens[bbsz_idx]] = -math.inf
cand_scores, cand_indices, cand_beams = self.search.step(
step,
lprobs.view(bsz, -1, self.vocab_size),
scores.view(bsz, beam_size, -1)[:, :, :step],
)
# cand_bbsz_idx contains beam indices for the top candidate
# hypotheses, with a range of values: [0, bsz*beam_size),
# and dimensions: [bsz, cand_size]
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
# finalize hypotheses that end in eos, except for blacklisted ones
# or candidates with a score of -inf
eos_mask = cand_indices.eq(self.eos) & cand_scores.ne(-math.inf)
eos_mask[:, :beam_size][blacklist] = 0
# only consider eos when it's among the top beam_size indices
torch.masked_select(
cand_bbsz_idx[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_bbsz_idx,
)
finalized_sents = set()
if eos_bbsz_idx.numel() > 0:
torch.masked_select(
cand_scores[:, :beam_size],
mask=eos_mask[:, :beam_size],
out=eos_scores,
)
finalized_sents = finalize_hypos(step, eos_bbsz_idx, eos_scores)
num_remaining_sent -= len(finalized_sents)
assert num_remaining_sent >= 0
if num_remaining_sent == 0:
break
assert step < max_len
if len(finalized_sents) > 0:
new_bsz = bsz - len(finalized_sents)
# construct batch_idxs which holds indices of batches to keep for the next pass
batch_mask = cand_indices.new_ones(bsz)
batch_mask[cand_indices.new(finalized_sents)] = 0
batch_idxs = batch_mask.nonzero().squeeze(-1)
eos_mask = eos_mask[batch_idxs]
cand_beams = cand_beams[batch_idxs]
bbsz_offsets.resize_(new_bsz, 1)
cand_bbsz_idx = cand_beams.add(bbsz_offsets)
cand_scores = cand_scores[batch_idxs]
cand_indices = cand_indices[batch_idxs]
if prefix_tokens is not None:
prefix_tokens = prefix_tokens[batch_idxs]
src_lengths = src_lengths[batch_idxs]
blacklist = blacklist[batch_idxs]
scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
scores_buf.resize_as_(scores)
tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1)
tokens_buf.resize_as_(tokens)
if attn is not None:
attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1)
attn_buf.resize_as_(attn)
bsz = new_bsz
else:
batch_idxs = None
# Set active_mask so that values > cand_size indicate eos or
# blacklisted hypos and values < cand_size indicate candidate
# active hypos. After this, the min values per row are the top
# candidate active hypos.
active_mask = buffer('active_mask')
eos_mask[:, :beam_size] |= blacklist
torch.add(
eos_mask.type_as(cand_offsets) * cand_size,
cand_offsets[:eos_mask.size(1)],
out=active_mask,
)
# get the top beam_size active hypotheses, which are just the hypos
# with the smallest values in active_mask
active_hypos, new_blacklist = buffer('active_hypos'), buffer('new_blacklist')
torch.topk(
active_mask, k=beam_size, dim=1, largest=False,
out=(new_blacklist, active_hypos)
)
# update blacklist to ignore any finalized hypos
blacklist = new_blacklist.ge(cand_size)[:, :beam_size]
assert (~blacklist).any(dim=1).all()
active_bbsz_idx = buffer('active_bbsz_idx')
torch.gather(
cand_bbsz_idx, dim=1, index=active_hypos,
out=active_bbsz_idx,
)
active_scores = torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores[:, step].view(bsz, beam_size),
)
active_bbsz_idx = active_bbsz_idx.view(-1)
active_scores = active_scores.view(-1)
# copy tokens and scores for active hypotheses
torch.index_select(
tokens[:, :step + 1], dim=0, index=active_bbsz_idx,
out=tokens_buf[:, :step + 1],
)
torch.gather(
cand_indices, dim=1, index=active_hypos,
out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1],
)
if step > 0:
torch.index_select(
scores[:, :step], dim=0, index=active_bbsz_idx,
out=scores_buf[:, :step],
)
torch.gather(
cand_scores, dim=1, index=active_hypos,
out=scores_buf.view(bsz, beam_size, -1)[:, :, step],
)
# copy attention for active hypotheses
if attn is not None:
torch.index_select(
attn[:, :, :step + 2], dim=0, index=active_bbsz_idx,
out=attn_buf[:, :, :step + 2],
)
# swap buffers
tokens, tokens_buf = tokens_buf, tokens
scores, scores_buf = scores_buf, scores
if attn is not None:
attn, attn_buf = attn_buf, attn
# reorder incremental state in decoder
reorder_state = active_bbsz_idx
# sort by score descending
for sent in range(len(finalized)):
finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True)
return finalized
class EnsembleModel(torch.nn.Module):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__()
self.models = torch.nn.ModuleList(models)
self.incremental_states = None
if all(isinstance(m.decoder, FairseqIncrementalDecoder) for m in models):
self.incremental_states = {m: {} for m in models}
def has_encoder(self):
return hasattr(self.models[0], 'encoder')
def max_decoder_positions(self):
return min(m.max_decoder_positions() for m in self.models)
@torch.no_grad()
def forward_encoder(self, encoder_input):
if not self.has_encoder():
return None
return [model.encoder(**encoder_input) for model in self.models]
@torch.no_grad()
def forward_decoder(self, tokens, encoder_outs, temperature=1.):
if len(self.models) == 1:
return self._decode_one(
tokens,
self.models[0],
encoder_outs[0] if self.has_encoder() else None,
self.incremental_states,
log_probs=True,
temperature=temperature,
)
log_probs = []
avg_attn = None
for model, encoder_out in zip(self.models, encoder_outs):
probs, attn = self._decode_one(
tokens,
model,
encoder_out,
self.incremental_states,
log_probs=True,
temperature=temperature,
)
log_probs.append(probs)
if attn is not None:
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
avg_probs = torch.logsumexp(torch.stack(log_probs, dim=0), dim=0) - math.log(len(self.models))
if avg_attn is not None:
avg_attn.div_(len(self.models))
return avg_probs, avg_attn
def _decode_one(
self, tokens, model, encoder_out, incremental_states, log_probs,
temperature=1.,
):
if self.incremental_states is not None:
decoder_out = list(model.forward_decoder(
tokens, encoder_out=encoder_out, incremental_state=self.incremental_states[model],
))
else:
decoder_out = list(model.forward_decoder(tokens, encoder_out=encoder_out))
decoder_out[0] = decoder_out[0][:, -1:, :]
if temperature != 1.:
decoder_out[0].div_(temperature)
attn = decoder_out[1]
if type(attn) is dict:
attn = attn.get('attn', None)
if attn is not None:
attn = attn[:, -1, :]
probs = model.get_normalized_probs(decoder_out, log_probs=log_probs)
probs = probs[:, -1, :]
return probs, attn
def reorder_encoder_out(self, encoder_outs, new_order):
if not self.has_encoder():
return
return [
model.encoder.reorder_encoder_out(encoder_out, new_order)
for model, encoder_out in zip(self.models, encoder_outs)
]
def reorder_incremental_state(self, new_order):
if self.incremental_states is None:
return
for model in self.models:
model.decoder.reorder_incremental_state(self.incremental_states[model], new_order)
class SequenceGeneratorWithAlignment(SequenceGenerator):
def __init__(self, tgt_dict, left_pad_target=False, **kwargs):
"""Generates translations of a given source sentence.
Produces alignments following "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
left_pad_target (bool, optional): Whether or not the
hypothesis should be left padded or not when they are
teacher forced for generating alignments.
"""
super().__init__(tgt_dict, **kwargs)
self.left_pad_target = left_pad_target
@torch.no_grad()
def generate(self, models, sample, **kwargs):
model = EnsembleModelWithAlignment(models)
finalized = super()._generate(model, sample, **kwargs)
src_tokens = sample['net_input']['src_tokens']
bsz = src_tokens.shape[0]
beam_size = self.beam_size
src_tokens, src_lengths, prev_output_tokens, tgt_tokens = \
self._prepare_batch_for_alignment(sample, finalized)
if any(getattr(m, 'full_context_alignment', False) for m in model.models):
attn = model.forward_align(src_tokens, src_lengths, prev_output_tokens)
else:
attn = [
finalized[i // beam_size][i % beam_size]['attention'].transpose(1, 0)
for i in range(bsz * beam_size)
]
# Process the attn matrix to extract hard alignments.
for i in range(bsz * beam_size):
alignment = utils.extract_hard_alignment(attn[i], src_tokens[i], tgt_tokens[i], self.pad, self.eos)
finalized[i // beam_size][i % beam_size]['alignment'] = alignment
return finalized
def _prepare_batch_for_alignment(self, sample, hypothesis):
src_tokens = sample['net_input']['src_tokens']
bsz = src_tokens.shape[0]
src_tokens = src_tokens[:, None, :].expand(-1, self.beam_size, -1).contiguous().view(bsz * self.beam_size, -1)
src_lengths = sample['net_input']['src_lengths']
src_lengths = src_lengths[:, None].expand(-1, self.beam_size).contiguous().view(bsz * self.beam_size)
prev_output_tokens = data_utils.collate_tokens(
[beam['tokens'] for example in hypothesis for beam in example],
self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=True,
)
tgt_tokens = data_utils.collate_tokens(
[beam['tokens'] for example in hypothesis for beam in example],
self.pad, self.eos, self.left_pad_target, move_eos_to_beginning=False,
)
return src_tokens, src_lengths, prev_output_tokens, tgt_tokens
class EnsembleModelWithAlignment(EnsembleModel):
"""A wrapper around an ensemble of models."""
def __init__(self, models):
super().__init__(models)
def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
avg_attn = None
for model in self.models:
decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
attn = decoder_out[1]['attn']
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(self.models) > 1:
avg_attn.div_(len(self.models))
return avg_attn
def _decode_one(
self, tokens, model, encoder_out, incremental_states, log_probs,
temperature=1.,
):
if self.incremental_states is not None:
decoder_out = list(model.forward_decoder(
tokens,
encoder_out=encoder_out,
incremental_state=self.incremental_states[model],
))
else:
decoder_out = list(model.forward_decoder(tokens, encoder_out=encoder_out))
decoder_out[0] = decoder_out[0][:, -1:, :]
if temperature != 1.:
decoder_out[0].div_(temperature)
attn = decoder_out[1]
if type(attn) is dict:
attn = attn.get('attn', None)
if attn is not None:
attn = attn[:, -1, :]
probs = model.get_normalized_probs(decoder_out, log_probs=log_probs)
probs = probs[:, -1, :]
return probs, attn
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/sequence_generator.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing
import os
import pdb
import sys
__all__ = ['set_trace']
_stdin = [None]
_stdin_lock = multiprocessing.Lock()
try:
_stdin_fd = sys.stdin.fileno()
except Exception:
_stdin_fd = None
class MultiprocessingPdb(pdb.Pdb):
"""A Pdb wrapper that works in a multiprocessing environment.
Usage: `from fairseq import pdb; pdb.set_trace()`
"""
def __init__(self):
pdb.Pdb.__init__(self, nosigint=True)
def _cmdloop(self):
stdin_bak = sys.stdin
with _stdin_lock:
try:
if _stdin_fd is not None:
if not _stdin[0]:
_stdin[0] = os.fdopen(_stdin_fd)
sys.stdin = _stdin[0]
self.cmdloop()
finally:
sys.stdin = stdin_bak
def set_trace():
pdb = MultiprocessingPdb()
pdb.set_trace(sys._getframe().f_back)
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/pdb.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import re
SPACE_NORMALIZER = re.compile(r"\s+")
def tokenize_line(line):
line = SPACE_NORMALIZER.sub(" ", line)
line = line.strip()
return line.split()
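# Editor's note: a tiny, hedged illustration of tokenize_line; the input string is invented.
def _example_tokenize_line():
    """Collapse runs of whitespace (including tabs/newlines) and split on single spaces."""
    return tokenize_line('  Hello \t world\n')  # -> ['Hello', 'world']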
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/tokenizer.py |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import os
import torch
from torch import nn
from fairseq import utils
from fairseq.data import encoders
def from_pretrained(
model_name_or_path,
checkpoint_file='model.pt',
data_name_or_path='.',
archive_map=None,
**kwargs
):
from fairseq import checkpoint_utils, file_utils
if archive_map is not None:
if model_name_or_path in archive_map:
model_name_or_path = archive_map[model_name_or_path]
if data_name_or_path is not None and data_name_or_path in archive_map:
data_name_or_path = archive_map[data_name_or_path]
# allow archive_map to set default arg_overrides (e.g., tokenizer, bpe)
# for each model
if isinstance(model_name_or_path, dict):
for k, v in model_name_or_path.items():
if k == 'checkpoint_file':
checkpoint_file = v
elif (
k != 'path'
# only set kwargs that don't already have overrides
and k not in kwargs
):
kwargs[k] = v
model_name_or_path = model_name_or_path['path']
model_path = file_utils.load_archive_file(model_name_or_path)
# convenience hack for loading data and BPE codes from model archive
if data_name_or_path.startswith('.'):
kwargs['data'] = os.path.abspath(os.path.join(model_path, data_name_or_path))
else:
kwargs['data'] = file_utils.load_archive_file(data_name_or_path)
for file, arg in {
'code': 'bpe_codes',
'bpecodes': 'bpe_codes',
'sentencepiece.bpe.model': 'sentencepiece_vocab',
}.items():
path = os.path.join(model_path, file)
if os.path.exists(path):
kwargs[arg] = path
if 'user_dir' in kwargs:
utils.import_user_module(argparse.Namespace(user_dir=kwargs['user_dir']))
models, args, task = checkpoint_utils.load_model_ensemble_and_task(
[os.path.join(model_path, cpt) for cpt in checkpoint_file.split(':')],
arg_overrides=kwargs,
)
return {
'args': args,
'task': task,
'models': models,
}
class GeneratorHubInterface(nn.Module):
"""
PyTorch Hub interface for generating sequences from a pre-trained
translation or language model.
"""
def __init__(self, args, task, models):
super().__init__()
self.args = args
self.task = task
self.models = nn.ModuleList(models)
self.src_dict = task.source_dictionary
self.tgt_dict = task.target_dictionary
# optimize model for generation
for model in self.models:
model.make_generation_fast_(
beamable_mm_beam_size=(
None if getattr(args, 'no_beamable_mm', False)
else getattr(args, 'beam', 5)
),
need_attn=getattr(args, 'print_alignment', False),
)
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
self.align_dict = utils.load_align_dict(getattr(args, 'replace_unk', None))
self.tokenizer = encoders.build_tokenizer(args)
self.bpe = encoders.build_bpe(args)
# this is useful for determining the device
self.register_buffer('_float_tensor', torch.tensor([0], dtype=torch.float))
@property
def device(self):
return self._float_tensor.device
def translate(self, sentence: str, beam: int = 5, verbose: bool = False, **kwargs) -> str:
return self.sample(sentence, beam, verbose, **kwargs)
def sample(self, sentence: str, beam: int = 1, verbose: bool = False, **kwargs) -> str:
input = self.encode(sentence)
hypo = self.generate(input, beam, verbose, **kwargs)[0]['tokens']
return self.decode(hypo)
def generate(self, tokens: torch.LongTensor, beam: int = 5, verbose: bool = False, **kwargs) -> torch.LongTensor:
sample = self._build_sample(tokens)
# build generator using current args as well as any kwargs
gen_args = copy.copy(self.args)
gen_args.beam = beam
for k, v in kwargs.items():
setattr(gen_args, k, v)
generator = self.task.build_generator(gen_args)
translations = self.task.inference_step(generator, self.models, sample)
if verbose:
src_str_with_unk = self.string(tokens)
print('S\t{}'.format(src_str_with_unk))
def getarg(name, default):
return getattr(gen_args, name, getattr(self.args, name, default))
# Process top predictions
hypos = translations[0]
if verbose:
for hypo in hypos:
hypo_str = self.decode(hypo['tokens'])
print('H\t{}\t{}'.format(hypo['score'], hypo_str))
print('P\t{}'.format(
' '.join(map(lambda x: '{:.4f}'.format(x), hypo['positional_scores'].tolist()))
))
if hypo['alignment'] is not None and getarg('print_alignment', False):
print('A\t{}'.format(
' '.join(map(lambda x: str(utils.item(x)), hypo['alignment'].int().cpu()))
))
return hypos
def encode(self, sentence: str) -> torch.LongTensor:
sentence = self.tokenize(sentence)
sentence = self.apply_bpe(sentence)
return self.binarize(sentence)
def decode(self, tokens: torch.LongTensor) -> str:
sentence = self.string(tokens)
sentence = self.remove_bpe(sentence)
return self.detokenize(sentence)
def tokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.encode(sentence)
return sentence
def detokenize(self, sentence: str) -> str:
if self.tokenizer is not None:
sentence = self.tokenizer.decode(sentence)
return sentence
def apply_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.encode(sentence)
return sentence
def remove_bpe(self, sentence: str) -> str:
if self.bpe is not None:
sentence = self.bpe.decode(sentence)
return sentence
def binarize(self, sentence: str) -> torch.LongTensor:
return self.src_dict.encode_line(sentence, add_if_not_exist=False).long()
def string(self, tokens: torch.LongTensor) -> str:
return self.tgt_dict.string(tokens)
def _build_sample(self, src_tokens: torch.LongTensor):
assert torch.is_tensor(src_tokens)
dataset = self.task.build_dataset_for_inference([src_tokens], [src_tokens.numel()])
sample = dataset.collater([dataset[0]])
sample = utils.apply_to_sample(
lambda tensor: tensor.to(self.device),
sample
)
return sample
class BPEHubInterface(object):
"""PyTorch Hub interface for Byte-Pair Encoding (BPE)."""
def __init__(self, bpe, **kwargs):
super().__init__()
args = argparse.Namespace(bpe=bpe, **kwargs)
self.bpe = encoders.build_bpe(args)
assert self.bpe is not None
def encode(self, sentence: str) -> str:
return self.bpe.encode(sentence)
def decode(self, sentence: str) -> str:
return self.bpe.decode(sentence)
class TokenizerHubInterface(object):
"""PyTorch Hub interface for tokenization."""
def __init__(self, tokenizer, **kwargs):
super().__init__()
args = argparse.Namespace(tokenizer=tokenizer, **kwargs)
self.tokenizer = encoders.build_tokenizer(args)
assert self.tokenizer is not None
def encode(self, sentence: str) -> str:
return self.tokenizer.encode(sentence)
def decode(self, sentence: str) -> str:
return self.tokenizer.decode(sentence)
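# Editor's note: a hedged end-to-end sketch of how from_pretrained and GeneratorHubInterface
# compose. The checkpoint directory '/path/to/model', its contents, and the example sentence
# are placeholders; nothing here is shipped with this file.
def _example_hub_usage():
    """Load an ensemble from a checkpoint directory and translate one sentence with it."""
    loaded = from_pretrained(
        '/path/to/model',          # directory or archive containing model.pt, dict files, BPE codes
        checkpoint_file='model.pt',
        data_name_or_path='.',
    )
    hub = GeneratorHubInterface(loaded['args'], loaded['task'], loaded['models'])
    return hub.translate('Hallo Welt!', beam=5)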
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/hub_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import sys
from fairseq import utils
class SequenceScorer(object):
"""Scores the target for a given source sentence."""
def __init__(self, tgt_dict, softmax_batch=None):
self.pad = tgt_dict.pad()
self.eos = tgt_dict.eos()
self.softmax_batch = softmax_batch or sys.maxsize
assert self.softmax_batch > 0
@torch.no_grad()
def generate(self, models, sample, **kwargs):
"""Score a batch of translations."""
net_input = sample['net_input']
def batch_for_softmax(dec_out, target):
# assumes decoder_out[0] is the only thing needed (may not be correct for future models!)
first, rest = dec_out[0], dec_out[1:]
bsz, tsz, dim = first.shape
if bsz * tsz < self.softmax_batch:
yield dec_out, target, True
else:
flat = first.contiguous().view(1, -1, dim)
flat_tgt = target.contiguous().view(flat.shape[:-1])
s = 0
while s < flat.size(1):
e = s + self.softmax_batch
yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False
s = e
def gather_target_probs(probs, target):
probs = probs.gather(
dim=2,
index=target.unsqueeze(-1),
)
return probs
orig_target = sample['target']
# compute scores for each model in the ensemble
avg_probs = None
avg_attn = None
for model in models:
model.eval()
decoder_out = model.forward(**net_input)
attn = decoder_out[1]
if type(attn) is dict:
attn = attn.get('attn', None)
batched = batch_for_softmax(decoder_out, orig_target)
probs, idx = None, 0
for bd, tgt, is_single in batched:
sample['target'] = tgt
curr_prob = model.get_normalized_probs(bd, log_probs=len(models) == 1, sample=sample).data
if is_single:
probs = gather_target_probs(curr_prob, orig_target)
else:
if probs is None:
probs = curr_prob.new(orig_target.numel())
step = curr_prob.size(0) * curr_prob.size(1)
end = step + idx
tgt_probs = gather_target_probs(curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt)
probs[idx:end] = tgt_probs.view(-1)
idx = end
sample['target'] = orig_target
probs = probs.view(sample['target'].shape)
if avg_probs is None:
avg_probs = probs
else:
avg_probs.add_(probs)
if attn is not None and torch.is_tensor(attn):
attn = attn.data
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(models) > 1:
avg_probs.div_(len(models))
avg_probs.log_()
if avg_attn is not None:
avg_attn.div_(len(models))
bsz = avg_probs.size(0)
hypos = []
start_idxs = sample['start_indices'] if 'start_indices' in sample else [0] * bsz
for i in range(bsz):
# remove padding from ref
ref = utils.strip_pad(sample['target'][i, start_idxs[i]:], self.pad) \
if sample['target'] is not None else None
tgt_len = ref.numel()
avg_probs_i = avg_probs[i][start_idxs[i]:start_idxs[i] + tgt_len]
score_i = avg_probs_i.sum() / tgt_len
if avg_attn is not None:
avg_attn_i = avg_attn[i]
alignment = utils.extract_hard_alignment(avg_attn_i, sample['net_input']['src_tokens'][i],
sample['target'][i], self.pad, self.eos)
else:
avg_attn_i = alignment = None
hypos.append([{
'tokens': ref,
'score': score_i,
'attention': avg_attn_i,
'alignment': alignment,
'positional_scores': avg_probs_i,
}])
return hypos
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/sequence_scorer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
import contextlib
import copy
import importlib.util
import math
import os
import sys
from typing import Callable, List
import warnings
import torch
import torch.nn.functional as F
from itertools import accumulate
from fairseq.modules import gelu, gelu_accurate
def load_ensemble_for_inference(filenames, task, model_arg_overrides=None):
from fairseq import checkpoint_utils
deprecation_warning(
'utils.load_ensemble_for_inference is deprecated. '
'Please use checkpoint_utils.load_model_ensemble instead.'
)
return checkpoint_utils.load_model_ensemble(
filenames, arg_overrides=model_arg_overrides, task=task,
)
def apply_to_sample(f, sample):
if len(sample) == 0:
return {}
def _apply(x):
if torch.is_tensor(x):
return f(x)
elif isinstance(x, dict):
return {
key: _apply(value)
for key, value in x.items()
}
elif isinstance(x, list):
return [_apply(x) for x in x]
else:
return x
return _apply(sample)
def move_to_cuda(sample):
def _move_to_cuda(tensor):
return tensor.cuda()
return apply_to_sample(_move_to_cuda, sample)
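# Editor's note: a hedged illustration of apply_to_sample, which maps a function over every
# tensor inside an arbitrarily nested sample; the sample layout below is invented.
def _example_apply_to_sample():
    """Cast every tensor in a nested sample to half precision, leaving other values alone."""
    sample = {
        'net_input': {'src_tokens': torch.ones(2, 3)},
        'targets': [torch.zeros(2)],
        'ntokens': 6,
    }
    half_sample = apply_to_sample(lambda t: t.half(), sample)
    return half_sample['net_input']['src_tokens'].dtype  # -> torch.float16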
INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0)
def _get_full_incremental_state_key(module_instance, key):
module_name = module_instance.__class__.__name__
# assign a unique ID to each module instance, so that incremental state is
# not shared across module instances
if not hasattr(module_instance, '_fairseq_instance_id'):
INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1
module_instance._fairseq_instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name]
return '{}.{}.{}'.format(module_name, module_instance._fairseq_instance_id, key)
def get_incremental_state(module, incremental_state, key):
"""Helper for getting incremental state for an nn.Module."""
full_key = _get_full_incremental_state_key(module, key)
if incremental_state is None or full_key not in incremental_state:
return None
return incremental_state[full_key]
def set_incremental_state(module, incremental_state, key, value):
"""Helper for setting incremental state for an nn.Module."""
if incremental_state is not None:
full_key = _get_full_incremental_state_key(module, key)
incremental_state[full_key] = value
def load_align_dict(replace_unk):
if replace_unk is None:
align_dict = None
elif isinstance(replace_unk, str) and len(replace_unk) > 0:
# Load alignment dictionary for unknown word replacement if it was passed as an argument.
align_dict = {}
with open(replace_unk, 'r') as f:
for line in f:
cols = line.split()
align_dict[cols[0]] = cols[1]
else:
# No alignment dictionary provided but we still want to perform unknown word replacement by copying the
# original source word.
align_dict = {}
return align_dict
def print_embed_overlap(embed_dict, vocab_dict):
embed_keys = set(embed_dict.keys())
vocab_keys = set(vocab_dict.symbols)
overlap = len(embed_keys & vocab_keys)
print("| Found {}/{} types in embedding file.".format(overlap, len(vocab_dict)))
def parse_embedding(embed_path):
"""Parse embedding text file into a dictionary of word and embedding tensors.
The first line can have vocabulary size and dimension. The following lines
should contain word and embedding separated by spaces.
Example:
2 5
the -0.0230 -0.0264 0.0287 0.0171 0.1403
at -0.0395 -0.1286 0.0275 0.0254 -0.0932
"""
embed_dict = {}
with open(embed_path) as f_embed:
next(f_embed) # skip header
for line in f_embed:
pieces = line.rstrip().split(" ")
embed_dict[pieces[0]] = torch.Tensor([float(weight) for weight in pieces[1:]])
return embed_dict
def load_embedding(embed_dict, vocab, embedding):
for idx in range(len(vocab)):
token = vocab[idx]
if token in embed_dict:
embedding.weight.data[idx] = embed_dict[token]
return embedding
def replace_unk(hypo_str, src_str, alignment, align_dict, unk):
from fairseq import tokenizer
# Tokens are strings here
hypo_tokens = tokenizer.tokenize_line(hypo_str)
# TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully
src_tokens = tokenizer.tokenize_line(src_str) + ['<eos>']
for i, ht in enumerate(hypo_tokens):
if ht == unk:
src_token = src_tokens[alignment[i]]
# Either take the corresponding value in the aligned dictionary or just copy the original value.
hypo_tokens[i] = align_dict.get(src_token, src_token)
return ' '.join(hypo_tokens)
def post_process_prediction(hypo_tokens, src_str, alignment, align_dict, tgt_dict, remove_bpe=None):
hypo_str = tgt_dict.string(hypo_tokens, remove_bpe)
if align_dict is not None:
hypo_str = replace_unk(hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string())
if align_dict is not None or remove_bpe is not None:
# Convert back to tokens for evaluating with unk replacement or without BPE
# Note that the dictionary can be modified inside the method.
hypo_tokens = tgt_dict.encode_line(hypo_str, add_if_not_exist=True)
return hypo_tokens, hypo_str, alignment
def make_positions(tensor, padding_idx, onnx_trace=False):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
# The series of casts and type-conversions here are carefully
# balanced to both work with ONNX export and XLA. In particular XLA
# prefers ints, cumsum defaults to output longs, and ONNX doesn't know
# how to handle the dtype kwarg in cumsum.
mask = tensor.ne(padding_idx).int()
return (
torch.cumsum(mask, dim=1).type_as(mask) * mask
).long() + padding_idx
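# A minimal usage sketch for make_positions (values illustrative only):
#   make_positions(torch.tensor([[5, 6, 7, 1, 1]]), padding_idx=1)
#   -> tensor([[2, 3, 4, 1, 1]])
# Real tokens receive positions starting at padding_idx + 1, while padding
# slots keep the padding index itself.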
def strip_pad(tensor, pad):
return tensor[tensor.ne(pad)]
def buffered_arange(max):
if not hasattr(buffered_arange, 'buf'):
buffered_arange.buf = torch.LongTensor()
if max > buffered_arange.buf.numel():
torch.arange(max, out=buffered_arange.buf)
return buffered_arange.buf[:max]
def convert_padding_direction(src_tokens, padding_idx, right_to_left=False, left_to_right=False):
assert right_to_left ^ left_to_right
pad_mask = src_tokens.eq(padding_idx)
if not pad_mask.any():
# no padding, return early
return src_tokens
if left_to_right and not pad_mask[:, 0].any():
# already right padded
return src_tokens
if right_to_left and not pad_mask[:, -1].any():
# already left padded
return src_tokens
max_len = src_tokens.size(1)
    index_range = buffered_arange(max_len).type_as(src_tokens).expand_as(src_tokens)
    num_pads = pad_mask.long().sum(dim=1, keepdim=True)
    if right_to_left:
        index = torch.remainder(index_range - num_pads, max_len)
    else:
        index = torch.remainder(index_range + num_pads, max_len)
return src_tokens.gather(1, index)
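# A minimal usage sketch for convert_padding_direction (pad index and values
# illustrative only), moving left padding to the right:
#   convert_padding_direction(torch.tensor([[1, 1, 7, 8]]), padding_idx=1, left_to_right=True)
#   -> tensor([[7, 8, 1, 1]])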
def item(tensor):
if hasattr(tensor, 'item'):
return tensor.item()
if hasattr(tensor, '__getitem__'):
return tensor[0]
return tensor
def clip_grad_norm_(tensor, max_norm):
grad_norm = item(torch.norm(tensor))
if grad_norm > max_norm > 0:
clip_coef = max_norm / (grad_norm + 1e-6)
tensor.mul_(clip_coef)
return grad_norm
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a tensor with -inf."""
return t.float().fill_(float('-inf')).type_as(t)
def resolve_max_positions(*args):
"""Resolve max position constraints from multiple sources."""
def map_value_update(d1, d2):
updated_value = copy.deepcopy(d1)
for key in d2:
if key not in updated_value:
updated_value[key] = d2[key]
else:
updated_value[key] = min(d1[key], d2[key])
return updated_value
def nullsafe_min(l):
minim = None
for item in l:
if minim is None:
minim = item
elif item is not None and item < minim:
minim = item
return minim
max_positions = None
for arg in args:
if max_positions is None:
max_positions = arg
elif arg is not None:
if isinstance(arg, float) or isinstance(arg, int):
max_positions = min(max_positions, arg)
elif isinstance(arg, dict):
max_positions = map_value_update(max_positions, arg)
else:
max_positions = tuple(
map(nullsafe_min, zip(max_positions, arg))
)
return max_positions
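# A minimal usage sketch for resolve_max_positions, assuming the usual pattern
# of combining task and model limits given as (source, target) tuples
# (the numbers are illustrative only):
#   resolve_max_positions(None, (1024, 1024), (512, 2048))
#   -> (512, 1024)   # element-wise minimum, None entries are ignored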
def import_user_module(args):
module_path = getattr(args, 'user_dir', None)
if module_path is not None:
module_path = os.path.abspath(args.user_dir)
if not os.path.exists(module_path):
fairseq_rel_path = os.path.join(os.path.dirname(__file__), '..', args.user_dir)
if os.path.exists(fairseq_rel_path):
module_path = fairseq_rel_path
module_parent, module_name = os.path.split(module_path)
if module_name not in sys.modules:
sys.path.insert(0, module_parent)
importlib.import_module(module_name)
sys.path.pop(0)
def softmax(x, dim, onnx_trace=False):
if onnx_trace:
return F.softmax(x.float(), dim=dim)
else:
return F.softmax(x, dim=dim, dtype=torch.float32)
def log_softmax(x, dim, onnx_trace=False):
if onnx_trace:
return F.log_softmax(x.float(), dim=dim)
else:
return F.log_softmax(x, dim=dim, dtype=torch.float32)
def get_perplexity(loss):
try:
return float('{:.2f}'.format(math.pow(2, loss)))
except OverflowError:
return float('inf')
def deprecation_warning(message, stacklevel=3):
# don't use DeprecationWarning, since it's ignored by default
warnings.warn(message, stacklevel=stacklevel)
def get_activation_fn(activation: str) -> Callable:
""" Returns the activation function corresponding to `activation` """
if activation == 'relu':
return F.relu
elif activation == 'gelu':
return gelu
elif activation == 'gelu_fast':
deprecation_warning('--activation-fn=gelu_fast has been renamed to gelu_accurate')
return gelu_accurate
elif activation == 'gelu_accurate':
return gelu_accurate
elif activation == 'tanh':
return torch.tanh
elif activation == 'linear':
return lambda x: x
else:
raise RuntimeError("--activation-fn {} not supported".format(activation))
def get_available_activation_fns() -> List:
return [
'relu',
'gelu',
'gelu_fast', # deprecated
'gelu_accurate',
'tanh',
'linear',
]
@contextlib.contextmanager
def eval(model):
is_training = model.training
model.eval()
yield
model.train(is_training)
def has_parameters(module):
try:
next(module.parameters())
return True
except StopIteration:
return False
def set_torch_seed(seed):
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
assert isinstance(seed, int)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def parse_alignment(line):
"""
    Parses a single line from the alignment file.
Args:
line (str): String containing the alignment of the format:
<src_idx_1>-<tgt_idx_1> <src_idx_2>-<tgt_idx_2> ..
<src_idx_m>-<tgt_idx_m>. All indices are 0 indexed.
Returns:
torch.IntTensor: packed alignments of shape (2 * m).
"""
alignments = line.strip().split()
parsed_alignment = torch.IntTensor(2 * len(alignments))
for idx, alignment in enumerate(alignments):
src_idx, tgt_idx = alignment.split('-')
parsed_alignment[2 * idx] = int(src_idx)
parsed_alignment[2 * idx + 1] = int(tgt_idx)
return parsed_alignment
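# A minimal usage sketch for parse_alignment (the line content is illustrative only):
#   parse_alignment("0-0 1-2 2-1")
#   -> tensor([0, 0, 1, 2, 2, 1], dtype=torch.int32)
# i.e. the source/target index pairs packed into a flat IntTensor of length 2 * m.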
def get_token_to_word_mapping(tokens, exclude_list):
n = len(tokens)
word_start = [int(token not in exclude_list) for token in tokens]
word_idx = list(accumulate(word_start))
token_to_word = {i: word_idx[i] for i in range(n)}
return token_to_word
def extract_hard_alignment(attn, src_sent, tgt_sent, pad, eos):
tgt_valid = ((tgt_sent != pad) & (tgt_sent != eos)).nonzero().squeeze(dim=-1)
src_invalid = ((src_sent == pad) | (src_sent == eos)).nonzero().squeeze(dim=-1)
src_token_to_word = get_token_to_word_mapping(src_sent, [eos, pad])
tgt_token_to_word = get_token_to_word_mapping(tgt_sent, [eos, pad])
alignment = []
if len(tgt_valid) != 0 and len(src_invalid) < len(src_sent):
attn_valid = attn[tgt_valid]
attn_valid[:, src_invalid] = float('-inf')
_, src_indices = attn_valid.max(dim=1)
for tgt_idx, src_idx in zip(tgt_valid, src_indices):
alignment.append((src_token_to_word[src_idx.item()] - 1, tgt_token_to_word[tgt_idx.item()] - 1))
return alignment
def new_arange(x, *size):
"""
Return a Tensor of `size` filled with a range function on the device of x.
If size is empty, using the size of the variable x.
"""
if len(size) == 0:
size = x.size()
return torch.arange(size[-1], device=x.device).expand(*size).contiguous()
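# A minimal usage sketch for new_arange (shapes illustrative only):
#   x = torch.zeros(2, 4)
#   new_arange(x, 2, 3)  # -> tensor([[0, 1, 2], [0, 1, 2]]) on x's device
#   new_arange(x)        # -> tensor([[0, 1, 2, 3], [0, 1, 2, 3]])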
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import logging
import os
import re
import shutil
import traceback
from collections import OrderedDict
from typing import Union
import torch
from fairseq.models import FairseqDecoder, FairseqEncoder
from torch.serialization import default_restore_location
def save_checkpoint(args, trainer, epoch_itr, val_loss):
from fairseq import distributed_utils, meters
prev_best = getattr(save_checkpoint, "best", val_loss)
if val_loss is not None:
best_function = max if args.maximize_best_checkpoint_metric else min
save_checkpoint.best = best_function(val_loss, prev_best)
if args.no_save or not distributed_utils.is_master(args):
return
def is_better(a, b):
return a >= b if args.maximize_best_checkpoint_metric else a <= b
write_timer = meters.StopwatchMeter()
write_timer.start()
epoch = epoch_itr.epoch
end_of_epoch = epoch_itr.end_of_epoch()
updates = trainer.get_num_updates()
checkpoint_conds = collections.OrderedDict()
checkpoint_conds["checkpoint{}.pt".format(epoch)] = (
end_of_epoch
and not args.no_epoch_checkpoints
and epoch % args.save_interval == 0
)
checkpoint_conds["checkpoint_{}_{}.pt".format(epoch, updates)] = (
not end_of_epoch
and args.save_interval_updates > 0
and updates % args.save_interval_updates == 0
)
checkpoint_conds["checkpoint_best.pt"] = val_loss is not None and (
not hasattr(save_checkpoint, "best")
or is_better(val_loss, save_checkpoint.best)
)
checkpoint_conds["checkpoint_last.pt"] = not args.no_last_checkpoints
extra_state = {"train_iterator": epoch_itr.state_dict(), "val_loss": val_loss}
if hasattr(save_checkpoint, "best"):
extra_state.update({"best": save_checkpoint.best})
checkpoints = [
os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond
]
if len(checkpoints) > 0:
trainer.save_checkpoint(checkpoints[0], extra_state)
for cp in checkpoints[1:]:
try:
from fairseq.fb_pathmgr import fb_pathmgr
fb_pathmgr.copy(checkpoints[0], cp, True)
except (ModuleNotFoundError, ImportError):
shutil.copyfile(checkpoints[0], cp)
write_timer.stop()
print(
"| saved checkpoint {} (epoch {} @ {} updates) (writing took {} seconds)".format(
checkpoints[0], epoch, updates, write_timer.sum
)
)
if not end_of_epoch and args.keep_interval_updates > 0:
# remove old checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(
args.save_dir, pattern=r"checkpoint_\d+_(\d+)\.pt"
)
for old_chk in checkpoints[args.keep_interval_updates :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
if args.keep_last_epochs > 0:
# remove old epoch checkpoints; checkpoints are sorted in descending order
checkpoints = checkpoint_paths(args.save_dir, pattern=r"checkpoint(\d+)\.pt")
for old_chk in checkpoints[args.keep_last_epochs :]:
if os.path.lexists(old_chk):
os.remove(old_chk)
def load_checkpoint(args, trainer, **passthrough_args):
"""
Load a checkpoint and restore the training iterator.
*passthrough_args* will be passed through to
``trainer.get_train_iterator``.
"""
# only one worker should attempt to create the required dir
if args.distributed_rank == 0:
os.makedirs(args.save_dir, exist_ok=True)
if args.restore_file == "checkpoint_last.pt":
checkpoint_path = os.path.join(args.save_dir, "checkpoint_last.pt")
else:
checkpoint_path = args.restore_file
extra_state = trainer.load_checkpoint(
checkpoint_path,
args.reset_optimizer,
args.reset_lr_scheduler,
eval(args.optimizer_overrides),
reset_meters=args.reset_meters,
)
if (
extra_state is not None
and "best" in extra_state
and not args.reset_optimizer
and not args.reset_meters
):
save_checkpoint.best = extra_state["best"]
if extra_state is not None and not args.reset_dataloader:
# restore iterator from checkpoint
itr_state = extra_state["train_iterator"]
epoch_itr = trainer.get_train_iterator(
epoch=itr_state["epoch"], load_dataset=True, **passthrough_args
)
epoch_itr.load_state_dict(itr_state)
else:
epoch_itr = trainer.get_train_iterator(
epoch=0, load_dataset=True, **passthrough_args
)
trainer.lr_step(epoch_itr.epoch)
return extra_state, epoch_itr
def load_checkpoint_to_cpu(path, arg_overrides=None):
"""Loads a checkpoint to CPU (with upgrading for backward compatibility)."""
try:
from fairseq.fb_pathmgr import fb_pathmgr
with fb_pathmgr.open(path, "rb") as f:
state = torch.load(
f, map_location=lambda s, l: default_restore_location(s, "cpu")
)
except (ModuleNotFoundError, ImportError):
# if path manager not found, continue with local file.
state = torch.load(
path, map_location=lambda s, l: default_restore_location(s, "cpu")
)
args = state["args"]
if arg_overrides is not None:
for arg_name, arg_val in arg_overrides.items():
setattr(args, arg_name, arg_val)
state = _upgrade_state_dict(state)
return state
def load_model_ensemble(filenames, arg_overrides=None, task=None):
"""Loads an ensemble of models.
Args:
filenames (List[str]): checkpoint files to load
arg_overrides (Dict[str,Any], optional): override model args that
were used during model training
task (fairseq.tasks.FairseqTask, optional): task to use for loading
"""
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
return ensemble, args
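# A minimal usage sketch for load_model_ensemble (paths and overrides are
# illustrative only, not taken from the original source):
#   models, model_args = load_model_ensemble(
#       ["checkpoints/checkpoint_best.pt", "checkpoints/checkpoint_last.pt"],
#       arg_overrides={"data": "/path/to/data-bin"},
#       task=None,  # when omitted, a task is built from the first checkpoint's args
#   )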
def load_model_ensemble_and_task(filenames, arg_overrides=None, task=None):
from fairseq import tasks
ensemble = []
for filename in filenames:
if not os.path.exists(filename):
raise IOError("Model file not found: {}".format(filename))
state = load_checkpoint_to_cpu(filename, arg_overrides)
args = state["args"]
if task is None:
task = tasks.setup_task(args)
# build model for ensemble
model = task.build_model(args)
model.load_state_dict(state["model"], strict=True, args=args)
ensemble.append(model)
return ensemble, args, task
def checkpoint_paths(path, pattern=r"checkpoint(\d+)\.pt"):
"""Retrieves all checkpoints found in `path` directory.
Checkpoints are identified by matching filename to the specified pattern. If
the pattern contains groups, the result will be sorted by the first group in
descending order.
"""
pt_regexp = re.compile(pattern)
files = os.listdir(path)
entries = []
for i, f in enumerate(files):
m = pt_regexp.fullmatch(f)
if m is not None:
idx = int(m.group(1)) if len(m.groups()) > 0 else i
entries.append((idx, m.group(0)))
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)]
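# A minimal usage sketch for checkpoint_paths (directory contents illustrative only):
#   checkpoint_paths("checkpoints")  # with checkpoint1.pt ... checkpoint3.pt on disk
#   -> ["checkpoints/checkpoint3.pt", "checkpoints/checkpoint2.pt", "checkpoints/checkpoint1.pt"]
# Highest-numbered checkpoints come first, which is what the deletion logic in
# save_checkpoint relies on.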
def torch_persistent_save(*args, **kwargs):
for i in range(3):
try:
return torch.save(*args, **kwargs)
except Exception:
if i == 2:
logging.error(traceback.format_exc())
def convert_state_dict_type(state_dict, ttype=torch.FloatTensor):
if isinstance(state_dict, dict):
cpu_dict = OrderedDict()
for k, v in state_dict.items():
cpu_dict[k] = convert_state_dict_type(v)
return cpu_dict
elif isinstance(state_dict, list):
return [convert_state_dict_type(v) for v in state_dict]
elif torch.is_tensor(state_dict):
return state_dict.type(ttype)
else:
return state_dict
def save_state(
filename,
args,
model_state_dict,
criterion,
optimizer,
lr_scheduler,
num_updates,
optim_history=None,
extra_state=None,
):
from fairseq import utils
if optim_history is None:
optim_history = []
if extra_state is None:
extra_state = {}
state_dict = {
"args": args,
"model": model_state_dict if model_state_dict else {},
"optimizer_history": optim_history
+ [
{
"criterion_name": criterion.__class__.__name__,
"optimizer_name": optimizer.__class__.__name__,
"lr_scheduler_state": lr_scheduler.state_dict(),
"num_updates": num_updates,
}
],
"extra_state": extra_state,
}
if utils.has_parameters(criterion):
state_dict["criterion"] = criterion.state_dict()
if not args.no_save_optimizer_state:
state_dict["last_optimizer_state"] = convert_state_dict_type(
optimizer.state_dict()
)
try:
from fairseq.fb_pathmgr import fb_pathmgr
with fb_pathmgr.open(filename, "wb") as f:
torch_persistent_save(state_dict, f)
except (ModuleNotFoundError, ImportError):
# if path manager not found, continue with local file.
torch_persistent_save(state_dict, filename)
def _upgrade_state_dict(state):
"""Helper for upgrading old model checkpoints."""
from fairseq import models, registry, tasks
# add optimizer_history
if "optimizer_history" not in state:
state["optimizer_history"] = [
{"criterion_name": "CrossEntropyCriterion", "best_loss": state["best_loss"]}
]
state["last_optimizer_state"] = state["optimizer"]
del state["optimizer"]
del state["best_loss"]
# move extra_state into sub-dictionary
if "epoch" in state and "extra_state" not in state:
state["extra_state"] = {
"epoch": state["epoch"],
"batch_offset": state["batch_offset"],
"val_loss": state["val_loss"],
}
del state["epoch"]
del state["batch_offset"]
del state["val_loss"]
# reduce optimizer history's memory usage (only keep the last state)
if "optimizer" in state["optimizer_history"][-1]:
state["last_optimizer_state"] = state["optimizer_history"][-1]["optimizer"]
for optim_hist in state["optimizer_history"]:
del optim_hist["optimizer"]
# record the optimizer class name
if "optimizer_name" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["optimizer_name"] = "FairseqNAG"
# move best_loss into lr_scheduler_state
if "lr_scheduler_state" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["lr_scheduler_state"] = {
"best": state["optimizer_history"][-1]["best_loss"]
}
del state["optimizer_history"][-1]["best_loss"]
# keep track of number of updates
if "num_updates" not in state["optimizer_history"][-1]:
state["optimizer_history"][-1]["num_updates"] = 0
# old model checkpoints may not have separate source/target positions
if hasattr(state["args"], "max_positions") and not hasattr(
state["args"], "max_source_positions"
):
state["args"].max_source_positions = state["args"].max_positions
state["args"].max_target_positions = state["args"].max_positions
# use stateful training data iterator
if "train_iterator" not in state["extra_state"]:
state["extra_state"]["train_iterator"] = {
"epoch": state["extra_state"]["epoch"],
"iterations_in_epoch": state["extra_state"].get("batch_offset", 0),
}
# default to translation task
if not hasattr(state["args"], "task"):
state["args"].task = "translation"
# set any missing default values in the task, model or other registries
registry.set_defaults(state["args"], tasks.TASK_REGISTRY[state["args"].task])
registry.set_defaults(state["args"], models.ARCH_MODEL_REGISTRY[state["args"].arch])
for registry_name, REGISTRY in registry.REGISTRIES.items():
choice = getattr(state["args"], registry_name, None)
if choice is not None:
cls = REGISTRY["registry"][choice]
registry.set_defaults(state["args"], cls)
return state
def prune_state_dict(state_dict, args):
"""Prune the given state_dict if desired for LayerDrop
(https://arxiv.org/abs/1909.11556).
Training with LayerDrop allows models to be robust to pruning at inference
time. This function prunes state_dict to allow smaller models to be loaded
from a larger model and re-maps the existing state_dict for this to occur.
It's called by functions that load models from checkpoints and does not
need to be called directly.
"""
if not args or args.arch == "ptt_transformer":
# args should not be none, but don't crash if it is.
return state_dict
encoder_layers_to_keep = (
args.encoder_layers_to_keep if "encoder_layers_to_keep" in vars(args) else None
)
decoder_layers_to_keep = (
args.decoder_layers_to_keep if "decoder_layers_to_keep" in vars(args) else None
)
if not encoder_layers_to_keep and not decoder_layers_to_keep:
return state_dict
# apply pruning
print(
"| Pruning model to specified layer configuration - this works best if the model was trained with LayerDrop"
)
def create_pruning_pass(layers_to_keep, layer_name):
keep_layers = sorted(
[int(layer_string) for layer_string in layers_to_keep.split(",")]
)
mapping_dict = {}
for i in range(len(keep_layers)):
mapping_dict[str(keep_layers[i])] = str(i)
        regex = re.compile(r"^{layer}.*\.layers\.(\d+)".format(layer=layer_name))
return {"substitution_regex": regex, "mapping_dict": mapping_dict}
pruning_passes = []
if encoder_layers_to_keep:
pruning_passes.append(create_pruning_pass(encoder_layers_to_keep, "encoder"))
if decoder_layers_to_keep:
pruning_passes.append(create_pruning_pass(decoder_layers_to_keep, "decoder"))
new_state_dict = {}
for layer_name in state_dict.keys():
        match = re.search(r"\.layers\.(\d+)\.", layer_name)
# if layer has no number in it, it is a supporting layer, such as an
# embedding
if not match:
new_state_dict[layer_name] = state_dict[layer_name]
continue
# otherwise, layer should be pruned.
original_layer_number = match.group(1)
# figure out which mapping dict to replace from
for pruning_pass in pruning_passes:
if original_layer_number in pruning_pass["mapping_dict"] and pruning_pass[
"substitution_regex"
].search(layer_name):
new_layer_number = pruning_pass["mapping_dict"][original_layer_number]
substitution_match = pruning_pass["substitution_regex"].search(
layer_name
)
new_state_key = (
layer_name[: substitution_match.start(1)]
+ new_layer_number
+ layer_name[substitution_match.end(1) :]
)
new_state_dict[new_state_key] = state_dict[layer_name]
# Since layers are now pruned, *_layers_to_keep are no longer needed.
    # This is more of a workaround than a proper fix.
if "encoder_layers_to_keep" in vars(args):
args.encoder_layers_to_keep = None
if "decoder_layers_to_keep" in vars(args):
args.decoder_layers_to_keep = None
return new_state_dict
def load_pretrained_component_from_model(
component: Union[FairseqEncoder, FairseqDecoder], checkpoint: str
):
"""
Load a pretrained FairseqEncoder or FairseqDecoder from checkpoint into the
provided `component` object. If state_dict fails to load, there may be a
mismatch in the architecture of the corresponding `component` found in the
`checkpoint` file.
"""
if not os.path.exists(checkpoint):
raise IOError("Model file not found: {}".format(checkpoint))
state = load_checkpoint_to_cpu(checkpoint)
if isinstance(component, FairseqEncoder):
component_type = "encoder"
elif isinstance(component, FairseqDecoder):
component_type = "decoder"
else:
raise ValueError(
"component to load must be either a FairseqEncoder or "
"FairseqDecoder. Loading other component types are not supported."
)
component_state_dict = OrderedDict()
for key in state["model"].keys():
if key.startswith(component_type):
# encoder.input_layers.0.0.weight --> input_layers.0.0.weight
component_subkey = key[len(component_type) + 1 :]
component_state_dict[component_subkey] = state["model"][key]
component.load_state_dict(component_state_dict, strict=True)
return component
def verify_checkpoint_directory(save_dir: str) -> None:
if not os.path.exists(save_dir):
os.makedirs(save_dir, exist_ok=True)
temp_file_path = os.path.join(save_dir, "dummy")
try:
with open(temp_file_path, "w"):
pass
except OSError as e:
print("| Unable to access checkpoint save directory: {}".format(save_dir))
raise e
else:
os.remove(temp_file_path)
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/checkpoint_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import pickle
import socket
import subprocess
import warnings
import torch
import torch.distributed as dist
from fairseq import utils
def is_master(args):
return args.distributed_rank == 0
def infer_init_method(args):
if args.distributed_init_method is not None:
return
# support torch.distributed.launch
if all(key in os.environ for key in [
'MASTER_ADDR', 'MASTER_PORT', 'WORLD_SIZE', 'RANK'
]):
args.distributed_init_method = 'env://'
args.distributed_world_size = int(os.environ['WORLD_SIZE'])
args.distributed_rank = int(os.environ['RANK'])
# we can determine the init method automatically for Slurm
elif args.distributed_port > 0:
node_list = os.environ.get('SLURM_STEP_NODELIST')
if node_list is None:
node_list = os.environ.get('SLURM_JOB_NODELIST')
if node_list is not None:
try:
hostnames = subprocess.check_output(['scontrol', 'show', 'hostnames', node_list])
args.distributed_init_method = 'tcp://{host}:{port}'.format(
host=hostnames.split()[0].decode('utf-8'),
port=args.distributed_port,
)
nnodes = int(os.environ.get('SLURM_NNODES'))
ntasks_per_node = os.environ.get('SLURM_NTASKS_PER_NODE')
if ntasks_per_node is not None:
ntasks_per_node = int(ntasks_per_node)
else:
ntasks = int(os.environ.get('SLURM_NTASKS'))
nnodes = int(os.environ.get('SLURM_NNODES'))
assert ntasks % nnodes == 0
ntasks_per_node = int(ntasks / nnodes)
if ntasks_per_node == 1:
assert args.distributed_world_size % nnodes == 0
gpus_per_node = args.distributed_world_size // nnodes
node_id = int(os.environ.get('SLURM_NODEID'))
args.distributed_rank = node_id * gpus_per_node
else:
assert ntasks_per_node == args.distributed_world_size // nnodes
args.distributed_no_spawn = True
args.distributed_rank = int(os.environ.get('SLURM_PROCID'))
args.device_id = int(os.environ.get('SLURM_LOCALID'))
except subprocess.CalledProcessError as e: # scontrol failed
raise e
except FileNotFoundError: # Slurm is not installed
pass
def distributed_init(args):
if args.distributed_world_size == 1:
raise ValueError('Cannot initialize distributed with distributed_world_size=1')
if torch.distributed.is_initialized():
warnings.warn('Distributed is already initialized, cannot initialize twice!')
else:
print('| distributed init (rank {}): {}'.format(
args.distributed_rank, args.distributed_init_method), flush=True)
dist.init_process_group(
backend=args.distributed_backend,
init_method=args.distributed_init_method,
world_size=args.distributed_world_size,
rank=args.distributed_rank,
)
print('| initialized host {} as rank {}'.format(
socket.gethostname(), args.distributed_rank), flush=True)
# perform a dummy all-reduce to initialize the NCCL communicator
if torch.cuda.is_available():
dist.all_reduce(torch.zeros(1).cuda())
else:
dist.all_reduce(torch.zeros(1))
suppress_output(is_master(args))
args.distributed_rank = torch.distributed.get_rank()
return args.distributed_rank
def suppress_output(is_master):
"""Suppress printing on the current device. Force printing with `force=True`."""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def get_rank():
return dist.get_rank()
def get_world_size():
return dist.get_world_size()
def get_default_group():
return dist.group.WORLD
def all_reduce(tensor, group=None):
if group is None:
group = get_default_group()
return dist.all_reduce(tensor, group=group)
def all_gather_list(data, group=None, max_size=16384):
"""Gathers arbitrary data from all nodes into a list.
Similar to :func:`~torch.distributed.all_gather` but for arbitrary Python
data. Note that *data* must be picklable.
Args:
data (Any): data from the local worker to be gathered on other workers
group (optional): group of the collective
max_size (int, optional): maximum size of the data to be gathered
across workers
"""
rank = get_rank()
world_size = get_world_size()
buffer_size = max_size * world_size
if not hasattr(all_gather_list, '_buffer') or \
all_gather_list._buffer.numel() < buffer_size:
all_gather_list._buffer = torch.cuda.ByteTensor(buffer_size)
all_gather_list._cpu_buffer = torch.ByteTensor(max_size).pin_memory()
buffer = all_gather_list._buffer
buffer.zero_()
cpu_buffer = all_gather_list._cpu_buffer
enc = pickle.dumps(data)
enc_size = len(enc)
if enc_size + 2 > max_size:
raise ValueError('encoded data exceeds max_size: {}'.format(enc_size + 2))
assert max_size < 255*256
cpu_buffer[0] = enc_size // 255 # this encoding works for max_size < 65k
cpu_buffer[1] = enc_size % 255
cpu_buffer[2 : enc_size + 2] = torch.ByteTensor(list(enc))
start = rank * max_size
size = enc_size + 2
buffer[start : start + size].copy_(cpu_buffer[:size])
all_reduce(buffer, group=group)
try:
result = []
for i in range(world_size):
out_buffer = buffer[i * max_size : (i + 1) * max_size]
size = (255 * utils.item(out_buffer[0])) + utils.item(out_buffer[1])
if size > 0:
result.append(pickle.loads(bytes(out_buffer[2 : size + 2].tolist())))
return result
except pickle.UnpicklingError:
raise Exception(
'Unable to unpickle data from other workers. all_gather_list requires all '
'workers to enter the function together, so this error usually indicates '
'that the workers have fallen out of sync somehow. Workers can fall out of '
'sync if one of them runs out of memory, or if there are other conditions '
'in your training script that can cause one worker to finish an epoch '
'while other workers are still iterating over their portions of the data.'
)
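# A minimal usage sketch for all_gather_list, assuming distributed training with
# CUDA has already been initialized (the payload is illustrative only):
#   logging_outputs = all_gather_list({"ntokens": 1024, "loss": 2.5})
#   # -> a list with one deserialized dict per worker, in rank order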
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/distributed_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Utilities for working with the local dataset cache.
This file is adapted from `AllenNLP <https://github.com/allenai/allennlp>`_.
and `huggingface <https://github.com/huggingface>`_.
"""
import fnmatch
from functools import wraps
from hashlib import sha256
from io import open
import json
import logging
import os
import shutil
import tarfile
import tempfile
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(
os.getenv('TORCH_HOME', os.path.join(
os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
default_cache_path = os.path.join(torch_cache_home, 'pytorch_fairseq')
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
from pathlib import Path
PYTORCH_FAIRSEQ_CACHE = Path(
os.getenv('PYTORCH_FAIRSEQ_CACHE', default_cache_path))
except (AttributeError, ImportError):
PYTORCH_FAIRSEQ_CACHE = os.getenv(
'PYTORCH_FAIRSEQ_CACHE', default_cache_path)
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "pytorch_model.bin"
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
def load_archive_file(archive_file):
# redirect to the cache, if necessary
try:
resolved_archive_file = cached_path(archive_file, cache_dir=None)
except EnvironmentError:
print(
"Archive name '{}' was not found in archive name list. "
"We assumed '{}' was a path or URL but couldn't find any file "
"associated to this path or URL.".format(
archive_file,
archive_file,
)
)
return None
if resolved_archive_file == archive_file:
print("loading archive file {}".format(archive_file))
else:
print("loading archive file {} from cache at {}".format(
archive_file, resolved_archive_file))
# Extract archive to temp dir and replace .tar.bz2 if necessary
tempdir = None
if not os.path.isdir(resolved_archive_file):
tempdir = tempfile.mkdtemp()
print("extracting archive file {} to temp dir {}".format(
resolved_archive_file, tempdir))
ext = os.path.splitext(archive_file)[1][1:]
with tarfile.open(resolved_archive_file, 'r:' + ext) as archive:
top_dir = os.path.commonprefix(archive.getnames())
archive.extractall(tempdir)
os.remove(resolved_archive_file)
shutil.move(os.path.join(tempdir, top_dir), resolved_archive_file)
shutil.rmtree(tempdir)
return resolved_archive_file
def url_to_filename(url, etag=None):
"""
Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the hash of the URL, delimited
    by a period.
"""
url_bytes = url.encode('utf-8')
url_hash = sha256(url_bytes)
filename = url_hash.hexdigest()
if etag:
etag_bytes = etag.encode('utf-8')
etag_hash = sha256(etag_bytes)
filename += '.' + etag_hash.hexdigest()
return filename
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag
def cached_path(url_or_filename, cache_dir=None):
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
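# A minimal usage sketch for cached_path (URL and path illustrative only):
#   local_file = cached_path("https://example.com/checkpoint.tar.gz")
#   # downloads into PYTORCH_FAIRSEQ_CACHE on first use, returns the cached path afterwards
#   local_file = cached_path("/tmp/checkpoint.tar.gz")  # existing local paths are returned as-is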
def split_s3_path(url):
"""Split a full s3 path into the bucket name and path."""
parsed = urlparse(url)
if not parsed.netloc or not parsed.path:
raise ValueError("bad s3 path {}".format(url))
bucket_name = parsed.netloc
s3_path = parsed.path
# Remove '/' at beginning of path.
if s3_path.startswith("/"):
s3_path = s3_path[1:]
return bucket_name, s3_path
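# A minimal usage sketch for split_s3_path (bucket and key illustrative only):
#   split_s3_path("s3://my-bucket/models/checkpoint.pt")
#   -> ("my-bucket", "models/checkpoint.pt")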
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
from botocore.exceptions import ClientError
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper
@s3_request
def s3_etag(url):
"""Check ETag on S3 object."""
import boto3
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_object = s3_resource.Object(bucket_name, s3_path)
return s3_object.e_tag
@s3_request
def s3_get(url, temp_file):
"""Pull a file directly from S3."""
import boto3
s3_resource = boto3.resource("s3")
bucket_name, s3_path = split_s3_path(url)
s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url, temp_file):
import requests
from tqdm import tqdm
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = tqdm(unit="B", total=total)
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
def get_from_cache(url, cache_dir=None):
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_FAIRSEQ_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
try:
import requests
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
etag = None
else:
etag = response.headers.get("ETag")
except EnvironmentError:
etag = None
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
if not os.path.exists(cache_path) and etag is None:
matching_files = fnmatch.filter(os.listdir(cache_dir), filename + '.*')
matching_files = list(filter(lambda s: not s.endswith('.json'), matching_files))
if matching_files:
cache_path = os.path.join(cache_dir, matching_files[-1])
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
output_string = json.dumps(meta)
meta_file.write(output_string)
logger.info("removing temp file %s", temp_file.name)
return cache_path
def read_set_from_file(filename):
'''
Extract a de-duped collection (set) of text from a file.
Expected file format is one item per line.
'''
collection = set()
with open(filename, 'r', encoding='utf-8') as file_:
for line in file_:
collection.add(line.rstrip())
return collection
def get_file_extension(path, dot=True, lower=True):
ext = os.path.splitext(path)[1]
ext = ext if dot else ext[1:]
return ext.lower() if lower else ext
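# A minimal usage sketch for get_file_extension (paths illustrative only):
#   get_file_extension("archive.TAR")               # -> ".tar"
#   get_file_extension("archive.TAR", dot=False)    # -> "tar"
#   get_file_extension("archive.TAR", lower=False)  # -> ".TAR"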
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/file_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
class Search(object):
def __init__(self, tgt_dict):
self.pad = tgt_dict.pad()
self.unk = tgt_dict.unk()
self.eos = tgt_dict.eos()
self.vocab_size = len(tgt_dict)
self.scores_buf = None
self.indices_buf = None
self.beams_buf = None
def _init_buffers(self, t):
if self.scores_buf is None:
self.scores_buf = t.new()
self.indices_buf = torch.LongTensor().to(device=t.device)
self.beams_buf = torch.LongTensor().to(device=t.device)
def step(self, step, lprobs, scores):
"""Take a single search step.
Args:
step: the current search step, starting at 0
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
scores: (bsz x input_beam_size x step)
the historical model scores of each hypothesis up to this point
Return: A tuple of (scores, indices, beams) where:
scores: (bsz x output_beam_size)
the scores of the chosen elements; output_beam_size can be
larger than input_beam_size, e.g., we may return
2*input_beam_size to account for EOS
indices: (bsz x output_beam_size)
the indices of the chosen elements
beams: (bsz x output_beam_size)
the hypothesis ids of the chosen elements, in the range [0, input_beam_size)
"""
raise NotImplementedError
def set_src_lengths(self, src_lengths):
self.src_lengths = src_lengths
class BeamSearch(Search):
def __init__(self, tgt_dict):
super().__init__(tgt_dict)
def step(self, step, lprobs, scores):
super()._init_buffers(lprobs)
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
else:
# make probs contain cumulative scores for each hypothesis
lprobs.add_(scores[:, :, step - 1].unsqueeze(-1))
torch.topk(
lprobs.view(bsz, -1),
k=min(
# Take the best 2 x beam_size predictions. We'll choose the first
# beam_size of these which don't predict eos to continue with.
beam_size * 2,
lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad
),
out=(self.scores_buf, self.indices_buf),
)
torch.div(self.indices_buf, vocab_size, out=self.beams_buf)
self.indices_buf.fmod_(vocab_size)
return self.scores_buf, self.indices_buf, self.beams_buf
class LengthConstrainedBeamSearch(Search):
def __init__(self, tgt_dict, min_len_a, min_len_b, max_len_a, max_len_b):
super().__init__(tgt_dict)
self.min_len_a = min_len_a
self.min_len_b = min_len_b
self.max_len_a = max_len_a
self.max_len_b = max_len_b
self.beam = BeamSearch(tgt_dict)
def step(self, step, lprobs, scores):
min_lens = self.min_len_a * self.src_lengths + self.min_len_b
max_lens = self.max_len_a * self.src_lengths + self.max_len_b
lprobs[step < min_lens, :, self.eos] = -math.inf
lprobs[step == max_lens, :, self.eos] = 0
lprobs[step > max_lens, :, self.eos] = -math.inf
return self.beam.step(step, lprobs, scores)
class DiverseBeamSearch(Search):
"""Diverse Beam Search.
See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence
Models" for details.
We only implement the Hamming Diversity penalty here, which performed best
in the original paper.
"""
def __init__(self, tgt_dict, num_groups, diversity_strength):
super().__init__(tgt_dict)
self.num_groups = num_groups
self.diversity_strength = -diversity_strength
self.diversity_buf = None
self.beam = BeamSearch(tgt_dict)
def step(self, step, lprobs, scores):
super()._init_buffers(lprobs)
bsz, beam_size, vocab_size = lprobs.size()
if beam_size % self.num_groups != 0:
raise ValueError(
'DiverseBeamSearch requires --beam to be divisible by the number of groups'
)
# initialize diversity penalty
if self.diversity_buf is None:
self.diversity_buf = lprobs.new()
torch.zeros(lprobs[:, 0, :].size(), out=self.diversity_buf)
scores_G, indices_G, beams_G = [], [], []
for g in range(self.num_groups):
lprobs_g = lprobs[:, g::self.num_groups, :]
scores_g = scores[:, g::self.num_groups, :] if step > 0 else None
# apply diversity penalty
if g > 0:
lprobs_g = torch.add(lprobs_g, self.diversity_strength, self.diversity_buf.unsqueeze(1))
else:
lprobs_g = lprobs_g.contiguous()
scores_buf, indices_buf, beams_buf = self.beam.step(step, lprobs_g, scores_g)
beams_buf.mul_(self.num_groups).add_(g)
scores_G.append(scores_buf.clone())
indices_G.append(indices_buf.clone())
beams_G.append(beams_buf.clone())
# update diversity penalty
self.diversity_buf.scatter_add_(
1,
indices_buf,
self.diversity_buf.new_ones(indices_buf.size())
)
# interleave results from different groups
self.scores_buf = torch.stack(scores_G, dim=2, out=self.scores_buf).view(bsz, -1)
self.indices_buf = torch.stack(indices_G, dim=2, out=self.indices_buf).view(bsz, -1)
self.beams_buf = torch.stack(beams_G, dim=2, out=self.beams_buf).view(bsz, -1)
return self.scores_buf, self.indices_buf, self.beams_buf
class Sampling(Search):
def __init__(self, tgt_dict, sampling_topk=-1, sampling_topp=-1.0):
super().__init__(tgt_dict)
self.sampling_topk = sampling_topk
self.sampling_topp = sampling_topp
def _sample_topp(self, lprobs):
"""Sample among the smallest set of elements whose cumulative probability mass exceeds p.
See `"The Curious Case of Neural Text Degeneration"
(Holtzman et al., 2019) <https://arxiv.org/abs/1904.09751>`_.
Args:
lprobs: (bsz x input_beam_size x vocab_size)
the model's log-probabilities over the vocabulary at the current step
        Return: A tuple of (trimmed_probs, truncated_indices) where:
            trimmed_probs: (bsz x input_beam_size x ?)
the model's probabilities over the elements selected to sample from. The
width of the third dimension is determined by top-P.
truncated_indices: (bsz x input_beam_size x ?)
the indices of the chosen elements.
"""
probs = lprobs.exp_()
# sort the last dimension (vocab dimension) in descending order
sorted_probs, sorted_indices = probs.sort(descending=True)
# compute a mask to indicate the words to be included in the top-P set.
cumsum_probs = sorted_probs.cumsum(dim=2)
mask = cumsum_probs.lt(self.sampling_topp)
# note that mask was computed by 'lt'. One more word needs to be included
# so that the cumulative probability mass can exceed p.
cumsum_mask = mask.cumsum(dim=2)
last_included = cumsum_mask[:, :, -1:]
last_included.clamp_(0, mask.size()[2] - 1)
mask = mask.scatter_(2, last_included, 1)
# truncate unnecessary dims.
max_dim = last_included.max()
truncated_mask = mask[:, :, :max_dim + 1]
truncated_probs = sorted_probs[:, :, :max_dim + 1]
truncated_indices = sorted_indices[:, :, :max_dim + 1]
# trim the words that are not in top-P by setting their probabilities
# to 0, so that they would not be sampled later.
trim_mask = (~truncated_mask)
        trimmed_probs = truncated_probs.masked_fill_(trim_mask, 0)
        return trimmed_probs, truncated_indices
def step(self, step, lprobs, scores):
super()._init_buffers(lprobs)
bsz, beam_size, vocab_size = lprobs.size()
if step == 0:
# at the first step all hypotheses are equally likely, so use
# only the first beam
lprobs = lprobs[:, ::beam_size, :].contiguous()
# we exclude the first two vocab items, one of which is pad
assert self.pad <= 1, 'sampling assumes the first two symbols can be ignored'
lprobs_nopad = lprobs[:, :, 2:]
if self.sampling_topp > 0:
# only sample from the smallest set of words whose cumulative probability mass exceeds p
probs_nopad, top_indices = self._sample_topp(lprobs_nopad)
elif self.sampling_topk > 0:
# only sample from top-k candidates
lprobs_nopad, top_indices = lprobs_nopad.topk(self.sampling_topk)
probs_nopad = lprobs_nopad.exp_()
else:
probs_nopad = lprobs_nopad.exp_()
# sample
if step == 0:
self.indices_buf = torch.multinomial(
probs_nopad.view(bsz, -1),
beam_size,
replacement=True,
out=self.indices_buf,
).view(bsz, beam_size)
else:
self.indices_buf = torch.multinomial(
probs_nopad.view(bsz * beam_size, -1),
1,
replacement=True,
out=self.indices_buf,
).view(bsz, beam_size)
if step == 0:
# expand to beam size
probs_nopad = probs_nopad.expand(bsz, beam_size, -1)
# gather scores
torch.gather(
probs_nopad,
dim=2,
index=self.indices_buf.unsqueeze(-1),
out=self.scores_buf,
)
self.scores_buf = self.scores_buf.log_().view(bsz, -1)
# remap indices if using top-k or top-P sampling
if self.sampling_topk > 0 or self.sampling_topp > 0:
self.indices_buf = torch.gather(
top_indices.expand(bsz, beam_size, -1),
dim=2,
index=self.indices_buf.unsqueeze(-1),
).squeeze(2)
# remap indices since we excluded the first two vocab items
self.indices_buf.add_(2)
if step == 0:
self.beams_buf = self.indices_buf.new_zeros(bsz, beam_size)
else:
self.beams_buf = torch.arange(0, beam_size, out=self.beams_buf).repeat(bsz, 1)
# make scores cumulative
self.scores_buf.add_(
torch.gather(
scores[:, :, step - 1],
dim=1,
index=self.beams_buf,
)
)
return self.scores_buf, self.indices_buf, self.beams_buf
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/search.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Wrapper around various loggers and progress bars (e.g., tqdm).
"""
from collections import OrderedDict
import json
from numbers import Number
import os
import sys
from fairseq import distributed_utils
from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter
def build_progress_bar(args, iterator, epoch=None, prefix=None, default='tqdm', no_progress_bar='none'):
if args.log_format is None:
args.log_format = no_progress_bar if args.no_progress_bar else default
if args.log_format == 'tqdm' and not sys.stderr.isatty():
args.log_format = 'simple'
if args.log_format == 'json':
bar = json_progress_bar(iterator, epoch, prefix, args.log_interval)
elif args.log_format == 'none':
bar = noop_progress_bar(iterator, epoch, prefix)
elif args.log_format == 'simple':
bar = simple_progress_bar(iterator, epoch, prefix, args.log_interval)
elif args.log_format == 'tqdm':
bar = tqdm_progress_bar(iterator, epoch, prefix)
else:
raise ValueError('Unknown log format: {}'.format(args.log_format))
if args.tensorboard_logdir and distributed_utils.is_master(args):
try:
# [FB only] custom wrapper for TensorBoard
import palaas # noqa
from fairseq.fb_tbmf_wrapper import fb_tbmf_wrapper
bar = fb_tbmf_wrapper(bar, args, args.log_interval)
except ImportError:
bar = tensorboard_log_wrapper(bar, args.tensorboard_logdir, args)
return bar
def format_stat(stat):
if isinstance(stat, Number):
stat = '{:g}'.format(stat)
elif isinstance(stat, AverageMeter):
stat = '{:.3f}'.format(stat.val)
# stat = '{:.3f}'.format(stat.avg)
elif isinstance(stat, TimeMeter):
stat = '{:g}'.format(round(stat.avg))
elif isinstance(stat, StopwatchMeter):
stat = '{:g}'.format(round(stat.sum))
return stat
class progress_bar(object):
"""Abstract class for progress bars."""
def __init__(self, iterable, epoch=None, prefix=None):
self.iterable = iterable
self.offset = getattr(iterable, 'offset', 0)
self.epoch = epoch
self.prefix = ''
if epoch is not None:
self.prefix += '| epoch {:03d}'.format(epoch)
if prefix is not None:
self.prefix += ' | {}'.format(prefix)
def __len__(self):
return len(self.iterable)
def __enter__(self):
return self
def __exit__(self, *exc):
return False
def __iter__(self):
raise NotImplementedError
def log(self, stats, tag='', step=None):
"""Log intermediate stats according to log_interval."""
raise NotImplementedError
def print(self, stats, tag='', step=None):
"""Print end-of-epoch stats."""
raise NotImplementedError
def _str_commas(self, stats):
return ', '.join(key + '=' + stats[key].strip()
for key in stats.keys())
def _str_pipes(self, stats):
return ' | '.join(key + ' ' + stats[key].strip()
for key in stats.keys())
def _format_stats(self, stats):
postfix = OrderedDict(stats)
# Preprocess stats according to datatype
for key in postfix.keys():
postfix[key] = str(format_stat(postfix[key]))
return postfix
class json_progress_bar(progress_bar):
"""Log output in JSON format."""
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.stats = None
def __iter__(self):
size = float(len(self.iterable))
for i, obj in enumerate(self.iterable, start=self.offset):
yield obj
if self.stats is not None and i > 0 and \
self.log_interval is not None and i % self.log_interval == 0:
update = self.epoch - 1 + float(i / size) if self.epoch is not None else None
stats = self._format_stats(self.stats, epoch=self.epoch, update=update)
print(json.dumps(stats), flush=True)
def log(self, stats, tag='', step=None):
"""Log intermediate stats according to log_interval."""
self.stats = stats
def print(self, stats, tag='', step=None):
"""Print end-of-epoch stats."""
self.stats = stats
if tag != '':
self.stats = OrderedDict([(tag + '_' + k, v) for k, v in self.stats.items()])
stats = self._format_stats(self.stats, epoch=self.epoch)
print(json.dumps(stats), flush=True)
def _format_stats(self, stats, epoch=None, update=None):
postfix = OrderedDict()
if epoch is not None:
postfix['epoch'] = epoch
if update is not None:
postfix['update'] = round(update, 3)
# Preprocess stats according to datatype
for key in stats.keys():
postfix[key] = format_stat(stats[key])
return postfix
class noop_progress_bar(progress_bar):
"""No logging."""
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
def __iter__(self):
for obj in self.iterable:
yield obj
def log(self, stats, tag='', step=None):
"""Log intermediate stats according to log_interval."""
pass
def print(self, stats, tag='', step=None):
"""Print end-of-epoch stats."""
pass
class simple_progress_bar(progress_bar):
"""A minimal logger for non-TTY environments."""
def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000):
super().__init__(iterable, epoch, prefix)
self.log_interval = log_interval
self.stats = None
def __iter__(self):
size = len(self.iterable)
for i, obj in enumerate(self.iterable, start=self.offset):
yield obj
if self.stats is not None and i > 0 and \
self.log_interval is not None and i % self.log_interval == 0:
postfix = self._str_commas(self.stats)
print('{}: {:5d} / {:d} {}'.format(self.prefix, i, size, postfix),
flush=True)
def log(self, stats, tag='', step=None):
"""Log intermediate stats according to log_interval."""
self.stats = self._format_stats(stats)
def print(self, stats, tag='', step=None):
"""Print end-of-epoch stats."""
postfix = self._str_pipes(self._format_stats(stats))
print('{} | {}'.format(self.prefix, postfix), flush=True)
class tqdm_progress_bar(progress_bar):
"""Log to tqdm."""
def __init__(self, iterable, epoch=None, prefix=None):
super().__init__(iterable, epoch, prefix)
from tqdm import tqdm
self.tqdm = tqdm(iterable, self.prefix, leave=False)
def __iter__(self):
return iter(self.tqdm)
def log(self, stats, tag='', step=None):
"""Log intermediate stats according to log_interval."""
self.tqdm.set_postfix(self._format_stats(stats), refresh=False)
def print(self, stats, tag='', step=None):
"""Print end-of-epoch stats."""
postfix = self._str_pipes(self._format_stats(stats))
self.tqdm.write('{} | {}'.format(self.tqdm.desc, postfix))
class tensorboard_log_wrapper(progress_bar):
"""Log to tensorboard."""
def __init__(self, wrapped_bar, tensorboard_logdir, args):
self.wrapped_bar = wrapped_bar
self.tensorboard_logdir = tensorboard_logdir
self.args = args
try:
from tensorboardX import SummaryWriter
self.SummaryWriter = SummaryWriter
self._writers = {}
except ImportError:
print("tensorboard or required dependencies not found, "
"please see README for using tensorboard. (e.g. pip install tensorboardX)")
self.SummaryWriter = None
def _writer(self, key):
if self.SummaryWriter is None:
return None
if key not in self._writers:
self._writers[key] = self.SummaryWriter(
os.path.join(self.tensorboard_logdir, key),
)
self._writers[key].add_text('args', str(vars(self.args)))
self._writers[key].add_text('sys.argv', " ".join(sys.argv))
return self._writers[key]
def __iter__(self):
return iter(self.wrapped_bar)
def log(self, stats, tag='', step=None):
"""Log intermediate stats to tensorboard."""
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.log(stats, tag=tag, step=step)
def print(self, stats, tag='', step=None):
"""Print end-of-epoch stats."""
self._log_to_tensorboard(stats, tag, step)
self.wrapped_bar.print(stats, tag=tag, step=step)
def __exit__(self, *exc):
for writer in getattr(self, '_writers', {}).values():
writer.close()
return False
def _log_to_tensorboard(self, stats, tag='', step=None):
writer = self._writer(tag)
if writer is None:
return
if step is None:
step = stats['num_updates']
for key in stats.keys() - {'num_updates'}:
if isinstance(stats[key], AverageMeter):
writer.add_scalar(key, stats[key].val, step)
elif isinstance(stats[key], Number):
writer.add_scalar(key, stats[key], step)
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/progress_bar.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Train a network across multiple GPUs.
"""
import contextlib
import math
import os
import sys
from collections import OrderedDict
from itertools import chain
import torch
from fairseq import checkpoint_utils, distributed_utils, models, optim, utils
from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter
from fairseq.optim import lr_scheduler
class Trainer(object):
"""Main class for data parallel training.
This class supports synchronous distributed data parallel training,
where multiple workers each have a full model replica and gradients
are accumulated across workers before each update. We use
:class:`~torch.nn.parallel.DistributedDataParallel` to handle
communication of the gradients across workers.
"""
def __init__(self, args, task, model, criterion, dummy_batch=None, oom_batch=None):
self.args = args
self.task = task
# copy model and criterion to current device
self._criterion = criterion
self._model = model
self.cuda = torch.cuda.is_available() and not args.cpu
if args.fp16:
self._criterion = self._criterion.half()
self._model = self._model.half()
if self.cuda:
self._criterion = self._criterion.cuda()
self._model = self._model.cuda()
self._dummy_batch = dummy_batch
self._oom_batch = oom_batch or dummy_batch
self._lr_scheduler = None
self._num_updates = 0
self._optim_history = None
self._optimizer = None
self._prev_grad_norm = None
self._wrapped_criterion = None
self._wrapped_model = None
# Fast stats sync avoids memcpy and is 7% faster when tested on 16 nodes.
# It is less flexible and syncs only the default stats.
self._all_reduce_list = [0.0] * 6
self.fast_stat_sync = args.fast_stat_sync
self.init_meters(args)
def init_meters(self, args):
self.meters = OrderedDict()
self.meters["train_loss"] = AverageMeter()
self.meters["train_nll_loss"] = AverageMeter()
self.meters["valid_loss"] = AverageMeter()
self.meters["valid_nll_loss"] = AverageMeter()
self.meters["wps"] = TimeMeter() # words per second
self.meters["ups"] = TimeMeter() # updates per second
self.meters["wpb"] = AverageMeter() # words per batch
self.meters["bsz"] = AverageMeter() # sentences per batch
self.meters["gnorm"] = AverageMeter() # gradient norm
self.meters["clip"] = AverageMeter() # % of updates clipped
self.meters["oom"] = AverageMeter() # out of memory
if args.fp16:
self.meters["loss_scale"] = AverageMeter() # dynamic loss scale
self.meters["wall"] = TimeMeter() # wall time in seconds
self.meters["train_wall"] = StopwatchMeter() # train wall time in seconds
@property
def criterion(self):
if self._wrapped_criterion is None:
if (
utils.has_parameters(self._criterion)
and self.args.distributed_world_size > 1
and not self.args.use_bmuf
):
self._wrapped_criterion = models.DistributedFairseqModel(
self.args, self._criterion
)
else:
self._wrapped_criterion = self._criterion
return self._wrapped_criterion
@property
def model(self):
if self._wrapped_model is None:
if self.args.distributed_world_size > 1 and not self.args.use_bmuf:
self._wrapped_model = models.DistributedFairseqModel(
self.args, self._model
)
else:
self._wrapped_model = self._model
return self._wrapped_model
@property
def optimizer(self):
if self._optimizer is None:
self._build_optimizer()
return self._optimizer
@property
def lr_scheduler(self):
if self._lr_scheduler is None:
self._build_optimizer() # this will initialize self._lr_scheduler
return self._lr_scheduler
def _build_optimizer(self):
params = list(
filter(
lambda p: p.requires_grad,
chain(self.model.parameters(), self.criterion.parameters()),
)
)
if self.args.fp16:
if self.cuda and torch.cuda.get_device_capability(0)[0] < 7:
print(
"| WARNING: your device does NOT support faster training with --fp16, "
"please switch to FP32 which is likely to be faster"
)
if self.args.memory_efficient_fp16:
self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer(
self.args, params
)
else:
self._optimizer = optim.FP16Optimizer.build_optimizer(self.args, params)
else:
if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7:
print("| NOTICE: your device may support faster training with --fp16")
self._optimizer = optim.build_optimizer(self.args, params)
if self.args.use_bmuf:
self._optimizer = optim.FairseqBMUF(self.args, self._optimizer)
# We should initialize the learning rate scheduler immediately after
# building the optimizer, so that the initial learning rate is set.
self._lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self.optimizer)
self._lr_scheduler.step_update(0)
def save_checkpoint(self, filename, extra_state):
"""Save all training state in a checkpoint file."""
if distributed_utils.is_master(self.args): # only save one checkpoint
extra_state["train_meters"] = self.meters
checkpoint_utils.save_state(
filename,
self.args,
self.get_model().state_dict(),
self.get_criterion(),
self.optimizer,
self.lr_scheduler,
self.get_num_updates(),
self._optim_history,
extra_state,
)
def load_checkpoint(
self,
filename,
reset_optimizer=False,
reset_lr_scheduler=False,
optimizer_overrides=None,
reset_meters=False,
):
"""Load all training state from a checkpoint file."""
extra_state, self._optim_history, last_optim_state = None, [], None
try:
from fairseq.fb_pathmgr import fb_pathmgr
bexists = fb_pathmgr.isfile(filename)
except (ModuleNotFoundError, ImportError):
bexists = os.path.exists(filename)
if bexists:
state = checkpoint_utils.load_checkpoint_to_cpu(filename)
# load model parameters
try:
self.get_model().load_state_dict(
state["model"], strict=True, args=self.args
)
if utils.has_parameters(self.get_criterion()):
self.get_criterion().load_state_dict(
state["criterion"], strict=True
)
except Exception:
raise Exception(
"Cannot load model parameters from checkpoint {}; "
"please ensure that the architectures match.".format(filename)
)
extra_state = state["extra_state"]
self._optim_history = state["optimizer_history"]
last_optim_state = state.get("last_optimizer_state", None)
if last_optim_state is not None and not reset_optimizer:
# rebuild optimizer after loading model, since params may have changed
self._build_optimizer()
# only reload optimizer and lr_scheduler if they match
last_optim = self._optim_history[-1]
assert (
last_optim["criterion_name"] == self.get_criterion().__class__.__name__
), "Criterion does not match; please reset the optimizer (--reset-optimizer)."
assert (
last_optim["optimizer_name"] == self.optimizer.__class__.__name__
), "Optimizer does not match; please reset the optimizer (--reset-optimizer)."
if not reset_lr_scheduler:
self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"])
self.optimizer.load_state_dict(last_optim_state, optimizer_overrides)
self.set_num_updates(last_optim["num_updates"])
if extra_state is not None:
epoch = extra_state["train_iterator"]["epoch"]
print(
"| loaded checkpoint {} (epoch {} @ {} updates)".format(
filename, epoch, self.get_num_updates()
)
)
self.lr_step(epoch)
if "train_meters" in extra_state and not reset_meters:
self.meters.update(extra_state["train_meters"])
del extra_state["train_meters"]
# reset TimeMeters, since their start times don't make sense anymore
for meter in self.meters.values():
if isinstance(meter, TimeMeter):
meter.reset()
else:
print("| no existing checkpoint found {}".format(filename))
return extra_state
def get_train_iterator(
self,
epoch,
combine=True,
load_dataset=True,
data_selector=None,
shard_batch_itr=True,
):
"""Return an EpochBatchIterator over the training set for a given epoch."""
if load_dataset:
print("| loading train data for epoch {}".format(epoch))
self.task.load_dataset(
self.args.train_subset,
epoch=epoch,
combine=combine,
data_selector=data_selector,
)
print("| Max positions: " + str((self.task.max_positions(), self.model.max_positions())))
return self.task.get_batch_iterator(
dataset=self.task.dataset(self.args.train_subset),
max_tokens=self.args.max_tokens,
max_sentences=self.args.max_sentences,
max_positions=utils.resolve_max_positions(
self.task.max_positions(), self.model.max_positions()
),
ignore_invalid_inputs=True,
required_batch_size_multiple=self.args.required_batch_size_multiple,
seed=self.args.seed,
num_shards=self.args.distributed_world_size if shard_batch_itr else 1,
shard_id=self.args.distributed_rank if shard_batch_itr else 0,
num_workers=self.args.num_workers,
epoch=epoch,
)
def train_step(self, samples, dummy_batch=False, raise_oom=False):
"""Do forward, backward and parameter update."""
if self._dummy_batch is None:
self._dummy_batch = samples[0]
self._set_seed()
self.model.train()
self.criterion.train()
self.zero_grad()
if not dummy_batch:
self.meters["train_wall"].start()
# forward and backward pass
logging_outputs, sample_sizes, ooms = [], [], 0
for i, sample in enumerate(samples):
sample = self._prepare_sample(sample)
if sample is None:
# when sample is None, run forward/backward on a dummy batch
# and ignore the resulting gradients
sample = self._prepare_sample(self._dummy_batch)
ignore_grad = True
else:
ignore_grad = False
def maybe_no_sync():
"""
Whenever *samples* contains more than one mini-batch, we
want to accumulate gradients locally and only call
all-reduce in the last backwards pass.
"""
if (
self.args.distributed_world_size > 1
and hasattr(self.model, "no_sync")
and i < len(samples) - 1
):
return self.model.no_sync()
else:
return contextlib.ExitStack() # dummy contextmanager
try:
with maybe_no_sync():
# forward and backward
loss, sample_size, logging_output = self.task.train_step(
sample, self.model, self.criterion, self.optimizer, ignore_grad
)
if not ignore_grad:
logging_outputs.append(logging_output)
sample_sizes.append(sample_size)
if self.fast_stat_sync:
self._all_reduce_list[0] += sample_size
self._all_reduce_list[1] += logging_output.get(
"nsentences", 0.0
)
self._all_reduce_list[2] += logging_output.get("loss", 0.0)
self._all_reduce_list[3] += logging_output.get("nll_loss", 0.0)
self._all_reduce_list[4] += logging_output.get("ntokens", 0.0)
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if raise_oom:
raise e
print("| WARNING OOM!", flush=True)
print(
"| WARNING: attempting to recover from OOM in forward/backward pass",
file=sys.stderr,
)
ooms += 1
self.zero_grad()
else:
raise e
if self.fast_stat_sync:
self._all_reduce_list[5] += ooms
if ooms > 0 and self._oom_batch is not None:
self.handle_ooms(ooms)
if dummy_batch:
return None
# gather logging outputs from all replicas
if self.fast_stat_sync:
# rework all_gather_list
all_reduce_list_tensor = torch.cuda.DoubleTensor(self._all_reduce_list)
if self._sync_stats():
torch.distributed.all_reduce(all_reduce_list_tensor)
# Normalize loss and nll_loss by "sample_size"
# and convert to log base 2
all_reduce_list_tensor[2:4].div_(
(all_reduce_list_tensor[0:1] * torch.log(torch.cuda.DoubleTensor([2])))
)
self._all_reduce_list = all_reduce_list_tensor.tolist()
logging_output = {}
[
sample_size,
logging_output["nsentences"],
logging_output["loss"],
logging_output["nll_loss"],
logging_output["ntokens"],
ooms,
] = self._all_reduce_list
elif self._sync_stats():
logging_outputs, sample_sizes, ooms, prev_norms = zip(
*distributed_utils.all_gather_list(
[logging_outputs, sample_sizes, ooms, self._prev_grad_norm]
)
)
logging_outputs = list(chain.from_iterable(logging_outputs))
sample_sizes = list(chain.from_iterable(sample_sizes))
ooms = sum(ooms)
if not self.args.use_bmuf:
assert all(norm == prev_norms[0] for norm in prev_norms) or all(
math.isnan(norm) or math.isinf(norm) for norm in prev_norms
), "Fatal error: gradients are inconsistent between workers"
self.meters["oom"].update(ooms, len(samples))
if ooms == self.args.distributed_world_size * len(samples):
print("| WARNING: OOM in all workers, skipping update")
self.zero_grad()
return None
if not self.fast_stat_sync:
# aggregate logging outputs and sample sizes
logging_output = self.task.aggregate_logging_outputs(
logging_outputs, self.get_criterion()
)
sample_size = self.task.grad_denom(sample_sizes, self.get_criterion())
if not all(k in logging_output for k in ["ntokens", "nsentences"]):
raise Exception(
(
"Please update the {}.aggregate_logging_outputs() method to "
"return ntokens and nsentences"
).format(self.task.__class__.__name__)
)
try:
# normalize grads by sample size
if sample_size > 0:
self.optimizer.multiply_grads(
self.args.distributed_world_size / float(sample_size)
)
# clip grads
grad_norm = self.optimizer.clip_grad_norm(self.args.clip_norm)
self._prev_grad_norm = grad_norm
# take an optimization step
self.optimizer.step()
self.set_num_updates(self.get_num_updates() + 1)
# task specific update per step
self.task.update_step(self._num_updates)
# update meters
ntokens = logging_output.get("ntokens", 0)
nsentences = logging_output.get("nsentences", 0)
self.meters["wps"].update(ntokens)
self.meters["ups"].update(1.0)
self.meters["wpb"].update(ntokens)
self.meters["bsz"].update(nsentences)
self.meters["gnorm"].update(grad_norm)
self.meters["clip"].update(
1.0
if grad_norm > self.args.clip_norm and self.args.clip_norm > 0
else 0.0
)
self.meters["train_loss"].update(logging_output.get("loss", 0), sample_size)
if "train_acc" in self.meters:
self.meters["train_acc"].update(
logging_output.get("acc", 0), sample_size
)
if "nll_loss" in logging_output:
self.meters["train_nll_loss"].update(
logging_output.get("nll_loss", 0), ntokens
)
# clear CUDA cache to reduce memory fragmentation
if (
self.args.empty_cache_freq > 0
and (
(self.get_num_updates() + self.args.empty_cache_freq - 1)
% self.args.empty_cache_freq
)
== 0
and torch.cuda.is_available()
and not self.args.cpu
):
torch.cuda.empty_cache()
except OverflowError as e:
print("| WARNING: overflow detected, " + str(e))
self.zero_grad()
logging_output = None
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
print("| ERROR: OOM during optimization, irrecoverable")
raise e
if self.args.fp16:
self.meters["loss_scale"].reset()
self.meters["loss_scale"].update(self.optimizer.scaler.loss_scale)
self.clear_buffered_stats()
self.meters["train_wall"].stop()
return logging_output
def valid_step(self, sample, raise_oom=False):
"""Do forward pass in evaluation mode."""
with torch.no_grad():
self.model.eval()
self.criterion.eval()
sample = self._prepare_sample(sample)
if sample is None:
sample = self._prepare_sample(self._dummy_batch)
ignore_results = True
else:
ignore_results = False
try:
_loss, sample_size, logging_output = self.task.valid_step(
sample, self.model, self.criterion
)
except RuntimeError as e:
if "out of memory" in str(e):
self._log_oom(e)
if not raise_oom:
print(
"| WARNING: ran out of memory in validation step, retrying batch"
)
for p in self.model.parameters():
if p.grad is not None:
p.grad = None # free some memory
if self.cuda:
torch.cuda.empty_cache()
return self.valid_step(sample, raise_oom=True)
raise e
if ignore_results:
logging_output, sample_size = {}, 0
# gather logging outputs from all replicas
if self.args.distributed_world_size > 1:
logging_output, sample_size = zip(
*distributed_utils.all_gather_list([logging_output, sample_size])
)
logging_output = list(logging_output)
sample_size = list(sample_size)
else:
logging_output = [logging_output]
sample_size = [sample_size]
# aggregate logging outputs and sample sizes
logging_output = self.task.aggregate_logging_outputs(
logging_output, self.get_criterion()
)
sample_size = self.task.grad_denom(sample_size, self.get_criterion())
# update meters for validation
ntokens = logging_output.get("ntokens", 0)
self.meters["valid_loss"].update(logging_output.get("loss", 0), sample_size)
if "valid_acc" in self.meters:
self.meters["valid_acc"].update(logging_output.get("acc", 0), sample_size)
if "nll_loss" in logging_output:
self.meters["valid_nll_loss"].update(
logging_output.get("nll_loss", 0), ntokens
)
return logging_output
def dummy_train_step(self, dummy_batch):
"""Dummy training step for warming caching allocator."""
self.train_step(dummy_batch, dummy_batch=True)
self.zero_grad()
def handle_ooms(self, number_of_ooms):
"""
        c10d accumulates/syncs gradients between GPUs during the backward pass.
        In case of OOMs, GPUs may fail to sync, so we manually run extra
        iterations to make sure each GPU performs the same number of iterations.
"""
for _ in range(number_of_ooms):
self.train_step([self._oom_batch], True)
def zero_grad(self):
self.optimizer.zero_grad()
def clear_buffered_stats(self):
self._all_reduce_list = [0.0] * 6
def lr_step(self, epoch, val_loss=None):
"""Adjust the learning rate based on the validation loss."""
self.lr_scheduler.step(epoch, val_loss)
# prefer updating the LR based on the number of steps
return self.lr_step_update()
def lr_step_update(self):
"""Update the learning rate after each update."""
return self.lr_scheduler.step_update(self.get_num_updates())
def get_lr(self):
"""Get the current learning rate."""
return self.optimizer.get_lr()
def get_model(self):
"""Get the (non-wrapped) model instance."""
return self._model
def get_criterion(self):
"""Get the (non-wrapped) criterion instance."""
return self._criterion
def get_meter(self, name):
"""Get a specific meter by name."""
if name not in self.meters:
return None
return self.meters[name]
def get_num_updates(self):
"""Get the number of parameters updates."""
return self._num_updates
def set_num_updates(self, num_updates):
"""Set the number of parameters updates."""
self._num_updates = num_updates
self.lr_step_update()
def _prepare_sample(self, sample):
if sample is None or len(sample) == 0:
return None
if self.cuda:
sample = utils.move_to_cuda(sample)
def apply_half(t):
if t.dtype is torch.float32:
return t.half()
return t
if self.args.fp16:
sample = utils.apply_to_sample(apply_half, sample)
return sample
def _set_seed(self):
# Set seed based on args.seed and the update number so that we get
# reproducible results when resuming from checkpoints
seed = self.args.seed + self.get_num_updates()
torch.manual_seed(seed)
if self.cuda:
torch.cuda.manual_seed(seed)
def _sync_stats(self):
return self.args.distributed_world_size > 1 and (
(not self.args.use_bmuf)
or (
self.args.use_bmuf
and (self.get_num_updates() + 1) % self.args.global_sync_iter == 0
)
)
def _log_oom(self, exc):
msg = "| OOM: Ran out of memory with exception: {}".format(exc)
        # TODO: this should go through a logger; the print goes to stderr,
        # which is buffered and in many cases is not printed out if another
        # exception happens.
# NB(jerry): added a flush to mitigate this
print(msg, file=sys.stderr)
if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"):
for device_idx in range(torch.cuda.device_count()):
print(torch.cuda.memory_summary(device=device_idx), file=sys.stderr)
sys.stderr.flush()
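# --- Illustrative usage sketch (added for documentation; not part of the original fairseq source) ---
# A minimal, single-process illustration of the gradient-accumulation pattern used by
# maybe_no_sync() inside Trainer.train_step(): when one update is built from several
# mini-batches, gradients are accumulated locally and (under DistributedDataParallel)
# only the last backward pass triggers the gradient all-reduce. The model, data and
# helper name below are hypothetical; in this non-distributed sketch DDP's no_sync()
# is replaced by a dummy contextlib.ExitStack(), mirroring the fallback above.
def _example_gradient_accumulation():
    import contextlib

    import torch

    model = torch.nn.Linear(8, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    samples = [torch.randn(4, 8) for _ in range(3)]  # one update built from 3 mini-batches

    optimizer.zero_grad()
    for i, sample in enumerate(samples):
        # With DDP this would be `model.no_sync()` whenever i < len(samples) - 1.
        with contextlib.ExitStack():
            loss = model(sample).sum()
            loss.backward()  # gradients accumulate across mini-batches
    optimizer.step()  # a single parameter update for the whole group of mini-batches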
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/trainer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import Counter
import os
from fairseq.tokenizer import tokenize_line
def safe_readline(f):
pos = f.tell()
while True:
try:
return f.readline()
except UnicodeDecodeError:
pos -= 1
f.seek(pos) # search where this character begins
class Binarizer:
@staticmethod
def binarize(filename, dict, consumer, tokenize=tokenize_line, append_eos=True, reverse_order=False,
offset=0, end=-1):
nseq, ntok = 0, 0
replaced = Counter()
def replaced_consumer(word, idx):
if idx == dict.unk_index and word != dict.unk_word:
replaced.update([word])
with open(filename, 'r', encoding='utf-8') as f:
f.seek(offset)
# next(f) breaks f.tell(), hence readline() must be used
line = safe_readline(f)
while line:
if end > 0 and f.tell() > end:
break
ids = dict.encode_line(
line=line,
line_tokenizer=tokenize,
add_if_not_exist=False,
consumer=replaced_consumer,
append_eos=append_eos,
reverse_order=reverse_order,
)
nseq += 1
ntok += len(ids)
consumer(ids)
line = f.readline()
return {'nseq': nseq, 'nunk': sum(replaced.values()), 'ntok': ntok, 'replaced': replaced}
@staticmethod
def binarize_alignments(filename, alignment_parser, consumer, offset=0, end=-1):
nseq = 0
with open(filename, 'r') as f:
f.seek(offset)
line = safe_readline(f)
while line:
if end > 0 and f.tell() > end:
break
ids = alignment_parser(line)
nseq += 1
consumer(ids)
line = f.readline()
return {'nseq': nseq}
@staticmethod
def find_offsets(filename, num_chunks):
with open(filename, 'r', encoding='utf-8') as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_chunks
offsets = [0 for _ in range(num_chunks + 1)]
for i in range(1, num_chunks):
f.seek(chunk_size * i)
safe_readline(f)
offsets[i] = f.tell()
return offsets
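# --- Illustrative usage sketch (added for documentation; not part of the original fairseq source) ---
# A small, self-contained demonstration of how find_offsets() splits a corpus into
# byte-offset chunks so that each preprocessing worker can binarize its own slice with
# Binarizer.binarize(..., offset=..., end=...). The temporary corpus and the helper
# name are made up for illustration.
def _example_find_offsets(num_workers=4):
    import tempfile

    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        for i in range(100):
            f.write('sentence number {}\n'.format(i))
        path = f.name

    offsets = Binarizer.find_offsets(path, num_workers)
    # offsets has num_workers + 1 entries; worker k starts reading at offsets[k] and
    # stops at offsets[k + 1] (the final entry stays 0, which binarize() treats as
    # "read to end of file").
    for k in range(num_workers):
        print('worker {}: offset={} end={}'.format(k, offsets[k], offsets[k + 1]))
    return offsets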
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/binarizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torch
from fairseq import tokenizer
from fairseq.data import (
data_utils,
FairseqDataset,
iterators,
Dictionary,
)
class FairseqTask(object):
"""
Tasks store dictionaries and provide helpers for loading/iterating over
Datasets, initializing the Model/Criterion and calculating the loss.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
pass
def __init__(self, args):
self.args = args
self.datasets = {}
self.dataset_to_epoch_iter = {}
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
return Dictionary.load(filename)
@classmethod
def build_dictionary(cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8):
"""Build the dictionary
Args:
filenames (list): list of filenames
workers (int): number of concurrent workers
threshold (int): defines the minimum word count
nwords (int): defines the total number of words in the final dictionary,
including special symbols
padding_factor (int): can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
d = Dictionary()
for filename in filenames:
Dictionary.add_file_to_dictionary(filename, d, tokenizer.tokenize_line, workers)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
return cls(args, **kwargs)
def load_dataset(self, split, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
raise NotImplementedError
def dataset(self, split):
"""
Return a loaded dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
Returns:
a :class:`~fairseq.data.FairseqDataset` corresponding to *split*
"""
from fairseq.data import FairseqDataset
if split not in self.datasets:
raise KeyError('Dataset not loaded: ' + split)
if not isinstance(self.datasets[split], FairseqDataset):
raise TypeError('Datasets are expected to be of type FairseqDataset')
return self.datasets[split]
def get_batch_iterator(
self, dataset, max_tokens=None, max_sentences=None, max_positions=None,
ignore_invalid_inputs=False, required_batch_size_multiple=1,
seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=0,
):
"""
Get an iterator that yields batches of data from the given dataset.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
required_batch_size_multiple (int, optional): require batch size to
be a multiple of N (default: 1).
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 0).
Returns:
~fairseq.iterators.EpochBatchIterator: a batched iterator over the
given dataset split
"""
        # For the default fairseq task, return the same iterator across epochs,
        # since datasets are not dynamic; this can be overridden in
        # task-specific settings.
print("| At task.get_batch_iterator ...", flush=True)
if dataset in self.dataset_to_epoch_iter:
return self.dataset_to_epoch_iter[dataset]
assert isinstance(dataset, FairseqDataset)
# initialize the dataset with the correct starting epoch
dataset.set_epoch(epoch)
# get indices ordered by example size
with data_utils.numpy_seed(seed):
indices = dataset.ordered_indices()
print("| At task.get_batch_iterator, indices ordered ... ", flush=True)
# filter examples that are too large
if max_positions is not None:
indices = data_utils.filter_by_size(
indices, dataset, max_positions, raise_exception=(not ignore_invalid_inputs),
)
print("| At task.get_batch_iterator, examples filtered ... ", flush=True)
# create mini-batches with given size constraints
batch_sampler = data_utils.batch_by_size(
indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
print("| At task.get_batch_iterator, batch_sampler created ... ", flush=True)
# return a reusable, sharded iterator
epoch_iter = iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
)
self.dataset_to_epoch_iter[dataset] = epoch_iter
print("| At task.get_batch_iterator, iterator created ... ", flush=True)
return epoch_iter
def build_model(self, args):
"""
Build the :class:`~fairseq.models.BaseFairseqModel` instance for this
task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.models.BaseFairseqModel` instance
"""
from fairseq import models
return models.build_model(args, self)
def build_criterion(self, args):
"""
Build the :class:`~fairseq.criterions.FairseqCriterion` instance for
this task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.criterions.FairseqCriterion` instance
"""
from fairseq import criterions
return criterions.build_criterion(args, self)
def build_generator(self, args):
if getattr(args, 'score_reference', False):
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(self.target_dictionary)
else:
from fairseq.sequence_generator import SequenceGenerator, SequenceGeneratorWithAlignment
if getattr(args, 'print_alignment', False):
seq_gen_cls = SequenceGeneratorWithAlignment
else:
seq_gen_cls = SequenceGenerator
return seq_gen_cls(
self.target_dictionary,
beam_size=getattr(args, 'beam', 5),
max_len_a=getattr(args, 'max_len_a', 0),
max_len_b=getattr(args, 'max_len_b', 200),
min_len=getattr(args, 'min_len', 1),
normalize_scores=(not getattr(args, 'unnormalized', False)),
len_penalty=getattr(args, 'lenpen', 1),
unk_penalty=getattr(args, 'unkpen', 0),
sampling=getattr(args, 'sampling', False),
sampling_topk=getattr(args, 'sampling_topk', -1),
sampling_topp=getattr(args, 'sampling_topp', -1.0),
temperature=getattr(args, 'temperature', 1.),
diverse_beam_groups=getattr(args, 'diverse_beam_groups', -1),
diverse_beam_strength=getattr(args, 'diverse_beam_strength', 0.5),
match_source_len=getattr(args, 'match_source_len', False),
no_repeat_ngram_size=getattr(args, 'no_repeat_ngram_size', 0),
)
def prepare_train(self, model=None, criterion=None):
pass
def train_step(self, sample, model, criterion, optimizer, ignore_grad=False):
"""
Do forward and backward, and return the loss as computed by *criterion*
for the given *model* and *sample*.
Args:
sample (dict): the mini-batch. The format is defined by the
:class:`~fairseq.data.FairseqDataset`.
model (~fairseq.models.BaseFairseqModel): the model
criterion (~fairseq.criterions.FairseqCriterion): the criterion
optimizer (~fairseq.optim.FairseqOptimizer): the optimizer
ignore_grad (bool): multiply loss by 0 if this is set to True
Returns:
tuple:
- the loss
- the sample size, which is used as the denominator for the
gradient
- logging outputs to display while training
"""
model.train()
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
def inference_step(self, generator, models, sample, prefix_tokens=None):
with torch.no_grad():
return generator.generate(models, sample, prefix_tokens=prefix_tokens)
def update_step(self, num_updates):
"""Task level update when number of update increases. This is called after optimization step and
learning rate update of each step"""
pass
def grad_denom(self, sample_sizes, criterion):
return criterion.__class__.grad_denom(sample_sizes)
def aggregate_logging_outputs(self, logging_outputs, criterion):
return criterion.__class__.aggregate_logging_outputs(logging_outputs)
def max_positions(self):
"""Return the max input length allowed by the task."""
return None
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
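# --- Illustrative usage sketch (added for documentation; not part of the original fairseq source) ---
# A minimal outline of a user-defined task built on FairseqTask, showing the hooks a
# real task typically overrides (add_args, setup_task, load_dataset, dictionaries).
# The task class, argument and dictionary path are hypothetical; a real task would be
# registered at module level with @register_task('example_copy') so the CLI can find it.
def _example_define_custom_task():
    import os

    from fairseq.data import Dictionary

    class ExampleCopyTask(FairseqTask):

        @staticmethod
        def add_args(parser):
            parser.add_argument('data', help='path to data directory (hypothetical)')

        @classmethod
        def setup_task(cls, args, **kwargs):
            dictionary = Dictionary.load(os.path.join(args.data, 'dict.txt'))
            return cls(args, dictionary)

        def __init__(self, args, dictionary):
            super().__init__(args)
            self.dictionary = dictionary

        def load_dataset(self, split, combine=False, **kwargs):
            # A real task builds a FairseqDataset here and stores it in self.datasets[split].
            raise NotImplementedError

        @property
        def source_dictionary(self):
            return self.dictionary

        @property
        def target_dictionary(self):
            return self.dictionary

    return ExampleCopyTask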
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/tasks/fairseq_task.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from fairseq.data import (
data_utils,
Dictionary,
AppendTokenDataset,
DenoisingDataset,
PrependTokenDataset,
StripTokenDataset,
TokenBlockDataset,
)
from fairseq.data.encoders.utils import get_whole_word_mask
from . import FairseqTask, register_task
@register_task('denoising')
class DenoisingTask(FairseqTask):
"""
    Denoising task for applying sequence-to-sequence denoising (i.e., BART).
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', help='path to data directory')
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of total tokens over all segments'
' per sample for dataset')
parser.add_argument('--raw-text', default=False, action='store_true',
help='load raw text dataset')
parser.add_argument(
'--sample-break-mode', default="complete_doc", type=str,
help='mode for breaking sentence',
)
parser.add_argument(
'--mask', default=0.0, type=float,
help='fraction of words/subwords that will be masked',
)
parser.add_argument(
'--mask-random', default=0.0, type=float,
help='instead of using [MASK], use random token this often'
)
parser.add_argument(
'--insert', default=0.0, type=float,
help='insert this percentage of additional random tokens',
)
parser.add_argument(
'--permute', default=0.0, type=float,
help='take this proportion of subwords and permute them',
)
parser.add_argument(
'--rotate', default=0.5, type=float,
help='rotate this proportion of inputs',
)
parser.add_argument(
'--poisson-lambda', default=3.0, type=float,
            help='lambda of the Poisson distribution used to sample masked span lengths (used with --mask-length span-poisson)'
)
parser.add_argument(
'--permute-sentences', default=0.0, type=float,
help='shuffle this proportion of sentences in all inputs'
)
parser.add_argument(
'--mask-length', default="subword", type=str,
choices=['subword', 'word', 'span-poisson'],
help='mask length to choose'
)
parser.add_argument(
'--replace-length', default=-1, type=int,
help='when masking N tokens, replace with 0, 1, or N tokens (use -1 for N)'
)
parser.add_argument(
'--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence'
)
parser.add_argument(
'--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence'
)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
# add mask token
self.mask_idx = self.dictionary.add_symbol('<mask>')
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task.
"""
dictionary = Dictionary.load(os.path.join(args.data, 'dict.txt'))
print('| dictionary: {} types'.format(len(dictionary)))
if not hasattr(args, 'shuffle_instance'):
args.shuffle_instance = False
return cls(args, dictionary)
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = self.args.data.split(':')
assert len(paths) > 0
data_path = paths[epoch % len(paths)]
split_path = os.path.join(data_path, split)
dataset = data_utils.load_indexed_dataset(
split_path,
self.dictionary,
self.args.dataset_impl,
combine=combine,
)
if dataset is None:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, split_path))
dataset = StripTokenDataset(dataset, self.dictionary.eos())
# create continuous blocks of tokens
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample - 2, # one less for <s> and one for </s>
pad=self.dictionary.pad(),
eos=self.dictionary.eos(),
break_mode=self.args.sample_break_mode,
document_sep_len=0
)
# prepend beginning-of-sentence token (<s>, equiv. to [CLS] in BERT)
dataset = PrependTokenDataset(dataset, self.source_dictionary.bos())
dataset = AppendTokenDataset(dataset, self.source_dictionary.eos())
mask_whole_words = get_whole_word_mask(self.args, self.source_dictionary) \
if self.args.mask_length != 'subword' else None
self.datasets[split] = DenoisingDataset(
dataset, dataset.sizes, self.dictionary, self.mask_idx,
mask_whole_words, shuffle=self.args.shuffle_instance,
seed=self.seed, args=self.args
)
print(
"| Split: {0}, Loaded {1} samples of denoising_dataset".format(
split,
len(self.datasets[split]),
)
)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.dictionary
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.dictionary
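# --- Illustrative usage sketch (added for documentation; not part of the original fairseq source) ---
# A toy, standalone illustration of the span-masking idea behind --mask,
# --poisson-lambda and --mask-length span-poisson: mask roughly a given fraction of
# tokens, drawing span lengths from a Poisson distribution. This is a simplified
# stand-in for what DenoisingDataset does internally, not the actual implementation;
# the token strings and helper name are made up.
def _example_span_masking(mask_ratio=0.3, poisson_lambda=3.0, mask_token='<mask>'):
    import numpy as np

    tokens = ['tok{}'.format(i) for i in range(20)]
    rng = np.random.RandomState(0)
    num_to_mask = int(round(len(tokens) * mask_ratio))

    masked = list(tokens)
    covered = 0
    while covered < num_to_mask:
        span_len = max(1, int(rng.poisson(poisson_lambda)))
        start = rng.randint(0, len(tokens))
        for j in range(start, min(start + span_len, len(tokens))):
            if masked[j] != mask_token:
                masked[j] = mask_token
                covered += 1
    return masked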
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/tasks/denoising.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
import os
import torch
from fairseq import options, utils
from fairseq.data import (
Dictionary,
LanguagePairDataset,
RoundRobinZipDatasets,
TransformEosLangPairDataset,
)
from fairseq.models import FairseqMultiModel
from fairseq.tasks.translation import load_langpair_dataset
from . import FairseqTask, register_task
def _lang_token(lang: str):
return '__{}__'.format(lang)
def _lang_token_index(dic: Dictionary, lang: str):
"""Return language token index."""
idx = dic.index(_lang_token(lang))
assert idx != dic.unk_index, \
'cannot find language token for lang {}'.format(lang)
return idx
@register_task('multilingual_translation')
class MultilingualTranslationTask(FairseqTask):
"""A task for training multiple translation models simultaneously.
We iterate round-robin over batches from multiple language pairs, ordered
according to the `--lang-pairs` argument.
The training loop is roughly:
        for i in range(num_batches_in_epoch):
for lang_pair in args.lang_pairs:
batch = next_batch_for_lang_pair(lang_pair)
loss = criterion(model_for_lang_pair(lang_pair), batch)
loss.backward()
optimizer.step()
In practice, `next_batch_for_lang_pair` is abstracted in a FairseqDataset
(e.g., `RoundRobinZipDatasets`) and `model_for_lang_pair` is a model that
implements the `FairseqMultiModel` interface.
    During inference it is required to specify a single `--source-lang` and
    `--target-lang`, which indicate the inference language direction.
    `--lang-pairs`, `--encoder-langtok` and `--decoder-langtok` have to be set
    to the same values used during training.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('data', metavar='DIR', help='path to data directory')
parser.add_argument('--lang-pairs', default=None, metavar='PAIRS',
help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language (only needed for inference)')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language (only needed for inference)')
parser.add_argument('--lazy-load', action='store_true',
help='load the dataset lazily')
parser.add_argument('--raw-text', default=False, action='store_true',
help='load raw text dataset')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left (default: True)')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left (default: False)')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
parser.add_argument('--encoder-langtok', default=None, type=str, choices=['src', 'tgt'],
metavar='SRCTGT',
help='replace beginning-of-sentence in source sentence with source or target '
'language token. (src/tgt)')
parser.add_argument('--decoder-langtok', action='store_true',
help='replace beginning-of-sentence in target sentence with target language token')
# fmt: on
def __init__(self, args, dicts, training):
super().__init__(args)
self.dicts = dicts
self.training = training
if training:
self.lang_pairs = args.lang_pairs
args.source_lang, args.target_lang = args.lang_pairs[0].split('-')
else:
self.lang_pairs = ['{}-{}'.format(args.source_lang, args.target_lang)]
        # eval_lang_pairs for multilingual translation is usually all of the
        # lang_pairs. However, for other multitask settings, or when we want to
        # optimize for certain languages, a different subset may be used. Thus
        # the eval_lang_pairs class variable is provided for classes that extend
        # this class.
self.eval_lang_pairs = self.lang_pairs
        # model_lang_pairs will be used to build encoder-decoder model pairs in
        # models.build_model(). This allows multitask sub-classes to build
        # models for language pairs other than the input lang_pairs.
self.model_lang_pairs = self.lang_pairs
self.langs = list(dicts.keys())
@classmethod
def setup_task(cls, args, **kwargs):
dicts, training = cls.prepare(args, **kwargs)
return cls(args, dicts, training)
@classmethod
def prepare(cls, args, **kargs):
args.left_pad_source = options.eval_bool(args.left_pad_source)
args.left_pad_target = options.eval_bool(args.left_pad_target)
if getattr(args, 'raw_text', False):
utils.deprecation_warning('--raw-text is deprecated, please use --dataset-impl=raw')
args.dataset_impl = 'raw'
elif getattr(args, 'lazy_load', False):
utils.deprecation_warning('--lazy-load is deprecated, please use --dataset-impl=lazy')
args.dataset_impl = 'lazy'
if args.lang_pairs is None:
raise ValueError('--lang-pairs is required. List all the language pairs in the training objective.')
args.lang_pairs = args.lang_pairs.split(',')
sorted_langs = sorted(list({x for lang_pair in args.lang_pairs for x in lang_pair.split('-')}))
if args.source_lang is not None or args.target_lang is not None:
training = False
else:
training = True
# load dictionaries
dicts = OrderedDict()
for lang in sorted_langs:
paths = args.data.split(':')
assert len(paths) > 0
dicts[lang] = Dictionary.load(os.path.join(paths[0], 'dict.{}.txt'.format(lang)))
if len(dicts) > 0:
assert dicts[lang].pad() == dicts[sorted_langs[0]].pad()
assert dicts[lang].eos() == dicts[sorted_langs[0]].eos()
assert dicts[lang].unk() == dicts[sorted_langs[0]].unk()
if args.encoder_langtok is not None or args.decoder_langtok:
for lang_to_add in sorted_langs:
dicts[lang].add_symbol(_lang_token(lang_to_add))
print('| [{}] dictionary: {} types'.format(lang, len(dicts[lang])))
return dicts, training
def get_encoder_langtok(self, src_lang, tgt_lang):
if self.args.encoder_langtok is None:
return self.dicts[src_lang].eos()
if self.args.encoder_langtok == 'src':
return _lang_token_index(self.dicts[src_lang], src_lang)
else:
return _lang_token_index(self.dicts[src_lang], tgt_lang)
def get_decoder_langtok(self, tgt_lang):
if not self.args.decoder_langtok:
return self.dicts[tgt_lang].eos()
return _lang_token_index(self.dicts[tgt_lang], tgt_lang)
def alter_dataset_langtok(self, lang_pair_dataset,
src_eos=None, src_lang=None, tgt_eos=None, tgt_lang=None):
if self.args.encoder_langtok is None and not self.args.decoder_langtok:
return lang_pair_dataset
new_src_eos = None
if self.args.encoder_langtok is not None and src_eos is not None \
and src_lang is not None and tgt_lang is not None:
new_src_eos = self.get_encoder_langtok(src_lang, tgt_lang)
else:
src_eos = None
new_tgt_bos = None
if self.args.decoder_langtok and tgt_eos is not None and tgt_lang is not None:
new_tgt_bos = self.get_decoder_langtok(tgt_lang)
else:
tgt_eos = None
return TransformEosLangPairDataset(
lang_pair_dataset,
src_eos=src_eos,
new_src_eos=new_src_eos,
tgt_bos=tgt_eos,
new_tgt_bos=new_tgt_bos,
)
def load_dataset(self, split, epoch=0, **kwargs):
"""Load a dataset split."""
paths = self.args.data.split(':')
assert len(paths) > 0
data_path = paths[epoch % len(paths)]
def language_pair_dataset(lang_pair):
src, tgt = lang_pair.split('-')
langpair_dataset = load_langpair_dataset(
data_path, split, src, self.dicts[src], tgt, self.dicts[tgt],
combine=True, dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
)
return self.alter_dataset_langtok(
langpair_dataset,
src_eos=self.dicts[src].eos(),
src_lang=src,
tgt_eos=self.dicts[tgt].eos(),
tgt_lang=tgt,
)
self.datasets[split] = RoundRobinZipDatasets(
OrderedDict([
(lang_pair, language_pair_dataset(lang_pair))
for lang_pair in self.lang_pairs
]),
eval_key=None if self.training else "%s-%s" % (self.args.source_lang, self.args.target_lang),
)
def build_dataset_for_inference(self, src_tokens, src_lengths):
lang_pair = "%s-%s" % (self.args.source_lang, self.args.target_lang)
return RoundRobinZipDatasets(
OrderedDict([(
lang_pair,
self.alter_dataset_langtok(
LanguagePairDataset(
src_tokens, src_lengths,
self.source_dictionary
),
src_eos=self.source_dictionary.eos(),
src_lang=self.args.source_lang,
tgt_eos=self.target_dictionary.eos(),
tgt_lang=self.args.target_lang,
),
)]),
eval_key=lang_pair,
)
def build_model(self, args):
def check_args():
messages = []
if len(set(self.args.lang_pairs).symmetric_difference(args.lang_pairs)) != 0:
messages.append('--lang-pairs should include all the language pairs {}.'.format(args.lang_pairs))
if self.args.encoder_langtok != args.encoder_langtok:
messages.append('--encoder-langtok should be {}.'.format(args.encoder_langtok))
if self.args.decoder_langtok != args.decoder_langtok:
messages.append('--decoder-langtok should {} be set.'.format("" if args.decoder_langtok else "not"))
if len(messages) > 0:
raise ValueError(' '.join(messages))
        # Check if task args are consistent with model args
check_args()
from fairseq import models
model = models.build_model(args, self)
if not isinstance(model, FairseqMultiModel):
raise ValueError('MultilingualTranslationTask requires a FairseqMultiModel architecture')
return model
def train_step(self, sample, model, criterion, optimizer, ignore_grad=False):
model.train()
agg_loss, agg_sample_size, agg_logging_output = 0., 0., {}
for lang_pair in self.model_lang_pairs:
if sample[lang_pair] is None or len(sample[lang_pair]) == 0:
continue
loss, sample_size, logging_output = criterion(model.models[lang_pair], sample[lang_pair])
if ignore_grad:
loss *= 0
optimizer.backward(loss)
agg_loss += loss.detach().item()
# TODO make summing of the sample sizes configurable
agg_sample_size += sample_size
agg_logging_output[lang_pair] = logging_output
return agg_loss, agg_sample_size, agg_logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
agg_loss, agg_sample_size, agg_logging_output = 0., 0., {}
for lang_pair in self.eval_lang_pairs:
if lang_pair not in sample or sample[lang_pair] is None or len(sample[lang_pair]) == 0:
continue
loss, sample_size, logging_output = criterion(model.models[lang_pair], sample[lang_pair])
agg_loss += loss.data.item()
# TODO make summing of the sample sizes configurable
agg_sample_size += sample_size
agg_logging_output[lang_pair] = logging_output
return agg_loss, agg_sample_size, agg_logging_output
def inference_step(self, generator, models, sample, prefix_tokens=None):
with torch.no_grad():
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
bos_token=_lang_token_index(self.target_dictionary, self.args.target_lang)
if self.args.decoder_langtok else self.target_dictionary.eos(),
)
def init_logging_output(self, sample):
return {
'ntokens': sum(
sample_lang.get('ntokens', 0)
for sample_lang in sample.values()
) if sample is not None else 0,
'nsentences': sum(
sample_lang['target'].size(0) if 'target' in sample_lang else 0
for sample_lang in sample.values()
) if sample is not None else 0,
}
def grad_denom(self, sample_sizes, criterion):
return criterion.__class__.grad_denom(sample_sizes)
def aggregate_logging_outputs(self, logging_outputs, criterion, logging_output_keys=None):
logging_output_keys = logging_output_keys or self.eval_lang_pairs
# aggregate logging outputs for each language pair
agg_logging_outputs = {
key: criterion.__class__.aggregate_logging_outputs([
logging_output.get(key, {}) for logging_output in logging_outputs
])
for key in logging_output_keys
}
def sum_over_languages(key):
return sum(logging_output[key] for logging_output in agg_logging_outputs.values())
# flatten logging outputs
flat_logging_output = {
'{}:{}'.format(lang_pair, k): v
for lang_pair, agg_logging_output in agg_logging_outputs.items()
for k, v in agg_logging_output.items()
}
flat_logging_output['loss'] = sum_over_languages('loss')
if any('nll_loss' in logging_output for logging_output in agg_logging_outputs.values()):
flat_logging_output['nll_loss'] = sum_over_languages('nll_loss')
flat_logging_output['sample_size'] = sum_over_languages('sample_size')
flat_logging_output['nsentences'] = sum_over_languages('nsentences')
flat_logging_output['ntokens'] = sum_over_languages('ntokens')
return flat_logging_output
@property
def source_dictionary(self):
return self.dicts[self.args.source_lang]
@property
def target_dictionary(self):
return self.dicts[self.args.target_lang]
def max_positions(self):
"""Return the max sentence length allowed by the task."""
if len(self.datasets.values()) == 0:
return {'%s-%s' % (self.args.source_lang, self.args.target_lang):
(self.args.max_source_positions, self.args.max_target_positions)}
return OrderedDict([
(key, (self.args.max_source_positions, self.args.max_target_positions))
for split in self.datasets.keys()
for key in self.datasets[split].datasets.keys()
])
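# --- Illustrative usage sketch (added for documentation; not part of the original fairseq source) ---
# A small demonstration of the language-token convention used above: with
# --encoder-langtok / --decoder-langtok, every language gets a __lang__ symbol added to
# the dictionaries and _lang_token_index() looks it up. The toy dictionary contents and
# the helper name are made up for illustration.
def _example_language_tokens():
    from fairseq.data import Dictionary

    dictionary = Dictionary()
    for word in ['hello', 'world']:
        dictionary.add_symbol(word)
    for lang in ['en', 'de', 'fr']:
        dictionary.add_symbol(_lang_token(lang))

    # e.g. the decoder would start generation from the target-language token
    tgt_langtok = _lang_token_index(dictionary, 'de')
    print('__de__ ->', tgt_langtok)
    return tgt_langtok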
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/tasks/multilingual_translation.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from fairseq.utils import new_arange
from fairseq.tasks import register_task
from fairseq.tasks.translation import TranslationTask, load_langpair_dataset
@register_task('translation_lev')
class TranslationLevenshteinTask(TranslationTask):
"""
Translation (Sequence Generation) task for Levenshtein Transformer
See `"Levenshtein Transformer" <https://arxiv.org/abs/1905.11006>`_.
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
TranslationTask.add_args(parser)
parser.add_argument(
'--noise',
default='random_delete',
choices=['random_delete', 'random_mask', 'no_noise', 'full_mask'])
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = self.args.data.split(':')
assert len(paths) > 0
data_path = paths[epoch % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path, split, src, self.src_dict, tgt, self.tgt_dict,
combine=combine, dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
prepend_bos=True,
)
def inject_noise(self, target_tokens):
def _random_delete(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
max_len = target_tokens.size(1)
target_mask = target_tokens.eq(pad)
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(
target_tokens.eq(bos) | target_tokens.eq(eos), 0.0)
target_score.masked_fill_(target_mask, 1)
target_score, target_rank = target_score.sort(1)
target_length = target_mask.size(1) - target_mask.float().sum(
1, keepdim=True)
# do not delete <bos> and <eos> (we assign 0 score for them)
target_cutoff = 2 + ((target_length - 2) * target_score.new_zeros(
target_score.size(0), 1).uniform_()).long()
target_cutoff = target_score.sort(1)[1] >= target_cutoff
            prev_target_tokens = (
                target_tokens.gather(1, target_rank)
                .masked_fill_(target_cutoff, pad)
                .gather(1, target_rank.masked_fill_(target_cutoff, max_len).sort(1)[1])
            )
            prev_target_tokens = prev_target_tokens[:, :prev_target_tokens.ne(pad).sum(1).max()]
return prev_target_tokens
def _random_mask(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
unk = self.tgt_dict.unk()
target_masks = target_tokens.ne(pad) & \
target_tokens.ne(bos) & \
target_tokens.ne(eos)
target_score = target_tokens.clone().float().uniform_()
target_score.masked_fill_(~target_masks, 2.0)
target_length = target_masks.sum(1).float()
target_length = target_length * target_length.clone().uniform_()
target_length = target_length + 1 # make sure to mask at least one token.
_, target_rank = target_score.sort(1)
target_cutoff = new_arange(target_rank) < target_length[:, None].long()
prev_target_tokens = target_tokens.masked_fill(
target_cutoff.scatter(1, target_rank, target_cutoff), unk)
return prev_target_tokens
def _full_mask(target_tokens):
pad = self.tgt_dict.pad()
bos = self.tgt_dict.bos()
eos = self.tgt_dict.eos()
unk = self.tgt_dict.unk()
target_mask = target_tokens.eq(bos) | target_tokens.eq(
eos) | target_tokens.eq(pad)
return target_tokens.masked_fill(~target_mask, unk)
if self.args.noise == 'random_delete':
return _random_delete(target_tokens)
elif self.args.noise == 'random_mask':
return _random_mask(target_tokens)
elif self.args.noise == 'full_mask':
return _full_mask(target_tokens)
elif self.args.noise == 'no_noise':
return target_tokens
else:
raise NotImplementedError
def build_generator(self, args):
from fairseq.iterative_refinement_generator import IterativeRefinementGenerator
return IterativeRefinementGenerator(
self.target_dictionary,
eos_penalty=getattr(args, 'iter_decode_eos_penalty', 0.0),
max_iter=getattr(args, 'iter_decode_max_iter', 10),
decoding_format=getattr(args, 'decoding_format', None),
adaptive=not getattr(args, 'iter_decode_force_max_iter', False),
retain_history=getattr(args, 'retain_iter_history', False))
def train_step(self,
sample,
model,
criterion,
optimizer,
ignore_grad=False):
model.train()
sample['prev_target'] = self.inject_noise(sample['target'])
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
sample['prev_target'] = self.inject_noise(sample['target'])
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
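# --- Illustrative usage sketch (added for documentation; not part of the original fairseq source) ---
# A standalone rerun of the _full_mask() noising rule on a toy batch: every token that
# is not <bos>, <eos> or <pad> is replaced by <unk>, which is how the 'full_mask' noise
# builds the initial decoder input for Levenshtein-style training. The index values and
# helper name below are made up for illustration.
def _example_full_mask_noise():
    import torch

    bos, pad, eos, unk = 0, 1, 2, 3
    target_tokens = torch.tensor([
        [bos, 10, 11, 12, eos, pad],
        [bos, 20, 21, eos, pad, pad],
    ])
    keep_mask = target_tokens.eq(bos) | target_tokens.eq(eos) | target_tokens.eq(pad)
    prev_target_tokens = target_tokens.masked_fill(~keep_mask, unk)
    # prev_target_tokens is now:
    # tensor([[0, 3, 3, 3, 2, 1],
    #         [0, 3, 3, 2, 1, 1]])
    return prev_target_tokens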
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/tasks/translation_lev.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import torch
from fairseq import utils
from fairseq.data import (
data_utils,
Dictionary,
MonolingualDataset,
TokenBlockDataset,
TransformEosDataset,
TruncatedDictionary,
)
from fairseq.tasks import FairseqTask, register_task
@register_task("language_modeling")
class LanguageModelingTask(FairseqTask):
"""
Train a language model.
Args:
dictionary (~fairseq.data.Dictionary): the dictionary for the input of
the language model
output_dictionary (~fairseq.data.Dictionary): the dictionary for the
output of the language model. In most cases it will be the same as
*dictionary*, but could possibly be a more limited version of the
dictionary (if ``--output-dictionary-size`` is used).
targets (List[str]): list of the target types that the language model
should predict. Can be one of "self", "future", and "past".
Defaults to "future".
.. note::
The language modeling task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate`, :mod:`fairseq-interactive` and
:mod:`fairseq-eval-lm`.
The language modeling task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.language_modeling_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('data', help='path to data directory')
parser.add_argument('--sample-break-mode', default='none',
choices=['none', 'complete', 'complete_doc', 'eos'],
help='If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
'of sentence, but may include multiple sentences per sample. '
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.')
parser.add_argument('--tokens-per-sample', default=1024, type=int,
help='max number of tokens per sample for LM dataset')
parser.add_argument('--lazy-load', action='store_true',
help='load the dataset lazily')
parser.add_argument('--raw-text', default=False, action='store_true',
help='load raw text dataset')
parser.add_argument('--output-dictionary-size', default=-1, type=int,
help='limit the size of output dictionary')
parser.add_argument('--self-target', action='store_true',
help='include self target')
parser.add_argument('--future-target', action='store_true',
help='include future target')
parser.add_argument('--past-target', action='store_true',
help='include past target')
parser.add_argument('--add-bos-token', action='store_true',
help='prepend beginning of sentence token (<s>)')
parser.add_argument('--max-target-positions', type=int, metavar='N',
help='max number of tokens in the target sequence')
# fmt: on
def __init__(self, args, dictionary, output_dictionary=None, targets=None):
super().__init__(args)
self.dictionary = dictionary
self.output_dictionary = output_dictionary or dictionary
if targets is None:
targets = ["future"]
self.targets = targets
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
if getattr(args, "raw_text", False):
utils.deprecation_warning(
"--raw-text is deprecated, please use --dataset-impl=raw"
)
args.dataset_impl = "raw"
elif getattr(args, "lazy_load", False):
utils.deprecation_warning(
"--lazy-load is deprecated, please use --dataset-impl=lazy"
)
args.dataset_impl = "lazy"
dictionary = None
output_dictionary = None
if args.data:
paths = args.data.split(":")
assert len(paths) > 0
dictionary = Dictionary.load(os.path.join(paths[0], "dict.txt"))
print("| dictionary: {} types".format(len(dictionary)))
output_dictionary = dictionary
if args.output_dictionary_size >= 0:
output_dictionary = TruncatedDictionary(
dictionary, args.output_dictionary_size
)
# upgrade old checkpoints
if hasattr(args, "exclude_self_target"):
args.self_target = not args.exclude_self_target
targets = []
if getattr(args, "self_target", False):
targets.append("self")
if getattr(args, "future_target", False):
targets.append("future")
if getattr(args, "past_target", False):
targets.append("past")
if len(targets) == 0:
# standard language modeling
targets = ["future"]
return cls(args, dictionary, output_dictionary, targets=targets)
def build_model(self, args):
model = super().build_model(args)
for target in self.targets:
if target not in model.supported_targets:
raise ValueError(
"Unsupported language modeling target: {}".format(target)
)
return model
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = self.args.data.split(":")
assert len(paths) > 0
data_path = paths[epoch % len(paths)]
split_path = os.path.join(data_path, split)
dataset = data_utils.load_indexed_dataset(
split_path, self.dictionary, self.args.dataset_impl, combine=combine
)
if dataset is None:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, split_path)
)
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
self.args.tokens_per_sample,
pad=self.dictionary.pad(),
eos=self.dictionary.eos(),
break_mode=self.args.sample_break_mode,
include_targets=True,
)
add_eos_for_other_targets = (
self.args.sample_break_mode is not None
and self.args.sample_break_mode != "none"
)
self.datasets[split] = MonolingualDataset(
dataset,
dataset.sizes,
self.dictionary,
self.output_dictionary,
add_eos_for_other_targets=add_eos_for_other_targets,
shuffle=True,
targets=self.targets,
add_bos_token=self.args.add_bos_token,
)
def build_dataset_for_inference(self, src_tokens, src_lengths):
return TransformEosDataset(
MonolingualDataset(
TokenBlockDataset(
src_tokens,
src_lengths,
block_size=None,
pad=self.source_dictionary.pad(),
eos=self.source_dictionary.eos(),
break_mode="eos",
include_targets=False,
),
src_lengths,
self.source_dictionary,
self.target_dictionary,
add_eos_for_other_targets=False,
shuffle=False,
add_bos_token=self.args.add_bos_token,
),
eos=self.source_dictionary.eos(),
# remove EOS since this will be used as a prefix for generation
remove_eos_from_src=True,
has_target=False,
)
def inference_step(self, generator, models, sample, prefix_tokens=None):
with torch.no_grad():
if prefix_tokens is None and sample["net_input"]["src_tokens"].nelement():
prefix_tokens = sample["net_input"]["src_tokens"]
if prefix_tokens[:, 0].eq(self.source_dictionary.eos()).all():
prefix_tokens = prefix_tokens[:, 1:]
return generator.generate(models, sample, prefix_tokens=prefix_tokens)
@property
def source_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.dictionary
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return self.output_dictionary
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/tasks/language_modeling.py |
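The task above is normally driven through the fairseq CLI; the snippet below is a minimal hand-rolled sketch, not part of the repository file above. It assumes the task is registered as "language_modeling" and that the (hypothetical) data directory contains a dict.txt plus a binarized train split; the Namespace carries only the attributes the task actually reads.

from argparse import Namespace

from fairseq import tasks

args = Namespace(
    task="language_modeling",
    data="/path/to/binarized-lm-data",  # hypothetical directory with dict.txt and a binarized train split
    dataset_impl="mmap",
    output_dictionary_size=-1,
    self_target=False,
    future_target=True,
    past_target=False,
    add_bos_token=False,
    tokens_per_sample=512,
    sample_break_mode="none",
)

task = tasks.setup_task(args)  # dispatches to LanguageModelingTask.setup_task via TASK_REGISTRY
task.load_dataset("train")
print(len(task.source_dictionary), "dictionary types,", len(task.datasets["train"]), "training blocks")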
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import numpy as np
import os
from fairseq import tokenizer
from fairseq.data import (
ConcatDataset,
indexed_dataset,
data_utils,
)
from fairseq.data import Dictionary
from fairseq.data.legacy.block_pair_dataset import BlockPairDataset
from fairseq.data.legacy.masked_lm_dataset import MaskedLMDataset
from fairseq.data.legacy.masked_lm_dictionary import BertDictionary
from . import FairseqTask, register_task
@register_task('legacy_masked_lm')
class LegacyMaskedLMTask(FairseqTask):
"""
Task for training Masked LM (BERT) model.
Args:
dictionary (Dictionary): the dictionary for the input of the task
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner')
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of total tokens over all segments'
' per sample for BERT dataset')
parser.add_argument('--break-mode', default="doc", type=str, help='mode for breaking sentence')
parser.add_argument('--shuffle-dataset', action='store_true', default=False)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.seed = args.seed
@classmethod
def load_dictionary(cls, filename):
return BertDictionary.load(filename)
@classmethod
def build_dictionary(cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8):
d = BertDictionary()
for filename in filenames:
Dictionary.add_file_to_dictionary(filename, d, tokenizer.tokenize_line, workers)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
@property
def target_dictionary(self):
return self.dictionary
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task.
"""
paths = args.data.split(':')
assert len(paths) > 0
dictionary = BertDictionary.load(os.path.join(paths[0], 'dict.txt'))
print('| dictionary: {} types'.format(len(dictionary)))
return cls(args, dictionary)
def load_dataset(self, split, epoch=0, combine=False):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
loaded_datasets = []
paths = self.args.data.split(':')
assert len(paths) > 0
data_path = paths[epoch % len(paths)]
print("| data_path", data_path)
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
path = os.path.join(data_path, split_k)
ds = indexed_dataset.make_dataset(
path,
impl=self.args.dataset_impl,
fix_lua_indexing=True,
dictionary=self.dictionary,
)
if ds is None:
if k > 0:
break
else:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
with data_utils.numpy_seed(self.seed + k):
loaded_datasets.append(
BlockPairDataset(
ds,
self.dictionary,
ds.sizes,
self.args.tokens_per_sample,
break_mode=self.args.break_mode,
doc_break_size=1,
)
)
print('| {} {} {} examples'.format(data_path, split_k, len(loaded_datasets[-1])))
if not combine:
break
if len(loaded_datasets) == 1:
dataset = loaded_datasets[0]
sizes = dataset.sizes
else:
dataset = ConcatDataset(loaded_datasets)
sizes = np.concatenate([ds.sizes for ds in loaded_datasets])
self.datasets[split] = MaskedLMDataset(
dataset=dataset,
sizes=sizes,
vocab=self.dictionary,
pad_idx=self.dictionary.pad(),
mask_idx=self.dictionary.mask(),
classif_token_idx=self.dictionary.cls(),
sep_token_idx=self.dictionary.sep(),
shuffle=self.args.shuffle_dataset,
seed=self.seed,
)
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/tasks/legacy_masked_lm.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import importlib
import os
from .fairseq_task import FairseqTask
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
def setup_task(args, **kwargs):
return TASK_REGISTRY[args.task].setup_task(args, **kwargs)
def register_task(name):
"""
New tasks can be added to fairseq with the
:func:`~fairseq.tasks.register_task` function decorator.
For example::
@register_task('classification')
class ClassificationTask(FairseqTask):
(...)
.. note::
All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`
interface.
Please see the
Args:
name (str): the name of the task
"""
def register_task_cls(cls):
if name in TASK_REGISTRY:
raise ValueError('Cannot register duplicate task ({})'.format(name))
if not issubclass(cls, FairseqTask):
raise ValueError('Task ({}: {}) must extend FairseqTask'.format(name, cls.__name__))
if cls.__name__ in TASK_CLASS_NAMES:
raise ValueError('Cannot register task with duplicate class name ({})'.format(cls.__name__))
TASK_REGISTRY[name] = cls
TASK_CLASS_NAMES.add(cls.__name__)
return cls
return register_task_cls
# automatically import any Python files in the tasks/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
task_name = file[:file.find('.py')]
importlib.import_module('fairseq.tasks.' + task_name)
# expose `task_parser` for sphinx
if task_name in TASK_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_task = parser.add_argument_group('Task name')
# fmt: off
group_task.add_argument('--task', metavar=task_name,
help='Enable this task with: ``--task=' + task_name + '``')
# fmt: on
group_args = parser.add_argument_group('Additional command-line arguments')
TASK_REGISTRY[task_name].add_args(group_args)
globals()[task_name + '_parser'] = parser
def get_task(name):
return TASK_REGISTRY[name]
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/tasks/__init__.py |
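The registry only auto-imports modules that live inside fairseq/tasks/, so a task defined elsewhere must be imported before setup_task is called. Below is a minimal registration sketch, not from the repository; the task name and its internals are hypothetical.

from fairseq.tasks import FairseqTask, register_task


@register_task('toy_classification')  # hypothetical name; must not collide with an existing task
class ToyClassificationTask(FairseqTask):

    @staticmethod
    def add_args(parser):
        parser.add_argument('data', help='path to data directory')

    @classmethod
    def setup_task(cls, args, **kwargs):
        return cls(args)

    def load_dataset(self, split, **kwargs):
        # dataset construction is task-specific and omitted in this sketch
        raise NotImplementedError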
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from fairseq.data import FileAudioDataset
from . import FairseqTask, register_task
@register_task('audio_pretraining')
class AudioPretrainingTask(FairseqTask):
"""
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', help='path to data directory')
parser.add_argument('--sample-rate', default=16000, type=int,
help='target sample rate. audio files will be up/down sampled to this rate')
parser.add_argument('--max-sample-size', default=None, type=int,
help='max sample size to crop to for batching. default = min sample length')
parser.add_argument('--min-sample-size', default=None, type=int,
help='min sample size to crop to for batching. default = same as --max-sample-size')
def __init__(self, args):
super().__init__(args)
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
return cls(args)
def load_dataset(self, split, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
manifest = os.path.join(self.args.data, '{}.tsv'.format(split))
self.datasets[split] = FileAudioDataset(manifest,
sample_rate=self.args.sample_rate,
max_sample_size=self.args.max_sample_size,
min_sample_size=self.args.min_sample_size)
@property
def target_dictionary(self):
"""Return the :class:`~fairseq.data.Dictionary` for the language
model."""
return None
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/tasks/audio_pretraining.py |
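A minimal sketch, not part of the file above, of pointing the task at per-split manifests; the directory and size limits are hypothetical.

from argparse import Namespace

from fairseq import tasks

args = Namespace(
    task='audio_pretraining',
    data='/data/audio',          # hypothetical; must contain train.tsv / valid.tsv manifests
    sample_rate=16000,
    max_sample_size=250000,      # hypothetical crop sizes
    min_sample_size=32000,
)
task = tasks.setup_task(args)
task.load_dataset('train')       # reads /data/audio/train.tsv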
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import os
from fairseq import options, utils
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
data_utils,
indexed_dataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
)
from . import FairseqTask, register_task
def load_langpair_dataset(
data_path, split,
src, src_dict,
tgt, tgt_dict,
combine, dataset_impl, upsample_primary,
left_pad_source, left_pad_target, max_source_positions,
max_target_positions, prepend_bos=False, load_alignments=False,
truncate_source=False,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
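    # gather every shard of the split (train, train1, ...), accepting either src-tgt or tgt-src file naming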
for k in itertools.count():
split_k = split + (str(k) if k > 0 else '')
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))
src_dataset = data_utils.load_indexed_dataset(prefix + src, src_dict, dataset_impl)
if truncate_source:
src_dataset = AppendTokenDataset(
TruncateDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_datasets.append(
data_utils.load_indexed_dataset(prefix + tgt, tgt_dict, dataset_impl)
)
print('| {} {} {}-{} {} examples'.format(data_path, split_k, src, tgt, len(src_datasets[-1])))
if not combine:
break
assert len(src_datasets) == len(tgt_datasets)
if len(src_datasets) == 1:
src_dataset, tgt_dataset = src_datasets[0], tgt_datasets[0]
else:
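        # several shards were found: optionally upsample the first (primary) one before concatenating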
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, '{}.align.{}-{}'.format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(align_path, None, dataset_impl)
return LanguagePairDataset(
src_dataset, src_dataset.sizes, src_dict,
tgt_dataset, tgt_dataset.sizes, tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
max_source_positions=max_source_positions,
max_target_positions=max_target_positions,
align_dataset=align_dataset,
)
@register_task('translation')
class TranslationTask(FairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
src_dict (~fairseq.data.Dictionary): dictionary for the source language
tgt_dict (~fairseq.data.Dictionary): dictionary for the target language
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('data', help='colon separated path to data directories list, \
will be iterated upon during epochs in round-robin manner')
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='target language')
parser.add_argument('--lazy-load', action='store_true',
help='load the dataset lazily')
parser.add_argument('--raw-text', action='store_true',
help='load raw text dataset')
parser.add_argument('--load-alignments', action='store_true',
help='load the binarized alignments')
parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
help='pad the source on the left')
parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
help='pad the target on the left')
parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the source sequence')
parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
help='max number of tokens in the target sequence')
parser.add_argument('--upsample-primary', default=1, type=int,
help='amount to upsample primary dataset')
parser.add_argument('--truncate-source', default=False, action='store_true',
help='boolean to truncate source to max-source-positions')
# fmt: on
def __init__(self, args, src_dict, tgt_dict):
super().__init__(args)
self.src_dict = src_dict
self.tgt_dict = tgt_dict
@classmethod
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
args.left_pad_source = options.eval_bool(args.left_pad_source)
args.left_pad_target = options.eval_bool(args.left_pad_target)
if getattr(args, 'raw_text', False):
utils.deprecation_warning('--raw-text is deprecated, please use --dataset-impl=raw')
args.dataset_impl = 'raw'
elif getattr(args, 'lazy_load', False):
utils.deprecation_warning('--lazy-load is deprecated, please use --dataset-impl=lazy')
args.dataset_impl = 'lazy'
paths = args.data.split(':')
assert len(paths) > 0
# find language pair automatically
if args.source_lang is None or args.target_lang is None:
args.source_lang, args.target_lang = data_utils.infer_language_pair(paths[0])
if args.source_lang is None or args.target_lang is None:
raise Exception('Could not infer language pair, please provide it explicitly')
# load dictionaries
src_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.source_lang)))
tgt_dict = cls.load_dictionary(os.path.join(paths[0], 'dict.{}.txt'.format(args.target_lang)))
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))
return cls(args, src_dict, tgt_dict)
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
paths = self.args.data.split(':')
assert len(paths) > 0
data_path = paths[epoch % len(paths)]
# infer langcode
src, tgt = self.args.source_lang, self.args.target_lang
self.datasets[split] = load_langpair_dataset(
data_path, split, src, self.src_dict, tgt, self.tgt_dict,
combine=combine, dataset_impl=self.args.dataset_impl,
upsample_primary=self.args.upsample_primary,
left_pad_source=self.args.left_pad_source,
left_pad_target=self.args.left_pad_target,
max_source_positions=self.args.max_source_positions,
max_target_positions=self.args.max_target_positions,
load_alignments=self.args.load_alignments,
truncate_source=self.args.truncate_source,
)
def build_dataset_for_inference(self, src_tokens, src_lengths):
return LanguagePairDataset(src_tokens, src_lengths, self.source_dictionary)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary`."""
return self.src_dict
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary`."""
return self.tgt_dict
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/fairseq/tasks/translation.py |
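For reference, a minimal sketch, not from the repository, of the {split}.{src}-{tgt}.{lang} layout that split_exists above probes, and of calling load_langpair_dataset directly; the directory and the de-en pair are hypothetical.

# data-bin/                      (hypothetical, 'mmap' binarized)
#   dict.de.txt   dict.en.txt
#   train.de-en.de.bin  train.de-en.de.idx  train.de-en.en.bin  train.de-en.en.idx
#   valid.de-en.de.bin  valid.de-en.de.idx  valid.de-en.en.bin  valid.de-en.en.idx
from fairseq.tasks.translation import TranslationTask, load_langpair_dataset

src_dict = TranslationTask.load_dictionary('data-bin/dict.de.txt')
tgt_dict = TranslationTask.load_dictionary('data-bin/dict.en.txt')
train = load_langpair_dataset(
    'data-bin', 'train', 'de', src_dict, 'en', tgt_dict,
    combine=False, dataset_impl='mmap', upsample_primary=1,
    left_pad_source=True, left_pad_target=False,
    max_source_positions=1024, max_target_positions=1024,
)
print(len(train), 'sentence pairs')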