python_code | repo_name | file_path
---|---|---
from __future__ import print_function, unicode_literals, division
import os
import re
import codecs
import platform
from subprocess import check_output
from tempfile import mkdtemp
from functools import partial
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
from pyrouge.utils import log
from pyrouge.utils.file_utils import verify_dir
REMAP = {"-lrb-": "(", "-rrb-": ")", "-lcb-": "{", "-rcb-": "}",
"-lsb-": "[", "-rsb-": "]", "``": '"', "''": '"'}
def clean(x):
return re.sub(
r"-lrb-|-rrb-|-lcb-|-rcb-|-lsb-|-rsb-|``|''",
lambda m: REMAP.get(m.group()), x)
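# Illustrative sketch: clean() maps PTB-style bracket and quote tokens back to
# their literal characters, e.g.
#   clean("-lrb- hello -rrb- `` world ''")  ->  '( hello ) " world "'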
class DirectoryProcessor:
@staticmethod
def process(input_dir, output_dir, function):
"""
Apply function to all files in input_dir and save the resulting output
files in output_dir.
"""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logger = log.get_global_console_logger()
logger.info("Processing files in {}.".format(input_dir))
input_file_names = os.listdir(input_dir)
for input_file_name in input_file_names:
input_file = os.path.join(input_dir, input_file_name)
with codecs.open(input_file, "r", encoding="UTF-8") as f:
input_string = f.read()
output_string = function(input_string)
output_file = os.path.join(output_dir, input_file_name)
with codecs.open(output_file, "w", encoding="UTF-8") as f:
f.write(clean(output_string.lower()))
logger.info("Saved processed files to {}.".format(output_dir))
class Rouge155(object):
"""
This is a wrapper for the ROUGE 1.5.5 summary evaluation package.
This class is designed to simplify the evaluation process by:
1) Converting summaries into a format ROUGE understands.
2) Generating the ROUGE configuration file automatically based
on filename patterns.
This class can be used within Python like this:
rouge = Rouge155()
rouge.system_dir = 'test/systems'
rouge.model_dir = 'test/models'
# The system filename pattern should contain one group that
# matches the document ID.
rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html'
# The model filename pattern has '#ID#' as a placeholder for the
# document ID. If there are multiple model summaries, pyrouge
# will use the provided regex to automatically match them with
# the corresponding system summary. Here, [A-Z] matches
# multiple model summaries for a given #ID#.
rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate()
print(rouge_output)
output_dict = rouge.output_to_dict(rouge_output)
print(output_dict)
-> {'rouge_1_f_score': 0.95652,
'rouge_1_f_score_cb': 0.95652,
'rouge_1_f_score_ce': 0.95652,
'rouge_1_precision': 0.95652,
[...]
To evaluate multiple systems:
rouge = Rouge155()
rouge.system_dir = '/PATH/TO/systems'
rouge.model_dir = 'PATH/TO/models'
for system_id in ['id1', 'id2', 'id3']:
rouge.system_filename_pattern = \
'SL.P.10.R.{}.SL062003-(\d+).html'.format(system_id)
rouge.model_filename_pattern = \
'SL.P.10.R.[A-Z].SL062003-#ID#.html'
rouge_output = rouge.evaluate(system_id)
print(rouge_output)
"""
def __init__(self, rouge_dir=None, rouge_args=None, temp_dir=None):
"""
Create a Rouge155 object.
rouge_dir: Directory containing Rouge-1.5.5.pl
rouge_args: Arguments to pass through to ROUGE if you
don't want to use the default pyrouge
arguments.
"""
self.temp_dir = temp_dir
self.log = log.get_global_console_logger()
self.__set_dir_properties()
self._config_file = None
self._settings_file = self.__get_config_path()
self.__set_rouge_dir(rouge_dir)
self.args = self.__clean_rouge_args(rouge_args)
self._system_filename_pattern = None
self._model_filename_pattern = None
def save_home_dir(self):
config = ConfigParser()
section = 'pyrouge settings'
config.add_section(section)
config.set(section, 'home_dir', self._home_dir)
with open(self._settings_file, 'w') as f:
config.write(f)
self.log.info("Set ROUGE home directory to {}.".format(self._home_dir))
@property
def settings_file(self):
"""
Path of the settings file, which stores the ROUGE home dir.
"""
return self._settings_file
@property
def bin_path(self):
"""
The full path of the ROUGE binary (although it's technically
a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl
"""
if self._bin_path is None:
raise Exception(
"ROUGE path not set. Please set the ROUGE home directory "
"and ensure that ROUGE-1.5.5.pl exists in it.")
return self._bin_path
@property
def system_filename_pattern(self):
"""
The regular expression pattern for matching system summary
filenames, given as a regex string.
E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system
filenames in the SPL2003/system folder of the ROUGE SPL example
in the "sample-test" folder.
Currently, there is no support for multiple systems.
"""
return self._system_filename_pattern
@system_filename_pattern.setter
def system_filename_pattern(self, pattern):
self._system_filename_pattern = pattern
@property
def model_filename_pattern(self):
"""
The regular expression pattern for matching model summary
filenames. The pattern needs to contain the string "#ID#",
which is a placeholder for the document ID.
E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model
filenames in the SPL2003/system folder of the ROUGE SPL
example in the "sample-test" folder.
"#ID#" is a placeholder for the document ID which has been
matched by the "(\d+)" part of the system filename pattern.
The different model summaries for a given document ID are
matched by the "[A-Z]" part.
"""
return self._model_filename_pattern
@model_filename_pattern.setter
def model_filename_pattern(self, pattern):
self._model_filename_pattern = pattern
@property
def config_file(self):
return self._config_file
@config_file.setter
def config_file(self, path):
config_dir, _ = os.path.split(path)
verify_dir(config_dir, "configuration file")
self._config_file = path
def split_sentences(self):
"""
ROUGE requires texts split into sentences. In case the texts
are not already split, this method can be used.
"""
from pyrouge.utils.sentence_splitter import PunktSentenceSplitter
self.log.info("Splitting sentences.")
ss = PunktSentenceSplitter()
def sent_split_to_string(s): return "\n".join(ss.split(s))
process_func = partial(
DirectoryProcessor.process, function=sent_split_to_string)
self.__process_summaries(process_func)
@staticmethod
def convert_summaries_to_rouge_format(input_dir, output_dir):
"""
Convert all files in input_dir into a format ROUGE understands
and saves the files to output_dir. The input files are assumed
to be plain text with one sentence per line.
input_dir: Path of directory containing the input files.
output_dir: Path of directory in which the converted files
will be saved.
"""
DirectoryProcessor.process(
input_dir, output_dir, Rouge155.convert_text_to_rouge_format)
@staticmethod
def convert_text_to_rouge_format(text, title="dummy title"):
"""
Convert a text to a format ROUGE understands. The text is
assumed to contain one sentence per line.
text: The text to convert, containing one sentence per line.
title: Optional title for the text. The title will appear
in the converted file, but doesn't seem to have
any other relevance.
Returns: The converted text as string.
"""
sentences = text.split("\n")
sent_elems = [
"<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>"
"{text}</a>".format(i=i, text=sent)
for i, sent in enumerate(sentences, start=1)]
html = """<html>
<head>
<title>{title}</title>
</head>
<body bgcolor="white">
{elems}
</body>
</html>""".format(title=title, elems="\n".join(sent_elems))
return html
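# Illustrative example: each input line becomes one numbered anchor element in
# the SEE/HTML format ROUGE expects, so
#   convert_text_to_rouge_format("First sentence.\nSecond sentence.")
# produces an <html> document whose body contains
#   <a name="1">[1]</a> <a href="#1" id=1>First sentence.</a>
#   <a name="2">[2]</a> <a href="#2" id=2>Second sentence.</a>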
@staticmethod
def write_config_static(system_dir, system_filename_pattern,
model_dir, model_filename_pattern,
config_file_path, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their corresponding model summary
files.
pyrouge uses regular expressions to automatically find the
matching model summary files for a given system summary file
(cf. docstrings for system_filename_pattern and
model_filename_pattern).
system_dir: Path of directory containing
system summaries.
system_filename_pattern: Regex string for matching
system summary filenames.
model_dir: Path of directory containing
model summaries.
model_filename_pattern: Regex string for matching model
summary filenames.
config_file_path: Path of the configuration file.
system_id: Optional system ID string which
will appear in the ROUGE output.
"""
system_filenames = [f for f in os.listdir(system_dir)]
system_models_tuples = []
system_filename_pattern = re.compile(system_filename_pattern)
for system_filename in sorted(system_filenames):
match = system_filename_pattern.match(system_filename)
if match:
id = match.groups(0)[0]
model_filenames = [model_filename_pattern.replace('#ID#', id)]
# model_filenames = Rouge155.__get_model_filenames_for_id(
# id, model_dir, model_filename_pattern)
system_models_tuples.append(
(system_filename, sorted(model_filenames)))
if not system_models_tuples:
raise Exception(
"Did not find any files matching the pattern {} "
"in the system summaries directory {}.".format(
system_filename_pattern.pattern, system_dir))
with codecs.open(config_file_path, 'w', encoding='utf-8') as f:
f.write('<ROUGE-EVAL version="1.55">')
for task_id, (system_filename, model_filenames) in enumerate(
system_models_tuples, start=1):
eval_string = Rouge155.__get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames)
f.write(eval_string)
f.write("</ROUGE-EVAL>")
def write_config(self, config_file_path=None, system_id=None):
"""
Write the ROUGE configuration file, which is basically a list
of system summary files and their matching model summary files.
This is a non-static version of write_config_static().
config_file_path: Path of the configuration file.
system_id: Optional system ID string which will
appear in the ROUGE output.
"""
if not system_id:
system_id = 1
if (not config_file_path) or (not self._config_dir):
self._config_dir = mkdtemp(dir=self.temp_dir)
config_filename = "rouge_conf.xml"
else:
config_dir, config_filename = os.path.split(config_file_path)
verify_dir(config_dir, "configuration file")
self._config_file = os.path.join(self._config_dir, config_filename)
Rouge155.write_config_static(
self._system_dir, self._system_filename_pattern,
self._model_dir, self._model_filename_pattern,
self._config_file, system_id)
self.log.info(
"Written ROUGE configuration to {}".format(self._config_file))
def evaluate(self, system_id=1, rouge_args=None):
"""
Run ROUGE to evaluate the system summaries in system_dir against
the model summaries in model_dir. The summaries are assumed to
be in the one-sentence-per-line HTML format ROUGE understands.
system_id: Optional system ID which will be printed in
ROUGE's output.
Returns: Rouge output as string.
"""
self.write_config(system_id=system_id)
options = self.__get_options(rouge_args)
command = [self._bin_path] + options
self.log.info(
"Running ROUGE with command {}".format(" ".join(command)))
rouge_output = check_output(command).decode("UTF-8")
return rouge_output
def convert_and_evaluate(self, system_id=1,
split_sentences=False, rouge_args=None):
"""
Convert plain text summaries to ROUGE format and run ROUGE to
evaluate the system summaries in system_dir against the model
summaries in model_dir. Optionally split texts into sentences
in case they aren't already.
This is just a convenience method combining
convert_summaries_to_rouge_format() and evaluate().
split_sentences: Optional argument specifying if
sentences should be split.
system_id: Optional system ID which will be printed
in ROUGE's output.
Returns: ROUGE output as string.
"""
if split_sentences:
self.split_sentences()
self.__write_summaries()
rouge_output = self.evaluate(system_id, rouge_args)
return rouge_output
def output_to_dict(self, output):
"""
Convert the ROUGE output into python dictionary for further
processing.
"""
# 0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
pattern = re.compile(
r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")
results = {}
for line in output.split("\n"):
match = pattern.match(line)
if match:
sys_id, rouge_type, measure, result, conf_begin, conf_end = \
match.groups()
measure = {
'Average_R': 'recall',
'Average_P': 'precision',
'Average_F': 'f_score'
}[measure]
rouge_type = rouge_type.lower().replace("-", '_')
key = "{}_{}".format(rouge_type, measure)
results[key] = float(result)
results["{}_cb".format(key)] = float(conf_begin)
results["{}_ce".format(key)] = float(conf_end)
return results
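# Illustrative example: a ROUGE output line such as
#   1 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)
# becomes the dictionary entries
#   {'rouge_1_recall': 0.02632, 'rouge_1_recall_cb': 0.02632, 'rouge_1_recall_ce': 0.02632}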
###################################################################
# Private methods
def __set_rouge_dir(self, home_dir=None):
"""
Verify the presence of ROUGE-1.5.5.pl and the data folder, and set
those paths.
"""
if not home_dir:
self._home_dir = self.__get_rouge_home_dir_from_settings()
else:
self._home_dir = home_dir
self.save_home_dir()
self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl')
self.data_dir = os.path.join(self._home_dir, 'data')
if not os.path.exists(self._bin_path):
raise Exception(
"ROUGE binary not found at {}. Please set the "
"correct path by running pyrouge_set_rouge_path "
"/path/to/rouge/home.".format(self._bin_path))
def __get_rouge_home_dir_from_settings(self):
config = ConfigParser()
with open(self._settings_file) as f:
if hasattr(config, "read_file"):
config.read_file(f)
else:
# use deprecated python 2.x method
config.readfp(f)
rouge_home_dir = config.get('pyrouge settings', 'home_dir')
return rouge_home_dir
@staticmethod
def __get_eval_string(
task_id, system_id,
system_dir, system_filename,
model_dir, model_filenames):
"""
ROUGE can evaluate several system summaries for a given text
against several model summaries, i.e. there is an m-to-n
relation between system and model summaries. The system
summaries are listed in the <PEERS> tag and the model summaries
in the <MODELS> tag. pyrouge currently only supports one system
summary per text, i.e. it assumes a 1-to-n relation between
system and model summaries.
"""
peer_elems = "<P ID=\"{id}\">{name}</P>".format(
id=system_id, name=system_filename)
model_elems = ["<M ID=\"{id}\">{name}</M>".format(
id=chr(65 + i), name=name)
for i, name in enumerate(model_filenames)]
model_elems = "\n\t\t\t".join(model_elems)
eval_string = """
<EVAL ID="{task_id}">
<MODEL-ROOT>{model_root}</MODEL-ROOT>
<PEER-ROOT>{peer_root}</PEER-ROOT>
<INPUT-FORMAT TYPE="SEE">
</INPUT-FORMAT>
<PEERS>
{peer_elems}
</PEERS>
<MODELS>
{model_elems}
</MODELS>
</EVAL>
""".format(
task_id=task_id,
model_root=model_dir, model_elems=model_elems,
peer_root=system_dir, peer_elems=peer_elems)
return eval_string
def __process_summaries(self, process_func):
"""
Helper method that applies process_func to the files in the
system and model folders and saves the resulting files to new
system and model folders.
"""
temp_dir = mkdtemp(dir=self.temp_dir)
new_system_dir = os.path.join(temp_dir, "system")
os.mkdir(new_system_dir)
new_model_dir = os.path.join(temp_dir, "model")
os.mkdir(new_model_dir)
self.log.info(
"Processing summaries. Saving system files to {} and "
"model files to {}.".format(new_system_dir, new_model_dir))
process_func(self._system_dir, new_system_dir)
process_func(self._model_dir, new_model_dir)
self._system_dir = new_system_dir
self._model_dir = new_model_dir
def __write_summaries(self):
self.log.info("Writing summaries.")
self.__process_summaries(self.convert_summaries_to_rouge_format)
@staticmethod
def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern):
pattern = re.compile(model_filenames_pattern.replace('#ID#', id))
model_filenames = [
f for f in os.listdir(model_dir) if pattern.match(f)]
if not model_filenames:
raise Exception(
"Could not find any model summaries for the system"
" summary with ID {}. Specified model filename pattern was: "
"{}".format(id, model_filenames_pattern))
return model_filenames
def __get_options(self, rouge_args=None):
"""
Get supplied command line arguments for ROUGE or use default
ones.
"""
if self.args:
options = self.args.split()
elif rouge_args:
options = rouge_args.split()
else:
options = [
'-e', self._data_dir,
'-c', 95,
# '-2',
# '-1',
# '-U',
'-m',
# '-v',
'-r', 1000,
'-n', 2,
# '-w', 1.2,
'-a',
]
options = list(map(str, options))
options = self.__add_config_option(options)
return options
def __create_dir_property(self, dir_name, docstring):
"""
Generate getter and setter for a directory property.
"""
property_name = "{}_dir".format(dir_name)
private_name = "_" + property_name
setattr(self, private_name, None)
def fget(self):
return getattr(self, private_name)
def fset(self, path):
verify_dir(path, dir_name)
setattr(self, private_name, path)
p = property(fget=fget, fset=fset, doc=docstring)
setattr(self.__class__, property_name, p)
def __set_dir_properties(self):
"""
Automatically generate the properties for directories.
"""
directories = [
("home", "The ROUGE home directory."),
("data", "The path of the ROUGE 'data' directory."),
("system", "Path of the directory containing system summaries."),
("model", "Path of the directory containing model summaries."),
]
for (dirname, docstring) in directories:
self.__create_dir_property(dirname, docstring)
def __clean_rouge_args(self, rouge_args):
"""
Remove enclosing quotation marks, if any.
"""
if not rouge_args:
return
quot_mark_pattern = re.compile('"(.+)"')
match = quot_mark_pattern.match(rouge_args)
if match:
cleaned_args = match.group(1)
return cleaned_args
else:
return rouge_args
def __add_config_option(self, options):
return options + [self._config_file]
def __get_config_path(self):
if platform.system() == "Windows":
parent_dir = os.getenv("APPDATA")
config_dir_name = "pyrouge"
elif os.name == "posix":
parent_dir = os.path.expanduser("~")
config_dir_name = ".pyrouge"
else:
parent_dir = os.path.dirname(__file__)
config_dir_name = ""
config_dir = os.path.join(parent_dir, config_dir_name)
if not os.path.exists(config_dir):
os.makedirs(config_dir)
return os.path.join(config_dir, 'settings.ini')
if __name__ == "__main__":
import argparse
from utils.argparsers import rouge_path_parser
parser = argparse.ArgumentParser(parents=[rouge_path_parser])
args = parser.parse_args()
rouge = Rouge155(args.rouge_home)
rouge.save_home_dir()
| data2vec_vision-main | s2s-ft/evaluations/bs_pyrouge.py |
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import glob
import json
import argparse
import math
import string
from multiprocessing import Pool, cpu_count
from tqdm import tqdm, trange
from pathlib import Path
import numpy as np
# pip install py-rouge
import rouge
import time
import tempfile
import shutil
# pip install pyrouge
from evaluations.bs_pyrouge import Rouge155
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--gold", type=str, help="Gold output file.")
parser.add_argument("--pred", type=str, help="Input prediction file.")
parser.add_argument("--split", type=str, default="",
help="Data split (train/dev/test).")
parser.add_argument("--save_best", action='store_true',
help="Save best epoch.")
parser.add_argument("--only_eval_best", action='store_true',
help="Only evaluate best epoch.")
parser.add_argument("--trunc_len", type=int, default=60,
help="Truncate line by the maximum length.")
parser.add_argument("--duplicate_rate", type=float, default=0.7,
help="If the duplicat rate (compared with history) is large, we can discard the current sentence.")
default_process_count = max(1, cpu_count() - 1)
parser.add_argument("--processes", type=int, default=default_process_count,
help="Number of processes to use (default %(default)s)")
parser.add_argument("--perl", action='store_true',
help="Using the perl script.")
parser.add_argument('--lazy_eval', action='store_true',
help="Skip evaluation if the .rouge file exists.")
args = parser.parse_args()
SPECIAL_TOKEN = ["[UNK]", "[PAD]", "[CLS]", "[MASK]"]
evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2,
limit_length=False, apply_avg=True, weight_factor=1.2)
def test_rouge(cand, ref):
temp_dir = tempfile.mkdtemp()
candidates = cand
references = ref
assert len(candidates) == len(references)
cnt = len(candidates)
current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time))
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
os.mkdir(tmp_dir + "/candidate")
os.mkdir(tmp_dir + "/reference")
try:
for i in range(cnt):
if len(references[i]) < 1:
continue
with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(candidates[i])
with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(references[i])
r = Rouge155(temp_dir=temp_dir)
r.model_dir = tmp_dir + "/reference/"
r.system_dir = tmp_dir + "/candidate/"
r.model_filename_pattern = 'ref.#ID#.txt'
r.system_filename_pattern = r'cand.(\d+).txt'
rouge_results = r.convert_and_evaluate()
print(rouge_results)
results_dict = r.output_to_dict(rouge_results)
finally:
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
return results_dict
def rouge_results_to_str(results_dict):
return ">> ROUGE-F(1/2/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\n".format(
results_dict["rouge_1_f_score"] * 100,
results_dict["rouge_2_f_score"] * 100,
results_dict["rouge_l_f_score"] * 100,
results_dict["rouge_1_recall"] * 100,
results_dict["rouge_2_recall"] * 100,
results_dict["rouge_l_recall"] * 100
)
def count_tokens(tokens):
counter = {}
for t in tokens:
if t in counter.keys():
counter[t] += 1
else:
counter[t] = 1
return counter
def get_f1(text_a, text_b):
tokens_a = text_a.lower().split()
tokens_b = text_b.lower().split()
if len(tokens_a) == 0 or len(tokens_b) == 0:
return 1 if len(tokens_a) == len(tokens_b) else 0
set_a = count_tokens(tokens_a)
set_b = count_tokens(tokens_b)
match = 0
for token in set_a.keys():
if token in set_b.keys():
match += min(set_a[token], set_b[token])
p = match / len(tokens_a)
r = match / len(tokens_b)
return 2.0 * p * r / (p + r + 1e-5)
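# Worked example: get_f1("the cat sat", "the cat") finds 2 overlapping tokens,
# so p = 2/3, r = 2/2, and F1 = 2*p*r/(p + r + 1e-5) ~= 0.80.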
_tok_dict = {"(": "-LRB-", ")": "-RRB-",
"[": "-LSB-", "]": "-RSB-",
"{": "-LCB-", "}": "-RCB-"}
def _is_digit(w):
for ch in w:
if not(ch.isdigit() or ch == ','):
return False
return True
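# Illustrative example: _is_digit accepts digit strings with comma separators
# only, e.g. _is_digit("3,000") -> True, _is_digit("3.5") -> False.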
def fix_tokenization(text):
input_tokens = text.split()
output_tokens = []
has_left_quote = False
has_left_single_quote = False
i = 0
prev_dash = False
while i < len(input_tokens):
tok = input_tokens[i]
flag_prev_dash = False
if tok in _tok_dict.keys():
output_tokens.append(_tok_dict[tok])
i += 1
elif tok == "\"":
if has_left_quote:
output_tokens.append("''")
else:
output_tokens.append("``")
has_left_quote = not has_left_quote
i += 1
elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t":
output_tokens[-1] = output_tokens[-1][:-1]
output_tokens.append("n't")
i += 2
elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"):
output_tokens.append("'"+input_tokens[i + 1])
i += 2
elif tok == "'":
if has_left_single_quote:
output_tokens.append("'")
else:
output_tokens.append("`")
has_left_single_quote = not has_left_single_quote
i += 1
elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".":
output_tokens.append("...")
i += 3
elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]):
# $ 3 , 000 -> $ 3,000
output_tokens[-1] += ','+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit():
# 3 . 03 -> 3.03
output_tokens[-1] += '.'+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.':
# U . N . -> U.N.
k = i+3
while k+2 < len(input_tokens):
if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.':
k += 2
else:
break
output_tokens[-1] += ''.join(input_tokens[i:k])
i = k  # advance past all tokens merged into the abbreviation
elif tok == "-":
if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-":
output_tokens.append("--")
i += 2
elif i == len(input_tokens) - 1 or i == 0:
output_tokens.append("-")
i += 1
elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation:
output_tokens[-1] += "-"
i += 1
flag_prev_dash = True
else:
output_tokens.append("-")
i += 1
elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation:
output_tokens[-1] += tok
i += 1
else:
output_tokens.append(tok)
i += 1
prev_dash = flag_prev_dash
return " ".join(output_tokens)
def remove_duplicate(l_list, duplicate_rate):
tk_list = [l.lower().split() for l in l_list]
r_list = []
history_set = set()
for i, w_list in enumerate(tk_list):
w_set = set(w_list)
if len(w_set & history_set)/len(w_set) <= duplicate_rate:
r_list.append(l_list[i])
history_set |= w_set
return r_list
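# Worked example with duplicate_rate = 0.7: the second line shares 3 of its 4
# unique tokens with the history (0.75 > 0.7) and is dropped:
#   remove_duplicate(["the cat sat", "the cat sat down", "a dog ran"], 0.7)
#   -> ["the cat sat", "a dog ran"]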
def process_eval(eval_fn):
gold_list = []
with open(args.gold, "r", encoding="utf-8") as f_in:
for l in f_in:
line = l.strip().replace(" <S_SEP> ", '\n')
gold_list.append(line)
pred_list = []
with open(eval_fn, "r", encoding="utf-8") as f_in:
for l in f_in:
buf = []
for sentence in l.strip().split("[X_SEP]"):
sentence = fix_tokenization(sentence)
sentence = sentence.replace("(", " -LRB- ").replace(")", " -RRB- ")
sentence = sentence.replace("[", " -LSB- ").replace("]", " -RSB- ")
while " " in sentence:
sentence = sentence.replace(" ", " ")
if any(get_f1(sentence, s) > 1.0 for s in buf):
continue
s_len = len(sentence.split())
if s_len <= 4:
continue
buf.append(sentence)
if args.duplicate_rate and args.duplicate_rate < 1:
buf = remove_duplicate(buf, args.duplicate_rate)
if args.trunc_len:
num_left = args.trunc_len
trunc_list = []
for bit in buf:
tk_list = bit.split()
n = min(len(tk_list), num_left)
trunc_list.append(' '.join(tk_list[:n]))
num_left -= n
if num_left <= 0:
break
else:
trunc_list = buf
line = "\n".join(trunc_list)
pred_list.append(line)
with open(eval_fn+'.post', 'w', encoding='utf-8') as f_out:
for l in pred_list:
f_out.write(l.replace('\n', ' [X_SEP] ').strip())
f_out.write('\n')
# rouge scores
if len(pred_list) < len(gold_list):
# evaluate subset
gold_list = gold_list[:len(pred_list)]
assert len(pred_list) == len(gold_list)
if args.perl:
scores = test_rouge(pred_list, gold_list)
else:
scores = evaluator.get_scores(pred_list, [[it] for it in gold_list])
return eval_fn, scores
def main():
if args.perl:
eval_fn_list = list(glob.glob(args.pred))
else:
eval_fn_list = [eval_fn for eval_fn in glob.glob(args.pred) if not(
args.lazy_eval and Path(eval_fn+".rouge").exists())]
eval_fn_list = list(filter(lambda fn: not(fn.endswith(
'.post') or fn.endswith('.rouge')), eval_fn_list))
if args.only_eval_best:
best_epoch_dict = {}
for dir_path in set(Path(fn).parent for fn in eval_fn_list):
fn_save = os.path.join(dir_path, 'save_best.dev')
if Path(fn_save).exists():
with open(fn_save, 'r') as f_in:
__, o_name, __ = f_in.read().strip().split('\n')
epoch = o_name.split('.')[1]
best_epoch_dict[dir_path] = epoch
new_eval_fn_list = []
for fn in eval_fn_list:
dir_path = Path(fn).parent
if dir_path in best_epoch_dict:
if Path(fn).name.split('.')[1] == best_epoch_dict[dir_path]:
new_eval_fn_list.append(fn)
eval_fn_list = new_eval_fn_list
logger.info("***** Evaluation: %s *****", ','.join(eval_fn_list))
num_pool = min(args.processes, len(eval_fn_list))
p = Pool(num_pool)
r_list = p.imap_unordered(process_eval, eval_fn_list)
r_list = sorted([(fn, scores)
for fn, scores in r_list], key=lambda x: x[0])
rg2_dict = {}
for fn, scores in r_list:
print(fn)
if args.perl:
print(rouge_results_to_str(scores))
else:
rg2_dict[fn] = scores['rouge-2']['f']
print(
"ROUGE-1: {}\tROUGE-2: {}\n".format(scores['rouge-1']['f'], scores['rouge-2']['f']))
with open(fn+".rouge", 'w') as f_out:
f_out.write(json.dumps(
{'rg1': scores['rouge-1']['f'], 'rg2': scores['rouge-2']['f']}))
p.close()
p.join()
if args.save_best:
# find best results
group_dict = {}
for k, v in rg2_dict.items():
d_name, o_name = Path(k).parent, Path(k).name
if (d_name not in group_dict) or (v > group_dict[d_name][1]):
group_dict[d_name] = (o_name, v)
# compare and save the best result
for k, v in group_dict.items():
fn = os.path.join(k, 'save_best.'+args.split)
o_name_s, rst_s = v
should_save = True
if Path(fn).exists():
with open(fn, 'r') as f_in:
rst_f = float(f_in.read().strip().split('\n')[-1])
if rst_s <= rst_f:
should_save = False
if should_save:
with open(fn, 'w') as f_out:
f_out.write('{0}\n{1}\n{2}\n'.format(k, o_name_s, rst_s))
if __name__ == "__main__":
main()
| data2vec_vision-main | s2s-ft/evaluations/eval_for_cnndm.py |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from transformers import BertConfig, RobertaConfig
from s2s_ft.configuration_unilm import UnilmConfig
logger = logging.getLogger(__name__)
class BertForSeq2SeqConfig(BertConfig):
def __init__(self, label_smoothing=0.1, source_type_id=0, target_type_id=1, **kwargs):
super(BertForSeq2SeqConfig, self).__init__(**kwargs)
self.label_smoothing = label_smoothing
self.source_type_id = source_type_id
self.target_type_id = target_type_id
@classmethod
def from_exist_config(cls, config, label_smoothing=0.1, max_position_embeddings=None):
required_keys = [
"vocab_size", "hidden_size", "num_hidden_layers", "num_attention_heads",
"hidden_act", "intermediate_size", "hidden_dropout_prob", "attention_probs_dropout_prob",
"max_position_embeddings", "type_vocab_size", "initializer_range", "layer_norm_eps"]
kwargs = {}
for key in required_keys:
assert hasattr(config, key)
kwargs[key] = getattr(config, key)
kwargs["vocab_size_or_config_json_file"] = kwargs["vocab_size"]
if isinstance(config, RobertaConfig):
kwargs["type_vocab_size"] = 0
kwargs["max_position_embeddings"] = kwargs["max_position_embeddings"] - 2
additional_keys = [
"source_type_id", "target_type_id"
]
for key in additional_keys:
if hasattr(config, key):
kwargs[key] = getattr(config, key)
if max_position_embeddings is not None and max_position_embeddings > config.max_position_embeddings:
kwargs["max_position_embeddings"] = max_position_embeddings
logger.info(" ** Change max position embeddings to %d ** " % max_position_embeddings)
return cls(label_smoothing=label_smoothing, **kwargs)
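# Minimal usage sketch (an illustration, not part of this module): derive a
# seq2seq config from a pretrained encoder config, e.g.
#   base = BertConfig.from_pretrained("bert-base-uncased")
#   config = BertForSeq2SeqConfig.from_exist_config(base, label_smoothing=0.1)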
| data2vec_vision-main | s2s-ft/s2s_ft/config.py |
# coding=utf-8
# The MIT License (MIT)
# Copyright (c) Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" MiniLM model configuration """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import sys
from io import open
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
MINILM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'minilm-l12-h384-uncased': "https://unilm.blob.core.windows.net/ckpt/minilm-l12-h384-uncased-config.json",
}
class MinilmConfig(PretrainedConfig):
r"""
:class:`~transformers.MinilmConfig` is the configuration class to store the configuration of a
`MinilmModel`.
Arguments:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `MiniLMModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`MiniLMModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
pretrained_config_archive_map = MINILM_PRETRAINED_CONFIG_ARCHIVE_MAP
def __init__(self,
vocab_size=28996,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=6,
initializer_range=0.02,
layer_norm_eps=1e-12,
source_type_id=0,
target_type_id=1,
**kwargs):
super(MinilmConfig, self).__init__(**kwargs)
if isinstance(vocab_size, str) or (sys.version_info[0] == 2
and isinstance(vocab_size, unicode)):
with open(vocab_size, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size, int):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.source_type_id = source_type_id
self.target_type_id = target_type_id
else:
raise ValueError("First argument must be either a vocabulary size (int)"
" or the path to a pretrained model config file (str)")
| data2vec_vision-main | s2s-ft/s2s_ft/configuration_minilm.py |
# coding=utf-8
"""PyTorch BERT model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import numpy as np
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from transformers.file_utils import cached_path
from torch.nn.modules.loss import _Loss
class LabelSmoothingLoss(_Loss):
"""
With label smoothing,
KL-divergence between q_{smoothed ground truth prob.}(w)
and p_{prob. computed by model}(w) is minimized.
"""
def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None, reduction='mean'):
assert 0.0 < label_smoothing <= 1.0
self.ignore_index = ignore_index
super(LabelSmoothingLoss, self).__init__(
size_average=size_average, reduce=reduce, reduction=reduction)
assert label_smoothing > 0
assert tgt_vocab_size > 0
smoothing_value = label_smoothing / (tgt_vocab_size - 2)
one_hot = torch.full((tgt_vocab_size,), smoothing_value)
one_hot[self.ignore_index] = 0
self.register_buffer('one_hot', one_hot.unsqueeze(0))
self.confidence = 1.0 - label_smoothing
self.tgt_vocab_size = tgt_vocab_size
def forward(self, output, target):
"""
output (FloatTensor): batch_size * num_pos * n_classes
target (LongTensor): batch_size * num_pos
"""
assert self.tgt_vocab_size == output.size(2)
batch_size, num_pos = target.size(0), target.size(1)
output = output.view(-1, self.tgt_vocab_size)
target = target.view(-1)
model_prob = self.one_hot.repeat(target.size(0), 1)
model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)
return F.kl_div(output, model_prob, reduction='none').view(batch_size, num_pos, -1).sum(2)
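# Usage sketch (shapes inferred from the code; the loss expects log-probabilities
# because it feeds `output` straight into F.kl_div):
#   crit = LabelSmoothingLoss(label_smoothing=0.1, tgt_vocab_size=30522, ignore_index=0)
#   log_probs = F.log_softmax(logits, dim=-1)    # (batch, num_pos, vocab)
#   per_position_loss = crit(log_probs, target)  # (batch, num_pos)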
logger = logging.getLogger(__name__)
PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz",
'unilm-base-cased': "https://unilm.blob.core.windows.net/ckpt/unilm1-base-cased.bin",
'unilm-large-cased': "https://unilm.blob.core.windows.net/ckpt/unilm1-large-cased.bin",
'unilm1-base-cased': "https://unilm.blob.core.windows.net/ckpt/unilm1-base-cased.bin",
'unilm1-large-cased': "https://unilm.blob.core.windows.net/ckpt/unilm1-large-cased.bin",
'unilm1.2-base-uncased': "https://unilm.blob.core.windows.net/ckpt/unilm1.2-base-uncased.bin"
}
CONFIG_NAME = 'config.json'
WEIGHTS_NAME = 'pytorch_model.bin'
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish}
class BertConfig(object):
"""Configuration class to store the configuration of a `BertModel`.
"""
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
relax_projection=0,
new_pos_ids=False,
initializer_range=0.02,
task_idx=None,
fp32_embedding=False,
ffn_type=0,
label_smoothing=None,
num_qkv=0,
seg_emb=False,
source_type_id=0,
target_type_id=1,
no_segment_embedding=False, **kwargs):
"""Constructs BertConfig.
Args:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu" and "swish" are supported.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.relax_projection = relax_projection
self.new_pos_ids = new_pos_ids
self.initializer_range = initializer_range
self.task_idx = task_idx
self.fp32_embedding = fp32_embedding
self.ffn_type = ffn_type
self.label_smoothing = label_smoothing
self.num_qkv = num_qkv
self.seg_emb = seg_emb
self.no_segment_embedding = no_segment_embedding
self.source_type_id = source_type_id
self.target_type_id = target_type_id
if type_vocab_size == 0:
self.no_segment_embedding = True
else:
raise ValueError("First argument must be either a vocabulary size (int)"
"or the path to a pretrained model config file (str)")
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
try:
from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm
except ImportError:
print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.")
class BertLayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-5):
"""Construct a layernorm module in the TF style (epsilon inside the square root).
"""
super(BertLayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class PositionalEmbedding(nn.Module):
def __init__(self, demb):
super(PositionalEmbedding, self).__init__()
self.demb = demb
inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb))
self.register_buffer('inv_freq', inv_freq)
def forward(self, pos_seq, bsz=None):
sinusoid_inp = torch.ger(pos_seq, self.inv_freq)
pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
if bsz is not None:
return pos_emb[:, None, :].expand(-1, bsz, -1)
else:
return pos_emb[:, None, :]
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size)
if config.no_segment_embedding:
self.token_type_embeddings = None
else:
self.token_type_embeddings = nn.Embedding(
config.type_vocab_size, config.hidden_size)
if hasattr(config, 'fp32_embedding'):
self.fp32_embedding = config.fp32_embedding
else:
self.fp32_embedding = False
if hasattr(config, 'new_pos_ids') and config.new_pos_ids:
self.num_pos_emb = 4
else:
self.num_pos_emb = 1
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size * self.num_pos_emb)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None, task_idx=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
if self.num_pos_emb > 1:
num_batch = position_embeddings.size(0)
num_pos = position_embeddings.size(1)
position_embeddings = position_embeddings.view(
num_batch, num_pos, self.num_pos_emb, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]
embeddings = words_embeddings + position_embeddings
if self.token_type_embeddings is not None:
embeddings = embeddings + self.token_type_embeddings(token_type_ids)
if self.fp32_embedding:
embeddings = embeddings.half()
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(
config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
if hasattr(config, 'num_qkv') and (config.num_qkv > 1):
self.num_qkv = config.num_qkv
else:
self.num_qkv = 1
self.query = nn.Linear(
config.hidden_size, self.all_head_size * self.num_qkv)
self.key = nn.Linear(config.hidden_size,
self.all_head_size * self.num_qkv)
self.value = nn.Linear(
config.hidden_size, self.all_head_size * self.num_qkv)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.uni_debug_flag = True if os.getenv(
'UNI_DEBUG_FLAG', '') else False
if self.uni_debug_flag:
self.register_buffer('debug_attention_probs',
torch.zeros((512, 512)))
if hasattr(config, 'seg_emb') and config.seg_emb:
self.b_q_s = nn.Parameter(torch.zeros(
1, self.num_attention_heads, 1, self.attention_head_size))
self.seg_emb = nn.Embedding(
config.type_vocab_size, self.all_head_size)
else:
self.b_q_s = None
self.seg_emb = None
def transpose_for_scores(self, x, mask_qkv=None):
if self.num_qkv > 1:
sz = x.size()[:-1] + (self.num_qkv,
self.num_attention_heads, self.all_head_size)
# (batch, pos, num_qkv, head, head_hid)
x = x.view(*sz)
if mask_qkv is None:
x = x[:, :, 0, :, :]
elif isinstance(mask_qkv, int):
x = x[:, :, mask_qkv, :, :]
else:
# mask_qkv: (batch, pos)
if mask_qkv.size(1) > sz[1]:
mask_qkv = mask_qkv[:, :sz[1]]
# -> x: (batch, pos, head, head_hid)
x = x.gather(2, mask_qkv.view(sz[0], sz[1], 1, 1, 1).expand(
sz[0], sz[1], 1, sz[3], sz[4])).squeeze(2)
else:
sz = x.size()[:-1] + (self.num_attention_heads,
self.attention_head_size)
# (batch, pos, head, head_hid)
x = x.view(*sz)
# (batch, head, pos, head_hid)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask, history_states=None,
mask_qkv=None, seg_ids=None, key_history=None, value_history=None,
key_cache=None, value_cache=None,
):
if history_states is None:
mixed_query_layer = self.query(hidden_states)
# possible issue: https://github.com/NVIDIA/apex/issues/131
mixed_key_layer = F.linear(hidden_states, self.key.weight)
mixed_value_layer = self.value(hidden_states)
else:
x_states = torch.cat((history_states, hidden_states), dim=1)
mixed_query_layer = self.query(hidden_states)
# possible issue: https://github.com/NVIDIA/apex/issues/131
mixed_key_layer = F.linear(x_states, self.key.weight)
mixed_value_layer = self.value(x_states)
if key_cache is not None and isinstance(key_cache, list):
key_cache.append(mixed_key_layer)
mixed_key_layer = torch.cat(key_cache, dim=1)
if value_cache is not None and isinstance(value_cache, list):
value_cache.append(mixed_value_layer)
mixed_value_layer = torch.cat(value_cache, dim=1)
query_layer = self.transpose_for_scores(mixed_query_layer, mask_qkv)
key_layer = self.transpose_for_scores(mixed_key_layer, mask_qkv)
value_layer = self.transpose_for_scores(mixed_value_layer, mask_qkv)
if key_history is not None and not isinstance(key_history, list):
key_layer = torch.cat((key_history, key_layer), dim=-2)
value_layer = torch.cat((value_history, value_layer), dim=-2)
# Take the dot product between "query" and "key" to get the raw attention scores.
# (batch, head, pos, pos)
attention_scores = torch.matmul(
query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2))
if self.seg_emb is not None:
seg_rep = self.seg_emb(seg_ids)
# (batch, pos, head, head_hid)
seg_rep = seg_rep.view(seg_rep.size(0), seg_rep.size(
1), self.num_attention_heads, self.attention_head_size)
qs = torch.einsum('bnih,bjnh->bnij',
query_layer + self.b_q_s, seg_rep)
attention_scores = attention_scores + qs
# attention_scores = attention_scores / math.sqrt(self.attention_head_size)
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
if self.uni_debug_flag:
_pos = attention_probs.size(-1)
self.debug_attention_probs[:_pos, :_pos].copy_(
attention_probs[0].mean(0).view(_pos, _pos))
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[
:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
if isinstance(key_history, list):
key_history.append(key_layer)
if isinstance(value_history, list):
value_history.append(value_layer)
return context_layer
class BertSelfOutput(nn.Module):
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, input_tensor, attention_mask, history_states=None,
mask_qkv=None, seg_ids=None, key_history=None, value_history=None):
self_output = self.self(
input_tensor, attention_mask, history_states=history_states,
mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=key_history, value_history=value_history)
attention_output = self.output(self_output, input_tensor)
return attention_output
class BertIntermediate(nn.Module):
def __init__(self, config):
super(BertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TransformerFFN(nn.Module):
def __init__(self, config):
super(TransformerFFN, self).__init__()
self.ffn_type = config.ffn_type
assert self.ffn_type in (1, 2)
if self.ffn_type in (1, 2):
self.wx0 = nn.Linear(config.hidden_size, config.hidden_size)
if self.ffn_type in (2,):
self.wx1 = nn.Linear(config.hidden_size, config.hidden_size)
if self.ffn_type in (1, 2):
self.output = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, x):
if self.ffn_type in (1, 2):
x0 = self.wx0(x)
if self.ffn_type == 1:
x1 = x
elif self.ffn_type == 2:
x1 = self.wx1(x)
out = self.output(x0 * x1)
out = self.dropout(out)
out = self.LayerNorm(out + x)
return out
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.ffn_type = config.ffn_type
if self.ffn_type:
self.ffn = TransformerFFN(config)
else:
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask, history_states=None,
mask_qkv=None, seg_ids=None, key_history=None, value_history=None):
attention_output = self.attention(
hidden_states, attention_mask, history_states=history_states,
mask_qkv=mask_qkv, seg_ids=seg_ids, key_history=key_history, value_history=value_history)
if self.ffn_type:
layer_output = self.ffn(attention_output)
else:
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
layer = BertLayer(config)
self.layer = nn.ModuleList([copy.deepcopy(layer)
for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, prev_embedding=None,
prev_encoded_layers=None, mask_qkv=None, seg_ids=None, key_history=None, value_history=None):
# history embedding and encoded layer must be simultaneously given
assert (prev_embedding is None) == (prev_encoded_layers is None)
all_encoder_layers = []
if (prev_embedding is not None) and (prev_encoded_layers is not None):
history_states = prev_embedding
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(
hidden_states, attention_mask, history_states=history_states, mask_qkv=mask_qkv, seg_ids=seg_ids)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if prev_encoded_layers is not None:
history_states = prev_encoded_layers[i]
else:
for i, layer_module in enumerate(self.layer):
set_key = None
if isinstance(key_history, list):
set_key = key_history if len(key_history) < len(self.layer) else key_history[i]
set_value = None
if isinstance(value_history, list):
set_value = value_history if len(key_history) < len(self.layer) else value_history[i]
hidden_states = layer_module(
hidden_states, attention_mask, mask_qkv=mask_qkv, seg_ids=seg_ids,
key_history=set_key, value_history=set_value)
if output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
if not output_all_encoded_layers:
all_encoder_layers.append(hidden_states)
return all_encoder_layers
class BertPooler(nn.Module):
def __init__(self, config):
super(BertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(BertPredictionHeadTransform, self).__init__()
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
hid_size = config.hidden_size
if hasattr(config, 'relax_projection') and (config.relax_projection > 1):
hid_size *= config.relax_projection
self.dense = nn.Linear(config.hidden_size, hid_size)
self.LayerNorm = BertLayerNorm(hid_size, eps=1e-5)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(bert_model_embedding_weights.size(1),
bert_model_embedding_weights.size(0),
bias=False)
self.decoder.weight = bert_model_embedding_weights
self.bias = nn.Parameter(torch.zeros(
bert_model_embedding_weights.size(0)))
if hasattr(config, 'relax_projection') and (config.relax_projection > 1):
self.relax_projection = config.relax_projection
else:
self.relax_projection = 0
self.fp32_embedding = config.fp32_embedding
def convert_to_type(tensor):
if self.fp32_embedding:
return tensor.half()
else:
return tensor
self.type_converter = convert_to_type
self.converted = False
def forward(self, hidden_states, task_idx=None):
if not self.converted:
self.converted = True
if self.fp32_embedding:
self.transform.half()
hidden_states = self.transform(self.type_converter(hidden_states))
if self.relax_projection > 1:
num_batch = hidden_states.size(0)
num_pos = hidden_states.size(1)
# (batch, num_pos, relax_projection*hid) -> (batch, num_pos, relax_projection, hid) -> (batch, num_pos, hid)
hidden_states = hidden_states.view(
num_batch, num_pos, self.relax_projection, -1)[torch.arange(0, num_batch).long(), :, task_idx, :]
if self.fp32_embedding:
hidden_states = F.linear(self.type_converter(hidden_states), self.type_converter(
self.decoder.weight), self.type_converter(self.bias))
else:
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
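# A small standalone sketch of the weight tying used by BertLMPredictionHead above:
# the decoder's weight matrix is the embedding table itself, with only a separate
# per-token output bias; all sizes here are illustrative.
def _example_weight_tying():
    emb = nn.Embedding(100, 16)                # (vocab_size, hidden)
    decoder = nn.Linear(16, 100, bias=False)
    decoder.weight = emb.weight                # logits = hidden @ emb.weight.T
    bias = nn.Parameter(torch.zeros(100))
    hidden = torch.randn(2, 4, 16)
    logits = decoder(hidden) + bias            # (2, 4, 100)
    return logits.shape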
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, bert_model_embedding_weights):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(
config, bert_model_embedding_weights)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(BertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config, bert_model_embedding_weights, num_labels=2):
super(BertPreTrainingHeads, self).__init__()
self.predictions = BertLMPredictionHead(
config, bert_model_embedding_weights)
self.seq_relationship = nn.Linear(config.hidden_size, num_labels)
def forward(self, sequence_output, pooled_output, task_idx=None):
prediction_scores = self.predictions(sequence_output, task_idx)
if pooled_output is None:
seq_relationship_score = None
else:
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class PreTrainedBertModel(nn.Module):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
def __init__(self, config, *inputs, **kwargs):
super(PreTrainedBertModel, self).__init__()
if not isinstance(config, BertConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of class `BertConfig`. "
"To create a model from a Google pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_bert_weights(self, module):
""" Initialize the weights.
"""
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
# module.weight.data.copy_(torch.Tensor(
# truncnorm.rvs(-1, 1, size=list(module.weight.data.shape)) * self.config.initializer_range))
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name, config, state_dict=None, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict.
Download and cache the pre-trained model file if needed.
Params:
pretrained_model_name: either:
- a str with the name of a pre-trained model to load selected in the list of:
. `bert-base-uncased`
. `bert-large-uncased`
. `bert-base-cased`
. `bert-base-multilingual`
. `bert-base-chinese`
- a path or url to a pretrained model archive containing:
. `bert_config.json` a configuration file for the model
. `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance
cache_dir: an optional path to a folder in which the pre-trained models will be cached.
            state_dict: an optional state dictionary (collections.OrderedDict object) to use instead of Google pre-trained models
*inputs, **kwargs: additional input for the specific Bert class
(ex: num_labels for BertForSequenceClassification)
"""
logger.info("Model config {}".format(config))
# clean the arguments in kwargs
for arg_clean in ('config_path', 'type_vocab_size', 'relax_projection', 'new_pos_ids', 'task_idx',
'max_position_embeddings', 'fp32_embedding', 'ffn_type', 'label_smoothing',
'hidden_dropout_prob', 'attention_probs_dropout_prob', 'num_qkv', 'seg_emb',
'word_emb_map', 'num_labels', 'num_rel', 'num_sentlvl_labels'):
if arg_clean in kwargs:
del kwargs[arg_clean]
# Instantiate model.
model = cls(config, *inputs, **kwargs)
if state_dict is None:
weights_path = os.path.join(pretrained_model_name, WEIGHTS_NAME)
state_dict = torch.load(weights_path)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix='' if hasattr(model, 'bert') else 'bert.')
model.missing_keys = missing_keys
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
logger.info('\n'.join(error_msgs))
return model
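# A hedged usage sketch for the loader above: `checkpoint_dir` is a placeholder
# directory expected to contain the weights file (WEIGHTS_NAME), and the config
# object is built by the caller and passed in explicitly.
def _example_from_pretrained(checkpoint_dir, config):
    # Any subclass works the same way, e.g. BertForPreTraining defined below.
    model = BertForPreTraining.from_pretrained(checkpoint_dir, config)
    model.eval()
    return model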
class BertModel(PreTrainedBertModel):
"""BERT model ("Bidirectional Embedding Representations from a Transformer").
Params:
config: a BertConfig class instance with the configuration to build a new model
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`.
Outputs: Tuple of (encoded_layers, pooled_output)
        `encoded_layers`: controlled by `output_all_encoded_layers` argument:
- `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end
of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each
encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size],
- `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding
to the last attention block of shape [batch_size, sequence_length, hidden_size],
`pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a
            classifier pretrained on top of the hidden state associated to the first token of the
            input (`[CLS]`) to train on the Next-Sentence task (see BERT's paper).
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
def rescale_some_parameters(self):
for layer_id, layer in enumerate(self.encoder.layer):
layer.attention.output.dense.weight.data.div_(
math.sqrt(2.0 * (layer_id + 1)))
layer.output.dense.weight.data.div_(math.sqrt(2.0 * (layer_id + 1)))
def get_extended_attention_mask(self, input_ids, token_type_ids, attention_mask):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
if attention_mask.dim() == 2:
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
elif attention_mask.dim() == 3:
extended_attention_mask = attention_mask.unsqueeze(1)
else:
raise NotImplementedError
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True,
mask_qkv=None, task_idx=None, key_history=None, value_history=None, position_ids=None):
extended_attention_mask = self.get_extended_attention_mask(
input_ids, token_type_ids, attention_mask)
embedding_output = self.embeddings(
input_ids, token_type_ids, task_idx=task_idx, position_ids=position_ids)
encoded_layers = self.encoder(embedding_output, extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
mask_qkv=mask_qkv, seg_ids=token_type_ids,
key_history=key_history, value_history=value_history)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
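# A minimal forward-pass sketch for BertModel, mirroring the docstring example of
# BertForPreTraining below; the config values are illustrative and assume the
# BertConfig defaults defined earlier in this file.
def _example_bert_model():
    config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
                        num_hidden_layers=12, num_attention_heads=12,
                        intermediate_size=3072)
    model = BertModel(config)
    input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
    input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
    token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
    all_encoder_layers, pooled_output = model(input_ids, token_type_ids, input_mask)
    return len(all_encoder_layers), pooled_output.shape   # 12 layers, (2, 768)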
class BertModelIncr(BertModel):
def __init__(self, config):
super(BertModelIncr, self).__init__(config)
def forward(self, input_ids, token_type_ids, position_ids, attention_mask, output_all_encoded_layers=True,
prev_embedding=None, prev_encoded_layers=None, mask_qkv=None, task_idx=None):
extended_attention_mask = self.get_extended_attention_mask(
input_ids, token_type_ids, attention_mask)
embedding_output = self.embeddings(
input_ids, token_type_ids, position_ids, task_idx=task_idx)
encoded_layers = self.encoder(embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv,
seg_ids=token_type_ids)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return embedding_output, encoded_layers, pooled_output
class BertForPreTraining(PreTrainedBertModel):
"""BERT model with pre-training heads.
This module comprises the BERT model followed by the two pre-training heads:
- the masked language modeling head, and
- the next sentence classification head.
Params:
config: a BertConfig class instance with the configuration to build a new model.
Inputs:
`input_ids`: a torch.LongTensor of shape [batch_size, sequence_length]
            with the word token indices in the vocabulary (see the tokens preprocessing logic in the scripts
`extract_features.py`, `run_classifier.py` and `run_squad.py`)
`token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token
types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to
a `sentence B` token (see BERT paper for more details).
`attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices
selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max
input sequence length in the current batch. It's the mask that we typically use for attention when
a batch has varying length sentences.
`masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length]
            with indices selected in [-1, 0, ..., vocab_size - 1]. All labels set to -1 are ignored (masked); the loss
            is only computed for the labels in [0, ..., vocab_size - 1].
`next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size]
with indices selected in [0, 1].
0 => next sentence is the continuation, 1 => next sentence is a random sentence.
Outputs:
if `masked_lm_labels` and `next_sentence_label` are not `None`:
Outputs the total_loss which is the sum of the masked language modeling loss and the next
sentence classification loss.
if `masked_lm_labels` or `next_sentence_label` is `None`:
Outputs a tuple comprising
- the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
- the next sentence classification logits of shape [batch_size, 2].
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]])
input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]])
token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]])
config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768,
num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072)
model = BertForPreTraining(config)
masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask)
```
"""
def __init__(self, config):
super(BertForPreTraining, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(
config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
next_sentence_label=None, mask_qkv=None, task_idx=None):
sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False, mask_qkv=mask_qkv,
task_idx=task_idx)
prediction_scores, seq_relationship_score = self.cls(
sequence_output, pooled_output)
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(
seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
return total_loss
else:
return prediction_scores, seq_relationship_score
class BertPreTrainingPairTransform(nn.Module):
def __init__(self, config):
super(BertPreTrainingPairTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size * 2, config.hidden_size)
self.transform_act_fn = ACT2FN[config.hidden_act] \
if isinstance(config.hidden_act, str) else config.hidden_act
# self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5)
def forward(self, pair_x, pair_y):
hidden_states = torch.cat([pair_x, pair_y], dim=-1)
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
# hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertPreTrainingPairRel(nn.Module):
def __init__(self, config, num_rel=0):
super(BertPreTrainingPairRel, self).__init__()
self.R_xy = BertPreTrainingPairTransform(config)
self.rel_emb = nn.Embedding(num_rel, config.hidden_size)
def forward(self, pair_x, pair_y, pair_r, pair_pos_neg_mask):
# (batch, num_pair, hidden)
xy = self.R_xy(pair_x, pair_y)
r = self.rel_emb(pair_r)
_batch, _num_pair, _hidden = xy.size()
pair_score = (xy * r).sum(-1)
# torch.bmm(xy.view(-1, 1, _hidden),r.view(-1, _hidden, 1)).view(_batch, _num_pair)
# .mul_(-1.0): objective to loss
return F.logsigmoid(pair_score * pair_pos_neg_mask.type_as(pair_score)).mul_(-1.0)
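# A tiny sketch of the pairwise relation loss above: entries of pair_pos_neg_mask
# are +1 for positive pairs and -1 for negatives, so each term becomes
# -log(sigmoid(score)) or -log(sigmoid(-score)) respectively. The SimpleNamespace
# config is hypothetical and only carries the fields the modules read.
def _example_pair_rel_loss():
    from types import SimpleNamespace
    cfg = SimpleNamespace(hidden_size=8, hidden_act="gelu")
    head = BertPreTrainingPairRel(cfg, num_rel=3)
    pair_x = torch.randn(2, 4, 8)            # (batch, num_pair, hidden)
    pair_y = torch.randn(2, 4, 8)
    pair_r = torch.randint(0, 3, (2, 4))     # relation ids
    pos_neg = torch.ones(2, 4)               # all pairs treated as positives
    loss = head(pair_x, pair_y, pair_r, pos_neg)   # (batch, num_pair), non-negative
    return loss.shape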
class BertForPreTrainingLossMask(PreTrainedBertModel):
"""refer to BertForPreTraining"""
def __init__(self, config, num_labels=2, num_rel=0, num_sentlvl_labels=0, no_nsp=False):
super(BertForPreTrainingLossMask, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(
config, self.bert.embeddings.word_embeddings.weight, num_labels=num_labels)
self.num_sentlvl_labels = num_sentlvl_labels
self.cls2 = None
if self.num_sentlvl_labels > 0:
self.secondary_pred_proj = nn.Embedding(
num_sentlvl_labels, config.hidden_size)
self.cls2 = BertPreTrainingHeads(
config, self.secondary_pred_proj.weight, num_labels=num_sentlvl_labels)
self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
if no_nsp:
self.crit_next_sent = None
else:
self.crit_next_sent = nn.CrossEntropyLoss(ignore_index=-1)
self.num_labels = num_labels
self.num_rel = num_rel
if self.num_rel > 0:
self.crit_pair_rel = BertPreTrainingPairRel(
config, num_rel=num_rel)
if hasattr(config, 'label_smoothing') and config.label_smoothing:
self.crit_mask_lm_smoothed = LabelSmoothingLoss(
config.label_smoothing, config.vocab_size, ignore_index=0, reduction='none')
else:
self.crit_mask_lm_smoothed = None
self.apply(self.init_bert_weights)
self.bert.rescale_some_parameters()
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None,
next_sentence_label=None, masked_pos=None, masked_weights=None, task_idx=None, pair_x=None,
pair_x_mask=None, pair_y=None, pair_y_mask=None, pair_r=None, pair_pos_neg_mask=None,
pair_loss_mask=None, masked_pos_2=None, masked_weights_2=None, masked_labels_2=None,
num_tokens_a=None, num_tokens_b=None, mask_qkv=None):
if token_type_ids is None and attention_mask is None:
task_0 = (task_idx == 0)
task_1 = (task_idx == 1)
task_2 = (task_idx == 2)
task_3 = (task_idx == 3)
sequence_length = input_ids.shape[-1]
index_matrix = torch.arange(sequence_length).view(
1, sequence_length).to(input_ids.device)
num_tokens = num_tokens_a + num_tokens_b
base_mask = (index_matrix < num_tokens.view(-1, 1)
).type_as(input_ids)
segment_a_mask = (
index_matrix < num_tokens_a.view(-1, 1)).type_as(input_ids)
token_type_ids = (
task_idx + 1 + task_3.type_as(task_idx)).view(-1, 1) * base_mask
token_type_ids = token_type_ids - segment_a_mask * \
(task_0 | task_3).type_as(segment_a_mask).view(-1, 1)
index_matrix = index_matrix.view(1, 1, sequence_length)
index_matrix_t = index_matrix.view(1, sequence_length, 1)
tril = index_matrix <= index_matrix_t
attention_mask_task_0 = (
index_matrix < num_tokens.view(-1, 1, 1)) & (
index_matrix_t < num_tokens.view(-1, 1, 1))
attention_mask_task_1 = tril & attention_mask_task_0
attention_mask_task_2 = torch.transpose(
tril, dim0=-2, dim1=-1) & attention_mask_task_0
attention_mask_task_3 = (
(index_matrix < num_tokens_a.view(-1, 1, 1)) | tril) & attention_mask_task_0
attention_mask = (attention_mask_task_0 & task_0.view(-1, 1, 1)) | \
(attention_mask_task_1 & task_1.view(-1, 1, 1)) | \
(attention_mask_task_2 & task_2.view(-1, 1, 1)) | \
(attention_mask_task_3 & task_3.view(-1, 1, 1))
attention_mask = attention_mask.type_as(input_ids)
sequence_output, pooled_output = self.bert(
input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv,
task_idx=task_idx)
def gather_seq_out_by_pos(seq, pos):
return torch.gather(seq, 1, pos.unsqueeze(2).expand(-1, -1, seq.size(-1)))
def gather_seq_out_by_pos_average(seq, pos, mask):
# pos/mask: (batch, num_pair, max_token_num)
batch_size, max_token_num = pos.size(0), pos.size(-1)
# (batch, num_pair, max_token_num, seq.size(-1))
pos_vec = torch.gather(seq, 1, pos.view(batch_size, -1).unsqueeze(
2).expand(-1, -1, seq.size(-1))).view(batch_size, -1, max_token_num, seq.size(-1))
# (batch, num_pair, seq.size(-1))
mask = mask.type_as(pos_vec)
pos_vec_masked_sum = (
pos_vec * mask.unsqueeze(3).expand_as(pos_vec)).sum(2)
return pos_vec_masked_sum / mask.sum(2, keepdim=True).expand_as(pos_vec_masked_sum)
def loss_mask_and_normalize(loss, mask):
mask = mask.type_as(loss)
loss = loss * mask
denominator = torch.sum(mask) + 1e-5
return (loss / denominator).sum()
if masked_lm_labels is None:
if masked_pos is None:
prediction_scores, seq_relationship_score = self.cls(
sequence_output, pooled_output, task_idx=task_idx)
else:
sequence_output_masked = gather_seq_out_by_pos(
sequence_output, masked_pos)
prediction_scores, seq_relationship_score = self.cls(
sequence_output_masked, pooled_output, task_idx=task_idx)
return prediction_scores, seq_relationship_score
# masked lm
sequence_output_masked = gather_seq_out_by_pos(
sequence_output, masked_pos)
prediction_scores_masked, seq_relationship_score = self.cls(
sequence_output_masked, pooled_output, task_idx=task_idx)
if self.crit_mask_lm_smoothed:
masked_lm_loss = self.crit_mask_lm_smoothed(
F.log_softmax(prediction_scores_masked.float(), dim=-1), masked_lm_labels)
else:
masked_lm_loss = self.crit_mask_lm(
prediction_scores_masked.transpose(1, 2).float(), masked_lm_labels)
masked_lm_loss = loss_mask_and_normalize(
masked_lm_loss.float(), masked_weights)
# next sentence
if self.crit_next_sent is None or next_sentence_label is None:
next_sentence_loss = 0.0
else:
next_sentence_loss = self.crit_next_sent(
seq_relationship_score.view(-1, self.num_labels).float(), next_sentence_label.view(-1))
if self.cls2 is not None and masked_pos_2 is not None:
sequence_output_masked_2 = gather_seq_out_by_pos(
sequence_output, masked_pos_2)
prediction_scores_masked_2, _ = self.cls2(
sequence_output_masked_2, None)
masked_lm_loss_2 = self.crit_mask_lm(
prediction_scores_masked_2.transpose(1, 2).float(), masked_labels_2)
masked_lm_loss_2 = loss_mask_and_normalize(
masked_lm_loss_2.float(), masked_weights_2)
masked_lm_loss = masked_lm_loss + masked_lm_loss_2
if pair_x is None or pair_y is None or pair_r is None or pair_pos_neg_mask is None or pair_loss_mask is None:
return masked_lm_loss, next_sentence_loss
# pair and relation
if pair_x_mask is None or pair_y_mask is None:
pair_x_output_masked = gather_seq_out_by_pos(
sequence_output, pair_x)
pair_y_output_masked = gather_seq_out_by_pos(
sequence_output, pair_y)
else:
pair_x_output_masked = gather_seq_out_by_pos_average(
sequence_output, pair_x, pair_x_mask)
pair_y_output_masked = gather_seq_out_by_pos_average(
sequence_output, pair_y, pair_y_mask)
pair_loss = self.crit_pair_rel(
pair_x_output_masked, pair_y_output_masked, pair_r, pair_pos_neg_mask)
pair_loss = loss_mask_and_normalize(
pair_loss.float(), pair_loss_mask)
return masked_lm_loss, next_sentence_loss, pair_loss
class BertForSeq2SeqFinetuningWithPseudoMask(PreTrainedBertModel):
"""refer to BertForPreTraining"""
def __init__(self, config):
super(BertForSeq2SeqFinetuningWithPseudoMask, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(
config, self.bert.embeddings.word_embeddings.weight, num_labels=2)
if hasattr(config, 'label_smoothing') and config.label_smoothing:
self.crit_mask_lm_smoothed = LabelSmoothingLoss(
config.label_smoothing, config.vocab_size, ignore_index=0, reduction='none')
self.crit_mask_lm = None
else:
self.crit_mask_lm_smoothed = None
self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
@staticmethod
def create_mask(token_ids, num_tokens):
base_position_matrix = torch.arange(
0, token_ids.size(1), dtype=token_ids.dtype, device=token_ids.device).view(1, -1)
return (base_position_matrix < num_tokens.view(-1, 1)).to(token_ids.device).type_as(token_ids)
def create_target_mask(self, target_ids, num_target_tokens):
max_target_len = target_ids.size(1)
target_mask = self.create_mask(target_ids, num_target_tokens)
target_pos_matrix = torch.arange(
0, max_target_len, dtype=target_ids.dtype, device=target_ids.device).view(1, -1)
triangle_attention_mask = \
target_pos_matrix.view(1, max_target_len, 1) >= target_pos_matrix.view(1, 1, max_target_len)
triangle_attention_mask = triangle_attention_mask.type_as(target_mask)
diagonal_attention_mask = \
target_pos_matrix.view(1, max_target_len, 1) == target_pos_matrix.view(1, 1, max_target_len)
diagonal_attention_mask = diagonal_attention_mask.type_as(target_mask)
golden_attention_mask = torch.cat((triangle_attention_mask, torch.zeros_like(triangle_attention_mask)), dim=-1)
pseudo_attention_mask = torch.cat(
(triangle_attention_mask - diagonal_attention_mask, diagonal_attention_mask), dim=-1)
return target_mask, torch.cat((golden_attention_mask, pseudo_attention_mask), dim=1)
def forward(self, source_ids, target_ids, pseudo_ids, num_source_tokens, num_target_tokens,
eval_mode=False, fixed_num_tokens=None):
source_mask = self.create_mask(source_ids, num_source_tokens)
key_history = []
value_history = []
source_sequence_output, pooled_output = self.bert(
source_ids, torch.zeros_like(source_ids), source_mask, output_all_encoded_layers=False,
key_history=key_history, value_history=value_history)
target_mask, extend_target_mask = self.create_target_mask(target_ids, num_target_tokens)
extend_target_mask = extend_target_mask.expand(source_ids.size(0), -1, -1)
mask_matrix = torch.cat(
(source_mask.unsqueeze(1).expand(-1, target_ids.size(1) * 2, -1), extend_target_mask), dim=-1)
target_input_sequence = torch.cat((target_ids, pseudo_ids), dim=-1)
target_segment_ids = torch.ones_like(target_ids)
target_segment_ids = torch.cat((target_segment_ids, target_segment_ids), dim=-1)
target_position_ids = torch.arange(target_ids.size(1), dtype=torch.long, device=target_ids.device)
target_position_ids = target_position_ids.view(1, -1) + num_source_tokens.view(-1, 1)
target_position_ids = torch.cat((target_position_ids, target_position_ids), dim=-1)
target_position_ids = target_position_ids * torch.cat((target_mask, target_mask), dim=-1)
target_sequence_output, target_pooled_output = self.bert(
target_input_sequence, target_segment_ids, mask_matrix, output_all_encoded_layers=False,
key_history=key_history, value_history=value_history, position_ids=target_position_ids)
def loss_mask_and_normalize(loss, mask, fixed_mask_tokens=None):
mask = mask.type_as(loss)
loss = loss * mask
if fixed_mask_tokens:
denominator = fixed_mask_tokens
else:
denominator = torch.sum(mask) + 1e-5
return (loss / denominator).sum()
prediction_scores_masked, seq_relationship_score = self.cls(
target_sequence_output[:, target_ids.size(1):, :], target_pooled_output)
if eval_mode:
return F.softmax(prediction_scores_masked, dim=-1).gather(index=target_ids.unsqueeze(-1), dim=-1).squeeze(
-1), target_mask
if self.crit_mask_lm_smoothed:
masked_lm_loss = self.crit_mask_lm_smoothed(
F.log_softmax(prediction_scores_masked.float(), dim=-1), target_ids)
else:
masked_lm_loss = self.crit_mask_lm(
prediction_scores_masked.transpose(1, 2).float(), target_ids)
pseudo_lm_loss = loss_mask_and_normalize(
masked_lm_loss.float(), target_mask, fixed_mask_tokens=fixed_num_tokens)
return pseudo_lm_loss
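# A small sketch replicating the mask layout built by create_target_mask above:
# for a target of length L the returned attention mask has shape (1, 2*L, 2*L);
# the first L rows (golden tokens) attend causally to golden tokens only, while
# the last L rows (pseudo tokens) attend to strictly-previous golden tokens plus
# their own pseudo position.
def _example_pseudo_mask(L=3):
    pos = torch.arange(L)
    tri = (pos.view(1, L, 1) >= pos.view(1, 1, L)).long()
    diag = (pos.view(1, L, 1) == pos.view(1, 1, L)).long()
    golden = torch.cat((tri, torch.zeros_like(tri)), dim=-1)
    pseudo = torch.cat((tri - diag, diag), dim=-1)
    return torch.cat((golden, pseudo), dim=1)   # (1, 2*L, 2*L)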
class BertForExtractiveSummarization(PreTrainedBertModel):
"""refer to BertForPreTraining"""
def __init__(self, config):
super(BertForExtractiveSummarization, self).__init__(config)
self.bert = BertModel(config)
self.secondary_pred_proj = nn.Embedding(2, config.hidden_size)
self.cls2 = BertPreTrainingHeads(
config, self.secondary_pred_proj.weight, num_labels=2)
self.apply(self.init_bert_weights)
def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_pos_2=None, masked_weights_2=None,
task_idx=None, mask_qkv=None):
sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask,
output_all_encoded_layers=False, mask_qkv=mask_qkv,
task_idx=task_idx)
def gather_seq_out_by_pos(seq, pos):
return torch.gather(seq, 1, pos.unsqueeze(2).expand(-1, -1, seq.size(-1)))
sequence_output_masked_2 = gather_seq_out_by_pos(
sequence_output, masked_pos_2)
prediction_scores_masked_2, _ = self.cls2(
sequence_output_masked_2, None, task_idx=task_idx)
predicted_probs = torch.nn.functional.softmax(
prediction_scores_masked_2, dim=-1)
return predicted_probs, masked_pos_2, masked_weights_2
class BertForSeq2SeqDecoder(PreTrainedBertModel):
"""refer to BertForPreTraining"""
def __init__(self, config, mask_word_id=0, num_labels=2, num_rel=0,
search_beam_size=1, length_penalty=1.0, eos_id=0, sos_id=0,
forbid_duplicate_ngrams=False, forbid_ignore_set=None, ngram_size=3, min_len=0, mode="s2s",
pos_shift=False):
super(BertForSeq2SeqDecoder, self).__init__(config)
self.bert = BertModelIncr(config)
self.cls = BertPreTrainingHeads(
config, self.bert.embeddings.word_embeddings.weight, num_labels=num_labels)
self.apply(self.init_bert_weights)
self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
self.crit_next_sent = nn.CrossEntropyLoss(ignore_index=-1)
self.mask_word_id = mask_word_id
self.num_labels = num_labels
self.num_rel = num_rel
if self.num_rel > 0:
self.crit_pair_rel = BertPreTrainingPairRel(
config, num_rel=num_rel)
self.search_beam_size = search_beam_size
self.length_penalty = length_penalty
self.eos_id = eos_id
self.sos_id = sos_id
self.forbid_duplicate_ngrams = forbid_duplicate_ngrams
self.forbid_ignore_set = forbid_ignore_set
self.ngram_size = ngram_size
self.min_len = min_len
assert mode in ("s2s", "l2r")
self.mode = mode
self.pos_shift = pos_shift
def forward(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None):
if self.search_beam_size > 1:
return self.beam_search(input_ids, token_type_ids, position_ids, attention_mask, task_idx=task_idx, mask_qkv=mask_qkv)
input_shape = list(input_ids.size())
batch_size = input_shape[0]
input_length = input_shape[1]
output_shape = list(token_type_ids.size())
output_length = output_shape[1]
output_ids = []
prev_embedding = None
prev_encoded_layers = None
curr_ids = input_ids
mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
next_pos = input_length
if self.pos_shift:
sos_ids = input_ids.new(batch_size, 1).fill_(self.sos_id)
while next_pos < output_length:
curr_length = list(curr_ids.size())[1]
if self.pos_shift:
if next_pos == input_length:
x_input_ids = torch.cat((curr_ids, sos_ids), dim=1)
start_pos = 0
else:
x_input_ids = curr_ids
start_pos = next_pos
else:
start_pos = next_pos - curr_length
x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)
curr_token_type_ids = token_type_ids[:, start_pos:next_pos+1]
curr_attention_mask = attention_mask[:,
start_pos:next_pos+1, :next_pos+1]
curr_position_ids = position_ids[:, start_pos:next_pos+1]
new_embedding, new_encoded_layers, _ = \
self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
output_all_encoded_layers=True, prev_embedding=prev_embedding, prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv)
last_hidden = new_encoded_layers[-1][:, -1:, :]
prediction_scores, _ = self.cls(
last_hidden, None, task_idx=task_idx)
_, max_ids = torch.max(prediction_scores, dim=-1)
output_ids.append(max_ids)
if self.pos_shift:
if prev_embedding is None:
prev_embedding = new_embedding
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding), dim=1)
if prev_encoded_layers is None:
prev_encoded_layers = [x for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
prev_encoded_layers, new_encoded_layers)]
else:
if prev_embedding is None:
prev_embedding = new_embedding[:, :-1, :]
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding[:, :-1, :]), dim=1)
if prev_encoded_layers is None:
prev_encoded_layers = [x[:, :-1, :]
for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
for x in zip(prev_encoded_layers, new_encoded_layers)]
curr_ids = max_ids
next_pos += 1
return torch.cat(output_ids, dim=1)
def beam_search(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None):
input_shape = list(input_ids.size())
batch_size = input_shape[0]
input_length = input_shape[1]
output_shape = list(token_type_ids.size())
output_length = output_shape[1]
output_ids = []
prev_embedding = None
prev_encoded_layers = None
curr_ids = input_ids
mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id)
next_pos = input_length
if self.pos_shift:
sos_ids = input_ids.new(batch_size, 1).fill_(self.sos_id)
K = self.search_beam_size
total_scores = []
beam_masks = []
step_ids = []
step_back_ptrs = []
partial_seqs = []
forbid_word_mask = None
buf_matrix = None
while next_pos < output_length:
curr_length = list(curr_ids.size())[1]
if self.pos_shift:
if next_pos == input_length:
x_input_ids = torch.cat((curr_ids, sos_ids), dim=1)
start_pos = 0
else:
x_input_ids = curr_ids
start_pos = next_pos
else:
start_pos = next_pos - curr_length
x_input_ids = torch.cat((curr_ids, mask_ids), dim=1)
curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1]
curr_attention_mask = attention_mask[:,
start_pos:next_pos + 1, :next_pos + 1]
curr_position_ids = position_ids[:, start_pos:next_pos + 1]
new_embedding, new_encoded_layers, _ = \
self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask,
output_all_encoded_layers=True, prev_embedding=prev_embedding,
prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv)
last_hidden = new_encoded_layers[-1][:, -1:, :]
prediction_scores, _ = self.cls(
last_hidden, None, task_idx=task_idx)
log_scores = torch.nn.functional.log_softmax(
prediction_scores, dim=-1)
if forbid_word_mask is not None:
log_scores += (forbid_word_mask * -10000.0)
if self.min_len and (next_pos - input_length + 1 <= self.min_len):
log_scores[:, :, self.eos_id].fill_(-10000.0)
kk_scores, kk_ids = torch.topk(log_scores, k=K)
if len(total_scores) == 0:
k_ids = torch.reshape(kk_ids, [batch_size, K])
back_ptrs = torch.zeros(batch_size, K, dtype=torch.long)
k_scores = torch.reshape(kk_scores, [batch_size, K])
else:
last_eos = torch.reshape(
beam_masks[-1], [batch_size * K, 1, 1])
last_seq_scores = torch.reshape(
total_scores[-1], [batch_size * K, 1, 1])
kk_scores += last_eos * (-10000.0) + last_seq_scores
kk_scores = torch.reshape(kk_scores, [batch_size, K * K])
k_scores, k_ids = torch.topk(kk_scores, k=K)
back_ptrs = torch.floor_divide(k_ids, K)
kk_ids = torch.reshape(kk_ids, [batch_size, K * K])
k_ids = torch.gather(kk_ids, 1, k_ids)
step_back_ptrs.append(back_ptrs)
step_ids.append(k_ids)
beam_masks.append(torch.eq(k_ids, self.eos_id).type_as(kk_scores))
total_scores.append(k_scores)
def first_expand(x):
input_shape = list(x.size())
expanded_shape = input_shape[:1] + [1] + input_shape[1:]
x = torch.reshape(x, expanded_shape)
repeat_count = [1, K] + [1] * (len(input_shape) - 1)
x = x.repeat(*repeat_count)
x = torch.reshape(x, [input_shape[0] * K] + input_shape[1:])
return x
def select_beam_items(x, ids):
id_shape = list(ids.size())
id_rank = len(id_shape)
assert len(id_shape) == 2
x_shape = list(x.size())
x = torch.reshape(x, [batch_size, K] + x_shape[1:])
x_rank = len(x_shape) + 1
assert x_rank >= 2
if id_rank < x_rank:
ids = torch.reshape(
ids, id_shape + [1] * (x_rank - id_rank))
ids = ids.expand(id_shape + x_shape[1:])
y = torch.gather(x, 1, ids)
y = torch.reshape(y, x_shape)
return y
is_first = (prev_embedding is None)
if self.pos_shift:
if prev_embedding is None:
prev_embedding = first_expand(new_embedding)
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding), dim=1)
prev_embedding = select_beam_items(
prev_embedding, back_ptrs)
if prev_encoded_layers is None:
prev_encoded_layers = [first_expand(
x) for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip(
prev_encoded_layers, new_encoded_layers)]
prev_encoded_layers = [select_beam_items(
x, back_ptrs) for x in prev_encoded_layers]
else:
if prev_embedding is None:
prev_embedding = first_expand(new_embedding[:, :-1, :])
else:
prev_embedding = torch.cat(
(prev_embedding, new_embedding[:, :-1, :]), dim=1)
prev_embedding = select_beam_items(
prev_embedding, back_ptrs)
if prev_encoded_layers is None:
prev_encoded_layers = [first_expand(
x[:, :-1, :]) for x in new_encoded_layers]
else:
prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1)
for x in zip(prev_encoded_layers, new_encoded_layers)]
prev_encoded_layers = [select_beam_items(
x, back_ptrs) for x in prev_encoded_layers]
curr_ids = torch.reshape(k_ids, [batch_size * K, 1])
if is_first:
token_type_ids = first_expand(token_type_ids)
position_ids = first_expand(position_ids)
attention_mask = first_expand(attention_mask)
mask_ids = first_expand(mask_ids)
if mask_qkv is not None:
mask_qkv = first_expand(mask_qkv)
if self.forbid_duplicate_ngrams:
wids = step_ids[-1].tolist()
ptrs = step_back_ptrs[-1].tolist()
if is_first:
partial_seqs = []
for b in range(batch_size):
for k in range(K):
partial_seqs.append([wids[b][k]])
else:
new_partial_seqs = []
for b in range(batch_size):
for k in range(K):
new_partial_seqs.append(
partial_seqs[ptrs[b][k] + b * K] + [wids[b][k]])
partial_seqs = new_partial_seqs
def get_dup_ngram_candidates(seq, n):
cands = set()
if len(seq) < n:
return []
tail = seq[-(n - 1):]
if self.forbid_ignore_set and any(tk in self.forbid_ignore_set for tk in tail):
return []
for i in range(len(seq) - (n - 1)):
mismatch = False
for j in range(n - 1):
if tail[j] != seq[i + j]:
mismatch = True
break
if (not mismatch) and not (
self.forbid_ignore_set and (seq[i + n - 1] in self.forbid_ignore_set)):
cands.add(seq[i + n - 1])
return list(sorted(cands))
if len(partial_seqs[0]) >= self.ngram_size:
dup_cands = []
for seq in partial_seqs:
dup_cands.append(
get_dup_ngram_candidates(seq, self.ngram_size))
if max(len(x) for x in dup_cands) > 0:
if buf_matrix is None:
vocab_size = list(log_scores.size())[-1]
buf_matrix = np.zeros(
(batch_size * K, vocab_size), dtype=float)
else:
buf_matrix.fill(0)
for bk, cands in enumerate(dup_cands):
for i, wid in enumerate(cands):
buf_matrix[bk, wid] = 1.0
forbid_word_mask = torch.tensor(
buf_matrix, dtype=log_scores.dtype)
forbid_word_mask = torch.reshape(
forbid_word_mask, [batch_size * K, 1, vocab_size]).to(input_ids.device)
else:
forbid_word_mask = None
next_pos += 1
# [(batch, beam)]
total_scores = [x.tolist() for x in total_scores]
step_ids = [x.tolist() for x in step_ids]
step_back_ptrs = [x.tolist() for x in step_back_ptrs]
# back tracking
traces = {'pred_seq': [], 'scores': [], 'wids': [], 'ptrs': []}
for b in range(batch_size):
# [(beam,)]
scores = [x[b] for x in total_scores]
wids_list = [x[b] for x in step_ids]
ptrs = [x[b] for x in step_back_ptrs]
traces['scores'].append(scores)
traces['wids'].append(wids_list)
traces['ptrs'].append(ptrs)
# first we need to find the eos frame where all symbols are eos
# any frames after the eos frame are invalid
last_frame_id = len(scores) - 1
for i, wids in enumerate(wids_list):
if all(wid == self.eos_id for wid in wids):
last_frame_id = i
break
max_score = -math.inf
frame_id = -1
pos_in_frame = -1
for fid in range(last_frame_id + 1):
for i, wid in enumerate(wids_list[fid]):
if wid == self.eos_id or fid == last_frame_id:
s = scores[fid][i]
if self.length_penalty > 0:
s /= math.pow((5 + fid + 1) / 6.0,
self.length_penalty)
if s > max_score:
max_score = s
frame_id = fid
pos_in_frame = i
if frame_id == -1:
traces['pred_seq'].append([0])
else:
seq = [wids_list[frame_id][pos_in_frame]]
for fid in range(frame_id, 0, -1):
pos_in_frame = ptrs[fid][pos_in_frame]
seq.append(wids_list[fid - 1][pos_in_frame])
seq.reverse()
traces['pred_seq'].append(seq)
def _pad_sequence(sequences, max_len, padding_value=0):
trailing_dims = sequences[0].size()[1:]
out_dims = (len(sequences), max_len) + trailing_dims
out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value)
for i, tensor in enumerate(sequences):
length = tensor.size(0)
# use index notation to prevent duplicate references to the tensor
out_tensor[i, :length, ...] = tensor
return out_tensor
# convert to tensors for DataParallel
for k in ('pred_seq', 'scores', 'wids', 'ptrs'):
ts_list = traces[k]
if not isinstance(ts_list[0], torch.Tensor):
dt = torch.float if k == 'scores' else torch.long
ts_list = [torch.tensor(it, dtype=dt) for it in ts_list]
traces[k] = _pad_sequence(
ts_list, output_length, padding_value=0).to(input_ids.device)
return traces
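# A hedged decoding sketch for the class above: `batch` mirrors the tuple produced
# by Preprocess4Seq2seqDecoder in s2s_loader.py, and `decoder` is assumed to be a
# BertForSeq2SeqDecoder built with search_beam_size=1 so the greedy path is taken.
def _example_greedy_decode(decoder, batch):
    input_ids, token_type_ids, position_ids, input_mask, mask_qkv, task_idx = batch
    with torch.no_grad():
        output_ids = decoder(input_ids, token_type_ids, position_ids, input_mask,
                             task_idx=task_idx, mask_qkv=mask_qkv)
    return output_ids   # (batch, generated_length) token ids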
| data2vec_vision-main | s2s-ft/s2s_ft/modeling_decoding.py |
import numpy as np
from random import randint, shuffle, choice
from random import random as rand
import math
import logging
import torch
import torch.utils.data
logger = logging.getLogger(__name__)
def get_random_word(vocab_words):
i = randint(0, len(vocab_words)-1)
return vocab_words[i]
def batch_list_to_batch_tensors(batch):
batch_tensors = []
for x in zip(*batch):
if x[0] is None:
batch_tensors.append(None)
elif isinstance(x[0], torch.Tensor):
batch_tensors.append(torch.stack(x))
else:
batch_tensors.append(torch.tensor(x, dtype=torch.long))
return batch_tensors
def _get_word_split_index(tokens, st, end):
split_idx = []
i = st
while i < end:
if (not tokens[i].startswith('##')) or (i == st):
split_idx.append(i)
i += 1
split_idx.append(end)
return split_idx
def _expand_whole_word(tokens, st, end):
new_st, new_end = st, end
while (new_st >= 0) and tokens[new_st].startswith('##'):
new_st -= 1
while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
new_end += 1
return new_st, new_end
class Pipeline():
""" Pre-process Pipeline Class : callable """
def __init__(self):
super().__init__()
self.skipgram_prb = None
self.skipgram_size = None
self.pre_whole_word = None
self.mask_whole_word = None
self.word_subsample_prb = None
self.sp_prob = None
self.pieces_dir = None
self.vocab_words = None
self.pieces_threshold = 10
self.call_count = 0
self.offline_mode = False
self.skipgram_size_geo_list = None
self.span_same_mask = False
def __call__(self, instance):
raise NotImplementedError
class Preprocess4Seq2seqDecoder(Pipeline):
""" Pre-processing steps for pretraining transformer """
def __init__(self, vocab_words, indexer, max_len=512, max_tgt_length=128,
mode="s2s", pos_shift=False, source_type_id=0, target_type_id=1,
cls_token='[CLS]', sep_token='[SEP]', pad_token='[PAD]'):
super().__init__()
        self.max_len = max_len
        self.vocab_words = vocab_words  # vocabulary (sub)words
        self.indexer = indexer  # function from token to token index
self._tril_matrix = torch.tril(torch.ones((max_len, max_len), dtype=torch.long))
self.task_idx = 3 # relax projection layer for different tasks
assert mode in ("s2s", "l2r")
self.mode = mode
self.max_tgt_length = max_tgt_length
self.pos_shift = pos_shift
self.cls_token = cls_token
self.sep_token = sep_token
self.pad_token = pad_token
self.source_type_id = source_type_id
self.target_type_id = target_type_id
self.cc = 0
def __call__(self, instance):
tokens_a, max_a_len = instance
padded_tokens_a = [self.cls_token] + tokens_a + [self.sep_token]
assert len(padded_tokens_a) <= max_a_len + 2
if max_a_len + 2 > len(padded_tokens_a):
padded_tokens_a += [self.pad_token] * \
(max_a_len + 2 - len(padded_tokens_a))
assert len(padded_tokens_a) == max_a_len + 2
max_len_in_batch = min(self.max_tgt_length +
max_a_len + 2, self.max_len)
tokens = padded_tokens_a
segment_ids = [self.source_type_id] * (len(padded_tokens_a)) \
+ [self.target_type_id] * (max_len_in_batch - len(padded_tokens_a))
mask_qkv = None
position_ids = []
for i in range(len(tokens_a) + 2):
position_ids.append(i)
for i in range(len(tokens_a) + 2, max_a_len + 2):
position_ids.append(0)
for i in range(max_a_len + 2, max_len_in_batch):
position_ids.append(i - (max_a_len + 2) + len(tokens_a) + 2)
# Token Indexing
input_ids = self.indexer(tokens)
self.cc += 1
if self.cc < 20:
logger.info("Input src = %s" % " ".join(self.vocab_words[tk_id] for tk_id in input_ids))
# Zero Padding
input_mask = torch.zeros(
max_len_in_batch, max_len_in_batch, dtype=torch.long)
if self.mode == "s2s":
input_mask[:, :len(tokens_a)+2].fill_(1)
else:
st, end = 0, len(tokens_a) + 2
input_mask[st:end, st:end].copy_(
self._tril_matrix[:end, :end])
input_mask[end:, :len(tokens_a)+2].fill_(1)
second_st, second_end = len(padded_tokens_a), max_len_in_batch
input_mask[second_st:second_end, second_st:second_end].copy_(
self._tril_matrix[:second_end-second_st, :second_end-second_st])
return (input_ids, segment_ids, position_ids, input_mask, mask_qkv, self.task_idx)
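# A minimal usage sketch for the decoder pre-processor above, assuming a BERT-style
# tokenizer; the tokenizer, source text and length limits are placeholders.
def _example_decode_preprocessing(tokenizer, source_text, max_src_len=64):
    proc = Preprocess4Seq2seqDecoder(
        list(tokenizer.vocab.keys()), tokenizer.convert_tokens_to_ids,
        max_len=192, max_tgt_length=64, mode="s2s")
    tokens_a = tokenizer.tokenize(source_text)[:max_src_len]
    instance = proc((tokens_a, max_src_len))
    # instance = (input_ids, segment_ids, position_ids, input_mask, mask_qkv, task_idx)
    return batch_list_to_batch_tensors([instance])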
| data2vec_vision-main | s2s-ft/s2s_ft/s2s_loader.py |
import torch
import logging
from transformers.modeling_utils import cached_path, WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME
logger = logging.getLogger(__name__)
def get_checkpoint_from_transformer_cache(
archive_file, pretrained_model_name_or_path, pretrained_model_archive_map,
cache_dir, force_download, proxies, resume_download,
):
try:
resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir, force_download=force_download,
proxies=proxies, resume_download=resume_download)
except EnvironmentError:
if pretrained_model_name_or_path in pretrained_model_archive_map:
msg = "Couldn't reach server at '{}' to download pretrained weights.".format(
archive_file)
else:
msg = "Model name '{}' was not found in model name list ({}). " \
"We assumed '{}' was a path or url to model weight files named one of {} but " \
"couldn't find any such file at this path or url.".format(
pretrained_model_name_or_path,
', '.join(pretrained_model_archive_map.keys()),
archive_file,
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME])
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info("loading weights file {}".format(archive_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
return torch.load(resolved_archive_file, map_location='cpu')
def hf_roberta_to_hf_bert(state_dict):
logger.info(" * Convert Huggingface RoBERTa format to Huggingface BERT format * ")
new_state_dict = {}
for key in state_dict:
value = state_dict[key]
if key == 'roberta.embeddings.position_embeddings.weight':
value = value[2:]
if key == 'roberta.embeddings.token_type_embeddings.weight':
continue
if key.startswith('roberta'):
key = 'bert.' + key[8:]
elif key.startswith('lm_head'):
if 'layer_norm' in key or 'dense' in key:
key = 'cls.predictions.transform.' + key[8:]
else:
key = 'cls.predictions.' + key[8:]
key = key.replace('layer_norm', 'LayerNorm')
new_state_dict[key] = value
return new_state_dict
def hf_distilbert_to_hf_bert(state_dict):
logger.info(" * Convert Huggingface DistilBERT format to Huggingface BERT format * ")
new_state_dict = {}
for key in state_dict:
value = state_dict[key]
if key == 'roberta.embeddings.position_embeddings.weight':
value = value[2:]
if key == 'roberta.embeddings.token_type_embeddings.weight':
continue
if key.startswith('roberta'):
key = 'bert.' + key[8:]
elif key.startswith('lm_head'):
if 'layer_norm' in key or 'dense' in key:
key = 'cls.predictions.transform.' + key[8:]
else:
key = 'cls.predictions.' + key[8:]
key = key.replace('layer_norm', 'LayerNorm')
new_state_dict[key] = value
return new_state_dict
def hf_bert_to_hf_bert(state_dict):
# keep no change
return state_dict
state_dict_convert = {
'bert': hf_bert_to_hf_bert,
'unilm': hf_bert_to_hf_bert,
'minilm': hf_bert_to_hf_bert,
'roberta': hf_roberta_to_hf_bert,
'xlm-roberta': hf_roberta_to_hf_bert,
'distilbert': hf_distilbert_to_hf_bert,
}
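# A short self-contained sketch of the RoBERTa-to-BERT key mapping above: the two
# position-embedding padding rows are dropped and the 'roberta.' / 'lm_head.'
# prefixes are rewritten; the miniature state dict is hypothetical.
def _example_convert_roberta_keys():
    sd = {
        'roberta.embeddings.position_embeddings.weight': torch.randn(514, 8),
        'roberta.encoder.layer.0.attention.self.query.weight': torch.randn(8, 8),
        'lm_head.dense.weight': torch.randn(8, 8),
    }
    new_sd = state_dict_convert['roberta'](sd)
    assert 'bert.encoder.layer.0.attention.self.query.weight' in new_sd
    assert 'cls.predictions.transform.dense.weight' in new_sd
    assert new_sd['bert.embeddings.position_embeddings.weight'].size(0) == 512
    return sorted(new_sd.keys())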
| data2vec_vision-main | s2s-ft/s2s_ft/convert_state_dict.py |
# coding=utf-8
# The MIT License (MIT)
# Copyright (c) Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Tokenization classes for UniLM."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from transformers.tokenization_bert import BertTokenizer, whitespace_tokenize
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file':
{
'unilm-large-cased': "https://unilm.blob.core.windows.net/ckpt/unilm-large-cased-vocab.txt",
'unilm-base-cased': "https://unilm.blob.core.windows.net/ckpt/unilm-base-cased-vocab.txt",
'unilm1-large-cased': "https://unilm.blob.core.windows.net/ckpt/unilm1-large-cased-vocab.txt",
'unilm1-base-cased': "https://unilm.blob.core.windows.net/ckpt/unilm1-base-cased-vocab.txt",
'unilm1.2-base-uncased': "https://unilm.blob.core.windows.net/ckpt/unilm1.2-base-uncased-vocab.txt"
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'unilm-large-cased': 512,
'unilm-base-cased': 512,
'unilm1-large-cased': 512,
'unilm1-base-cased': 512,
'unilm1.2-base-uncased': 512,
}
class UnilmTokenizer(BertTokenizer):
r"""
Constructs a UnilmTokenizer.
:class:`~transformers.UnilmTokenizer` is identical to BertTokenizer and runs end-to-end tokenization: punctuation splitting + wordpiece
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
        max_len: An artificial maximum length to truncate tokenized sequences to; the effective maximum length is always the
minimum of this value (if specified) and the underlying BERT model's sequence length.
never_split: List of tokens which will never be split during tokenization. Only has an effect when
do_wordpiece_only=False
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class WhitespaceTokenizer(object):
def tokenize(self, text):
return whitespace_tokenize(text)
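# A hedged usage sketch: UnilmTokenizer inherits all behaviour from BertTokenizer,
# so one of the vocab names registered above can be loaded with the standard
# from_pretrained pattern (assumes the vocab file can be downloaded or is cached).
def _example_unilm_tokenizer():
    tokenizer = UnilmTokenizer.from_pretrained('unilm1.2-base-uncased')
    return tokenizer.tokenize("unilm tokenization example")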
| data2vec_vision-main | s2s-ft/s2s_ft/tokenization_unilm.py |
# coding=utf-8
# The MIT License (MIT)
# Copyright (c) Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
""" UniLM model configuration """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import sys
from io import open
from transformers.configuration_utils import PretrainedConfig
logger = logging.getLogger(__name__)
UNILM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'unilm-large-cased': "https://unilm.blob.core.windows.net/ckpt/unilm-large-cased-config.json",
'unilm-base-cased': "https://unilm.blob.core.windows.net/ckpt/unilm-base-cased-config.json",
'unilm1-large-cased': "https://unilm.blob.core.windows.net/ckpt/unilm1-large-cased-config.json",
'unilm1-base-cased': "https://unilm.blob.core.windows.net/ckpt/unilm1-base-cased-config.json",
'unilm1.2-base-uncased': "https://unilm.blob.core.windows.net/ckpt/unilm1.2-base-uncased-config.json",
}
class UnilmConfig(PretrainedConfig):
r"""
:class:`~transformers.UnilmConfig` is the configuration class to store the configuration of a
`UnilmModel`.
Arguments:
vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `UnilmModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler. If string, "gelu", "relu", "swish" and "gelu_new" are supported.
        hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`UnilmModel`.
        initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
layer_norm_eps: The epsilon used by LayerNorm.
"""
pretrained_config_archive_map = UNILM_PRETRAINED_CONFIG_ARCHIVE_MAP
def __init__(self,
vocab_size=28996,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=6,
initializer_range=0.02,
layer_norm_eps=1e-12,
source_type_id=0,
target_type_id=1,
**kwargs):
super(UnilmConfig, self).__init__(**kwargs)
if isinstance(vocab_size, str) or (sys.version_info[0] == 2
and isinstance(vocab_size, unicode)):
with open(vocab_size, "r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size, int):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.source_type_id = source_type_id
self.target_type_id = target_type_id
else:
raise ValueError("First argument must be either a vocabulary size (int)"
" or the path to a pretrained model config file (str)")
| data2vec_vision-main | s2s-ft/s2s_ft/configuration_unilm.py |
from __future__ import absolute_import, division, print_function
import logging
import os
import json
import random
import glob
import torch
import tqdm
import torch.utils.data
logger = logging.getLogger(__name__)
class Seq2seqDatasetForBert(torch.utils.data.Dataset):
def __init__(
self, features, max_source_len, max_target_len,
vocab_size, cls_id, sep_id, pad_id, mask_id,
random_prob, keep_prob, offset, num_training_instances,
span_len=1, span_prob=1.0):
self.features = features
self.max_source_len = max_source_len
self.max_target_len = max_target_len
self.offset = offset
if offset > 0:
logger.info(" **** Set offset %d in Seq2seqDatasetForBert **** ", offset)
self.cls_id = cls_id
self.sep_id = sep_id
self.pad_id = pad_id
self.random_prob = random_prob
self.keep_prob = keep_prob
self.mask_id = mask_id
self.vocab_size = vocab_size
self.num_training_instances = num_training_instances
self.span_len = span_len
self.span_prob = span_prob
def __len__(self):
return int(self.num_training_instances)
def __trunk(self, ids, max_len):
if len(ids) > max_len - 1:
ids = ids[:max_len - 1]
ids = ids + [self.sep_id]
return ids
def __pad(self, ids, max_len):
if len(ids) < max_len:
return ids + [self.pad_id] * (max_len - len(ids))
else:
assert len(ids) == max_len
return ids
def __getitem__(self, idx):
idx = (self.offset + idx) % len(self.features)
feature = self.features[idx]
source_ids = self.__trunk([self.cls_id] + feature["source_ids"], self.max_source_len)
target_ids = self.__trunk(feature["target_ids"], self.max_target_len)
pseudo_ids = []
for tk_id in target_ids:
p = random.random()
if p < self.keep_prob:
pseudo_ids.append(tk_id)
elif p < self.keep_prob + self.random_prob:
pseudo_ids.append(random.randint(0, self.vocab_size - 1))
else:
pseudo_ids.append(self.mask_id)
num_source_tokens = len(source_ids)
num_target_tokens = len(target_ids)
source_ids = self.__pad(source_ids, self.max_source_len)
target_ids = self.__pad(target_ids, self.max_target_len)
pseudo_ids = self.__pad(pseudo_ids, self.max_target_len)
if self.span_len > 1:
span_ids = []
span_id = 1
while len(span_ids) < num_target_tokens:
p = random.random()
if p < self.span_prob:
span_len = random.randint(2, self.span_len)
span_len = min(span_len, num_target_tokens - len(span_ids))
else:
span_len = 1
span_ids.extend([span_id] * span_len)
span_id += 1
span_ids = self.__pad(span_ids, self.max_target_len)
return source_ids, target_ids, pseudo_ids, num_source_tokens, num_target_tokens, span_ids
else:
return source_ids, target_ids, pseudo_ids, num_source_tokens, num_target_tokens
def batch_list_to_batch_tensors(batch):
batch_tensors = []
for x in zip(*batch):
if isinstance(x[0], torch.Tensor):
batch_tensors.append(torch.stack(x))
else:
batch_tensors.append(torch.tensor(x, dtype=torch.long))
return batch_tensors
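# Illustrative sketch of how the dataset and collate function above fit
# together. The token ids below are arbitrary placeholders rather than real
# vocabulary entries; only the batching mechanics are being shown.
def _sketch_seq2seq_batching():
    features = [
        {"source_ids": [5, 6, 7], "target_ids": [8, 9]},
        {"source_ids": [5, 7], "target_ids": [9, 10, 11]},
    ]
    dataset = Seq2seqDatasetForBert(
        features, max_source_len=8, max_target_len=6, vocab_size=100,
        cls_id=1, sep_id=2, pad_id=0, mask_id=3,
        random_prob=0.1, keep_prob=0.1, offset=0, num_training_instances=4)
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=2, collate_fn=batch_list_to_batch_tensors)
    # Each batch is a list of LongTensors:
    # [source_ids, target_ids, pseudo_ids, num_source_tokens, num_target_tokens]
    return next(iter(loader))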
def get_max_epoch_model(output_dir):
fn_model_list = glob.glob(os.path.join(output_dir, "model.*.bin"))
fn_optim_list = glob.glob(os.path.join(output_dir, "optim.*.bin"))
if (not fn_model_list) or (not fn_optim_list):
return None
both_set = set([int(os.path.basename(fn).split('.')[1]) for fn in fn_model_list]
) & set([int(os.path.basename(fn).split('.')[1]) for fn in fn_optim_list])
if both_set:
return max(both_set)
else:
return None
def load_and_cache_examples(
example_file, tokenizer, local_rank, cached_features_file, shuffle=True):
    # Make sure only the first process in distributed training processes the dataset; the others will use the cache
if local_rank not in [-1, 0]:
torch.distributed.barrier()
if cached_features_file is not None and os.path.exists(cached_features_file):
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", example_file)
examples = []
with open(example_file, mode="r", encoding="utf-8") as reader:
for line in reader:
examples.append(json.loads(line))
features = []
for example in tqdm.tqdm(examples):
if isinstance(example["src"], list):
source_tokens = example["src"]
target_tokens = example["tgt"]
else:
source_tokens = tokenizer.tokenize(example["src"])
target_tokens = tokenizer.tokenize(example["tgt"])
features.append({
"source_ids": tokenizer.convert_tokens_to_ids(source_tokens),
"target_ids": tokenizer.convert_tokens_to_ids(target_tokens),
})
if shuffle:
random.shuffle(features)
if local_rank in [-1, 0] and cached_features_file is not None:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
    # Make sure only the first process in distributed training processes the dataset; the others will use the cache
if local_rank == 0:
torch.distributed.barrier()
return features
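# Illustrative sketch of the expected input format: one JSON object per line
# with "src"/"tgt" fields. The file name and the whitespace "tokenizer" below
# are stand-ins used only to keep the sketch self-contained; the real code path
# passes a BertTokenizer-style tokenizer.
def _sketch_load_examples(example_path="toy_examples.json"):
    class _ToyTokenizer:
        def tokenize(self, text):
            return text.split()
        def convert_tokens_to_ids(self, tokens):
            return [hash(token) % 1000 for token in tokens]
    with open(example_path, mode="w", encoding="utf-8") as writer:
        writer.write(json.dumps({"src": "a source sentence", "tgt": "a target"}) + "\n")
    return load_and_cache_examples(
        example_path, _ToyTokenizer(), local_rank=-1,
        cached_features_file=None, shuffle=False)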
| data2vec_vision-main | s2s-ft/s2s_ft/utils.py |
# coding=utf-8
# The MIT License (MIT)
# Copyright (c) Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Tokenization classes for MiniLM."""
from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from transformers.tokenization_bert import BertTokenizer, whitespace_tokenize
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file':
{
'minilm-l12-h384-uncased': "https://unilm.blob.core.windows.net/ckpt/minilm-l12-h384-uncased-vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'minilm-l12-h384-uncased': 512,
}
class MinilmTokenizer(BertTokenizer):
r"""
Constructs a MinilmTokenizer.
:class:`~transformers.MinilmTokenizer` is identical to BertTokenizer and runs end-to-end tokenization: punctuation splitting + wordpiece
Args:
vocab_file: Path to a one-wordpiece-per-line vocabulary file
do_lower_case: Whether to lower case the input. Only has an effect when do_wordpiece_only=False
do_basic_tokenize: Whether to do basic tokenization before wordpiece.
max_len: An artificial maximum length to truncate tokenized sequences to; Effective maximum length is always the
minimum of this value (if specified) and the underlying BERT model's sequence length.
never_split: List of tokens which will never be split during tokenization. Only has an effect when
do_wordpiece_only=False
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
class WhitespaceTokenizer(object):
def tokenize(self, text):
return whitespace_tokenize(text)
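# Illustrative sketch: MinilmTokenizer.from_pretrained('minilm-l12-h384-uncased')
# would download the vocabulary listed above, so this offline example only
# exercises the plain WhitespaceTokenizer.
def _sketch_whitespace_tokenize():
    tokens = WhitespaceTokenizer().tokenize("MiniLM reuses the BERT wordpiece pipeline")
    assert tokens[0] == "MiniLM"
    return tokens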
| data2vec_vision-main | s2s-ft/s2s_ft/tokenization_minilm.py |
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import os
import torch
from torch import nn
from torch.nn.modules.loss import _Loss
import torch.nn.functional as F
from transformers.modeling_bert import \
BertPreTrainedModel, BertSelfOutput, BertIntermediate, BertOutput, BertPredictionHeadTransform
from transformers.modeling_roberta import ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_distilbert import DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP
from transformers.modeling_xlm_roberta import XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
from s2s_ft.config import BertForSeq2SeqConfig
from s2s_ft.convert_state_dict import get_checkpoint_from_transformer_cache, state_dict_convert
logger = logging.getLogger(__name__)
BertLayerNorm = torch.nn.LayerNorm
UNILM_PRETRAINED_MODEL_ARCHIVE_MAP = {
'unilm-base-cased': "https://unilm.blob.core.windows.net/ckpt/unilm1-base-cased.bin",
'unilm-large-cased': "https://unilm.blob.core.windows.net/ckpt/unilm1-large-cased.bin",
'unilm1-base-cased': "https://unilm.blob.core.windows.net/ckpt/unilm1-base-cased.bin",
'unilm1-large-cased': "https://unilm.blob.core.windows.net/ckpt/unilm1-large-cased.bin",
'unilm1.2-base-uncased': "https://unilm.blob.core.windows.net/ckpt/unilm1.2-base-uncased.bin"
}
MINILM_PRETRAINED_MODEL_ARCHIVE_MAP = {
'minilm-l12-h384-uncased': "https://unilm.blob.core.windows.net/ckpt/minilm-l12-h384-uncased.bin",
}
class BertPreTrainedForSeq2SeqModel(BertPreTrainedModel):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
config_class = BertForSeq2SeqConfig
supported_convert_pretrained_model_archive_map = {
"bert": BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
"roberta": ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
"xlm-roberta": XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
"unilm": UNILM_PRETRAINED_MODEL_ARCHIVE_MAP,
"minilm": MINILM_PRETRAINED_MODEL_ARCHIVE_MAP,
}
base_model_prefix = "bert_for_seq2seq"
pretrained_model_archive_map = {
**ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
**XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
**BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
**UNILM_PRETRAINED_MODEL_ARCHIVE_MAP,
**MINILM_PRETRAINED_MODEL_ARCHIVE_MAP,
}
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, BertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, reuse_position_embedding=None, *model_args, **kwargs):
model_type = kwargs.pop('model_type', None)
if model_type is not None and "state_dict" not in kwargs:
if model_type in cls.supported_convert_pretrained_model_archive_map:
pretrained_model_archive_map = cls.supported_convert_pretrained_model_archive_map[model_type]
if pretrained_model_name_or_path in pretrained_model_archive_map:
state_dict = get_checkpoint_from_transformer_cache(
archive_file=pretrained_model_archive_map[pretrained_model_name_or_path],
pretrained_model_name_or_path=pretrained_model_name_or_path,
pretrained_model_archive_map=pretrained_model_archive_map,
cache_dir=kwargs.get("cache_dir", None), force_download=kwargs.get("force_download", None),
proxies=kwargs.get("proxies", None), resume_download=kwargs.get("resume_download", None),
)
state_dict = state_dict_convert[model_type](state_dict)
kwargs["state_dict"] = state_dict
elif os.path.isfile(pretrained_model_name_or_path):
kwargs["state_dict"] = torch.load(pretrained_model_name_or_path, map_location='cpu')
if kwargs["state_dict"] is None:
logger.info("s2s-ft does't support the model !")
raise NotImplementedError()
config = kwargs["config"]
state_dict = kwargs["state_dict"]
# initialize new position embeddings (From Microsoft/UniLM)
_k = 'bert.embeddings.position_embeddings.weight'
# if _k in state_dict and config.max_position_embeddings != state_dict[_k].shape[0]:
# logger.info("config.max_position_embeddings != state_dict[bert.embeddings.position_embeddings.weight] ({0} - {1})".format(
# config.max_position_embeddings, state_dict[_k].shape[0]))
# if config.max_position_embeddings > state_dict[_k].shape[0]:
# old_size = state_dict[_k].shape[0]
# # state_dict[_k].data = state_dict[_k].data.resize_(config.max_position_embeddings, state_dict[_k].shape[1])
# state_dict[_k].resize_(
# config.max_position_embeddings, state_dict[_k].shape[1])
# start = old_size
# while start < config.max_position_embeddings:
# chunk_size = min(
# old_size, config.max_position_embeddings - start)
# state_dict[_k].data[start:start+chunk_size,
# :].copy_(state_dict[_k].data[:chunk_size, :])
# start += chunk_size
# elif config.max_position_embeddings < state_dict[_k].shape[0]:
# state_dict[_k].data = state_dict[_k].data[:config.max_position_embeddings, :]
if _k in state_dict:
if config.max_position_embeddings > state_dict[_k].shape[0]:
logger.info("Resize > position embeddings !")
old_vocab_size = state_dict[_k].shape[0]
new_postion_embedding = state_dict[_k].data.new_tensor(torch.ones(
size=(config.max_position_embeddings, state_dict[_k].shape[1])), dtype=torch.float)
new_postion_embedding = nn.Parameter(data=new_postion_embedding, requires_grad=True)
new_postion_embedding.data.normal_(mean=0.0, std=config.initializer_range)
max_range = config.max_position_embeddings if reuse_position_embedding else old_vocab_size
shift = 0
while shift < max_range:
delta = min(old_vocab_size, max_range - shift)
new_postion_embedding.data[shift: shift + delta, :] = state_dict[_k][:delta, :]
logger.info(" CP [%d ~ %d] into [%d ~ %d] " % (0, delta, shift, shift + delta))
shift += delta
state_dict[_k] = new_postion_embedding.data
del new_postion_embedding
elif config.max_position_embeddings < state_dict[_k].shape[0]:
logger.info("Resize < position embeddings !")
old_vocab_size = state_dict[_k].shape[0]
new_postion_embedding = state_dict[_k].data.new_tensor(torch.ones(
size=(config.max_position_embeddings, state_dict[_k].shape[1])), dtype=torch.float)
new_postion_embedding = nn.Parameter(data=new_postion_embedding, requires_grad=True)
new_postion_embedding.data.normal_(mean=0.0, std=config.initializer_range)
new_postion_embedding.data.copy_(state_dict[_k][:config.max_position_embeddings, :])
state_dict[_k] = new_postion_embedding.data
del new_postion_embedding
return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(BertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
if config.type_vocab_size > 0:
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
else:
self.token_type_embeddings = None
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).expand(input_shape)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
embeddings = inputs_embeds + position_embeddings
if self.token_type_embeddings:
embeddings = embeddings + self.token_type_embeddings(token_type_ids)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super(BertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def multi_head_attention(self, query, key, value, attention_mask):
query_layer = self.transpose_for_scores(query)
key_layer = self.transpose_for_scores(key)
value_layer = self.transpose_for_scores(value)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
return (context_layer, attention_probs) if self.output_attentions else (context_layer,)
def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None, split_lengths=None):
mixed_query_layer = self.query(hidden_states)
if split_lengths:
assert not self.output_attentions
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
if encoder_hidden_states is not None:
mixed_key_layer = self.key(encoder_hidden_states)
mixed_value_layer = self.value(encoder_hidden_states)
else:
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
if split_lengths:
query_parts = torch.split(mixed_query_layer, split_lengths, dim=1)
key_parts = torch.split(mixed_key_layer, split_lengths, dim=1)
value_parts = torch.split(mixed_value_layer, split_lengths, dim=1)
key = None
value = None
outputs = []
sum_length = 0
for (query, _key, _value, part_length) in zip(query_parts, key_parts, value_parts, split_lengths):
key = _key if key is None else torch.cat((key, _key), dim=1)
value = _value if value is None else torch.cat((value, _value), dim=1)
sum_length += part_length
outputs.append(self.multi_head_attention(
query, key, value, attention_mask[:, :, sum_length - part_length: sum_length, :sum_length]
)[0])
outputs = (torch.cat(outputs, dim=1), )
else:
outputs = self.multi_head_attention(
mixed_query_layer, mixed_key_layer, mixed_value_layer, attention_mask)
return outputs
class BertAttention(nn.Module):
def __init__(self, config):
super(BertAttention, self).__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(self, hidden_states, attention_mask=None, encoder_hidden_states=None, split_lengths=None):
self_outputs = self.self(
hidden_states, attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states, split_lengths=split_lengths)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertLayer(nn.Module):
def __init__(self, config):
super(BertLayer, self).__init__()
self.attention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(self, hidden_states, attention_mask=None, split_lengths=None):
self_attention_outputs = self.attention(
hidden_states, attention_mask, split_lengths=split_lengths)
attention_output = self_attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + self_attention_outputs[1:]
return outputs
class BertEncoder(nn.Module):
def __init__(self, config):
super(BertEncoder, self).__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
def forward(self, hidden_states, attention_mask=None, split_lengths=None):
all_hidden_states = ()
all_attentions = ()
for i, layer_module in enumerate(self.layer):
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(hidden_states, attention_mask, split_lengths=split_lengths)
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
class BertModel(BertPreTrainedForSeq2SeqModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during Bert pretraining. This output is usually *not* a good summary
of the semantic content of the input, you're often better with averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config):
super(BertModel, self).__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
def forward(self, input_ids=None, attention_mask=None, token_type_ids=None,
position_ids=None, inputs_embeds=None, split_lengths=None):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if attention_mask.dim() == 2:
extended_attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(
input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
encoder_outputs = self.encoder(
embedding_output, attention_mask=extended_attention_mask, split_lengths=split_lengths)
sequence_output = encoder_outputs[0]
outputs = (sequence_output, ) + encoder_outputs[1:] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
class LabelSmoothingLoss(_Loss):
"""
With label smoothing,
KL-divergence between q_{smoothed ground truth prob.}(w)
and p_{prob. computed by model}(w) is minimized.
"""
def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None, reduce=None, reduction='mean'):
assert 0.0 < label_smoothing <= 1.0
self.ignore_index = ignore_index
super(LabelSmoothingLoss, self).__init__(
size_average=size_average, reduce=reduce, reduction=reduction)
assert label_smoothing > 0
assert tgt_vocab_size > 0
smoothing_value = label_smoothing / (tgt_vocab_size - 2)
one_hot = torch.full((tgt_vocab_size,), smoothing_value)
one_hot[self.ignore_index] = 0
self.register_buffer('one_hot', one_hot.unsqueeze(0))
self.confidence = 1.0 - label_smoothing
self.tgt_vocab_size = tgt_vocab_size
def forward(self, output, target):
"""
output (FloatTensor): batch_size * num_pos * n_classes
target (LongTensor): batch_size * num_pos
"""
assert self.tgt_vocab_size == output.size(2)
batch_size, num_pos = target.size(0), target.size(1)
output = output.view(-1, self.tgt_vocab_size)
target = target.view(-1)
model_prob = self.one_hot.float().repeat(target.size(0), 1)
model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)
return F.kl_div(output, model_prob, reduction='none').view(batch_size, num_pos, -1).sum(2)
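# Small self-contained check of LabelSmoothingLoss (sizes are arbitrary): the
# loss expects log-probabilities of shape (batch, positions, vocab) and returns
# an unreduced per-position loss of shape (batch, positions).
def _sketch_label_smoothing():
    criterion = LabelSmoothingLoss(label_smoothing=0.1, tgt_vocab_size=10, ignore_index=0)
    log_probs = F.log_softmax(torch.randn(2, 3, 10), dim=-1)
    target = torch.randint(1, 10, (2, 3))
    loss = criterion(log_probs, target)
    assert loss.shape == (2, 3)
    return loss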
class BertLMPredictionHead(nn.Module):
def __init__(self, config, decoder_weight):
super(BertLMPredictionHead, self).__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder_weight = decoder_weight
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = F.linear(hidden_states, weight=self.decoder_weight, bias=self.bias)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config, decoder_weight):
super(BertOnlyMLMHead, self).__init__()
self.predictions = BertLMPredictionHead(config, decoder_weight)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertForSequenceToSequence(BertPreTrainedForSeq2SeqModel):
def __init__(self, config):
super(BertForSequenceToSequence, self).__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyMLMHead(config, self.bert.embeddings.word_embeddings.weight)
self.init_weights()
        self.log_softmax = nn.LogSoftmax(dim=-1)
# setattr(config, 'label_smoothing', 0.1)
self.source_type_id = config.source_type_id
self.target_type_id = config.target_type_id
if config.label_smoothing > 0:
self.crit_mask_lm_smoothed = LabelSmoothingLoss(
config.label_smoothing, config.vocab_size, ignore_index=0, reduction='none')
self.crit_mask_lm = None
else:
self.crit_mask_lm_smoothed = None
self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none')
@staticmethod
def create_mask_and_position_ids(num_tokens, max_len, offset=None):
base_position_matrix = torch.arange(
0, max_len, dtype=num_tokens.dtype, device=num_tokens.device).view(1, -1)
mask = (base_position_matrix < num_tokens.view(-1, 1)).type_as(num_tokens)
if offset is not None:
base_position_matrix = base_position_matrix + offset.view(-1, 1)
position_ids = base_position_matrix * mask
return mask, position_ids
@staticmethod
def create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids):
weight = torch.cat((torch.zeros_like(source_position_ids), target_span_ids, -target_span_ids), dim=1)
from_weight = weight.unsqueeze(-1)
to_weight = weight.unsqueeze(1)
true_tokens = (0 <= to_weight) & (torch.cat((source_mask, target_mask, target_mask), dim=1) == 1).unsqueeze(1)
true_tokens_mask = (from_weight >= 0) & true_tokens & (to_weight <= from_weight)
pseudo_tokens_mask = (from_weight < 0) & true_tokens & (-to_weight > from_weight)
pseudo_tokens_mask = pseudo_tokens_mask | ((from_weight < 0) & (to_weight == from_weight))
return (true_tokens_mask | pseudo_tokens_mask).type_as(source_mask)
def forward(self, source_ids, target_ids, pseudo_ids, num_source_tokens, num_target_tokens, target_span_ids=None):
source_len = source_ids.size(1)
target_len = target_ids.size(1)
pseudo_len = pseudo_ids.size(1)
assert target_len == pseudo_len
assert source_len > 0 and target_len > 0
split_lengths = (source_len, target_len, pseudo_len)
input_ids = torch.cat((source_ids, target_ids, pseudo_ids), dim=1)
token_type_ids = torch.cat(
(torch.ones_like(source_ids) * self.source_type_id,
torch.ones_like(target_ids) * self.target_type_id,
torch.ones_like(pseudo_ids) * self.target_type_id), dim=1)
source_mask, source_position_ids = \
self.create_mask_and_position_ids(num_source_tokens, source_len)
target_mask, target_position_ids = \
self.create_mask_and_position_ids(num_target_tokens, target_len, offset=num_source_tokens)
position_ids = torch.cat((source_position_ids, target_position_ids, target_position_ids), dim=1)
if target_span_ids is None:
target_span_ids = target_position_ids
attention_mask = self.create_attention_mask(source_mask, target_mask, source_position_ids, target_span_ids)
outputs = self.bert(
input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids,
position_ids=position_ids, split_lengths=split_lengths)
sequence_output = outputs[0]
pseudo_sequence_output = sequence_output[:, source_len + target_len:, ]
def loss_mask_and_normalize(loss, mask):
mask = mask.type_as(loss)
loss = loss * mask
denominator = torch.sum(mask) + 1e-5
return (loss / denominator).sum()
prediction_scores_masked = self.cls(pseudo_sequence_output)
if self.crit_mask_lm_smoothed:
masked_lm_loss = self.crit_mask_lm_smoothed(
F.log_softmax(prediction_scores_masked.float(), dim=-1), target_ids)
else:
masked_lm_loss = self.crit_mask_lm(
prediction_scores_masked.transpose(1, 2).float(), target_ids)
pseudo_lm_loss = loss_mask_and_normalize(
masked_lm_loss.float(), target_mask)
return pseudo_lm_loss
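# Illustrative sketch of the masking helpers above using dummy lengths; the
# static methods only manipulate index tensors, so no pretrained weights are
# needed. The lengths and maxima are arbitrary placeholders.
def _sketch_seq2seq_masks():
    num_source_tokens = torch.tensor([3, 5])   # per-example source lengths
    num_target_tokens = torch.tensor([2, 4])   # per-example target lengths
    source_mask, source_position_ids = \
        BertForSequenceToSequence.create_mask_and_position_ids(num_source_tokens, max_len=6)
    target_mask, target_position_ids = \
        BertForSequenceToSequence.create_mask_and_position_ids(
            num_target_tokens, max_len=5, offset=num_source_tokens)
    attention_mask = BertForSequenceToSequence.create_attention_mask(
        source_mask, target_mask, source_position_ids, target_position_ids)
    # One (source + target + pseudo) x (source + target + pseudo) mask per example.
    assert attention_mask.shape == (2, 6 + 5 + 5, 6 + 5 + 5)
    return attention_mask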
| data2vec_vision-main | s2s-ft/s2s_ft/modeling.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
# Copyright (c) Meta Platforms, Inc. and affiliates
import math
import sys
from typing import Iterable, Optional
import torch
from timm.data import Mixup
from timm.utils import accuracy, ModelEma
import utils
def train_class_batch(model, samples, target, criterion, bool_masked_pos=None):
outputs = model(samples, bool_masked_pos=bool_masked_pos)
loss = criterion(outputs, target)
return loss, outputs
def get_loss_scale_for_deepspeed(model):
optimizer = model.optimizer
return optimizer.loss_scale if hasattr(optimizer, "loss_scale") else optimizer.cur_scale
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None, log_writer=None,
start_steps=None, lr_schedule_values=None, wd_schedule_values=None,
num_training_steps_per_epoch=None, update_freq=None, masked_position_generator=None):
model.train(True)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
if loss_scaler is None:
model.zero_grad()
model.micro_steps = 0
else:
optimizer.zero_grad()
for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
step = data_iter_step // update_freq
if step >= num_training_steps_per_epoch:
continue
it = start_steps + step # global training iteration
# Update LR & WD for the first acc
if lr_schedule_values is not None or wd_schedule_values is not None and data_iter_step % update_freq == 0:
for i, param_group in enumerate(optimizer.param_groups):
if lr_schedule_values is not None:
param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"]
if wd_schedule_values is not None and param_group["weight_decay"] > 0:
param_group["weight_decay"] = wd_schedule_values[it]
bool_masked_pos = None
if masked_position_generator is not None:
bool_masked_pos = torch.tensor([masked_position_generator() for _ in range(samples.size(0))], device=device)
samples = samples.to(device, non_blocking=True)
targets = targets.to(device, non_blocking=True)
if mixup_fn is not None:
samples, targets = mixup_fn(samples, targets)
if loss_scaler is None:
samples = samples.half()
loss, output = train_class_batch(
model, samples, targets, criterion, bool_masked_pos)
else:
with torch.cuda.amp.autocast():
loss, output = train_class_batch(
model, samples, targets, criterion, bool_masked_pos)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value), force=True)
sys.exit(1)
if loss_scaler is None:
loss /= update_freq
model.backward(loss)
model.step()
if (data_iter_step + 1) % update_freq == 0:
# model.zero_grad()
# Deepspeed will call step() & model.zero_grad() automatic
if model_ema is not None:
model_ema.update(model)
grad_norm = None
loss_scale_value = get_loss_scale_for_deepspeed(model)
else:
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
loss /= update_freq
grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order,
update_grad=(data_iter_step + 1) % update_freq == 0)
if (data_iter_step + 1) % update_freq == 0:
optimizer.zero_grad()
if model_ema is not None:
model_ema.update(model)
loss_scale_value = loss_scaler.state_dict()["scale"]
torch.cuda.synchronize()
if mixup_fn is None:
class_acc = (output.max(-1)[-1] == targets).float().mean()
else:
class_acc = None
metric_logger.update(loss=loss_value)
metric_logger.update(class_acc=class_acc)
metric_logger.update(loss_scale=loss_scale_value)
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
metric_logger.update(min_lr=min_lr)
weight_decay_value = None
for group in optimizer.param_groups:
if group["weight_decay"] > 0:
weight_decay_value = group["weight_decay"]
metric_logger.update(weight_decay=weight_decay_value)
metric_logger.update(grad_norm=grad_norm)
if log_writer is not None:
log_writer.update(loss=loss_value, head="loss")
log_writer.update(class_acc=class_acc, head="loss")
log_writer.update(loss_scale=loss_scale_value, head="opt")
log_writer.update(lr=max_lr, head="opt")
log_writer.update(min_lr=min_lr, head="opt")
log_writer.update(weight_decay=weight_decay_value, head="opt")
log_writer.update(grad_norm=grad_norm, head="opt")
log_writer.set_step()
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
@torch.no_grad()
def evaluate(data_loader, model, device):
criterion = torch.nn.CrossEntropyLoss()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
for batch in metric_logger.log_every(data_loader, 10, header):
images = batch[0]
target = batch[-1]
images = images.to(device, non_blocking=True)
target = target.to(device, non_blocking=True)
# compute output
with torch.cuda.amp.autocast():
output = model(images)
loss = criterion(output, target)
acc1, acc5 = accuracy(output, target, topk=(1, 5))
batch_size = images.shape[0]
metric_logger.update(loss=loss.item())
metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
.format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
| data2vec_vision-main | beit/engine_for_finetuning.py |
"""
Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0
Copyright Zhun Zhong & Liang Zheng
Hacked together by / Copyright 2020 Ross Wightman
Modified by Hangbo Bao, for generating the masked position for visual image transformer
"""
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# Originally inspired by impl at https://github.com/zhunzhong07/Random-Erasing, Apache 2.0
# Copyright Zhun Zhong & Liang Zheng
#
# Hacked together by / Copyright 2020 Ross Wightman
#
# Modified by Hangbo Bao, for generating the masked position for visual image transformer
# --------------------------------------------------------'
import random
import math
import numpy as np
class MaskingGenerator:
def __init__(
self, input_size, num_masking_patches, min_num_patches=4, max_num_patches=None,
min_aspect=0.3, max_aspect=None):
if not isinstance(input_size, tuple):
input_size = (input_size, ) * 2
self.height, self.width = input_size
self.num_patches = self.height * self.width
self.num_masking_patches = num_masking_patches
self.min_num_patches = min_num_patches
self.max_num_patches = num_masking_patches if max_num_patches is None else max_num_patches
max_aspect = max_aspect or 1 / min_aspect
self.log_aspect_ratio = (math.log(min_aspect), math.log(max_aspect))
def __repr__(self):
repr_str = "Generator(%d, %d -> [%d ~ %d], max = %d, %.3f ~ %.3f)" % (
self.height, self.width, self.min_num_patches, self.max_num_patches,
self.num_masking_patches, self.log_aspect_ratio[0], self.log_aspect_ratio[1])
return repr_str
def get_shape(self):
return self.height, self.width
def _mask(self, mask, max_mask_patches):
delta = 0
for attempt in range(10):
target_area = random.uniform(self.min_num_patches, max_mask_patches)
aspect_ratio = math.exp(random.uniform(*self.log_aspect_ratio))
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < self.width and h < self.height:
top = random.randint(0, self.height - h)
left = random.randint(0, self.width - w)
num_masked = mask[top: top + h, left: left + w].sum()
# Overlap
if 0 < h * w - num_masked <= max_mask_patches:
for i in range(top, top + h):
for j in range(left, left + w):
if mask[i, j] == 0:
mask[i, j] = 1
delta += 1
if delta > 0:
break
return delta
def __call__(self):
        mask = np.zeros(shape=self.get_shape(), dtype=int)  # np.int was removed in NumPy >= 1.24
mask_count = 0
while mask_count < self.num_masking_patches:
max_mask_patches = self.num_masking_patches - mask_count
max_mask_patches = min(max_mask_patches, self.max_num_patches)
delta = self._mask(mask, max_mask_patches)
if delta == 0:
break
else:
mask_count += delta
return mask
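# Illustrative sketch: for a 14x14 grid of patches (224px image, 16px patches),
# draw a block-wise mask with at most `num_masking_patches` masked positions
# (the total can fall slightly short if block placement fails).
def _sketch_masking():
    generator = MaskingGenerator(14, num_masking_patches=75, min_num_patches=16)
    mask = generator()          # numpy array of shape (14, 14) with 0/1 entries
    return int(mask.sum())      # <= 75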
| data2vec_vision-main | beit/masking_generator.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on OpenAI DALL-E and lucidrains' DALLE-pytorch code bases
# https://github.com/openai/DALL-E
# https://github.com/lucidrains/DALLE-pytorch
# --------------------------------------------------------'
from math import sqrt
import os
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
def top_k(logits, thres = 0.5):
num_logits = logits.shape[-1]
k = max(int((1 - thres) * num_logits), 1)
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
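# Illustrative sketch: top_k keeps only the highest-scoring logits and floors
# the rest to -inf, e.g. before sampling from a softmax.
def _sketch_top_k():
    logits = torch.randn(1, 8)
    filtered = top_k(logits, thres=0.75)        # keeps the top 25% (here: 2) logits
    return int(torch.isfinite(filtered).sum())  # -> 2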
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
class BasicVAE(nn.Module):
def get_codebook_indices(self, images):
raise NotImplementedError()
def decode(self, img_seq):
raise NotImplementedError()
def get_codebook_probs(self, img_seq):
raise NotImplementedError()
def get_image_tokens_size(self):
pass
def get_image_size(self):
pass
class ResBlock(nn.Module):
def __init__(self, chan_in, hidden_size, chan_out):
super().__init__()
self.net = nn.Sequential(
nn.Conv2d(chan_in, hidden_size, 3, padding=1),
nn.ReLU(),
nn.Conv2d(hidden_size, hidden_size, 3, padding=1),
nn.ReLU(),
nn.Conv2d(hidden_size, chan_out, 1)
)
def forward(self, x):
return self.net(x) + x
class DiscreteVAE(BasicVAE):
def __init__(
self,
image_size = 256,
num_tokens = 512,
codebook_dim = 512,
num_layers = 3,
hidden_dim = 64,
channels = 3,
smooth_l1_loss = False,
temperature = 0.9,
straight_through = False,
kl_div_loss_weight = 0.
):
super().__init__()
# assert log2(image_size).is_integer(), 'image size must be a power of 2'
assert num_layers >= 1, 'number of layers must be greater than or equal to 1'
self.image_size = image_size
self.num_tokens = num_tokens
self.num_layers = num_layers
self.temperature = temperature
self.straight_through = straight_through
self.codebook = nn.Embedding(num_tokens, codebook_dim)
enc_layers = []
dec_layers = []
enc_in = channels
dec_in = codebook_dim
for layer_id in range(num_layers):
enc_layers.append(nn.Sequential(nn.Conv2d(enc_in, hidden_dim, 4, stride=2, padding=1), nn.ReLU()))
enc_layers.append(ResBlock(chan_in=hidden_dim, hidden_size=hidden_dim, chan_out=hidden_dim))
enc_in = hidden_dim
dec_layers.append(nn.Sequential(nn.ConvTranspose2d(dec_in, hidden_dim, 4, stride=2, padding=1), nn.ReLU()))
dec_layers.append(ResBlock(chan_in=hidden_dim, hidden_size=hidden_dim, chan_out=hidden_dim))
dec_in = hidden_dim
enc_layers.append(nn.Conv2d(hidden_dim, num_tokens, 1))
dec_layers.append(nn.Conv2d(hidden_dim, channels, 1))
self.encoder = nn.Sequential(*enc_layers)
self.decoder = nn.Sequential(*dec_layers)
self.loss_fn = F.smooth_l1_loss if smooth_l1_loss else F.mse_loss
self.kl_div_loss_weight = kl_div_loss_weight
def get_image_size(self):
return self.image_size
def get_image_tokens_size(self):
return self.image_size // 8
@torch.no_grad()
@eval_decorator
def get_codebook_indices(self, images):
logits = self.forward(images, return_logits = True)
codebook_indices = logits.argmax(dim = 1)
return codebook_indices
@torch.no_grad()
@eval_decorator
def get_codebook_probs(self, images):
logits = self.forward(images, return_logits = True)
return nn.Softmax(dim=1)(logits)
def decode(
self,
img_seq
):
image_embeds = self.codebook(img_seq)
b, n, d = image_embeds.shape
h = w = int(sqrt(n))
image_embeds = rearrange(image_embeds, 'b (h w) d -> b d h w', h = h, w = w)
images = self.decoder(image_embeds)
return images
def forward(
self,
img,
return_loss = False,
return_recons = False,
return_logits = False,
temp = None
):
device, num_tokens, image_size, kl_div_loss_weight = img.device, self.num_tokens, self.image_size, self.kl_div_loss_weight
assert img.shape[-1] == image_size and img.shape[-2] == image_size, f'input must have the correct image size {image_size}'
logits = self.encoder(img)
if return_logits:
return logits # return logits for getting hard image indices for DALL-E training
temp = default(temp, self.temperature)
soft_one_hot = F.gumbel_softmax(logits, tau = temp, dim = 1, hard = self.straight_through)
sampled = einsum('b n h w, n d -> b d h w', soft_one_hot, self.codebook.weight)
out = self.decoder(sampled)
if not return_loss:
return out
# reconstruction loss
recon_loss = self.loss_fn(img, out)
# kl divergence
logits = rearrange(logits, 'b n h w -> b (h w) n')
qy = F.softmax(logits, dim = -1)
log_qy = torch.log(qy + 1e-10)
log_uniform = torch.log(torch.tensor([1. / num_tokens], device = device))
kl_div = F.kl_div(log_uniform, log_qy, None, None, 'batchmean', log_target = True)
loss = recon_loss + (kl_div * kl_div_loss_weight)
if not return_recons:
return loss
return loss, out
from dall_e import load_model
class Dalle_VAE(BasicVAE):
def __init__(self, image_size):
super().__init__()
self.encoder = None
self.decoder = None
self.image_size = image_size
def load_model(self, model_dir, device):
self.encoder = load_model(os.path.join(model_dir, "encoder.pkl"), device)
self.decoder = load_model(os.path.join(model_dir, "decoder.pkl"), device)
def decode(self, img_seq):
bsz = img_seq.size()[0]
img_seq = img_seq.view(bsz, self.image_size // 8, self.image_size // 8)
z = F.one_hot(img_seq, num_classes=self.encoder.vocab_size).permute(0, 3, 1, 2).float()
return self.decoder(z).float()
def get_codebook_indices(self, images):
z_logits = self.encoder(images)
return torch.argmax(z_logits, axis=1)
def get_codebook_probs(self, images):
z_logits = self.encoder(images)
return nn.Softmax(dim=1)(z_logits)
def forward(self, img_seq_prob, no_process=False):
if no_process:
return self.decoder(img_seq_prob.float()).float()
else:
bsz, seq_len, num_class = img_seq_prob.size()
z = img_seq_prob.view(bsz, self.image_size // 8, self.image_size // 8, self.encoder.vocab_size)
return self.decoder(z.permute(0, 3, 1, 2).float()).float()
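# Illustrative sketch using the randomly initialised DiscreteVAE above
# (Dalle_VAE is skipped here because it needs the pretrained DALL-E
# encoder/decoder pickles). All sizes are arbitrary placeholders.
def _sketch_discrete_vae():
    vae = DiscreteVAE(image_size=64, num_tokens=128, codebook_dim=64,
                      num_layers=2, hidden_dim=32)
    images = torch.randn(2, 3, 64, 64)
    tokens = vae.get_codebook_indices(images)  # (2, 16, 16) grid of visual token ids
    loss, recons = vae(images, return_loss=True, return_recons=True)
    assert recons.shape == images.shape
    return loss, tokens.shape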
| data2vec_vision-main | beit/modeling_discrete_vae.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# --------------------------------------------------------'
# Copyright (c) Meta Platforms, Inc. and affiliates
import torch
import torchvision.transforms.functional as F
from PIL import Image
import warnings
import math
import random
import numpy as np
class ToNumpy:
def __call__(self, pil_img):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.rollaxis(np_img, 2) # HWC to CHW
return np_img
class ToTensor:
def __init__(self, dtype=torch.float32):
self.dtype = dtype
def __call__(self, pil_img):
np_img = np.array(pil_img, dtype=np.uint8)
if np_img.ndim < 3:
np_img = np.expand_dims(np_img, axis=-1)
np_img = np.rollaxis(np_img, 2) # HWC to CHW
return torch.from_numpy(np_img).to(dtype=self.dtype)
_pil_interpolation_to_str = {
Image.NEAREST: 'PIL.Image.NEAREST',
Image.BILINEAR: 'PIL.Image.BILINEAR',
Image.BICUBIC: 'PIL.Image.BICUBIC',
Image.LANCZOS: 'PIL.Image.LANCZOS',
Image.HAMMING: 'PIL.Image.HAMMING',
Image.BOX: 'PIL.Image.BOX',
}
def _pil_interp(method):
if method == 'bicubic':
return Image.BICUBIC
elif method == 'lanczos':
return Image.LANCZOS
elif method == 'hamming':
return Image.HAMMING
else:
# default bilinear, do we want to allow nearest?
return Image.BILINEAR
_RANDOM_INTERPOLATION = (Image.BILINEAR, Image.BICUBIC)
class RandomResizedCropAndInterpolationWithTwoPic:
"""Crop the given PIL Image to random size and aspect ratio with random interpolation.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 4/3) of the original aspect ratio is made. This crop
is finally resized to given size.
This is popularly used to train the Inception networks.
Args:
size: expected output size of each edge
scale: range of size of the origin size cropped
ratio: range of aspect ratio of the origin aspect ratio cropped
interpolation: Default: PIL.Image.BILINEAR
"""
def __init__(self, size, second_size=None, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.),
interpolation='bilinear', second_interpolation='lanczos'):
if isinstance(size, tuple):
self.size = size
else:
self.size = (size, size)
if second_size is not None:
if isinstance(second_size, tuple):
self.second_size = second_size
else:
self.second_size = (second_size, second_size)
else:
self.second_size = None
if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
warnings.warn("range should be of kind (min, max)")
if interpolation == 'random':
self.interpolation = _RANDOM_INTERPOLATION
else:
self.interpolation = _pil_interp(interpolation)
self.second_interpolation = _pil_interp(second_interpolation) if second_interpolation is not None else None
self.scale = scale
self.ratio = ratio
@staticmethod
def get_params(img, scale, ratio):
"""Get parameters for ``crop`` for a random sized crop.
Args:
img (PIL Image): Image to be cropped.
scale (tuple): range of size of the origin size cropped
ratio (tuple): range of aspect ratio of the origin aspect ratio cropped
Returns:
tuple: params (i, j, h, w) to be passed to ``crop`` for a random
sized crop.
"""
area = img.size[0] * img.size[1]
for attempt in range(10):
target_area = random.uniform(*scale) * area
log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
aspect_ratio = math.exp(random.uniform(*log_ratio))
w = int(round(math.sqrt(target_area * aspect_ratio)))
h = int(round(math.sqrt(target_area / aspect_ratio)))
if w <= img.size[0] and h <= img.size[1]:
i = random.randint(0, img.size[1] - h)
j = random.randint(0, img.size[0] - w)
return i, j, h, w
# Fallback to central crop
in_ratio = img.size[0] / img.size[1]
if in_ratio < min(ratio):
w = img.size[0]
h = int(round(w / min(ratio)))
elif in_ratio > max(ratio):
h = img.size[1]
w = int(round(h * max(ratio)))
else: # whole image
w = img.size[0]
h = img.size[1]
i = (img.size[1] - h) // 2
j = (img.size[0] - w) // 2
return i, j, h, w
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be cropped and resized.
Returns:
PIL Image: Randomly cropped and resized image.
"""
i, j, h, w = self.get_params(img, self.scale, self.ratio)
if isinstance(self.interpolation, (tuple, list)):
interpolation = random.choice(self.interpolation)
else:
interpolation = self.interpolation
if self.second_size is None:
return F.resized_crop(img, i, j, h, w, self.size, interpolation)
else:
return F.resized_crop(img, i, j, h, w, self.size, interpolation), \
F.resized_crop(img, i, j, h, w, self.second_size, self.second_interpolation)
def __repr__(self):
if isinstance(self.interpolation, (tuple, list)):
interpolate_str = ' '.join([_pil_interpolation_to_str[x] for x in self.interpolation])
else:
interpolate_str = _pil_interpolation_to_str[self.interpolation]
format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
format_string += ', interpolation={0}'.format(interpolate_str)
if self.second_size is not None:
format_string += ', second_size={0}'.format(self.second_size)
format_string += ', second_interpolation={0}'.format(_pil_interpolation_to_str[self.second_interpolation])
format_string += ')'
return format_string
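# Illustrative sketch: with `second_size` set, the transform returns two crops
# of the same region, e.g. a 224px patch for the vision transformer and a
# smaller 112px patch for the discrete VAE tokenizer.
def _sketch_two_crop_transform():
    transform = RandomResizedCropAndInterpolationWithTwoPic(
        size=224, second_size=112, interpolation='bicubic', second_interpolation='lanczos')
    image = Image.new('RGB', (256, 320))
    patch, visual_token_patch = transform(image)
    return patch.size, visual_token_patch.size   # -> ((224, 224), (112, 112))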
| data2vec_vision-main | beit/transforms.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import math
import sys
from typing import Iterable
import torch
import torch.nn as nn
import utils
def train_one_epoch(model: torch.nn.Module, d_vae: torch.nn.Module,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
log_writer=None, lr_scheduler=None, start_steps=None,
lr_schedule_values=None, wd_schedule_values=None):
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
for step, (batch, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
# assign learning rate & weight decay for each step
it = start_steps + step # global training iteration
if lr_schedule_values is not None or wd_schedule_values is not None:
for i, param_group in enumerate(optimizer.param_groups):
if lr_schedule_values is not None:
param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"]
if wd_schedule_values is not None and param_group["weight_decay"] > 0:
param_group["weight_decay"] = wd_schedule_values[it]
samples, images, bool_masked_pos = batch
images = images.to(device, non_blocking=True)
samples = samples.to(device, non_blocking=True)
bool_masked_pos = bool_masked_pos.to(device, non_blocking=True)
with torch.no_grad():
input_ids = d_vae.get_codebook_indices(images).flatten(1)
bool_masked_pos = bool_masked_pos.flatten(1).to(torch.bool)
labels = input_ids[bool_masked_pos]
with torch.cuda.amp.autocast():
outputs = model(samples, bool_masked_pos=bool_masked_pos, return_all_tokens=False)
loss = nn.CrossEntropyLoss()(input=outputs, target=labels)
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
optimizer.zero_grad()
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order)
loss_scale_value = loss_scaler.state_dict()["scale"]
torch.cuda.synchronize()
mlm_acc = (outputs.max(-1)[1] == labels).float().mean().item()
metric_logger.update(mlm_acc=mlm_acc)
if log_writer is not None:
log_writer.update(mlm_acc=mlm_acc, head="loss")
metric_logger.update(loss=loss_value)
metric_logger.update(loss_scale=loss_scale_value)
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
metric_logger.update(min_lr=min_lr)
weight_decay_value = None
for group in optimizer.param_groups:
if group["weight_decay"] > 0:
weight_decay_value = group["weight_decay"]
metric_logger.update(weight_decay=weight_decay_value)
metric_logger.update(grad_norm=grad_norm)
if log_writer is not None:
log_writer.update(loss=loss_value, head="loss")
log_writer.update(loss_scale=loss_scale_value, head="opt")
log_writer.update(lr=max_lr, head="opt")
log_writer.update(min_lr=min_lr, head="opt")
log_writer.update(weight_decay=weight_decay_value, head="opt")
log_writer.update(grad_norm=grad_norm, head="opt")
log_writer.set_step()
if lr_scheduler is not None:
lr_scheduler.step_update(start_steps + step)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
| data2vec_vision-main | beit/engine_for_pretraining.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
# Copyright (c) Meta Platforms, Inc. and affiliates
import math
import sys
from typing import Iterable
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils
def train_one_epoch(model: torch.nn.Module, model_ema: torch.nn.Module, ema_start_at, target_layers,
d_vae: torch.nn.Module, vae_loss_weight: float,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0, l1_beta: float = 0.12,
log_writer=None, lr_scheduler=None, start_steps=None,
lr_schedule_values=None, wd_schedule_values=None, l2_loss=False):
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('loss_cyc', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('loss_beit', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
for step, (batch, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
# assign learning rate & weight decay for each step
it = start_steps + step # global training iteration
if lr_schedule_values is not None or wd_schedule_values is not None:
for i, param_group in enumerate(optimizer.param_groups):
if lr_schedule_values is not None:
param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"]
if wd_schedule_values is not None and param_group["weight_decay"] > 0:
param_group["weight_decay"] = wd_schedule_values[it]
samples, images, bool_masked_pos = batch
images = images.to(device, non_blocking=True)
samples = samples.to(device, non_blocking=True)
bool_masked_pos = bool_masked_pos.to(device, non_blocking=True)
with torch.no_grad():
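# Regression targets come from the EMA teacher: average the layer-normalized outputs of the selected
# target_layers, then keep only the masked positions.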
targets = model_ema.module(samples, bool_masked_pos=None, return_all_tokens=True, layer_results=True)
fsz = targets[0].size(-1)
targets = sum(F.layer_norm(targets[i], (fsz,)) for i in target_layers) / len(target_layers)
fsz = targets.size(-1)
target_mask = bool_masked_pos.flatten().bool()
targets = targets.reshape(-1, fsz)[target_mask]
# beit part
input_ids = d_vae.get_codebook_indices(images).flatten(1)
bool_masked_pos = bool_masked_pos.flatten(1).to(torch.bool)
labels = input_ids[bool_masked_pos]
with torch.cuda.amp.autocast():
outputs, beit_outputs = model(samples, bool_masked_pos=bool_masked_pos, return_all_tokens=False)
outputs = outputs.reshape(-1, fsz)
assert outputs.shape == targets.shape
if l2_loss:
cyc_loss = F.mse_loss(outputs, targets)
else:
cyc_loss = F.smooth_l1_loss(outputs, targets, beta=l1_beta)
# beit part
beit_loss = nn.CrossEntropyLoss()(input=beit_outputs, target=labels)
# loss = cyc_loss / (vae_loss_weight + 1) + beit_loss * vae_loss_weight / (vae_loss_weight + 1)
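# Here vae_loss_weight acts as an epoch horizon: the BEiT (codebook) loss weight anneals linearly from 1 to 0
# over the first vae_loss_weight epochs, while the regression (cyc) loss weight ramps up correspondingly.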
beit_w = max(1 - (epoch / vae_loss_weight), 0)
loss = cyc_loss * (1 - beit_w) + beit_loss * beit_w
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
optimizer.zero_grad()
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order)
loss_scale_value = loss_scaler.state_dict()["scale"]
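# EMA teacher schedule: copy the student weights once when reaching ema_start_at, then apply the standard EMA update on every later step.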
if it == ema_start_at and ema_start_at > 0:
print(f"setting EMA to model params at update {it}")
model_ema.set(model)
elif it >= ema_start_at:
model_ema.update(model)
torch.cuda.synchronize()
metric_logger.update(loss=loss_value)
metric_logger.update(loss_scale=loss_scale_value)
metric_logger.update(loss_cyc=cyc_loss.item())
metric_logger.update(loss_beit=beit_loss.item())
# metric_logger.update(loss_cyc=cyc_loss.item(), head="loss_cyc")
# metric_logger.update(loss_beit=beit_loss.item(), head="loss_beit")
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
metric_logger.update(min_lr=min_lr)
weight_decay_value = None
for group in optimizer.param_groups:
if group["weight_decay"] > 0:
weight_decay_value = group["weight_decay"]
metric_logger.update(weight_decay=weight_decay_value)
metric_logger.update(grad_norm=grad_norm)
if log_writer is not None:
log_writer.update(loss=loss_value, head="loss")
log_writer.update(loss=cyc_loss.item(), head="loss_cyc")
log_writer.update(loss=beit_loss.item(), head="loss_beit")
log_writer.update(loss_scale=loss_scale_value, head="opt")
log_writer.update(lr=max_lr, head="opt")
log_writer.update(min_lr=min_lr, head="opt")
log_writer.update(weight_decay=weight_decay_value, head="opt")
log_writer.update(grad_norm=grad_norm, head="opt")
log_writer.set_step()
if lr_scheduler is not None:
lr_scheduler.step_update(start_steps + step)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
| data2vec_vision-main | beit/engine_for_cyclical_joint.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# --------------------------------------------------------'
# Copyright (c) Meta Platforms, Inc. and affiliates
import math
import torch
import torch.nn as nn
from functools import partial
from modeling_finetune import Block, _cfg, PatchEmbed, RelativePositionBias
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_ as __call_trunc_normal_
def trunc_normal_(tensor, mean=0., std=1.):
__call_trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std)
__all__ = [
'beit_base_patch16_224_8k_vocab',
'beit_large_patch16_224_8k_vocab',
]
class VisionTransformerForMaskedImageModeling(nn.Module):
def __init__(self, img_size=224, patch_size=16, in_chans=3, vocab_size=8192, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=None, init_values=None, attn_head_dim=None,
use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, init_std=0.02):
super().__init__()
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_abs_pos_emb:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
else:
self.rel_pos_bias = None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None,
attn_head_dim=attn_head_dim,
)
for i in range(depth)])
self.norm = norm_layer(embed_dim)
self.init_std = init_std
self.lm_head = nn.Linear(embed_dim, vocab_size)
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=self.init_std)
trunc_normal_(self.cls_token, std=self.init_std)
trunc_normal_(self.mask_token, std=self.init_std)
trunc_normal_(self.lm_head.weight, std=self.init_std)
self.apply(self._init_weights)
self.fix_init_weight()
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=self.init_std)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=self.init_std)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_num_layers(self):
return len(self.blocks)
def forward_features(self, x, bool_masked_pos):
x = self.patch_embed(x, bool_masked_pos=bool_masked_pos)
batch_size, seq_len, _ = x.size()
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
mask_token = self.mask_token.expand(batch_size, seq_len, -1)
# replace the masked visual tokens by mask_token
w = bool_masked_pos.unsqueeze(-1).type_as(mask_token)
x = x * (1 - w) + mask_token * w
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
for blk in self.blocks:
x, _ = blk(x, rel_pos_bias=rel_pos_bias)
return self.norm(x)
def forward(self, x, bool_masked_pos, return_all_tokens=False):
x = self.forward_features(x, bool_masked_pos=bool_masked_pos)
x = x[:, 1:]
if return_all_tokens:
return self.lm_head(x)
else:
# return the masked tokens
return self.lm_head(x[bool_masked_pos])
@register_model
def beit_base_patch16_224_8k_vocab(pretrained=False, **kwargs):
_ = kwargs.pop("num_classes")
model = VisionTransformerForMaskedImageModeling(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), vocab_size=8192, **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.load(
kwargs["init_ckpt"], map_location="cpu"
)
model.load_state_dict(checkpoint["model"])
return model
@register_model
def beit_large_patch16_224_8k_vocab(pretrained=False, **kwargs):
_ = kwargs.pop("num_classes")
model = VisionTransformerForMaskedImageModeling(
patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), vocab_size=8192, **kwargs)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.load(
kwargs["init_ckpt"], map_location="cpu"
)
model.load_state_dict(checkpoint["model"])
return model
| data2vec_vision-main | beit/modeling_pretrain.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# --------------------------------------------------------'
# Copyright (c) Meta Platforms, Inc. and affiliates
import math
import torch
import torch.nn as nn
from functools import partial
from modeling_finetune import Block, _cfg, PatchEmbed, RelativePositionBias
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_ as __call_trunc_normal_
def trunc_normal_(tensor, mean=0.0, std=1.0):
__call_trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std)
__all__ = [
"beit_base_patch16_224",
# 'beit_large_patch16_224_8k_vocab',
]
class VisionTransformerForCyclicalTraining(nn.Module):
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.0,
norm_layer=None,
init_values=None,
attn_head_dim=None,
use_abs_pos_emb=True,
use_rel_pos_bias=False,
use_shared_rel_pos_bias=False,
init_std=0.02,
):
super().__init__()
self.num_features = (
self.embed_dim
) = embed_dim # num_features for consistency with other models
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_abs_pos_emb:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(
window_size=self.patch_embed.patch_shape, num_heads=num_heads
)
else:
self.rel_pos_bias = None
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, depth)
] # stochastic depth decay rule
self.blocks = nn.ModuleList(
[
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
init_values=init_values,
window_size=self.patch_embed.patch_shape
if use_rel_pos_bias
else None,
attn_head_dim=attn_head_dim,
)
for i in range(depth)
]
)
self.norm = norm_layer(embed_dim)
self.init_std = init_std
# self.lm_head = nn.Sequential(
# nn.Linear(embed_dim, embed_dim * 2),
# nn.GELU(),
# nn.Linear(embed_dim * 2, embed_dim),
# )
# self.lm_head = nn.Sequential(
# nn.Linear(embed_dim, embed_dim),
# )
self.lm_head = nn.Linear(embed_dim, embed_dim)
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=self.init_std)
trunc_normal_(self.cls_token, std=self.init_std)
trunc_normal_(self.mask_token, std=self.init_std)
self.apply(self._init_weights)
self.fix_init_weight()
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=self.init_std)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=self.init_std)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return {"pos_embed", "cls_token"}
def get_num_layers(self):
return len(self.blocks)
def forward_features(self, x, bool_masked_pos, layer_results):
x = self.patch_embed(x, bool_masked_pos=bool_masked_pos)
batch_size, seq_len, _ = x.size()
cls_tokens = self.cls_token.expand(
batch_size, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
mask_token = self.mask_token.expand(batch_size, seq_len, -1)
if bool_masked_pos is not None:
# replace the masked visual tokens by mask_token
w = bool_masked_pos.view(bool_masked_pos.size(0), -1, 1).type_as(mask_token)
x = x * (1 - w) + mask_token * w # B x T x C
# print(bool_masked_pos.shape)
# print(bool_masked_pos.sum((1,2)))
# print('x', x.shape)
# bool_masked = bool_masked_pos.reshape(bool_masked_pos.size(0), -1).bool()
# print('bool_masked', bool_masked.shape)
# print('asd1', x[bool_masked].shape)
# exit(0)
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
z = []
for i, blk in enumerate(self.blocks):
x, fc_feature = blk(x, rel_pos_bias=rel_pos_bias)
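# layer_results == 'end' collects each block's output; 'fc' collects the block's intermediate MLP feature instead.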
if layer_results == 'end':
z.append(x)
elif layer_results == 'fc':
z.append(fc_feature)
return z if layer_results else self.norm(x)
def forward(self, x, bool_masked_pos, return_all_tokens=False, layer_results=None):
x = self.forward_features(
x, bool_masked_pos=bool_masked_pos, layer_results=layer_results
)
if layer_results:
return [z[:, 1:] for z in x]
elif return_all_tokens:
x = x[:, 1:]
return self.lm_head(x)
else:
# return the masked tokens
x = x[:, 1:]
bsz = x.size(0)
fsz = x.size(-1)
bool_masked_pos = bool_masked_pos.flatten().bool()
x = x.reshape(-1, fsz)[bool_masked_pos]
return self.lm_head(x)
@register_model
def beit_base_patch16_224(pretrained=False, **kwargs):
# _ = kwargs.pop("num_classes")
model = VisionTransformerForCyclicalTraining(
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs
)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def beit_large_patch16_224(pretrained=False, **kwargs):
# _ = kwargs.pop("num_classes")
model = VisionTransformerForCyclicalTraining(
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs
)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
@register_model
def beit_huge_patch16_224(pretrained=False, **kwargs):
# _ = kwargs.pop("num_classes")
model = VisionTransformerForCyclicalTraining(
patch_size=16,
embed_dim=1280,
depth=32,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
**kwargs
)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
# @register_model
# def beit_large_patch16_224_8k_vocab(pretrained=False, **kwargs):
# _ = kwargs.pop("num_classes")
# model = VisionTransformerForMaskedImageModeling(
# patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
# norm_layer=partial(nn.LayerNorm, eps=1e-6), vocab_size=8192, **kwargs)
# model.default_cfg = _cfg()
# if pretrained:
# checkpoint = torch.load(
# kwargs["init_ckpt"], map_location="cpu"
# )
# model.load_state_dict(checkpoint["model"])
# return model
| data2vec_vision-main | beit/modeling_cyclical.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
# Copyright (c) Meta Platforms, Inc. and affiliates
import os
import torch
from torchvision import datasets, transforms
from timm.data.constants import \
IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from transforms import RandomResizedCropAndInterpolationWithTwoPic
from timm.data import create_transform
from dall_e.utils import map_pixels
from masking_generator import MaskingGenerator
from dataset_folder import ImageFolder
from PIL import Image
class DataAugmentationForBEiT(object):
def __init__(self, args):
imagenet_default_mean_and_std = args.imagenet_default_mean_and_std
mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN
std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD
if args.aug_level == 0:
print(' >>>>>> args.aug_level', args.aug_level)
self.common_transform = transforms.Compose([
transforms.CenterCrop(size=args.input_size)
])
elif args.aug_level == 1:
print(' >>>>>> args.aug_level', args.aug_level)
self.common_transform = transforms.Compose([
transforms.Resize(size=int(args.input_size / .875), interpolation=Image.BICUBIC),
transforms.CenterCrop(size=args.input_size)
])
elif args.aug_level == 2:
print(' >>>>>> args.aug_level', args.aug_level)
self.common_transform = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.5),
transforms.Resize(size=int(args.input_size / .875), interpolation=Image.BICUBIC),
transforms.CenterCrop(size=args.input_size)
])
elif args.aug_level == 3:
print(' >>>>>> args.aug_level', args.aug_level)
self.common_transform = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomResizedCrop(size=args.input_size, interpolation=Image.BICUBIC)
])
elif args.aug_level == 4:
print(' >>>>>> args.aug_level', args.aug_level)
self.common_transform = transforms.Compose([
transforms.ColorJitter(0.4, 0.4, 0.4),
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomResizedCrop(size=args.input_size, interpolation=Image.BICUBIC)
])
else:
self.common_transform = transforms.Compose([
transforms.ColorJitter(0.4, 0.4, 0.4),
transforms.RandomHorizontalFlip(p=0.5),
RandomResizedCropAndInterpolationWithTwoPic(
size=args.input_size, second_size=getattr(args, 'second_input_size', None),
interpolation=args.train_interpolation, second_interpolation=getattr(args, 'second_interpolation', None),
),
])
self.patch_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
mean=torch.tensor(mean),
std=torch.tensor(std))
])
if getattr(args, 'discrete_vae_type', None) is None:
self.visual_token_transform = lambda z: z
elif args.discrete_vae_type == "dall-e":
self.visual_token_transform = transforms.Compose([
transforms.ToTensor(),
map_pixels,
])
elif args.discrete_vae_type == "customized":
self.visual_token_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
mean=IMAGENET_INCEPTION_MEAN,
std=IMAGENET_INCEPTION_STD,
),
])
else:
raise NotImplementedError()
self.masked_position_generator = MaskingGenerator(
args.window_size, num_masking_patches=args.num_mask_patches,
max_num_patches=args.max_mask_patches_per_block,
min_num_patches=args.min_mask_patches_per_block,
)
def __call__(self, image):
z = self.common_transform(image)
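# common_transform may return a single view or a (patch view, visual-token view) pair, depending on whether a second crop size was configured.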
if isinstance(z, tuple):
for_patches, for_visual_tokens = z
return \
self.patch_transform(for_patches), self.visual_token_transform(for_visual_tokens), \
self.masked_position_generator()
else:
return self.patch_transform(z), self.masked_position_generator()
def __repr__(self):
repr = "(DataAugmentationForBEiT,\n"
repr += " common_transform = %s,\n" % str(self.common_transform)
repr += " patch_transform = %s,\n" % str(self.patch_transform)
repr += " visual_tokens_transform = %s,\n" % str(self.visual_token_transform)
repr += " Masked position generator = %s,\n" % str(self.masked_position_generator)
repr += ")"
return repr
def build_beit_pretraining_dataset(args):
transform = DataAugmentationForBEiT(args)
print("Data Aug = %s" % str(transform))
return ImageFolder(args.data_path, transform=transform)
def build_dataset(is_train, args):
transform = build_transform(is_train, args)
print("Transform = ")
if isinstance(transform, tuple):
for trans in transform:
print(" - - - - - - - - - - ")
for t in trans.transforms:
print(t)
else:
for t in transform.transforms:
print(t)
print("---------------------------")
is_valid_file = None
if is_train:
file_filter = getattr(args, "data_set_filter_file", None)
if file_filter is not None:
files = set()
with open(file_filter) as ff:
for l in ff:
files.add(l.rstrip())
is_valid_file = lambda p: os.path.basename(p) in files
if args.data_set == 'CIFAR':
dataset = datasets.CIFAR100(args.data_path, train=is_train, transform=transform)
nb_classes = 100
elif args.data_set == 'IMNET':
root = os.path.join(args.data_path, 'train' if is_train else 'val')
dataset = datasets.ImageFolder(root, transform=transform, is_valid_file=is_valid_file)
nb_classes = 1000
elif args.data_set == "image_folder":
root = args.data_path if is_train else args.eval_data_path
dataset = ImageFolder(root, transform=transform, is_valid_file=is_valid_file)
nb_classes = args.nb_classes
assert len(dataset.class_to_idx) == nb_classes
else:
raise NotImplementedError()
assert nb_classes == args.nb_classes, f"{nb_classes} != {args.nb_classes}"
print("Number of the class = %d" % args.nb_classes)
return dataset, nb_classes
def build_transform(is_train, args):
resize_im = args.input_size > 32
imagenet_default_mean_and_std = args.imagenet_default_mean_and_std
mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN
std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD
if is_train:
# this should always dispatch to transforms_imagenet_train
transform = create_transform(
input_size=args.input_size,
is_training=True,
color_jitter=args.color_jitter,
auto_augment=args.aa,
interpolation=args.train_interpolation,
re_prob=args.reprob,
re_mode=args.remode,
re_count=args.recount,
mean=mean,
std=std,
)
if not resize_im:
# replace RandomResizedCropAndInterpolation with
# RandomCrop
transform.transforms[0] = transforms.RandomCrop(
args.input_size, padding=4)
return transform
t = []
if resize_im:
if args.crop_pct is None:
if args.input_size < 384:
args.crop_pct = 224 / 256
else:
args.crop_pct = 1.0
size = int(args.input_size / args.crop_pct)
t.append(
transforms.Resize(size, interpolation=3), # to maintain same ratio w.r.t. 224 images
)
t.append(transforms.CenterCrop(args.input_size))
t.append(transforms.ToTensor())
t.append(transforms.Normalize(mean, std))
return transforms.Compose(t)
| data2vec_vision-main | beit/datasets.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# --------------------------------------------------------'
# Copyright (c) Meta Platforms, Inc. and affiliates
import math
import torch
import torch.nn as nn
from functools import partial
from modeling_finetune import Block, _cfg, PatchEmbed, RelativePositionBias
from timm.models.registry import register_model
from timm.models.layers import trunc_normal_ as __call_trunc_normal_
def trunc_normal_(tensor, mean=0.0, std=1.0):
__call_trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std)
__all__ = [
"beit_base_joint_patch16_224",
# 'beit_large_patch16_224_8k_vocab',
]
class VisionTransformerForCyclicalJointTraining(nn.Module):
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
vocab_size=8192,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.0,
qkv_bias=True,
qk_scale=None,
drop_rate=0.0,
attn_drop_rate=0.0,
drop_path_rate=0.0,
norm_layer=None,
init_values=None,
attn_head_dim=None,
use_abs_pos_emb=True,
use_rel_pos_bias=False,
use_shared_rel_pos_bias=False,
init_std=0.02,
):
super().__init__()
self.num_features = (
self.embed_dim
) = embed_dim # num_features for consistency with other models
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_abs_pos_emb:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(
window_size=self.patch_embed.patch_shape, num_heads=num_heads
)
else:
self.rel_pos_bias = None
dpr = [
x.item() for x in torch.linspace(0, drop_path_rate, depth)
] # stochastic depth decay rule
self.blocks = nn.ModuleList(
[
Block(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_scale=qk_scale,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
init_values=init_values,
window_size=self.patch_embed.patch_shape
if use_rel_pos_bias
else None,
attn_head_dim=attn_head_dim,
)
for i in range(depth)
]
)
self.norm = norm_layer(embed_dim)
self.init_std = init_std
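# Two output heads: lm_head regresses EMA-teacher features (cyclical loss), beit_head predicts d-VAE codebook ids (BEiT loss).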
self.lm_head = nn.Sequential(
nn.Linear(embed_dim, embed_dim * 2),
nn.GELU(),
nn.Linear(embed_dim * 2, embed_dim),
)
self.beit_head = nn.Linear(embed_dim, vocab_size)
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=self.init_std)
trunc_normal_(self.cls_token, std=self.init_std)
trunc_normal_(self.mask_token, std=self.init_std)
trunc_normal_(self.beit_head.weight, std=self.init_std)
self.apply(self._init_weights)
self.fix_init_weight()
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=self.init_std)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
elif isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=self.init_std)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return {"pos_embed", "cls_token"}
def get_num_layers(self):
return len(self.blocks)
def forward_features(self, x, bool_masked_pos, layer_results):
x = self.patch_embed(x, bool_masked_pos=bool_masked_pos)
batch_size, seq_len, _ = x.size()
cls_tokens = self.cls_token.expand(
batch_size, -1, -1
) # stole cls_tokens impl from Phil Wang, thanks
mask_token = self.mask_token.expand(batch_size, seq_len, -1)
if bool_masked_pos is not None:
# replace the masked visual tokens by mask_token
w = bool_masked_pos.view(bool_masked_pos.size(0), -1, 1).type_as(mask_token)
x = x * (1 - w) + mask_token * w
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
z = []
for i, blk in enumerate(self.blocks):
x, _ = blk(x, rel_pos_bias=rel_pos_bias)
if layer_results:
z.append(x)
return z if layer_results else self.norm(x)
def forward(self, x, bool_masked_pos, return_all_tokens=False, layer_results=False):
x = self.forward_features(
x, bool_masked_pos=bool_masked_pos, layer_results=layer_results
)
if layer_results:
return [z[:, 1:] for z in x]
elif return_all_tokens:
x = x[:, 1:]
return self.lm_head(x), self.beit_head(x)
else:
# return the masked tokens
x = x[:, 1:]
bsz = x.size(0)
fsz = x.size(-1)
bool_masked_pos = bool_masked_pos.flatten().bool()
x = x.reshape(-1, fsz)[bool_masked_pos]
return self.lm_head(x), self.beit_head(x)
@register_model
def beit_base_joint_patch16_224(pretrained=False, **kwargs):
_ = kwargs.pop("num_classes")
model = VisionTransformerForCyclicalJointTraining(
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
vocab_size=8192,
**kwargs
)
model.default_cfg = _cfg()
if pretrained:
checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu")
model.load_state_dict(checkpoint["model"])
return model
# @register_model
# def beit_large_patch16_224_8k_vocab(pretrained=False, **kwargs):
# _ = kwargs.pop("num_classes")
# model = VisionTransformerForMaskedImageModeling(
# patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
# norm_layer=partial(nn.LayerNorm, eps=1e-6), vocab_size=8192, **kwargs)
# model.default_cfg = _cfg()
# if pretrained:
# checkpoint = torch.load(
# kwargs["init_ckpt"], map_location="cpu"
# )
# model.load_state_dict(checkpoint["model"])
# return model
| data2vec_vision-main | beit/modeling_cyclical_joint.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
# Copyright (c) Meta Platforms, Inc. and affiliates
import math
import sys
from typing import Iterable
import torch
import torch.nn.functional as F
import utils
def train_one_epoch(model: torch.nn.Module, model_ema: torch.nn.Module, ema_start_at, decay_init, decay, target_layers,
data_loader: Iterable, optimizer: torch.optim.Optimizer,
device: torch.device, epoch: int, loss_scaler, max_norm: float = 0,
l1_beta: float = 0.12,
log_writer=None, lr_scheduler=None, start_steps=None,
lr_schedule_values=None, wd_schedule_values=None, l2_loss=False, layer_results='end',
var_w0=0, var_w1=0, var_margin0=0.5, var_margin1=0.5, start_lr_decay_at_step=-1, loss_scale=-1, mask_dropout_prob=-1.0,
target_layer_norm_last=True, target_batch_norm=False, target_instance_norm=False, post_target_instance_norm=False, post_target_layer_norm=False):
print(' <<<<<<<< layer_results >>>>>>>>', layer_results)
print(' <<<<<<<< var_w0, var_w1 >>>>>>>>', var_w0, var_w1)
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('loss_var0', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
# metric_logger.add_meter('loss_var1', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
cur_decay = decay
for step, (batch, _) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
# assign learning rate & weight decay for each step
it = start_steps + step # global training iteration
if lr_schedule_values is not None or wd_schedule_values is not None:
for i, param_group in enumerate(optimizer.param_groups):
if lr_schedule_values is not None:
param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"]
if wd_schedule_values is not None and param_group["weight_decay"] > 0:
param_group["weight_decay"] = wd_schedule_values[it]
if it < ema_start_at:
cur_decay = decay_init + it * (decay - decay_init) / ema_start_at
samples, bool_masked_pos = batch
samples = samples.to(device, non_blocking=True)
bool_masked_pos = bool_masked_pos.to(device, non_blocking=True)
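# Mask dropout: each masked position is flipped back to unmasked with probability mask_dropout_prob.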
if mask_dropout_prob > 0:
new_mask_tensor = torch.ones_like(bool_masked_pos, dtype=samples.dtype)
new_mask_tensor.fill_(1-mask_dropout_prob)
bool_new_mask_tensor = torch.bernoulli(new_mask_tensor)
bool_masked_pos = torch.logical_and(bool_new_mask_tensor, bool_masked_pos)
with torch.no_grad():
targets = model_ema.module(samples, bool_masked_pos=None, return_all_tokens=True, layer_results=layer_results)
fsz = targets[0].size(-1)
#shape of targets[0] == b x t x dim
layer_vals = [targets[i] for i in target_layers]
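# Optional target normalizations: batch/instance norm operate per feature channel (hence the btc <-> bct permutes),
# layer norm operates over the feature dimension; the post_ variants are applied after averaging the target layers.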
if target_instance_norm or target_batch_norm:
layer_vals = [val.permute(0,2,1) for val in layer_vals] # btc => bct
if target_batch_norm:
layer_vals = [F.batch_norm(val.float(), running_mean=None, running_var=None, training=True) for val in layer_vals] # bct => bct
if target_instance_norm:
layer_vals = [F.instance_norm(val.float()) for val in layer_vals] # bct => bct
if target_instance_norm or target_batch_norm:
layer_vals = [val.permute(0,2,1) for val in layer_vals] # bct => btc
if target_layer_norm_last:
layer_vals = (F.layer_norm(val.float(), (fsz,)) for val in layer_vals)
targets = sum(layer_vals) / len(target_layers)
if post_target_instance_norm:
targets = targets.permute(0,2,1)
targets = F.instance_norm(targets.float())
targets = targets.permute(0,2,1)
if post_target_layer_norm:
targets = F.layer_norm(targets.float(), (fsz,))
fsz = targets.size(-1)
target_mask = bool_masked_pos.flatten().bool()
targets = targets.reshape(-1, fsz)[target_mask]
with torch.cuda.amp.autocast():
outputs = model(samples, bool_masked_pos=bool_masked_pos, return_all_tokens=False)
outputs = outputs.float()
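# Optional variance regularization: hinge penalty when the per-dimension std of the predictions falls below var_margin0, added to the loss with weight var_w0.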
eps=1e-6
z0 = outputs.reshape(-1, outputs.size(-1))
z0 = torch.sqrt(z0.var(dim=0) + eps)
if var_w0 > 0:
std_loss0 = torch.sum(F.relu(var_margin0 - z0)) / z0.size(0)
else:
std_loss0 = 0
# z1 = torch.sqrt(outputs.var(dim=1) + eps)
# std_loss1 = torch.sum(F.relu(var_margin1 - z1)) / outputs.size(0)
# print(outputs.shape)
outputs = outputs.reshape(-1, fsz)
assert outputs.shape == targets.shape
if l2_loss:
loss_cyc = F.mse_loss(outputs, targets)
else:
loss_cyc = F.smooth_l1_loss(outputs, targets, beta=l1_beta)
# loss = loss_cyc + std_loss0 * var_w0 + std_loss1 * var_w1
loss = loss_cyc + std_loss0 * var_w0
if loss_scale != -1:
loss = loss * loss_scale
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value), force=True)
sys.exit(1)
optimizer.zero_grad()
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order)
loss_scale_value = loss_scaler.state_dict()["scale"]
# if it == ema_start_at and ema_start_at > 0:
# print(f"setting EMA to model params at update {it}")
# model_ema.set(model)
# elif it >= ema_start_at:
# model_ema.update(model)
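# Update the EMA teacher with the current decay (skipped if decay == 1); once past start_lr_decay_at_step,
# updates stop and cur_decay is zeroed for logging.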
if cur_decay != 1 and (start_lr_decay_at_step == -1 or it <= start_lr_decay_at_step):
model_ema._update(model, update_fn=lambda e, m: cur_decay * e + (1. - cur_decay) * m)
else:
cur_decay = 0
torch.cuda.synchronize()
metric_logger.update(loss=loss_value)
metric_logger.update(loss_scale=loss_scale_value)
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
metric_logger.update(min_lr=min_lr)
metric_logger.update(loss_var0=std_loss0)
# metric_logger.update(loss_var1=std_loss1)
weight_decay_value = None
for group in optimizer.param_groups:
if group["weight_decay"] > 0:
weight_decay_value = group["weight_decay"]
metric_logger.update(weight_decay=weight_decay_value)
metric_logger.update(grad_norm=grad_norm)
metric_logger.update(cur_decay=cur_decay)
if log_writer is not None:
log_writer.update(loss=loss_value, head="loss")
# log_writer.update(std_loss0=std_loss0.item(), head="std_loss0")
# log_writer.update(std_loss1=std_loss1.item(), head="std_loss1")
log_writer.update(loss_scale=loss_scale_value, head="opt")
log_writer.update(lr=max_lr, head="opt")
log_writer.update(min_lr=min_lr, head="opt")
log_writer.update(weight_decay=weight_decay_value, head="opt")
log_writer.update(grad_norm=grad_norm, head="opt")
log_writer.update(cur_decay=cur_decay, head="cur_decay")
log_writer.set_step()
if lr_scheduler is not None:
lr_scheduler.step_update(start_steps + step)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
| data2vec_vision-main | beit/engine_for_cyclical.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
# Copyright (c) Meta Platforms, Inc. and affiliates
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import os
from pathlib import Path
from timm.models import create_model
from timm.utils import ModelEmaV2
from optim_factory import create_optimizer
from datasets import build_beit_pretraining_dataset
from engine_for_cyclical import train_one_epoch
from utils import NativeScalerWithGradNormCount as NativeScaler
import utils
from scipy import interpolate
import modeling_cyclical
def get_args():
parser = argparse.ArgumentParser("BEiT pre-training script", add_help=False)
parser.add_argument("--batch_size", default=64, type=int)
parser.add_argument("--epochs", default=300, type=int)
parser.add_argument("--save_ckpt_freq", default=10, type=int)
# Model parameters
parser.add_argument(
"--model",
default="deit_base_patch16_224",
type=str,
metavar="MODEL",
help="Name of model to train",
)
parser.add_argument("--rel_pos_bias", action="store_true")
parser.add_argument(
"--disable_rel_pos_bias", action="store_false", dest="rel_pos_bias"
)
parser.set_defaults(rel_pos_bias=True)
parser.add_argument("--abs_pos_emb", action="store_true")
parser.set_defaults(abs_pos_emb=False)
parser.add_argument(
"--layer_scale_init_value",
default=0.1,
type=float,
help="0.1 for base, 1e-5 for large. set 0 to disable layer scale",
)
parser.add_argument(
"--num_mask_patches",
default=75,
type=int,
help="number of the visual tokens/patches need be masked",
)
parser.add_argument("--max_mask_patches_per_block", type=int, default=None)
parser.add_argument("--min_mask_patches_per_block", type=int, default=16)
parser.add_argument(
"--input_size", default=224, type=int, help="images input size for backbone"
)
parser.add_argument(
"--drop_path",
type=float,
default=0.1,
metavar="PCT",
help="Drop path rate (default: 0.1)",
)
# Optimizer parameters
parser.add_argument(
"--opt",
default="adamw",
type=str,
metavar="OPTIMIZER",
help='Optimizer (default: "adamw")',
)
parser.add_argument(
"--opt_eps",
default=1e-8,
type=float,
metavar="EPSILON",
help="Optimizer Epsilon (default: 1e-8)",
)
parser.add_argument(
"--opt_betas",
default=None,
type=float,
nargs="+",
metavar="BETA",
help="Optimizer Betas (default: None, use opt default)",
)
parser.add_argument(
"--clip_grad",
type=float,
default=None,
metavar="NORM",
help="Clip gradient norm (default: None, no clipping)",
)
parser.add_argument(
"--momentum",
type=float,
default=0.9,
metavar="M",
help="SGD momentum (default: 0.9)",
)
parser.add_argument(
"--weight_decay", type=float, default=0.05, help="weight decay (default: 0.05)"
)
parser.add_argument(
"--weight_decay_end",
type=float,
default=None,
help="""Final value of the
weight decay. We use a cosine schedule for WD.
(Set the same value with args.weight_decay to keep weight decay no change)""",
)
parser.add_argument(
"--lr",
type=float,
default=5e-4,
metavar="LR",
help="learning rate (default: 5e-4)",
)
parser.add_argument(
"--warmup_lr",
type=float,
default=1e-6,
metavar="LR",
help="warmup learning rate (default: 1e-6)",
)
parser.add_argument(
"--min_lr",
type=float,
default=1e-5,
metavar="LR",
help="lower lr bound for cyclic schedulers that hit 0 (1e-5)",
)
parser.add_argument(
"--tri_phase_schedule",
type=str,
default=None,
help="string containing a tuple with phase ratios for warmup and decay. e.g. '(0.05,0.15) means 5% warmup, 80% hold, 15% decay",
)
parser.add_argument(
"--warmup_epochs",
type=int,
default=5,
metavar="N",
help="epochs to warmup LR, if scheduler supports",
)
parser.add_argument(
"--warmup_steps",
type=int,
default=-1,
metavar="N",
help="epochs to warmup LR, if scheduler supports",
)
# Augmentation parameters
parser.add_argument(
"--color_jitter",
type=float,
default=0.4,
metavar="PCT",
help="Color jitter factor (default: 0.4)",
)
parser.add_argument(
"--train_interpolation",
type=str,
default="bicubic",
help='Training interpolation (random, bilinear, bicubic default: "bicubic")',
)
parser.add_argument("--aug_level", default=-1, type=int)
parser.add_argument(
"--target_layers", type=str, default="[]", help="target layers (python list)"
)
# Dataset parameters
parser.add_argument(
"--data_path",
default="/datasets01/imagenet_full_size/061417/",
type=str,
help="dataset path",
)
parser.add_argument(
"--imagenet_default_mean_and_std", default=False, action="store_true"
)
parser.add_argument(
"--output_dir", default="", help="path where to save, empty for no saving"
)
parser.add_argument("--log_dir", default=None, help="path where to tensorboard log")
parser.add_argument(
"--device", default="cuda", help="device to use for training / testing"
)
parser.add_argument("--seed", default=0, type=int)
parser.add_argument("--resume", default="", help="resume from checkpoint")
parser.add_argument("--auto_resume", action="store_true")
parser.add_argument("--no_auto_resume", action="store_false", dest="auto_resume")
parser.set_defaults(auto_resume=True)
parser.add_argument("--ema_decay_init", default=0.999, type=float)
parser.add_argument("--ema_decay", default=0.9998, type=float)
parser.add_argument("--ema_start_at", default=25000, type=int)
parser.add_argument(
"--start_epoch", default=0, type=int, metavar="N", help="start epoch"
)
parser.add_argument("--num_workers", default=10, type=int)
parser.add_argument(
"--pin_mem",
action="store_true",
help="Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.",
)
parser.add_argument("--no_pin_mem", action="store_false", dest="pin_mem", help="")
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument(
"--world_size", default=1, type=int, help="number of distributed processes"
)
parser.add_argument("--local_rank", default=-1, type=int)
parser.add_argument("--dist_on_itp", action="store_true")
parser.add_argument(
"--dist_url", default="env://", help="url used to set up distributed training"
)
parser.add_argument("--seed_model", default=None, type=str, help="seed model")
parser.add_argument("--model_key", default="model|module", type=str)
parser.add_argument("--model_prefix", default="", type=str)
parser.add_argument("--l2_loss", default=False, action="store_true")
parser.add_argument("--l1_beta", default=0.12, type=float)
parser.add_argument("--layer_results", default="end", type=str)
parser.add_argument("--var_w0", default=0., type=float)
parser.add_argument("--var_w1", default=0., type=float)
parser.add_argument("--var_margin0", default=0.5, type=float)
parser.add_argument("--var_margin1", default=0.5, type=float)
parser.add_argument("--skip_ema_during_lr_decay_for_tri", action="store_true")
parser.add_argument("--loss_scale", default=-1, type=float)
parser.add_argument("--ema_annealing_till_end", default=False, action="store_true")
parser.add_argument("--attn_drop_rate", default=0.0, type=float)
parser.add_argument("--mask_dropout_prob", default=-1.0, type=float, help="prob of flipping already masked position to unmasked")
#target_layer_norm_last=True, target_batch_norm=False, target_instance_norm=False
parser.add_argument("--no_target_layer_norm_last", default=False, action="store_true")
parser.add_argument("--target_batch_norm", default=False, action="store_true")
parser.add_argument("--target_instance_norm", default=False, action="store_true")
parser.add_argument("--post_target_instance_norm", default=False, action="store_true")
parser.add_argument("--post_target_layer_norm", default=False, action="store_true")
return parser.parse_args()
def get_model(args):
print(f"Creating model: {args.model}")
model = create_model(
args.model,
pretrained=False,
drop_path_rate=args.drop_path,
drop_block_rate=None,
use_shared_rel_pos_bias=args.rel_pos_bias,
use_abs_pos_emb=args.abs_pos_emb,
init_values=args.layer_scale_init_value,
attn_drop_rate=args.attn_drop_rate,
)
return model
def main(args):
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
# random.seed(seed)
cudnn.benchmark = True
model = get_model(args)
patch_size = model.patch_embed.patch_size
print("Patch size = %s" % str(patch_size))
args.window_size = (
args.input_size // patch_size[0],
args.input_size // patch_size[1],
)
args.patch_size = patch_size
if args.seed_model:
checkpoint = torch.load(args.seed_model, map_location="cpu")
print("Load ckpt from %s" % args.seed_model)
checkpoint_model = None
for model_key in args.model_key.split("|"):
if model_key in checkpoint:
checkpoint_model = checkpoint[model_key]
print("Load state_dict by model_key = %s" % model_key)
break
if checkpoint_model is None:
checkpoint_model = checkpoint
state_dict = model.state_dict()
for k in ["head.weight", "head.bias"]:
if (
k in checkpoint_model
and checkpoint_model[k].shape != state_dict[k].shape
):
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
all_keys = list(checkpoint_model.keys())
for key in all_keys:
if "relative_position_index" in key:
checkpoint_model.pop(key)
if "relative_position_bias_table" in key:
rel_pos_bias = checkpoint_model[key]
src_num_pos, num_attn_heads = rel_pos_bias.size()
dst_num_pos, _ = model.state_dict()[key].size()
dst_patch_shape = model.patch_embed.patch_shape
if dst_patch_shape[0] != dst_patch_shape[1]:
raise NotImplementedError()
num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (
dst_patch_shape[1] * 2 - 1
)
src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)
if src_size != dst_size:
print(
"Position interpolate for %s from %dx%d to %dx%d"
% (key, src_size, src_size, dst_size, dst_size)
)
extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
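# Interpolate the relative position bias table from src_size to dst_size: bisection finds a geometric-progression
# ratio q for the source coordinates, then each head's bias map is resampled with bicubic interpolation.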
def geometric_progression(a, r, n):
return a * (1.0 - r ** n) / (1.0 - r)
left, right = 1.01, 1.5
while right - left > 1e-6:
q = (left + right) / 2.0
gp = geometric_progression(1, q, src_size // 2)
if gp > dst_size // 2:
right = q
else:
left = q
# if q > 1.090307:
# q = 1.090307
dis = []
cur = 1
for i in range(src_size // 2):
dis.append(cur)
cur += q ** (i + 1)
r_ids = [-_ for _ in reversed(dis)]
x = r_ids + [0] + dis
y = r_ids + [0] + dis
t = dst_size // 2.0
dx = np.arange(-t, t + 0.1, 1.0)
dy = np.arange(-t, t + 0.1, 1.0)
print("Original positions = %s" % str(x))
print("Target positions = %s" % str(dx))
all_rel_pos_bias = []
for i in range(num_attn_heads):
z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
f = interpolate.interp2d(x, y, z, kind="cubic")
all_rel_pos_bias.append(
torch.Tensor(f(dx, dy))
.contiguous()
.view(-1, 1)
.to(rel_pos_bias.device)
)
rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)
new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
checkpoint_model[key] = new_rel_pos_bias
# interpolate position embedding
if "pos_embed" in checkpoint_model:
pos_embed_checkpoint = checkpoint_model["pos_embed"]
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print(
"Position interpolate from %dx%d to %dx%d"
% (orig_size, orig_size, new_size, new_size)
)
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(
-1, orig_size, orig_size, embedding_size
).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens,
size=(new_size, new_size),
mode="bicubic",
align_corners=False,
)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model["pos_embed"] = new_pos_embed
utils.load_state_dict(model, checkpoint_model, prefix=args.model_prefix)
# get dataset
dataset_train = build_beit_pretraining_dataset(args)
if True: # args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_rank = global_rank
num_training_steps_per_epoch = (
len(dataset_train) // args.batch_size // num_tasks
)
print("pre-sampler", num_tasks, global_rank, sampler_rank)
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=sampler_rank, shuffle=True
)
print("Sampler_train = %s" % str(sampler_train))
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train,
sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
model.to(device)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print("number of params:", n_parameters)
model_ema = ModelEmaV2(model, decay=args.ema_decay)
print("Using EMA with decay = %.8f" % args.ema_decay)
total_batch_size = args.batch_size * utils.get_world_size()
print("LR = %.8f" % args.lr)
print("Batch size = %d" % total_batch_size)
print("Number of training steps = %d" % num_training_steps_per_epoch)
print(
"Number of training examples per epoch = %d"
% (total_batch_size * num_training_steps_per_epoch)
)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.gpu], find_unused_parameters=True
)
model_without_ddp = model.module
optimizer = create_optimizer(args, model_without_ddp)
loss_scaler = NativeScaler()
start_lr_decay_at_step = -1
if args.tri_phase_schedule is not None:
from ast import literal_eval
warmup_phase, decay_phase = literal_eval(args.tri_phase_schedule)
print("Use tri phase lr schedule!", warmup_phase, decay_phase)
lr_schedule_values = utils.tri_phase_scheduler(
args.lr,
args.min_lr,
args.epochs,
num_training_steps_per_epoch,
warmup_perc=warmup_phase,
decay_perc=decay_phase,
)
if args.skip_ema_during_lr_decay_for_tri:
            start_lr_decay_at_step = (1 - decay_phase) * args.epochs * num_training_steps_per_epoch
            print("ema will be skipped after " + str(start_lr_decay_at_step) + " updates")
else:
print("Use step level LR & WD scheduler!")
lr_schedule_values = utils.cosine_scheduler(
args.lr,
args.min_lr,
args.epochs,
num_training_steps_per_epoch,
warmup_epochs=args.warmup_epochs,
warmup_steps=args.warmup_steps,
)
if args.weight_decay_end is None:
args.weight_decay_end = args.weight_decay
wd_schedule_values = utils.cosine_scheduler(
args.weight_decay,
args.weight_decay_end,
args.epochs,
num_training_steps_per_epoch,
)
print(
"Max WD = %.7f, Min WD = %.7f"
% (max(wd_schedule_values), min(wd_schedule_values))
)
utils.auto_load_model(
args=args,
model=model,
model_without_ddp=model_without_ddp,
optimizer=optimizer,
loss_scaler=loss_scaler,
model_ema=model_ema,
)
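    # args.target_layers is a Python literal (e.g. "[6, 7, 8, 9, 10, 11]") listing the transformer blocks whose
    # outputs are used to construct the training targets.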
from ast import literal_eval
target_layers = literal_eval(args.target_layers)
assert len(target_layers) > 0
print(f"target layers: {target_layers}")
print(f"Start training for {args.epochs} epochs")
if args.ema_annealing_till_end:
args.ema_start_at = args.epochs * num_training_steps_per_epoch
print(f"EMA annealing till the end activated")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
if log_writer is not None:
log_writer.set_step(epoch * num_training_steps_per_epoch)
train_stats = train_one_epoch(
model,
model_ema,
args.ema_start_at,
args.ema_decay_init,
args.ema_decay,
target_layers,
data_loader_train,
optimizer,
device,
epoch,
loss_scaler,
args.clip_grad,
l1_beta=args.l1_beta,
log_writer=log_writer,
start_steps=epoch * num_training_steps_per_epoch,
lr_schedule_values=lr_schedule_values,
wd_schedule_values=wd_schedule_values,
l2_loss=args.l2_loss,
layer_results=args.layer_results,
var_w0=args.var_w0, var_w1=args.var_w1,
var_margin0=args.var_margin0, var_margin1=args.var_margin1,
start_lr_decay_at_step=start_lr_decay_at_step,
loss_scale=args.loss_scale,
mask_dropout_prob=args.mask_dropout_prob,
            target_layer_norm_last=not args.no_target_layer_norm_last,
            target_batch_norm=args.target_batch_norm,
            target_instance_norm=args.target_instance_norm,
post_target_instance_norm=args.post_target_instance_norm,
post_target_layer_norm=args.post_target_layer_norm
)
if args.output_dir:
if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
utils.save_model(
args=args,
model=model,
model_without_ddp=model_without_ddp,
optimizer=optimizer,
loss_scaler=loss_scaler,
epoch=epoch,
model_ema=model_ema,
)
log_stats = {
**{f"train_{k}": v for k, v in train_stats.items()},
"epoch": epoch,
"n_parameters": n_parameters,
}
if args.output_dir and utils.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(
os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8"
) as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print("Training time {}".format(total_time_str))
if __name__ == "__main__":
opts = get_args()
if opts.output_dir:
Path(opts.output_dir).mkdir(parents=True, exist_ok=True)
main(opts)
| data2vec_vision-main | beit/run_cyclical.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
# Copyright (c) Meta Platforms, Inc. and affiliates
import argparse
import datetime
import numpy as np
import time
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import json
import os
from pathlib import Path
from timm.data.mixup import Mixup
from timm.models import create_model
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from timm.utils import ModelEma
from optim_factory import create_optimizer, get_parameter_groups, LayerDecayValueAssigner
from datasets import build_dataset
from engine_for_finetuning import train_one_epoch, evaluate
from utils import NativeScalerWithGradNormCount as NativeScaler
import utils
from scipy import interpolate
import modeling_finetune
def get_args():
parser = argparse.ArgumentParser('BEiT fine-tuning and evaluation script for image classification', add_help=False)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--epochs', default=30, type=int)
parser.add_argument('--update_freq', default=1, type=int)
parser.add_argument('--save_ckpt_freq', default=5, type=int)
# Model parameters
parser.add_argument('--model', default='deit_base_patch16_224', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--rel_pos_bias', action='store_true')
parser.add_argument('--disable_rel_pos_bias', action='store_false', dest='rel_pos_bias')
parser.set_defaults(rel_pos_bias=True)
parser.add_argument('--abs_pos_emb', action='store_true')
parser.set_defaults(abs_pos_emb=False)
parser.add_argument('--layer_scale_init_value', default=0.1, type=float,
help="0.1 for base, 1e-5 for large. set 0 to disable layer scale")
parser.add_argument('--input_size', default=224, type=int,
help='images input size')
parser.add_argument('--drop', type=float, default=0.0, metavar='PCT',
help='Dropout rate (default: 0.)')
parser.add_argument('--attn_drop_rate', type=float, default=0.0, metavar='PCT',
help='Attention dropout rate (default: 0.)')
parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--disable_eval_during_finetuning', action='store_true', default=False)
parser.add_argument('--model_ema', action='store_true', default=False)
parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='')
parser.add_argument('--model_ema_force_cpu', action='store_true', default=False, help='')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the
weight decay. We use a cosine schedule for WD and using a larger decay by
the end of training improves performance for ViTs.""")
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--layer_decay', type=float, default=0.9)
parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR',
                        help='lower lr bound for cyclic schedulers that hit 0 (default: 1e-6)')
parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
                        help='num of steps to warmup LR, will override warmup_epochs if set > 0')
# Augmentation parameters
parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original". (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
parser.add_argument('--train_interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
# Evaluation parameters
parser.add_argument('--crop_pct', type=float, default=None)
# * Random Erase params
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# * Mixup params
parser.add_argument('--mixup', type=float, default=0,
help='mixup alpha, mixup enabled if > 0.')
parser.add_argument('--cutmix', type=float, default=0,
help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup_prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup_mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# * Finetuning params
parser.add_argument('--finetune', default='',
help='finetune from checkpoint')
parser.add_argument('--model_key', default='model|module', type=str)
parser.add_argument('--model_prefix', default='', type=str)
parser.add_argument('--init_scale', default=0.001, type=float)
parser.add_argument('--use_mean_pooling', action='store_true')
parser.set_defaults(use_mean_pooling=True)
parser.add_argument('--use_cls', action='store_false', dest='use_mean_pooling')
parser.add_argument('--disable_weight_decay_on_rel_pos_bias', action='store_true', default=False)
parser.add_argument('--target_layer', default=-1, type=int, help="target output layer (0-based)")
parser.add_argument('--remove_final_norm', action='store_true', dest='remove_final_norm')
parser.add_argument('--reinit_final_norm', action='store_true', dest='reinit_final_norm')
parser.add_argument('--learn_layer_weights', action='store_true', dest='learn_layer_weights') # supersede `target_layer`
parser.add_argument('--layernorm_before_combine', action='store_true', dest='layernorm_before_combine')
# Dataset parameters
parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--eval_data_path', default=None, type=str,
help='dataset path for evaluation')
parser.add_argument('--nb_classes', default=0, type=int,
help='number of the classification types')
parser.add_argument('--linear_classifier', action='store_true',
help='linear classifier')
parser.add_argument('--imagenet_default_mean_and_std', default=False, action='store_true')
parser.add_argument('--data_set', default='IMNET', choices=['CIFAR', 'IMNET', 'image_folder'],
type=str, help='ImageNet dataset path')
    parser.add_argument('--data_set_filter_file', type=str, default=None, help="path to a filter file used to filter the dataset")
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default=None,
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='',
help='resume from checkpoint')
parser.add_argument('--auto_resume', action='store_true')
parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume')
parser.set_defaults(auto_resume=True)
parser.add_argument('--save_ckpt', action='store_true')
parser.add_argument('--no_save_ckpt', action='store_false', dest='save_ckpt')
parser.set_defaults(save_ckpt=True)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true',
help='Perform evaluation only')
parser.add_argument('--dist_eval', action='store_true', default=False,
help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
parser.add_argument('--enable_deepspeed', action='store_true', default=False)
parser.add_argument(
"--num_mask_patches",
default=0,
type=int,
help="number of the visual tokens/patches need be masked",
)
parser.add_argument("--max_mask_patches_per_block", type=int, default=None)
parser.add_argument("--min_mask_patches_per_block", type=int, default=16)
known_args, _ = parser.parse_known_args()
if known_args.enable_deepspeed:
try:
import deepspeed
from deepspeed import DeepSpeedConfig
parser = deepspeed.add_config_arguments(parser)
ds_init = deepspeed.initialize
        except ImportError:
print("Please 'pip install deepspeed==0.4.0'")
exit(0)
else:
ds_init = None
return parser.parse_args(), ds_init
def main(args, ds_init):
utils.init_distributed_mode(args)
if ds_init is not None:
utils.create_ds_config(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
# random.seed(seed)
cudnn.benchmark = True
dataset_train, args.nb_classes = build_dataset(is_train=True, args=args)
if args.disable_eval_during_finetuning:
dataset_val = None
else:
dataset_val, _ = build_dataset(is_train=False, args=args)
if True: # args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=global_rank, shuffle=True
)
print("Sampler_train = %s" % str(sampler_train))
if args.dist_eval:
if len(dataset_val) % num_tasks != 0:
print('Warning: Enabling distributed evaluation with an eval dataset not divisible by process number. '
'This will slightly alter validation results as extra duplicate entries are added to achieve '
'equal num of samples per-process.')
sampler_val = torch.utils.data.DistributedSampler(
dataset_val, num_replicas=num_tasks, rank=global_rank, shuffle=False)
else:
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
if dataset_val is not None:
data_loader_val = torch.utils.data.DataLoader(
dataset_val, sampler=sampler_val,
batch_size=int(1.5 * args.batch_size),
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=False
)
else:
data_loader_val = None
mixup_fn = None
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
print("Mixup is activated!")
mixup_fn = Mixup(
mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax,
prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode,
label_smoothing=args.smoothing, num_classes=args.nb_classes)
model = create_model(
args.model,
pretrained=False,
num_classes=args.nb_classes,
drop_rate=args.drop,
drop_path_rate=args.drop_path,
attn_drop_rate=args.attn_drop_rate,
drop_block_rate=None,
use_mean_pooling=args.use_mean_pooling,
init_scale=args.init_scale,
use_rel_pos_bias=False,
use_shared_rel_pos_bias=args.rel_pos_bias,
use_abs_pos_emb=args.abs_pos_emb,
init_values=args.layer_scale_init_value,
linear_classifier=args.linear_classifier,
has_masking=args.num_mask_patches > 0,
learn_layer_weights=args.learn_layer_weights,
layernorm_before_combine=args.layernorm_before_combine,
)
patch_size = model.patch_embed.patch_size
print("Patch size = %s" % str(patch_size))
args.window_size = (args.input_size // patch_size[0], args.input_size // patch_size[1])
args.patch_size = patch_size
masked_position_generator = None
if args.num_mask_patches > 0:
from masking_generator import MaskingGenerator
masked_position_generator = MaskingGenerator(
args.window_size, num_masking_patches=args.num_mask_patches,
max_num_patches=args.max_mask_patches_per_block,
min_num_patches=args.min_mask_patches_per_block,
)
if args.finetune:
if args.finetune.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.finetune, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.finetune, map_location='cpu')
print("Load ckpt from %s" % args.finetune)
checkpoint_model = None
for model_key in args.model_key.split('|'):
if model_key in checkpoint:
checkpoint_model = checkpoint[model_key]
print("Load state_dict by model_key = %s" % model_key)
break
if checkpoint_model is None:
checkpoint_model = checkpoint
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
if args.reinit_final_norm:
for k in ['norm.weight', 'norm.bias', 'fc_norm.weight', 'fc_norm.bias']:
if k in checkpoint_model:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
if model.use_rel_pos_bias and "rel_pos_bias.relative_position_bias_table" in checkpoint_model:
print("Expand the shared relative position embedding to each transformer block. ")
num_layers = model.get_num_layers()
rel_pos_bias = checkpoint_model["rel_pos_bias.relative_position_bias_table"]
for i in range(num_layers):
checkpoint_model["blocks.%d.attn.relative_position_bias_table" % i] = rel_pos_bias.clone()
checkpoint_model.pop("rel_pos_bias.relative_position_bias_table")
all_keys = list(checkpoint_model.keys())
for key in all_keys:
if "relative_position_index" in key:
checkpoint_model.pop(key)
if "relative_position_bias_table" in key:
rel_pos_bias = checkpoint_model[key]
src_num_pos, num_attn_heads = rel_pos_bias.size()
dst_num_pos, _ = model.state_dict()[key].size()
dst_patch_shape = model.patch_embed.patch_shape
if dst_patch_shape[0] != dst_patch_shape[1]:
raise NotImplementedError()
num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)
src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)
if src_size != dst_size:
print("Position interpolate for %s from %dx%d to %dx%d" % (
key, src_size, src_size, dst_size, dst_size))
extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
def geometric_progression(a, r, n):
return a * (1.0 - r ** n) / (1.0 - r)
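                    # geometric_progression(a, r, n) is the partial sum a + a*r + ... + a*r**(n - 1); the bisection
                    # below finds the ratio q whose cumulative spacing over src_size // 2 steps reaches dst_size // 2,
                    # yielding a source grid that is denser near the centre of the bias map.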
left, right = 1.01, 1.5
while right - left > 1e-6:
q = (left + right) / 2.0
gp = geometric_progression(1, q, src_size // 2)
if gp > dst_size // 2:
right = q
else:
left = q
# if q > 1.090307:
# q = 1.090307
dis = []
cur = 1
for i in range(src_size // 2):
dis.append(cur)
cur += q ** (i + 1)
r_ids = [-_ for _ in reversed(dis)]
x = r_ids + [0] + dis
y = r_ids + [0] + dis
t = dst_size // 2.0
dx = np.arange(-t, t + 0.1, 1.0)
dy = np.arange(-t, t + 0.1, 1.0)
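                    # x / y are the geometrically spaced source coordinates of the old bias table, while dx / dy are
                    # the unit-spaced target coordinates; each head's bias map is resampled from the former onto the
                    # latter with cubic interpolation (scipy interp2d) below.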
print("Original positions = %s" % str(x))
print("Target positions = %s" % str(dx))
all_rel_pos_bias = []
for i in range(num_attn_heads):
z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
f = interpolate.interp2d(x, y, z, kind='cubic')
all_rel_pos_bias.append(
torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))
rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)
new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
checkpoint_model[key] = new_rel_pos_bias
# interpolate position embedding
if 'pos_embed' in checkpoint_model:
pos_embed_checkpoint = checkpoint_model['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model['pos_embed'] = new_pos_embed
if not args.learn_layer_weights and args.target_layer != -1:
print(f"model target layer is {args.target_layer}")
model.blocks = model.blocks[:args.target_layer+1]
if args.remove_final_norm:
print(f"removing final norm by replacing it with Identity")
model.norm = None if model.norm is None else nn.Identity()
model.fc_norm = None if model.fc_norm is None else nn.Identity()
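        # Linear-probe mode: freeze every parameter that has a counterpart in the pretrained checkpoint, so only
        # newly initialized weights (e.g. the classification head) remain trainable.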
if args.linear_classifier:
frozen_params = (
set(n for n, _ in model.named_parameters())
& set(checkpoint_model.keys())
)
for n, p in model.named_parameters():
if n in frozen_params:
p.requires_grad_(False)
param_names = [n for n, p in model.named_parameters() if p.requires_grad]
print(f"Trainable weights: {param_names}")
utils.load_state_dict(model, checkpoint_model, prefix=args.model_prefix)
# model.load_state_dict(checkpoint_model, strict=False)
model.to(device)
model_ema = None
if args.model_ema:
# Important to create EMA model after cuda(), DP wrapper, and AMP but before SyncBN and DDP wrapper
model_ema = ModelEma(
model,
decay=args.model_ema_decay,
device='cpu' if args.model_ema_force_cpu else '',
resume='')
print("Using EMA with decay = %.8f" % args.model_ema_decay)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params:', n_parameters)
total_batch_size = args.batch_size * args.update_freq * utils.get_world_size()
num_training_steps_per_epoch = len(dataset_train) // total_batch_size
print("LR = %.8f" % args.lr)
print("Batch size = %d" % total_batch_size)
print("Update frequent = %d" % args.update_freq)
print("Number of training examples = %d" % len(dataset_train))
print("Number of training training per epoch = %d" % num_training_steps_per_epoch)
num_layers = model_without_ddp.get_num_layers()
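    # Layer-wise LR decay: parameter group i is scaled by layer_decay ** (num_layers + 1 - i), so parameters
    # assigned to earlier layers receive smaller learning rates than those assigned to later layers and the head.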
if args.layer_decay < 1.0:
assigner = LayerDecayValueAssigner(list(args.layer_decay ** (num_layers + 1 - i) for i in range(num_layers + 2)))
else:
assigner = None
if assigner is not None:
print("Assigned values = %s" % str(assigner.values))
skip_weight_decay_list = model.no_weight_decay()
if args.disable_weight_decay_on_rel_pos_bias:
for i in range(num_layers):
skip_weight_decay_list.add("blocks.%d.attn.relative_position_bias_table" % i)
if args.enable_deepspeed:
loss_scaler = None
optimizer_params = get_parameter_groups(
model, args.weight_decay, skip_weight_decay_list,
assigner.get_layer_id if assigner is not None else None,
assigner.get_scale if assigner is not None else None)
model, optimizer, _, _ = ds_init(
args=args, model=model, model_parameters=optimizer_params, dist_init_required=not args.distributed,
)
print("model.gradient_accumulation_steps() = %d" % model.gradient_accumulation_steps())
assert model.gradient_accumulation_steps() == args.update_freq
else:
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
model_without_ddp = model.module
optimizer = create_optimizer(
args, model_without_ddp, skip_list=skip_weight_decay_list,
get_num_layer=assigner.get_layer_id if assigner is not None else None,
get_layer_scale=assigner.get_scale if assigner is not None else None)
loss_scaler = NativeScaler()
print("Use step level LR scheduler!")
lr_schedule_values = utils.cosine_scheduler(
args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch,
warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps,
)
if args.weight_decay_end is None:
args.weight_decay_end = args.weight_decay
wd_schedule_values = utils.cosine_scheduler(
args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch)
print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values)))
if mixup_fn is not None:
# smoothing is handled with mixup label transform
criterion = SoftTargetCrossEntropy()
elif args.smoothing > 0.:
criterion = LabelSmoothingCrossEntropy(smoothing=args.smoothing)
else:
criterion = torch.nn.CrossEntropyLoss()
print("criterion = %s" % str(criterion))
utils.auto_load_model(
args=args, model=model, model_without_ddp=model_without_ddp,
optimizer=optimizer, loss_scaler=loss_scaler, model_ema=model_ema)
if args.eval:
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
exit(0)
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
max_accuracy = 0.0
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
if log_writer is not None:
log_writer.set_step(epoch * num_training_steps_per_epoch * args.update_freq)
train_stats = train_one_epoch(
model, criterion, data_loader_train, optimizer,
device, epoch, loss_scaler, args.clip_grad, model_ema, mixup_fn,
log_writer=log_writer, start_steps=epoch * num_training_steps_per_epoch,
lr_schedule_values=lr_schedule_values, wd_schedule_values=wd_schedule_values,
num_training_steps_per_epoch=num_training_steps_per_epoch, update_freq=args.update_freq,
masked_position_generator=masked_position_generator,
)
if args.output_dir and args.save_ckpt:
if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=epoch, model_ema=model_ema)
if data_loader_val is not None:
test_stats = evaluate(data_loader_val, model, device)
print(f"Accuracy of the network on the {len(dataset_val)} test images: {test_stats['acc1']:.1f}%")
if max_accuracy < test_stats["acc1"]:
max_accuracy = test_stats["acc1"]
if args.output_dir and args.save_ckpt:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch="best", model_ema=model_ema)
print(f'Max accuracy: {max_accuracy:.2f}%')
if log_writer is not None:
log_writer.update(test_acc1=test_stats['acc1'], head="perf", step=epoch)
log_writer.update(test_acc5=test_stats['acc5'], head="perf", step=epoch)
log_writer.update(test_loss=test_stats['loss'], head="perf", step=epoch)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
else:
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
# **{f'test_{k}': v for k, v in test_stats.items()},
'epoch': epoch,
'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
opts, ds_init = get_args()
if opts.output_dir:
Path(opts.output_dir).mkdir(parents=True, exist_ok=True)
main(opts, ds_init)
| data2vec_vision-main | beit/run_class_finetuning.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Modified on torchvision code bases
# https://github.com/pytorch/vision
# --------------------------------------------------------'
# Copyright (c) Meta Platforms, Inc. and affiliates
from torchvision.datasets.vision import VisionDataset
from PIL import Image
import os
import os.path
import random
from typing import Any, Callable, cast, Dict, List, Optional, Tuple
def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool:
"""Checks if a file is an allowed extension.
Args:
filename (string): path to a file
extensions (tuple of strings): extensions to consider (lowercase)
Returns:
bool: True if the filename ends with one of given extensions
"""
return filename.lower().endswith(extensions)
def is_image_file(filename: str) -> bool:
"""Checks if a file is an allowed image extension.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
return has_file_allowed_extension(filename, IMG_EXTENSIONS)
def make_dataset(
directory: str,
class_to_idx: Dict[str, int],
extensions: Optional[Tuple[str, ...]] = None,
is_valid_file: Optional[Callable[[str], bool]] = None,
) -> List[Tuple[str, int]]:
instances = []
directory = os.path.expanduser(directory)
both_none = extensions is None and is_valid_file is None
both_something = extensions is not None and is_valid_file is not None
if both_none or both_something:
raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
if extensions is not None:
def is_valid_file(x: str) -> bool:
return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
is_valid_file = cast(Callable[[str], bool], is_valid_file)
for target_class in sorted(class_to_idx.keys()):
class_index = class_to_idx[target_class]
target_dir = os.path.join(directory, target_class)
if not os.path.isdir(target_dir):
continue
for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
for fname in sorted(fnames):
path = os.path.join(root, fname)
if is_valid_file(path):
item = path, class_index
instances.append(item)
return instances
class DatasetFolder(VisionDataset):
"""A generic data loader where the samples are arranged in this way: ::
root/class_x/xxx.ext
root/class_x/xxy.ext
root/class_x/xxz.ext
root/class_y/123.ext
root/class_y/nsdf3.ext
root/class_y/asd932_.ext
Args:
root (string): Root directory path.
loader (callable): A function to load a sample given its path.
extensions (tuple[string]): A list of allowed extensions.
both extensions and is_valid_file should not be passed.
transform (callable, optional): A function/transform that takes in
a sample and returns a transformed version.
E.g, ``transforms.RandomCrop`` for images.
target_transform (callable, optional): A function/transform that takes
in the target and transforms it.
is_valid_file (callable, optional): A function that takes path of a file
and check if the file is a valid file (used to check of corrupt files)
both extensions and is_valid_file should not be passed.
Attributes:
classes (list): List of the class names sorted alphabetically.
class_to_idx (dict): Dict with items (class_name, class_index).
samples (list): List of (sample path, class_index) tuples
targets (list): The class_index value for each image in the dataset
"""
def __init__(
self,
root: str,
loader: Callable[[str], Any],
extensions: Optional[Tuple[str, ...]] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
is_valid_file: Optional[Callable[[str], bool]] = None,
) -> None:
super(DatasetFolder, self).__init__(root, transform=transform,
target_transform=target_transform)
print("finding classes")
classes, class_to_idx = self._find_classes(self.root)
print("making dataset")
samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file)
if len(samples) == 0:
msg = "Found 0 files in subfolders of: {}\n".format(self.root)
if extensions is not None:
msg += "Supported extensions are: {}".format(",".join(extensions))
raise RuntimeError(msg)
self.loader = loader
self.extensions = extensions
self.classes = classes
self.class_to_idx = class_to_idx
self.samples = samples
self.targets = [s[1] for s in samples]
print("done initializing dataset folder")
def _find_classes(self, dir: str) -> Tuple[List[str], Dict[str, int]]:
"""
Finds the class folders in a dataset.
Args:
dir (string): Root directory path.
Returns:
tuple: (classes, class_to_idx) where classes are relative to (dir), and class_to_idx is a dictionary.
Ensures:
No class is a subdirectory of another.
"""
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
classes.sort()
class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
return classes, class_to_idx
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
while True:
try:
path, target = self.samples[index]
sample = self.loader(path)
break
except Exception as e:
print(e)
index = random.randint(0, len(self.samples) - 1)
if self.transform is not None:
sample = self.transform(sample)
if self.target_transform is not None:
target = self.target_transform(target)
return sample, target
def __len__(self) -> int:
return len(self.samples)
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')
def pil_loader(path: str) -> Image.Image:
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
# TODO: specify the return type
def accimage_loader(path: str) -> Any:
import accimage
try:
return accimage.Image(path)
except IOError:
# Potentially a decoding problem, fall back to PIL.Image
return pil_loader(path)
def default_loader(path: str) -> Any:
from torchvision import get_image_backend
from shutil import copyfile
import os
sp = path.split('/')
name = sp[-1]
base = '/'.join(sp[:-1])
image_cache_str = "image_cache6"
# if os.path.exists('/scratch/'+image_cache_str+'/') and not os.access('/scratch/'+image_cache_str+'/', os.R_OK):
# image_cache_str = "image_cache3"
    if not os.path.isdir('/scratch/' + image_cache_str + '/' + base):
        os.makedirs('/scratch/' + image_cache_str + '/' + base, exist_ok=True)
    if not os.path.exists('/scratch/' + image_cache_str + '/' + path):
        copyfile(path, '/scratch/' + image_cache_str + '/' + path)
    path = '/scratch/' + image_cache_str + '/' + path
#print('name', name)
#print('base', base)
#print('path', path)
if get_image_backend() == 'accimage':
return accimage_loader(path)
else:
return pil_loader(path)
class ImageFolder(DatasetFolder):
"""A generic data loader where the images are arranged in this way: ::
root/dog/xxx.png
root/dog/xxy.png
root/dog/xxz.png
root/cat/123.png
root/cat/nsdf3.png
root/cat/asd932_.png
Args:
root (string): Root directory path.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
loader (callable, optional): A function to load an image given its path.
is_valid_file (callable, optional): A function that takes path of an Image file
and check if the file is a valid file (used to check of corrupt files)
Attributes:
classes (list): List of the class names sorted alphabetically.
class_to_idx (dict): Dict with items (class_name, class_index).
imgs (list): List of (image path, class_index) tuples
"""
def __init__(
self,
root: str,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
loader: Callable[[str], Any] = default_loader,
is_valid_file: Optional[Callable[[str], bool]] = None,
filter: Optional[str] = None
):
super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None,
transform=transform,
target_transform=target_transform,
is_valid_file=is_valid_file)
self.imgs = self.samples
| data2vec_vision-main | beit/dataset_folder.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
# Copyright (c) Meta Platforms, Inc. and affiliates
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import os
from pathlib import Path
from timm.models import create_model
from optim_factory import create_optimizer
from datasets import build_beit_pretraining_dataset
from engine_for_pretraining import train_one_epoch
from utils import NativeScalerWithGradNormCount as NativeScaler
import utils
import modeling_pretrain
def get_args():
parser = argparse.ArgumentParser('BEiT pre-training script', add_help=False)
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--save_ckpt_freq', default=10, type=int)
parser.add_argument("--discrete_vae_weight_path", type=str)
parser.add_argument("--discrete_vae_type", type=str, default="dall-e")
# Model parameters
parser.add_argument('--model', default='deit_base_patch16_224', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--rel_pos_bias', action='store_true')
parser.add_argument('--disable_rel_pos_bias', action='store_false', dest='rel_pos_bias')
parser.set_defaults(rel_pos_bias=True)
parser.add_argument('--abs_pos_emb', action='store_true')
parser.set_defaults(abs_pos_emb=False)
parser.add_argument('--layer_scale_init_value', default=0.1, type=float,
help="0.1 for base, 1e-5 for large. set 0 to disable layer scale")
parser.add_argument('--num_mask_patches', default=75, type=int,
                        help='number of visual tokens/patches that need to be masked')
parser.add_argument('--max_mask_patches_per_block', type=int, default=None)
parser.add_argument('--min_mask_patches_per_block', type=int, default=16)
parser.add_argument('--input_size', default=224, type=int,
help='images input size for backbone')
parser.add_argument('--second_input_size', default=112, type=int,
help='images input size for discrete vae')
parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: None, use opt default)')
parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the
weight decay. We use a cosine schedule for WD.
(Set the same value with args.weight_decay to keep weight decay no change)""")
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min_lr', type=float, default=1e-5, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-5)')
parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
                        help='num of steps to warmup LR, will override warmup_epochs if set > 0')
# Augmentation parameters
parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--train_interpolation', type=str, default='bicubic',
help='Training interpolation (random, bilinear, bicubic default: "bicubic")')
parser.add_argument('--second_interpolation', type=str, default='lanczos',
help='Interpolation for discrete vae (random, bilinear, bicubic default: "lanczos")')
# Dataset parameters
parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--imagenet_default_mean_and_std', default=False, action='store_true')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default=None,
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--auto_resume', action='store_true')
parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume')
parser.set_defaults(auto_resume=True)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem',
help='')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--aug_level', default=-100, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
return parser.parse_args()
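# Example launch (illustrative paths only; pick a --model registered in modeling_pretrain):
#   python run_beit_pretraining.py \
#       --data_path /path/to/imagenet \
#       --discrete_vae_weight_path /path/to/dalle_tokenizer_weights \
#       --output_dir ./beit_pretrain_out --log_dir ./beit_pretrain_out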
def get_model(args):
print(f"Creating model: {args.model}")
model = create_model(
args.model,
pretrained=False,
drop_path_rate=args.drop_path,
drop_block_rate=None,
use_shared_rel_pos_bias=args.rel_pos_bias,
use_abs_pos_emb=args.abs_pos_emb,
init_values=args.layer_scale_init_value,
)
return model
def main(args):
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
# random.seed(seed)
cudnn.benchmark = True
model = get_model(args)
patch_size = model.patch_embed.patch_size
print("Patch size = %s" % str(patch_size))
args.window_size = (args.input_size // patch_size[0], args.input_size // patch_size[1])
args.patch_size = patch_size
# get dataset
dataset_train = build_beit_pretraining_dataset(args)
# prepare discrete vae
d_vae = utils.create_d_vae(
weight_path=args.discrete_vae_weight_path, d_vae_type=args.discrete_vae_type,
device=device, image_size=args.second_input_size)
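    # The discrete VAE is the BEiT visual tokenizer: it maps the second (lower-resolution) view of each image to a
    # grid of visual-token ids, which serve as the prediction targets for the masked patches.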
if True: # args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_rank = global_rank
num_training_steps_per_epoch = len(dataset_train) // args.batch_size // num_tasks
print("pre-sampler", num_tasks, global_rank, sampler_rank)
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=sampler_rank, shuffle=True
)
print("Sampler_train = %s" % str(sampler_train))
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train, sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
model.to(device)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print('number of params:', n_parameters)
total_batch_size = args.batch_size * utils.get_world_size()
print("LR = %.8f" % args.lr)
print("Batch size = %d" % total_batch_size)
print("Number of training steps = %d" % num_training_steps_per_epoch)
print("Number of training examples per epoch = %d" % (total_batch_size * num_training_steps_per_epoch))
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu], find_unused_parameters=True)
model_without_ddp = model.module
optimizer = create_optimizer(
args, model_without_ddp)
loss_scaler = NativeScaler()
print("Use step level LR & WD scheduler!")
lr_schedule_values = utils.cosine_scheduler(
args.lr, args.min_lr, args.epochs, num_training_steps_per_epoch,
warmup_epochs=args.warmup_epochs, warmup_steps=args.warmup_steps,
)
if args.weight_decay_end is None:
args.weight_decay_end = args.weight_decay
wd_schedule_values = utils.cosine_scheduler(
args.weight_decay, args.weight_decay_end, args.epochs, num_training_steps_per_epoch)
print("Max WD = %.7f, Min WD = %.7f" % (max(wd_schedule_values), min(wd_schedule_values)))
utils.auto_load_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer, loss_scaler=loss_scaler)
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
if log_writer is not None:
log_writer.set_step(epoch * num_training_steps_per_epoch)
train_stats = train_one_epoch(
model, d_vae, data_loader_train,
optimizer, device, epoch, loss_scaler,
args.clip_grad, log_writer=log_writer,
start_steps=epoch * num_training_steps_per_epoch,
lr_schedule_values=lr_schedule_values,
wd_schedule_values=wd_schedule_values,
)
if args.output_dir:
if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
utils.save_model(
args=args, model=model, model_without_ddp=model_without_ddp, optimizer=optimizer,
loss_scaler=loss_scaler, epoch=epoch)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
'epoch': epoch, 'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8") as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == '__main__':
opts = get_args()
if opts.output_dir:
Path(opts.output_dir).mkdir(parents=True, exist_ok=True)
main(opts)
| data2vec_vision-main | beit/run_beit_pretraining.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
# Copyright (c) Meta Platforms, Inc. and affiliates
import io
import os
import math
import time
import json
from collections import defaultdict, deque
import datetime
import numpy as np
from timm.utils import get_state_dict
from pathlib import Path
import torch
import torch.distributed as dist
from torch._six import inf
from modeling_discrete_vae import Dalle_VAE, DiscreteVAE
from tensorboardX import SummaryWriter
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
log_msg = [
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
]
if torch.cuda.is_available():
log_msg.append('max mem: {memory:.0f}')
log_msg = self.delimiter.join(log_msg)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
class TensorboardLogger(object):
def __init__(self, log_dir):
self.writer = SummaryWriter(logdir=log_dir)
self.step = 0
def set_step(self, step=None):
if step is not None:
self.step = step
else:
self.step += 1
def update(self, head='scalar', step=None, **kwargs):
for k, v in kwargs.items():
if v is None:
continue
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.writer.add_scalar(head + "/" + k, v, self.step if step is None else step)
def flush(self):
self.writer.flush()
def _load_checkpoint_for_ema(model_ema, checkpoint):
"""
Workaround for ModelEma._load_checkpoint to accept an already-loaded object
"""
if hasattr(model_ema, "module"):
model_ema.module.load_state_dict(checkpoint['model_ema'])
else:
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if args.dist_on_itp:
args.rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
# ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ and 'SLURM_NODEID' not in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ and 'SLURM_NODEID' in os.environ:
# args.rank = int(os.environ["RANK"])
# print(os.environ)
gpus_per_node = torch.cuda.device_count()
node_id = int(os.environ.get("SLURM_NODEID"))
args.rank = int(os.environ["RANK"]) + node_id * gpus_per_node
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
os.environ['RANK'] = os.environ['SLURM_PROCID']
args.rank = int(os.environ['SLURM_PROCID'])
os.environ['LOCAL_RANK'] = str(args.rank % torch.cuda.device_count())
args.gpu = args.rank % torch.cuda.device_count()
print("utils.py SLURM_PROCID in os.environ")
print("args.rank "+str(args.rank))
print("args.gpu "+str(args.gpu))
print("args.world_size "+str(args.world_size))
print("SLURM_NTASKS "+str(os.environ['SLURM_NTASKS']))
assert int(args.world_size) == int(os.environ['SLURM_NTASKS'])
os.environ['WORLD_SIZE'] = str(args.world_size)
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}, gpu {}, world_size {}'.format(
args.rank, args.dist_url, args.gpu, args.world_size), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# torch.distributed.init_process_group(backend=args.dist_backend, init_method='env://')
setup_for_distributed(args.rank == 0)
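# The branches above cover several launchers: OMPI_COMM_WORLD_* variables are
# set by Open MPI's mpirun (the dist_on_itp path), RANK / WORLD_SIZE /
# LOCAL_RANK are exported by torchrun-style launchers (the env:// path), and
# the SLURM branches derive the global rank and local GPU index from
# SLURM_NODEID / SLURM_PROCID when running under SLURM.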
def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"):
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix=prefix)
warn_missing_keys = []
ignore_missing_keys = []
for key in missing_keys:
keep_flag = True
for ignore_key in ignore_missing.split('|'):
if ignore_key in key:
keep_flag = False
break
if keep_flag:
warn_missing_keys.append(key)
else:
ignore_missing_keys.append(key)
missing_keys = warn_missing_keys
if len(missing_keys) > 0:
print("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
print("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(ignore_missing_keys) > 0:
print("Ignored weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, ignore_missing_keys))
if len(error_msgs) > 0:
print('\n'.join(error_msgs))
class NativeScalerWithGradNormCount:
state_dict_key = "amp_scaler"
def __init__(self):
self._scaler = torch.cuda.amp.GradScaler()
def __call__(self, loss, optimizer, clip_grad=None, parameters=None, create_graph=False, update_grad=True):
self._scaler.scale(loss).backward(create_graph=create_graph)
if update_grad:
if clip_grad is not None:
assert parameters is not None
self._scaler.unscale_(optimizer) # unscale the gradients of optimizer's assigned params in-place
norm = torch.nn.utils.clip_grad_norm_(parameters, clip_grad)
else:
self._scaler.unscale_(optimizer)
norm = get_grad_norm_(parameters)
self._scaler.step(optimizer)
self._scaler.update()
else:
norm = None
return norm
def state_dict(self):
return self._scaler.state_dict()
def load_state_dict(self, state_dict):
self._scaler.load_state_dict(state_dict)
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p.grad is not None]
norm_type = float(norm_type)
if len(parameters) == 0:
return torch.tensor(0.)
device = parameters[0].grad.device
if norm_type == inf:
total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
else:
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
return total_norm
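# Minimal usage sketch (illustrative only; `model`, `optimizer`, `criterion`
# and the batch tensors are placeholders, not objects defined in this file).
# It shows how NativeScalerWithGradNormCount bundles backward, unscaling,
# optional clipping and the optimizer step, returning the gradient norm.
def _example_amp_step(model, optimizer, criterion, images, targets, clip_grad=3.0):
    loss_scaler = NativeScalerWithGradNormCount()
    optimizer.zero_grad()
    with torch.cuda.amp.autocast():
        loss = criterion(model(images), targets)
    grad_norm = loss_scaler(loss, optimizer, clip_grad=clip_grad,
                            parameters=model.parameters(), update_grad=True)
    return grad_norm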
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0,
start_warmup_value=0, warmup_steps=-1):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_steps > 0:
warmup_iters = warmup_steps
print("Set warmup steps = %d" % warmup_iters)
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = np.array(
[final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / (len(iters)))) for i in iters])
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
return schedule
def tri_phase_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_perc=0.05, decay_perc=0.05,
start_warmup_value=0):
assert warmup_perc + decay_perc <= 1
total_updates = int(epochs * niter_per_ep)
warmup_iters = int(warmup_perc * total_updates)
decay_iters = int(decay_perc * total_updates)
hold_iters = total_updates - warmup_iters - decay_iters
print("Set warmup steps = %d" % warmup_iters)
if warmup_iters > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
else:
warmup_schedule = np.array([])
if hold_iters > 0:
hold_schedule = np.full(hold_iters, base_value)
else:
hold_schedule = np.array([])
if decay_iters > 0:
decay_schedule = np.linspace(base_value, final_value, decay_iters)
else:
decay_schedule = np.array([])
schedule = np.concatenate((warmup_schedule, hold_schedule, decay_schedule))
assert len(schedule) == epochs * niter_per_ep, \
f"e: {epochs}, it: {niter_per_ep}, tot: {epochs*niter_per_ep}, " \
f"w: {warmup_iters}, h: {hold_iters}, d: {decay_iters}, len: {len(schedule)}"
return schedule
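# Worked example (illustrative): for 10 epochs at 100 iterations per epoch with
# a 1-epoch warmup, cosine_scheduler returns a per-iteration array of length
# 1000 whose first 100 entries ramp linearly from 0 to base_value and whose
# remaining 900 entries decay along a cosine towards final_value;
# tri_phase_scheduler instead ramps for 5% of the run, holds base_value for
# 80%, then decays linearly over the final 15%.
def _example_schedules():
    lr_cosine = cosine_scheduler(base_value=5e-4, final_value=1e-5,
                                 epochs=10, niter_per_ep=100, warmup_epochs=1)
    lr_tri = tri_phase_scheduler(base_value=5e-4, final_value=1e-5,
                                 epochs=10, niter_per_ep=100,
                                 warmup_perc=0.05, decay_perc=0.15)
    assert len(lr_cosine) == len(lr_tri) == 10 * 100
    return lr_cosine, lr_tri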
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
epoch_name = str(epoch)
if loss_scaler is not None:
checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)]
for checkpoint_path in checkpoint_paths:
to_save = {
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
'scaler': loss_scaler.state_dict(),
'args': args,
}
if model_ema is not None:
to_save['model_ema'] = get_state_dict(model_ema)
save_on_master(to_save, checkpoint_path)
else:
client_state = {'epoch': epoch}
if model_ema is not None:
client_state['model_ema'] = get_state_dict(model_ema)
model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state)
def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
if loss_scaler is not None:
# torch.amp
if args.auto_resume and len(args.resume) == 0:
import glob
all_checkpoints = glob.glob(os.path.join(glob.escape(output_dir), 'checkpoint-*.pth'))
latest_ckpt = -1
for ckpt in all_checkpoints:
t = ckpt.split('-')[-1].split('.')[0]
if t.isdigit():
latest_ckpt = max(int(t), latest_ckpt)
print(output_dir, latest_ckpt, all_checkpoints)
if latest_ckpt >= 0:
args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt)
print("Auto resume checkpoint: %s" % args.resume)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
print("Resume checkpoint %s" % args.resume)
if 'optimizer' in checkpoint and 'epoch' in checkpoint and not getattr(args, "reset_resume", False): # and len(getattr(args, "seed_model", '') or []) == 0:
optimizer.load_state_dict(checkpoint['optimizer'])
args.start_epoch = checkpoint['epoch'] + 1
if hasattr(args, 'model_ema') and args.model_ema:
_load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
print("With optim & sched!")
else:
        # deepspeed checkpoints only support '--auto_resume'.
if args.auto_resume:
import glob
all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*'))
latest_ckpt = -1
for ckpt in all_checkpoints:
t = ckpt.split('-')[-1].split('.')[0]
if t.isdigit():
latest_ckpt = max(int(t), latest_ckpt)
if latest_ckpt >= 0:
args.resume = os.path.join(output_dir, 'checkpoint-%d' % latest_ckpt)
print("Auto resume checkpoint: %d" % latest_ckpt)
_, client_states = model.load_checkpoint(args.output_dir, tag='checkpoint-%d' % latest_ckpt)
args.start_epoch = client_states['epoch'] + 1
if model_ema is not None:
if args.model_ema:
_load_checkpoint_for_ema(model_ema, client_states['model_ema'])
def create_d_vae(weight_path, d_vae_type, image_size, device):
if d_vae_type == "dall-e":
return get_dalle_vae(weight_path, image_size, device)
elif d_vae_type == "customized":
return get_d_vae(weight_path, image_size, device)
else:
raise NotImplementedError()
def get_dalle_vae(weight_path, image_size, device):
vae = Dalle_VAE(image_size)
vae.load_model(model_dir=weight_path, device=device)
return vae
def get_d_vae(weight_path, image_size, device):
NUM_TOKENS = 8192
NUM_LAYERS = 3
EMB_DIM = 512
HID_DIM = 256
state_dict = torch.load(os.path.join(weight_path, "pytorch_model.bin"), map_location="cpu")["weights"]
model = DiscreteVAE(
image_size=image_size,
num_layers=NUM_LAYERS,
num_tokens=NUM_TOKENS,
codebook_dim=EMB_DIM,
hidden_dim=HID_DIM,
).to(device)
model.load_state_dict(state_dict)
return model
def create_ds_config(args):
args.deepspeed_config = os.path.join(args.output_dir, "deepspeed_config.json")
with open(args.deepspeed_config, mode="w") as writer:
ds_config = {
"train_batch_size": args.batch_size * args.update_freq * get_world_size(),
"train_micro_batch_size_per_gpu": args.batch_size,
"steps_per_print": 1000,
"optimizer": {
"type": "Adam",
"adam_w_mode": True,
"params": {
"lr": args.lr,
"weight_decay": args.weight_decay,
"bias_correction": True,
"betas": [
0.9,
0.999
],
"eps": 1e-8
}
},
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": 7,
"loss_scale_window": 128
}
}
writer.write(json.dumps(ds_config, indent=2))
| data2vec_vision-main | beit/utils.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, DINO and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
# Copyright (c) Meta Platforms, Inc. and affiliates
import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import os
from pathlib import Path
from timm.models import create_model
from timm.utils import ModelEmaV2
from optim_factory import create_optimizer
from datasets import build_beit_pretraining_dataset
from engine_for_cyclical_joint import train_one_epoch
from utils import NativeScalerWithGradNormCount as NativeScaler
import utils
from scipy import interpolate
import modeling_cyclical_joint
def get_args():
parser = argparse.ArgumentParser("BEiT pre-training script", add_help=False)
parser.add_argument("--batch_size", default=64, type=int)
parser.add_argument("--epochs", default=300, type=int)
parser.add_argument("--save_ckpt_freq", default=10, type=int)
# Model parameters
parser.add_argument(
"--model",
default="deit_base_patch16_224",
type=str,
metavar="MODEL",
help="Name of model to train",
)
parser.add_argument("--rel_pos_bias", action="store_true")
parser.add_argument(
"--disable_rel_pos_bias", action="store_false", dest="rel_pos_bias"
)
parser.set_defaults(rel_pos_bias=True)
parser.add_argument("--abs_pos_emb", action="store_true")
parser.set_defaults(abs_pos_emb=False)
parser.add_argument(
"--layer_scale_init_value",
default=0.1,
type=float,
help="0.1 for base, 1e-5 for large. set 0 to disable layer scale",
)
parser.add_argument(
"--num_mask_patches",
default=75,
type=int,
help="number of the visual tokens/patches need be masked",
)
parser.add_argument("--max_mask_patches_per_block", type=int, default=None)
parser.add_argument("--min_mask_patches_per_block", type=int, default=16)
parser.add_argument(
"--input_size", default=224, type=int, help="images input size for backbone"
)
# added for vae
parser.add_argument('--second_input_size', default=112, type=int,
help='images input size for discrete vae')
parser.add_argument('--second_interpolation', type=str, default='lanczos',
                        help='Interpolation for discrete vae (random, bilinear, bicubic; default: "lanczos")')
parser.add_argument("--discrete_vae_weight_path", type=str)
parser.add_argument("--discrete_vae_type", type=str, default="dall-e")
parser.add_argument("--vae_loss_weight", default=1., type=float)
parser.add_argument(
"--drop_path",
type=float,
default=0.1,
metavar="PCT",
help="Drop path rate (default: 0.1)",
)
# Optimizer parameters
parser.add_argument(
"--opt",
default="adamw",
type=str,
metavar="OPTIMIZER",
        help='Optimizer (default: "adamw")',
)
parser.add_argument(
"--opt_eps",
default=1e-8,
type=float,
metavar="EPSILON",
help="Optimizer Epsilon (default: 1e-8)",
)
parser.add_argument(
"--opt_betas",
default=None,
type=float,
nargs="+",
metavar="BETA",
help="Optimizer Betas (default: None, use opt default)",
)
parser.add_argument(
"--clip_grad",
type=float,
default=None,
metavar="NORM",
help="Clip gradient norm (default: None, no clipping)",
)
parser.add_argument(
"--momentum",
type=float,
default=0.9,
metavar="M",
help="SGD momentum (default: 0.9)",
)
parser.add_argument(
"--weight_decay", type=float, default=0.05, help="weight decay (default: 0.05)"
)
parser.add_argument(
"--weight_decay_end",
type=float,
default=None,
help="""Final value of the
weight decay. We use a cosine schedule for WD.
(Set the same value with args.weight_decay to keep weight decay no change)""",
)
parser.add_argument(
"--lr",
type=float,
default=5e-4,
metavar="LR",
help="learning rate (default: 5e-4)",
)
parser.add_argument(
"--warmup_lr",
type=float,
default=1e-6,
metavar="LR",
help="warmup learning rate (default: 1e-6)",
)
parser.add_argument(
"--min_lr",
type=float,
default=1e-5,
metavar="LR",
help="lower lr bound for cyclic schedulers that hit 0 (1e-5)",
)
parser.add_argument(
"--tri_phase_schedule",
type=str,
default=None,
help="string containing a tuple with phase ratios for warmup and decay. e.g. '(0.05,0.15) means 5% warmup, 80% hold, 15% decay",
)
parser.add_argument(
"--warmup_epochs",
type=int,
default=5,
metavar="N",
help="epochs to warmup LR, if scheduler supports",
)
parser.add_argument(
"--warmup_steps",
type=int,
default=-1,
metavar="N",
help="epochs to warmup LR, if scheduler supports",
)
# Augmentation parameters
parser.add_argument(
"--color_jitter",
type=float,
default=0.4,
metavar="PCT",
help="Color jitter factor (default: 0.4)",
)
parser.add_argument(
"--train_interpolation",
type=str,
default="bicubic",
help='Training interpolation (random, bilinear, bicubic default: "bicubic")',
)
parser.add_argument(
"--target_layers", type=str, default="[]", help="target layers (python list)"
)
# Dataset parameters
parser.add_argument(
"--data_path",
default="/datasets01/imagenet_full_size/061417/",
type=str,
help="dataset path",
)
parser.add_argument(
"--imagenet_default_mean_and_std", default=False, action="store_true"
)
parser.add_argument(
"--output_dir", default="", help="path where to save, empty for no saving"
)
parser.add_argument("--log_dir", default=None, help="path where to tensorboard log")
parser.add_argument(
"--device", default="cuda", help="device to use for training / testing"
)
parser.add_argument("--seed", default=0, type=int)
parser.add_argument("--resume", default="", help="resume from checkpoint")
parser.add_argument("--auto_resume", action="store_true")
parser.add_argument("--no_auto_resume", action="store_false", dest="auto_resume")
parser.set_defaults(auto_resume=True)
parser.add_argument("--ema_decay", default=0.9998, type=float)
parser.add_argument("--ema_start_at", default=25000, type=int)
parser.add_argument(
"--start_epoch", default=0, type=int, metavar="N", help="start epoch"
)
parser.add_argument("--num_workers", default=10, type=int)
parser.add_argument(
"--pin_mem",
action="store_true",
help="Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.",
)
parser.add_argument("--no_pin_mem", action="store_false", dest="pin_mem", help="")
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument(
"--world_size", default=1, type=int, help="number of distributed processes"
)
parser.add_argument("--local_rank", default=-1, type=int)
parser.add_argument("--dist_on_itp", action="store_true")
parser.add_argument(
"--dist_url", default="env://", help="url used to set up distributed training"
)
parser.add_argument("--seed_model", default=None, type=str, help="seed model")
parser.add_argument("--model_key", default="model|module", type=str)
parser.add_argument("--model_prefix", default="", type=str)
parser.add_argument("--l2_loss", default=False, action="store_true")
parser.add_argument("--l1_beta", default=0.12, type=float)
return parser.parse_args()
def get_model(args):
print(f"Creating model: {args.model}")
model = create_model(
args.model,
pretrained=False,
drop_path_rate=args.drop_path,
drop_block_rate=None,
use_shared_rel_pos_bias=args.rel_pos_bias,
use_abs_pos_emb=args.abs_pos_emb,
init_values=args.layer_scale_init_value,
)
return model
def main(args):
utils.init_distributed_mode(args)
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
# random.seed(seed)
cudnn.benchmark = True
model = get_model(args)
patch_size = model.patch_embed.patch_size
print("Patch size = %s" % str(patch_size))
args.window_size = (
args.input_size // patch_size[0],
args.input_size // patch_size[1],
)
args.patch_size = patch_size
if args.seed_model:
checkpoint = torch.load(args.seed_model, map_location="cpu")
print("Load ckpt from %s" % args.seed_model)
checkpoint_model = None
for model_key in args.model_key.split("|"):
if model_key in checkpoint:
checkpoint_model = checkpoint[model_key]
print("Load state_dict by model_key = %s" % model_key)
break
if checkpoint_model is None:
checkpoint_model = checkpoint
state_dict = model.state_dict()
for k in ["head.weight", "head.bias"]:
if (
k in checkpoint_model
and checkpoint_model[k].shape != state_dict[k].shape
):
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
all_keys = list(checkpoint_model.keys())
for key in all_keys:
if "relative_position_index" in key:
checkpoint_model.pop(key)
if "relative_position_bias_table" in key:
rel_pos_bias = checkpoint_model[key]
src_num_pos, num_attn_heads = rel_pos_bias.size()
dst_num_pos, _ = model.state_dict()[key].size()
dst_patch_shape = model.patch_embed.patch_shape
if dst_patch_shape[0] != dst_patch_shape[1]:
raise NotImplementedError()
num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (
dst_patch_shape[1] * 2 - 1
)
src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)
if src_size != dst_size:
print(
"Position interpolate for %s from %dx%d to %dx%d"
% (key, src_size, src_size, dst_size, dst_size)
)
extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
def geometric_progression(a, r, n):
return a * (1.0 - r ** n) / (1.0 - r)
left, right = 1.01, 1.5
while right - left > 1e-6:
q = (left + right) / 2.0
gp = geometric_progression(1, q, src_size // 2)
if gp > dst_size // 2:
right = q
else:
left = q
# if q > 1.090307:
# q = 1.090307
dis = []
cur = 1
for i in range(src_size // 2):
dis.append(cur)
cur += q ** (i + 1)
r_ids = [-_ for _ in reversed(dis)]
x = r_ids + [0] + dis
y = r_ids + [0] + dis
t = dst_size // 2.0
dx = np.arange(-t, t + 0.1, 1.0)
dy = np.arange(-t, t + 0.1, 1.0)
print("Original positions = %s" % str(x))
print("Target positions = %s" % str(dx))
all_rel_pos_bias = []
for i in range(num_attn_heads):
z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
f = interpolate.interp2d(x, y, z, kind="cubic")
all_rel_pos_bias.append(
torch.Tensor(f(dx, dy))
.contiguous()
.view(-1, 1)
.to(rel_pos_bias.device)
)
rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)
new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
checkpoint_model[key] = new_rel_pos_bias
# interpolate position embedding
if "pos_embed" in checkpoint_model:
pos_embed_checkpoint = checkpoint_model["pos_embed"]
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print(
"Position interpolate from %dx%d to %dx%d"
% (orig_size, orig_size, new_size, new_size)
)
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(
-1, orig_size, orig_size, embedding_size
).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens,
size=(new_size, new_size),
mode="bicubic",
align_corners=False,
)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
checkpoint_model["pos_embed"] = new_pos_embed
utils.load_state_dict(model, checkpoint_model, prefix=args.model_prefix)
# get dataset
dataset_train = build_beit_pretraining_dataset(args)
# prepare discrete vae
d_vae = utils.create_d_vae(
weight_path=args.discrete_vae_weight_path, d_vae_type=args.discrete_vae_type,
device=device, image_size=args.second_input_size)
if True: # args.distributed:
num_tasks = utils.get_world_size()
global_rank = utils.get_rank()
sampler_rank = global_rank
num_training_steps_per_epoch = (
len(dataset_train) // args.batch_size // num_tasks
)
print("pre-sampler", num_tasks, global_rank, sampler_rank)
sampler_train = torch.utils.data.DistributedSampler(
dataset_train, num_replicas=num_tasks, rank=sampler_rank, shuffle=True
)
print("Sampler_train = %s" % str(sampler_train))
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
if global_rank == 0 and args.log_dir is not None:
os.makedirs(args.log_dir, exist_ok=True)
log_writer = utils.TensorboardLogger(log_dir=args.log_dir)
else:
log_writer = None
data_loader_train = torch.utils.data.DataLoader(
dataset_train,
sampler=sampler_train,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=args.pin_mem,
drop_last=True,
)
model.to(device)
model_without_ddp = model
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Model = %s" % str(model_without_ddp))
print("number of params:", n_parameters)
model_ema = ModelEmaV2(model, decay=args.ema_decay)
print("Using EMA with decay = %.8f" % args.ema_decay)
total_batch_size = args.batch_size * utils.get_world_size()
print("LR = %.8f" % args.lr)
print("Batch size = %d" % total_batch_size)
print("Number of training steps = %d" % num_training_steps_per_epoch)
print(
"Number of training examples per epoch = %d"
% (total_batch_size * num_training_steps_per_epoch)
)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.gpu], find_unused_parameters=True
)
model_without_ddp = model.module
optimizer = create_optimizer(args, model_without_ddp)
loss_scaler = NativeScaler()
if args.tri_phase_schedule is not None:
from ast import literal_eval
warmup_phase, decay_phase = literal_eval(args.tri_phase_schedule)
print("Use tri phase lr schedule!", warmup_phase, decay_phase)
lr_schedule_values = utils.tri_phase_scheduler(
args.lr,
args.min_lr,
args.epochs,
num_training_steps_per_epoch,
warmup_perc=warmup_phase,
decay_perc=decay_phase,
)
else:
print("Use step level LR & WD scheduler!")
lr_schedule_values = utils.cosine_scheduler(
args.lr,
args.min_lr,
args.epochs,
num_training_steps_per_epoch,
warmup_epochs=args.warmup_epochs,
warmup_steps=args.warmup_steps,
)
if args.weight_decay_end is None:
args.weight_decay_end = args.weight_decay
wd_schedule_values = utils.cosine_scheduler(
args.weight_decay,
args.weight_decay_end,
args.epochs,
num_training_steps_per_epoch,
)
print(
"Max WD = %.7f, Min WD = %.7f"
% (max(wd_schedule_values), min(wd_schedule_values))
)
utils.auto_load_model(
args=args,
model=model,
model_without_ddp=model_without_ddp,
optimizer=optimizer,
loss_scaler=loss_scaler,
model_ema=model_ema,
)
from ast import literal_eval
target_layers = literal_eval(args.target_layers)
assert len(target_layers) > 0
print(f"target layers: {target_layers}")
print(f"Start training for {args.epochs} epochs")
start_time = time.time()
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
data_loader_train.sampler.set_epoch(epoch)
if log_writer is not None:
log_writer.set_step(epoch * num_training_steps_per_epoch)
train_stats = train_one_epoch(
model,
model_ema,
args.ema_start_at,
target_layers,
d_vae,
args.vae_loss_weight,
data_loader_train,
optimizer,
device,
epoch,
loss_scaler,
args.clip_grad,
l1_beta=args.l1_beta,
log_writer=log_writer,
start_steps=epoch * num_training_steps_per_epoch,
lr_schedule_values=lr_schedule_values,
wd_schedule_values=wd_schedule_values,
l2_loss=args.l2_loss
)
if args.output_dir:
if (epoch + 1) % args.save_ckpt_freq == 0 or epoch + 1 == args.epochs:
utils.save_model(
args=args,
model=model,
model_without_ddp=model_without_ddp,
optimizer=optimizer,
loss_scaler=loss_scaler,
epoch=epoch,
model_ema=model_ema,
)
log_stats = {
**{f"train_{k}": v for k, v in train_stats.items()},
"epoch": epoch,
"n_parameters": n_parameters,
}
if args.output_dir and utils.is_main_process():
if log_writer is not None:
log_writer.flush()
with open(
os.path.join(args.output_dir, "log.txt"), mode="a", encoding="utf-8"
) as f:
f.write(json.dumps(log_stats) + "\n")
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print("Training time {}".format(total_time_str))
if __name__ == "__main__":
opts = get_args()
if opts.output_dir:
Path(opts.output_dir).mkdir(parents=True, exist_ok=True)
main(opts)
| data2vec_vision-main | beit/run_cyclical_joint.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
# Copyright (c) Meta Platforms, Inc. and affiliates
import math
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
from timm.models.registry import register_model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic',
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
**kwargs
}
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self) -> str:
return 'p={}'.format(self.drop_prob)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
# x = self.drop(x)
        # commented out to match the original BERT implementation
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(
self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
proj_drop=0., window_size=None, attn_head_dim=None):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.v_bias = None
if window_size:
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
            # cls to token & token to cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
else:
self.window_size = None
self.relative_position_bias_table = None
self.relative_position_index = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, rel_pos_bias=None):
B, N, C = x.shape
qkv_bias = None
if self.q_bias is not None:
qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
# qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
if self.relative_position_bias_table is not None:
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if rel_pos_bias is not None:
attn = attn + rel_pos_bias
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
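# Shape sketch (illustrative helper, not referenced elsewhere in this file):
# with a 14x14 patch grid the windowed Attention above keeps the token count
# unchanged, i.e. (B, 1 + 14*14, C) in and out, and its relative position bias
# table holds (2*14 - 1) * (2*14 - 1) + 3 = 732 entries per head.
def _example_attention_shapes():
    attn = Attention(dim=768, num_heads=12, qkv_bias=True, window_size=(14, 14))
    x = torch.zeros(2, 1 + 14 * 14, 768)
    out = attn(x)
    assert out.shape == x.shape
    return out.shape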
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
window_size=None, attn_head_dim=None):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
        if init_values is not None and init_values > 0:
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
else:
self.gamma_1, self.gamma_2 = None, None
def forward(self, x, rel_pos_bias=None):
if self.gamma_1 is None:
x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
fc_feature = self.drop_path(self.mlp(self.norm2(x)))
x = x + fc_feature
else:
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
fc_feature = self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
x = x + fc_feature
return x, fc_feature
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x, **kwargs):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
assert H == self.img_size[0] and W == self.img_size[1], \
f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x).flatten(2).transpose(1, 2)
return x
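# Worked example (illustrative helper): a 224x224 image split into 16x16
# patches gives a 14x14 grid, i.e. 196 patch tokens, and the Conv2d projection
# maps (B, 3, 224, 224) to (B, 196, embed_dim).
def _example_patch_embed_shapes():
    pe = PatchEmbed(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
    tokens = pe(torch.zeros(1, 3, 224, 224))
    assert tokens.shape == (1, 196, 768)
    return tokens.shape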
class RelativePositionBias(nn.Module):
def __init__(self, window_size, num_heads):
super().__init__()
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
        # cls to token & token to cls & cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
# trunc_normal_(self.relative_position_bias_table, std=.02)
def forward(self):
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
class VisionTransformer(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None,
use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False,
use_mean_pooling=True, init_scale=0.001, linear_classifier=False, has_masking=False,
learn_layer_weights=False, layernorm_before_combine=False):
super().__init__()
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if has_masking:
self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_abs_pos_emb:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
else:
self.rel_pos_bias = None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.use_rel_pos_bias = use_rel_pos_bias
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None)
for i in range(depth)])
self.use_mean_pooling = use_mean_pooling
self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim)
self.fc_norm = norm_layer(embed_dim, elementwise_affine=not linear_classifier) if use_mean_pooling else None
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
if has_masking:
trunc_normal_(self.mask_token, std=.02)
trunc_normal_(self.head.weight, std=.02)
self.apply(self._init_weights)
self.fix_init_weight()
self.learn_layer_weights = learn_layer_weights
self.layernorm_before_combine = layernorm_before_combine
if learn_layer_weights:
self.layer_log_weights = nn.Parameter(torch.zeros(depth,))
self.head.weight.data.mul_(init_scale)
self.head.bias.data.mul_(init_scale)
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if m.weight is not None:
nn.init.constant_(m.weight, 1.0)
def get_num_layers(self):
return len(self.blocks)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=''):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x, bool_masked_pos=None):
x = self.patch_embed(x)
batch_size, seq_len, _ = x.size()
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
if bool_masked_pos is not None and self.training:
mask_token = self.mask_token.expand(batch_size, seq_len, -1)
# replace the masked visual tokens by mask_token
w = bool_masked_pos.view(bool_masked_pos.size(0), -1, 1).type_as(mask_token)
x = x * (1 - w) + mask_token * w
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
layer_xs = []
for blk in self.blocks:
x, _ = blk(x, rel_pos_bias=rel_pos_bias) # B x T x C
layer_xs.append(x)
if self.learn_layer_weights:
layer_xs = [
layer_x.mean(1) if self.use_mean_pooling else layer_x[:, 0]
for layer_x in layer_xs
]
layer_xs = [
F.layer_norm(layer_x.float(), layer_x.shape[-1:])
if self.layernorm_before_combine else layer_x
for layer_x in layer_xs
]
weights = self.layer_log_weights.softmax(-1)
return F.linear(torch.stack(layer_xs, -1), weights)
else:
x = self.norm(x)
if self.fc_norm is not None:
t = x[:, 1:, :]
return self.fc_norm(t.mean(1))
else:
return x[:, 0]
def forward(self, x, bool_masked_pos=None):
x = self.forward_features(x, bool_masked_pos)
x = self.head(x)
return x
@register_model
def beit_base_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def beit_base_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def beit_large_patch16_224(pretrained=False, **kwargs):
model = VisionTransformer(
patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def beit_large_patch16_384(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
@register_model
def beit_large_patch16_512(pretrained=False, **kwargs):
model = VisionTransformer(
img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True,
norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs)
model.default_cfg = _cfg()
return model
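# Usage sketch (illustrative): the registered constructors above can be called
# directly, or looked up by name through timm's create_model; the keyword
# arguments below mirror the defaults of the pre-training script in this repo
# (shared relative position bias, no absolute position embedding, layer scale
# 0.1, drop path 0.1) and are all plain VisionTransformer kwargs.
def _example_build_backbone():
    model = beit_base_patch16_224(
        pretrained=False,
        drop_path_rate=0.1,
        use_shared_rel_pos_bias=True,
        use_abs_pos_emb=False,
        init_values=0.1,
    )
    return model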
| data2vec_vision-main | beit/modeling_finetune.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# --------------------------------------------------------'
import torch
from torch import optim as optim
from timm.optim.adafactor import Adafactor
from timm.optim.adahessian import Adahessian
from timm.optim.adamp import AdamP
from timm.optim.lookahead import Lookahead
from timm.optim.nadam import Nadam
# from timm.optim.novograd import NovoGrad
from timm.optim.nvnovograd import NvNovoGrad
from timm.optim.radam import RAdam
from timm.optim.rmsprop_tf import RMSpropTF
from timm.optim.sgdp import SGDP
import json
try:
from apex.optimizers import FusedNovoGrad, FusedAdam, FusedLAMB, FusedSGD
has_apex = True
except ImportError:
has_apex = False
def get_num_layer_for_vit(var_name, num_max_layer):
if var_name in ("cls_token", "mask_token", "pos_embed"):
return 0
elif var_name.startswith("patch_embed"):
return 0
elif var_name.startswith("rel_pos_bias"):
return num_max_layer - 1
elif var_name.startswith("blocks"):
layer_id = int(var_name.split('.')[1])
return layer_id + 1
else:
return num_max_layer - 1
class LayerDecayValueAssigner(object):
def __init__(self, values):
self.values = values
def get_scale(self, layer_id):
return self.values[layer_id]
def get_layer_id(self, var_name):
return get_num_layer_for_vit(var_name, len(self.values))
def get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None):
parameter_group_names = {}
parameter_group_vars = {}
for name, param in model.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list:
group_name = "no_decay"
this_weight_decay = 0.
else:
group_name = "decay"
this_weight_decay = weight_decay
if get_num_layer is not None:
layer_id = get_num_layer(name)
group_name = "layer_%d_%s" % (layer_id, group_name)
else:
layer_id = None
if group_name not in parameter_group_names:
if get_layer_scale is not None:
scale = get_layer_scale(layer_id)
else:
scale = 1.
parameter_group_names[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"lr_scale": scale
}
parameter_group_vars[group_name]["params"].append(param)
parameter_group_names[group_name]["params"].append(name)
print("Param groups = %s" % json.dumps(parameter_group_names, indent=2))
return list(parameter_group_vars.values())
def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None):
opt_lower = args.opt.lower()
weight_decay = args.weight_decay
if weight_decay and filter_bias_and_bn:
skip = {}
if skip_list is not None:
skip = skip_list
elif hasattr(model, 'no_weight_decay'):
skip = model.no_weight_decay()
parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale)
weight_decay = 0.
else:
parameters = model.parameters()
if 'fused' in opt_lower:
assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'
opt_args = dict(lr=args.lr, weight_decay=weight_decay)
if hasattr(args, 'opt_eps') and args.opt_eps is not None:
opt_args['eps'] = args.opt_eps
if hasattr(args, 'opt_betas') and args.opt_betas is not None:
opt_args['betas'] = args.opt_betas
opt_split = opt_lower.split('_')
opt_lower = opt_split[-1]
if opt_lower == 'sgd' or opt_lower == 'nesterov':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'momentum':
opt_args.pop('eps', None)
optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'adam':
optimizer = optim.Adam(parameters, **opt_args)
elif opt_lower == 'adamw':
optimizer = optim.AdamW(parameters, **opt_args)
elif opt_lower == 'nadam':
optimizer = Nadam(parameters, **opt_args)
elif opt_lower == 'radam':
optimizer = RAdam(parameters, **opt_args)
elif opt_lower == 'adamp':
optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)
elif opt_lower == 'sgdp':
optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'adadelta':
optimizer = optim.Adadelta(parameters, **opt_args)
elif opt_lower == 'adafactor':
if not args.lr:
opt_args['lr'] = None
optimizer = Adafactor(parameters, **opt_args)
elif opt_lower == 'adahessian':
optimizer = Adahessian(parameters, **opt_args)
elif opt_lower == 'rmsprop':
optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
elif opt_lower == 'rmsproptf':
optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)
    elif opt_lower == 'novograd' or opt_lower == 'nvnovograd':
        # timm's NovoGrad import is commented out above, so both names fall back to NvNovoGrad
        optimizer = NvNovoGrad(parameters, **opt_args)
elif opt_lower == 'fusedsgd':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)
elif opt_lower == 'fusedmomentum':
opt_args.pop('eps', None)
optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)
elif opt_lower == 'fusedadam':
optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)
elif opt_lower == 'fusedadamw':
optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)
elif opt_lower == 'fusedlamb':
optimizer = FusedLAMB(parameters, **opt_args)
elif opt_lower == 'fusednovograd':
opt_args.setdefault('betas', (0.95, 0.98))
optimizer = FusedNovoGrad(parameters, **opt_args)
else:
        raise ValueError("Invalid optimizer: %s" % opt_lower)
if len(opt_split) > 1:
if opt_split[0] == 'lookahead':
optimizer = Lookahead(optimizer)
return optimizer
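# Minimal usage sketch (illustrative): create_optimizer only reads a handful of
# attributes from `args`, so a SimpleNamespace with those fields is enough to
# drive it outside the training scripts.
def _example_create_optimizer(model):
    from types import SimpleNamespace
    args = SimpleNamespace(opt='adamw', lr=5e-4, weight_decay=0.05,
                           opt_eps=1e-8, opt_betas=(0.9, 0.999), momentum=0.9)
    return create_optimizer(args, model)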
| data2vec_vision-main | beit/optim_factory.py |
import attr
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from functools import partial
from dall_e.utils import Conv2d
@attr.s(eq=False, repr=False)
class DecoderBlock(nn.Module):
n_in: int = attr.ib(validator=lambda i, a, x: x >= 1)
n_out: int = attr.ib(validator=lambda i, a, x: x >= 1 and x % 4 ==0)
n_layers: int = attr.ib(validator=lambda i, a, x: x >= 1)
device: torch.device = attr.ib(default=None)
requires_grad: bool = attr.ib(default=False)
def __attrs_post_init__(self) -> None:
super().__init__()
self.n_hid = self.n_out // 4
self.post_gain = 1 / (self.n_layers ** 2)
make_conv = partial(Conv2d, device=self.device, requires_grad=self.requires_grad)
self.id_path = make_conv(self.n_in, self.n_out, 1) if self.n_in != self.n_out else nn.Identity()
self.res_path = nn.Sequential(OrderedDict([
('relu_1', nn.ReLU()),
('conv_1', make_conv(self.n_in, self.n_hid, 1)),
('relu_2', nn.ReLU()),
('conv_2', make_conv(self.n_hid, self.n_hid, 3)),
('relu_3', nn.ReLU()),
('conv_3', make_conv(self.n_hid, self.n_hid, 3)),
('relu_4', nn.ReLU()),
('conv_4', make_conv(self.n_hid, self.n_out, 3)),]))
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.id_path(x) + self.post_gain * self.res_path(x)
@attr.s(eq=False, repr=False)
class Decoder(nn.Module):
group_count: int = 4
n_init: int = attr.ib(default=128, validator=lambda i, a, x: x >= 8)
n_hid: int = attr.ib(default=256, validator=lambda i, a, x: x >= 64)
n_blk_per_group: int = attr.ib(default=2, validator=lambda i, a, x: x >= 1)
output_channels: int = attr.ib(default=3, validator=lambda i, a, x: x >= 1)
vocab_size: int = attr.ib(default=8192, validator=lambda i, a, x: x >= 512)
device: torch.device = attr.ib(default=torch.device('cpu'))
requires_grad: bool = attr.ib(default=False)
use_mixed_precision: bool = attr.ib(default=True)
def __attrs_post_init__(self) -> None:
super().__init__()
blk_range = range(self.n_blk_per_group)
n_layers = self.group_count * self.n_blk_per_group
make_conv = partial(Conv2d, device=self.device, requires_grad=self.requires_grad)
make_blk = partial(DecoderBlock, n_layers=n_layers, device=self.device,
requires_grad=self.requires_grad)
self.blocks = nn.Sequential(OrderedDict([
('input', make_conv(self.vocab_size, self.n_init, 1, use_float16=False)),
('group_1', nn.Sequential(OrderedDict([
*[(f'block_{i + 1}', make_blk(self.n_init if i == 0 else 8 * self.n_hid, 8 * self.n_hid)) for i in blk_range],
('upsample', nn.Upsample(scale_factor=2, mode='nearest')),
]))),
('group_2', nn.Sequential(OrderedDict([
*[(f'block_{i + 1}', make_blk(8 * self.n_hid if i == 0 else 4 * self.n_hid, 4 * self.n_hid)) for i in blk_range],
('upsample', nn.Upsample(scale_factor=2, mode='nearest')),
]))),
('group_3', nn.Sequential(OrderedDict([
*[(f'block_{i + 1}', make_blk(4 * self.n_hid if i == 0 else 2 * self.n_hid, 2 * self.n_hid)) for i in blk_range],
('upsample', nn.Upsample(scale_factor=2, mode='nearest')),
]))),
('group_4', nn.Sequential(OrderedDict([
*[(f'block_{i + 1}', make_blk(2 * self.n_hid if i == 0 else 1 * self.n_hid, 1 * self.n_hid)) for i in blk_range],
]))),
('output', nn.Sequential(OrderedDict([
('relu', nn.ReLU()),
('conv', make_conv(1 * self.n_hid, 2 * self.output_channels, 1)),
]))),
]))
def forward(self, x: torch.Tensor) -> torch.Tensor:
if len(x.shape) != 4:
raise ValueError(f'input shape {x.shape} is not 4d')
if x.shape[1] != self.vocab_size:
raise ValueError(f'input has {x.shape[1]} channels but model built for {self.vocab_size}')
if x.dtype != torch.float32:
raise ValueError('input must have dtype torch.float32')
return self.blocks(x)
| data2vec_vision-main | beit/dall_e/decoder.py |
import io, requests
import torch
import torch.nn as nn
from dall_e.encoder import Encoder
from dall_e.decoder import Decoder
from dall_e.utils import map_pixels, unmap_pixels
def load_model(path: str, device: torch.device = None) -> nn.Module:
if path.startswith('http://') or path.startswith('https://'):
resp = requests.get(path)
resp.raise_for_status()
with io.BytesIO(resp.content) as buf:
return torch.load(buf, map_location=device)
else:
with open(path, 'rb') as f:
return torch.load(f, map_location=device)
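# Usage sketch (illustrative; `model_dir` and the file names are placeholders
# for wherever the pre-trained DALL-E tokenizer weights were downloaded to):
def _example_load_tokenizer(model_dir: str, device: torch.device):
    enc = load_model(f"{model_dir}/encoder.pkl", device)
    dec = load_model(f"{model_dir}/decoder.pkl", device)
    return enc, dec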
| data2vec_vision-main | beit/dall_e/__init__.py |
import attr
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from functools import partial
from dall_e.utils import Conv2d
@attr.s(eq=False, repr=False)
class EncoderBlock(nn.Module):
n_in: int = attr.ib(validator=lambda i, a, x: x >= 1)
n_out: int = attr.ib(validator=lambda i, a, x: x >= 1 and x % 4 ==0)
n_layers: int = attr.ib(validator=lambda i, a, x: x >= 1)
device: torch.device = attr.ib(default=None)
requires_grad: bool = attr.ib(default=False)
def __attrs_post_init__(self) -> None:
super().__init__()
self.n_hid = self.n_out // 4
self.post_gain = 1 / (self.n_layers ** 2)
make_conv = partial(Conv2d, device=self.device, requires_grad=self.requires_grad)
self.id_path = make_conv(self.n_in, self.n_out, 1) if self.n_in != self.n_out else nn.Identity()
self.res_path = nn.Sequential(OrderedDict([
('relu_1', nn.ReLU()),
('conv_1', make_conv(self.n_in, self.n_hid, 3)),
('relu_2', nn.ReLU()),
('conv_2', make_conv(self.n_hid, self.n_hid, 3)),
('relu_3', nn.ReLU()),
('conv_3', make_conv(self.n_hid, self.n_hid, 3)),
('relu_4', nn.ReLU()),
('conv_4', make_conv(self.n_hid, self.n_out, 1)),]))
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.id_path(x) + self.post_gain * self.res_path(x)
@attr.s(eq=False, repr=False)
class Encoder(nn.Module):
group_count: int = 4
n_hid: int = attr.ib(default=256, validator=lambda i, a, x: x >= 64)
n_blk_per_group: int = attr.ib(default=2, validator=lambda i, a, x: x >= 1)
input_channels: int = attr.ib(default=3, validator=lambda i, a, x: x >= 1)
vocab_size: int = attr.ib(default=8192, validator=lambda i, a, x: x >= 512)
device: torch.device = attr.ib(default=torch.device('cpu'))
requires_grad: bool = attr.ib(default=False)
use_mixed_precision: bool = attr.ib(default=True)
def __attrs_post_init__(self) -> None:
super().__init__()
blk_range = range(self.n_blk_per_group)
n_layers = self.group_count * self.n_blk_per_group
make_conv = partial(Conv2d, device=self.device, requires_grad=self.requires_grad)
make_blk = partial(EncoderBlock, n_layers=n_layers, device=self.device,
requires_grad=self.requires_grad)
self.blocks = nn.Sequential(OrderedDict([
('input', make_conv(self.input_channels, 1 * self.n_hid, 7)),
('group_1', nn.Sequential(OrderedDict([
*[(f'block_{i + 1}', make_blk(1 * self.n_hid, 1 * self.n_hid)) for i in blk_range],
('pool', nn.MaxPool2d(kernel_size=2)),
]))),
('group_2', nn.Sequential(OrderedDict([
*[(f'block_{i + 1}', make_blk(1 * self.n_hid if i == 0 else 2 * self.n_hid, 2 * self.n_hid)) for i in blk_range],
('pool', nn.MaxPool2d(kernel_size=2)),
]))),
('group_3', nn.Sequential(OrderedDict([
*[(f'block_{i + 1}', make_blk(2 * self.n_hid if i == 0 else 4 * self.n_hid, 4 * self.n_hid)) for i in blk_range],
('pool', nn.MaxPool2d(kernel_size=2)),
]))),
('group_4', nn.Sequential(OrderedDict([
*[(f'block_{i + 1}', make_blk(4 * self.n_hid if i == 0 else 8 * self.n_hid, 8 * self.n_hid)) for i in blk_range],
]))),
('output', nn.Sequential(OrderedDict([
('relu', nn.ReLU()),
('conv', make_conv(8 * self.n_hid, self.vocab_size, 1, use_float16=False)),
]))),
]))
def forward(self, x: torch.Tensor) -> torch.Tensor:
if len(x.shape) != 4:
raise ValueError(f'input shape {x.shape} is not 4d')
if x.shape[1] != self.input_channels:
raise ValueError(f'input has {x.shape[1]} channels but model built for {self.input_channels}')
if x.dtype != torch.float32:
raise ValueError('input must have dtype torch.float32')
return self.blocks(x)
| data2vec_vision-main | beit/dall_e/encoder.py |
import attr
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
logit_laplace_eps: float = 0.1
@attr.s(eq=False)
class Conv2d(nn.Module):
n_in: int = attr.ib(validator=lambda i, a, x: x >= 1)
n_out: int = attr.ib(validator=lambda i, a, x: x >= 1)
kw: int = attr.ib(validator=lambda i, a, x: x >= 1 and x % 2 == 1)
use_float16: bool = attr.ib(default=True)
device: torch.device = attr.ib(default=torch.device('cpu'))
requires_grad: bool = attr.ib(default=False)
def __attrs_post_init__(self) -> None:
super().__init__()
w = torch.empty((self.n_out, self.n_in, self.kw, self.kw), dtype=torch.float32,
device=self.device, requires_grad=self.requires_grad)
w.normal_(std=1 / math.sqrt(self.n_in * self.kw ** 2))
b = torch.zeros((self.n_out,), dtype=torch.float32, device=self.device,
requires_grad=self.requires_grad)
self.w, self.b = nn.Parameter(w), nn.Parameter(b)
def forward(self, x: torch.Tensor) -> torch.Tensor:
if self.use_float16 and 'cuda' in self.w.device.type:
if x.dtype != torch.float16:
x = x.half()
w, b = self.w.half(), self.b.half()
else:
if x.dtype != torch.float32:
x = x.float()
w, b = self.w, self.b
return F.conv2d(x, w, b, padding=(self.kw - 1) // 2)
def map_pixels(x: torch.Tensor) -> torch.Tensor:
if x.dtype != torch.float:
raise ValueError('expected input to have type float')
return (1 - 2 * logit_laplace_eps) * x + logit_laplace_eps
def unmap_pixels(x: torch.Tensor) -> torch.Tensor:
if len(x.shape) != 4:
raise ValueError('expected input to be 4d')
if x.dtype != torch.float:
raise ValueError('expected input to have type float')
return torch.clamp((x - logit_laplace_eps) / (1 - 2 * logit_laplace_eps), 0, 1)
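# A small sketch of how the helpers above fit together; the layer sizes and the
# 64x64 input are illustrative assumptions, not values from the original pipeline.
#
#     conv = Conv2d(n_in=3, n_out=8, kw=3)     # float32 weights on CPU by default
#     x = torch.rand(1, 3, 64, 64)             # pixels in [0, 1]
#     y = conv(map_pixels(x))                  # (1, 8, 64, 64); padding keeps the spatial size
#     x_back = unmap_pixels(map_pixels(x))     # round-trips back to x (up to clamping)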
| data2vec_vision-main | beit/dall_e/utils.py |
import argparse
import os
import mmcv
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from mmcv.utils import DictAction
from mmseg.apis import multi_gpu_test, single_gpu_test
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.models import build_segmentor
from backbone import beit
def parse_args():
parser = argparse.ArgumentParser(
description='mmseg test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--aug-test', action='store_true', help='Use Flip and Multi scale aug')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "mIoU"'
' for generic datasets, and "cityscapes" for Cityscapes')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu_collect is not specified')
parser.add_argument(
'--options', nargs='+', action=DictAction, help='custom options')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results) with the argument "--out", "--eval", '
         '"--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
        raise ValueError('--eval and --format-only cannot both be specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = mmcv.Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
if args.aug_test:
# hard code index
cfg.data.test.pipeline[1].img_ratios = [
0.5, 0.75, 1.0, 1.25, 1.5, 1.75
]
cfg.data.test.pipeline[1].flip = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
model.CLASSES = checkpoint['meta']['CLASSES']
model.PALETTE = checkpoint['meta']['PALETTE']
efficient_test = False
if args.eval_options is not None:
efficient_test = args.eval_options.get('efficient_test', False)
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
efficient_test)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect, efficient_test)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
dataset.evaluate(outputs, args.eval, **kwargs)
if __name__ == '__main__':
main()
| data2vec_vision-main | beit/semantic_segmentation/tools/test.py |
import argparse
import copy
import os
import os.path as osp
import time
import mmcv
import mmcv_custom
import torch
from mmcv.runner import init_dist
from mmcv.utils import Config, DictAction, get_git_hash
from mmseg import __version__
from mmseg.apis import set_random_seed
from mmcv_custom import train_segmentor
from mmseg.datasets import build_dataset
from mmseg.models import build_segmentor
from mmseg.utils import collect_env, get_root_logger
from backbone import beit
def parse_args():
parser = argparse.ArgumentParser(description='Train a segmentor')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--load-from', help='the checkpoint file to load weights from')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options', nargs='+', action=DictAction, help='custom options')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.options is not None:
cfg.merge_from_dict(args.options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
    # work_dir is determined in this priority: CLI > work_dir in config file > config filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.load_from is not None:
cfg.load_from = args.load_from
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
if args.seed is not None:
logger.info(f'Set random seed to {args.seed}, deterministic: '
f'{args.deterministic}')
set_random_seed(args.seed, deterministic=args.deterministic)
cfg.seed = args.seed
meta['seed'] = args.seed
meta['exp_name'] = osp.basename(args.config)
model = build_segmentor(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
logger.info(model)
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmseg version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmseg_version=f'{__version__}+{get_git_hash()[:7]}',
config=cfg.pretty_text,
CLASSES=datasets[0].CLASSES,
PALETTE=datasets[0].PALETTE)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_segmentor(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
| data2vec_vision-main | beit/semantic_segmentation/tools/train.py |
import json
from mmcv.runner import OPTIMIZER_BUILDERS, DefaultOptimizerConstructor
from mmcv.runner import get_dist_info
def get_num_layer_for_vit(var_name, num_max_layer):
if var_name in ("backbone.cls_token", "backbone.mask_token", "backbone.pos_embed"):
return 0
elif var_name.startswith("backbone.patch_embed"):
return 0
elif var_name.startswith("backbone.blocks"):
layer_id = int(var_name.split('.')[2])
return layer_id + 1
else:
return num_max_layer - 1
@OPTIMIZER_BUILDERS.register_module()
class LayerDecayOptimizerConstructor(DefaultOptimizerConstructor):
def add_params(self, params, module, prefix='', is_dcn_module=None):
"""Add all parameters of module to the params list.
The parameters of the given module will be added to the list of param
groups, with specific rules defined by paramwise_cfg.
Args:
params (list[dict]): A list of param groups, it will be modified
in place.
module (nn.Module): The module to be added.
prefix (str): The prefix of the module
is_dcn_module (int|float|None): If the current module is a
submodule of DCN, `is_dcn_module` will be passed to
control conv_offset layer's learning rate. Defaults to None.
"""
parameter_groups = {}
print(self.paramwise_cfg)
num_layers = self.paramwise_cfg.get('num_layers') + 2
layer_decay_rate = self.paramwise_cfg.get('layer_decay_rate')
print("Build LayerDecayOptimizerConstructor %f - %d" % (layer_decay_rate, num_layers))
weight_decay = self.base_wd
for name, param in module.named_parameters():
if not param.requires_grad:
continue # frozen weights
if len(param.shape) == 1 or name.endswith(".bias") or name in ('pos_embed', 'cls_token'):
group_name = "no_decay"
this_weight_decay = 0.
else:
group_name = "decay"
this_weight_decay = weight_decay
layer_id = get_num_layer_for_vit(name, num_layers)
group_name = "layer_%d_%s" % (layer_id, group_name)
if group_name not in parameter_groups:
scale = layer_decay_rate ** (num_layers - layer_id - 1)
parameter_groups[group_name] = {
"weight_decay": this_weight_decay,
"params": [],
"param_names": [],
"lr_scale": scale,
"group_name": group_name,
"lr": scale * self.base_lr,
}
parameter_groups[group_name]["params"].append(param)
parameter_groups[group_name]["param_names"].append(name)
rank, _ = get_dist_info()
if rank == 0:
to_display = {}
for key in parameter_groups:
to_display[key] = {
"param_names": parameter_groups[key]["param_names"],
"lr_scale": parameter_groups[key]["lr_scale"],
"lr": parameter_groups[key]["lr"],
"weight_decay": parameter_groups[key]["weight_decay"],
}
print("Param groups = %s" % json.dumps(to_display, indent=2))
# state_dict = module.state_dict()
# for group_name in parameter_groups:
# group = parameter_groups[group_name]
# for name in group["param_names"]:
# group["params"].append(state_dict[name])
params.extend(parameter_groups.values())
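# Worked example of the scaling above (the parameter names are illustrative):
# with paramwise_cfg num_layers=24 and layer_decay_rate=0.95, num_layers becomes
# 26, so a weight in "backbone.blocks.11.*" gets layer_id=12 and
# lr_scale = 0.95 ** (26 - 12 - 1) ~= 0.513, "backbone.patch_embed.*"
# (layer_id=0) gets 0.95 ** 25 ~= 0.277, and decode-head parameters fall into
# the last layer with lr_scale = 1.0.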
| data2vec_vision-main | beit/semantic_segmentation/mmcv_custom/layer_decay_optimizer_constructor.py |
import random
import warnings
import numpy as np
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import build_optimizer, build_runner
from mmseg.core import DistEvalHook, EvalHook
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.utils import get_root_logger
try:
import apex
except ImportError:
print('apex is not installed')
def set_random_seed(seed, deterministic=False):
"""Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def train_segmentor(model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None):
"""Launch segmentor training."""
logger = get_root_logger(cfg.log_level)
# prepare data loaders
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
data_loaders = [
build_dataloader(
ds,
cfg.data.samples_per_gpu,
cfg.data.workers_per_gpu,
# cfg.gpus will be ignored if distributed
len(cfg.gpu_ids),
dist=distributed,
seed=cfg.seed,
drop_last=True) for ds in dataset
]
# build optimizer
optimizer = build_optimizer(model, cfg.optimizer)
# use apex fp16 optimizer
if cfg.optimizer_config.get("type", None) and cfg.optimizer_config["type"] == "DistOptimizerHook":
if cfg.optimizer_config.get("use_fp16", False):
model, optimizer = apex.amp.initialize(
model.cuda(), optimizer, opt_level="O1")
for m in model.modules():
if hasattr(m, "fp16_enabled"):
m.fp16_enabled = True
# put model on gpus
if distributed:
find_unused_parameters = cfg.get('find_unused_parameters', False)
# Sets the `find_unused_parameters` parameter in
# torch.nn.parallel.DistributedDataParallel
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
else:
model = MMDataParallel(
model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
if cfg.get('runner') is None:
cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters}
warnings.warn(
'config is now expected to have a `runner` section, '
'please set `runner` in your config.', UserWarning)
runner = build_runner(
cfg.runner,
default_args=dict(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta))
# register hooks
runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
cfg.checkpoint_config, cfg.log_config,
cfg.get('momentum_config', None))
    # an ugly workaround to make the .log and .log.json filenames the same
runner.timestamp = timestamp
# register eval hooks
if validate:
val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
val_dataloader = build_dataloader(
val_dataset,
samples_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
eval_cfg = cfg.get('evaluation', {})
eval_cfg['by_epoch'] = 'IterBasedRunner' not in cfg.runner['type']
eval_hook = DistEvalHook if distributed else EvalHook
runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow)
| data2vec_vision-main | beit/semantic_segmentation/mmcv_custom/train_api.py |
import mmcv
import numpy as np
from mmseg.datasets.builder import PIPELINES
@PIPELINES.register_module()
class SETR_Resize(object):
"""Resize images & seg.
This transform resizes the input image to some scale. If the input dict
contains the key "scale", then the scale in the input dict is used,
otherwise the specified scale in the init method is used.
``img_scale`` can either be a tuple (single-scale) or a list of tuple
(multi-scale). There are 3 multiscale modes:
- ``ratio_range is not None``: randomly sample a ratio from the ratio range
and multiply it with the image scale.
- ``ratio_range is None and multiscale_mode == "range"``: randomly sample a
    scale from the range.
- ``ratio_range is None and multiscale_mode == "value"``: randomly sample a
scale from multiple scales.
Args:
img_scale (tuple or list[tuple]): Images scales for resizing.
multiscale_mode (str): Either "range" or "value".
ratio_range (tuple[float]): (min_ratio, max_ratio)
keep_ratio (bool): Whether to keep the aspect ratio when resizing the
image.
"""
def __init__(self,
img_scale=None,
multiscale_mode='range',
ratio_range=None,
keep_ratio=True,
crop_size=None,
setr_multi_scale=False):
if img_scale is None:
self.img_scale = None
else:
if isinstance(img_scale, list):
self.img_scale = img_scale
else:
self.img_scale = [img_scale]
# assert mmcv.is_list_of(self.img_scale, tuple)
if ratio_range is not None:
# mode 1: given a scale and a range of image ratio
assert len(self.img_scale) == 1
else:
# mode 2: given multiple scales or a range of scales
assert multiscale_mode in ['value', 'range']
self.multiscale_mode = multiscale_mode
self.ratio_range = ratio_range
self.keep_ratio = keep_ratio
self.crop_size = crop_size
self.setr_multi_scale = setr_multi_scale
@staticmethod
def random_select(img_scales):
"""Randomly select an img_scale from given candidates.
Args:
img_scales (list[tuple]): Images scales for selection.
Returns:
            (tuple, int): Returns a tuple ``(img_scale, scale_idx)``,
where ``img_scale`` is the selected image scale and
``scale_idx`` is the selected index in the given candidates.
"""
assert mmcv.is_list_of(img_scales, tuple)
scale_idx = np.random.randint(len(img_scales))
img_scale = img_scales[scale_idx]
return img_scale, scale_idx
@staticmethod
def random_sample(img_scales):
"""Randomly sample an img_scale when ``multiscale_mode=='range'``.
Args:
img_scales (list[tuple]): Images scale range for sampling.
There must be two tuples in img_scales, which specify the lower
                and upper bound of image scales.
Returns:
(tuple, None): Returns a tuple ``(img_scale, None)``, where
``img_scale`` is sampled scale and None is just a placeholder
to be consistent with :func:`random_select`.
"""
assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2
img_scale_long = [max(s) for s in img_scales]
img_scale_short = [min(s) for s in img_scales]
long_edge = np.random.randint(
min(img_scale_long),
max(img_scale_long) + 1)
short_edge = np.random.randint(
min(img_scale_short),
max(img_scale_short) + 1)
img_scale = (long_edge, short_edge)
return img_scale, None
@staticmethod
def random_sample_ratio(img_scale, ratio_range):
"""Randomly sample an img_scale when ``ratio_range`` is specified.
A ratio will be randomly sampled from the range specified by
``ratio_range``. Then it would be multiplied with ``img_scale`` to
generate sampled scale.
Args:
img_scale (tuple): Images scale base to multiply with ratio.
ratio_range (tuple[float]): The minimum and maximum ratio to scale
the ``img_scale``.
Returns:
(tuple, None): Returns a tuple ``(scale, None)``, where
``scale`` is sampled ratio multiplied with ``img_scale`` and
None is just a placeholder to be consistent with
:func:`random_select`.
"""
assert isinstance(img_scale, tuple) and len(img_scale) == 2
min_ratio, max_ratio = ratio_range
assert min_ratio <= max_ratio
ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio
scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)
return scale, None
def _random_scale(self, results):
"""Randomly sample an img_scale according to ``ratio_range`` and
``multiscale_mode``.
If ``ratio_range`` is specified, a ratio will be sampled and be
multiplied with ``img_scale``.
If multiple scales are specified by ``img_scale``, a scale will be
sampled according to ``multiscale_mode``.
Otherwise, single scale will be used.
Args:
results (dict): Result dict from :obj:`dataset`.
Returns:
            dict: Two new keys ``scale`` and ``scale_idx`` are added into
``results``, which would be used by subsequent pipelines.
"""
if self.ratio_range is not None:
scale, scale_idx = self.random_sample_ratio(
self.img_scale[0], self.ratio_range)
elif len(self.img_scale) == 1:
scale, scale_idx = self.img_scale[0], 0
elif self.multiscale_mode == 'range':
scale, scale_idx = self.random_sample(self.img_scale)
elif self.multiscale_mode == 'value':
scale, scale_idx = self.random_select(self.img_scale)
else:
raise NotImplementedError
results['scale'] = scale
results['scale_idx'] = scale_idx
def _resize_img(self, results):
"""Resize images with ``results['scale']``."""
if self.keep_ratio:
if self.setr_multi_scale:
if min(results['scale']) < self.crop_size[0]:
new_short = self.crop_size[0]
else:
new_short = min(results['scale'])
h, w = results['img'].shape[:2]
if h > w:
new_h, new_w = new_short * h / w, new_short
else:
new_h, new_w = new_short, new_short * w / h
results['scale'] = (new_h, new_w)
img, scale_factor = mmcv.imrescale(
results['img'], results['scale'], return_scale=True)
            # the w_scale and h_scale have a minor difference
# a real fix should be done in the mmcv.imrescale in the future
new_h, new_w = img.shape[:2]
h, w = results['img'].shape[:2]
w_scale = new_w / w
h_scale = new_h / h
else:
img, w_scale, h_scale = mmcv.imresize(
results['img'], results['scale'], return_scale=True)
scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],
dtype=np.float32)
results['img'] = img
results['img_shape'] = img.shape
results['pad_shape'] = img.shape # in case that there is no padding
results['scale_factor'] = scale_factor
results['keep_ratio'] = self.keep_ratio
def _resize_seg(self, results):
"""Resize semantic segmentation map with ``results['scale']``."""
for key in results.get('seg_fields', []):
if self.keep_ratio:
gt_seg = mmcv.imrescale(
results[key], results['scale'], interpolation='nearest')
else:
gt_seg = mmcv.imresize(
results[key], results['scale'], interpolation='nearest')
            results[key] = gt_seg
def __call__(self, results):
"""Call function to resize images, bounding boxes, masks, semantic
segmentation map.
Args:
results (dict): Result dict from loading pipeline.
Returns:
dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor',
'keep_ratio' keys are added into result dict.
"""
if 'scale' not in results:
self._random_scale(results)
self._resize_img(results)
self._resize_seg(results)
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(img_scale={self.img_scale}, '
f'multiscale_mode={self.multiscale_mode}, '
f'ratio_range={self.ratio_range}, '
f'keep_ratio={self.keep_ratio})')
return repr_str
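# Worked example for the scale sampling above, using the values from the ADE20K
# config in this repo (the sampled ratio of 1.5 is illustrative): with
# img_scale=(2048, 512) and ratio_range=(0.5, 2.0), random_sample_ratio gives
# scale = (int(2048 * 1.5), int(512 * 1.5)) = (3072, 768); with keep_ratio=True
# the image is then rescaled to fit inside that scale without changing its
# aspect ratio.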
| data2vec_vision-main | beit/semantic_segmentation/mmcv_custom/resize_transform.py |
# Copyright (c) Open-MMLab. All rights reserved.
import io
import os
import os.path as osp
import pkgutil
import time
import warnings
from collections import OrderedDict
from importlib import import_module
from tempfile import TemporaryDirectory
import torch
import torchvision
from torch.optim import Optimizer
from torch.utils import model_zoo
from torch.nn import functional as F
import mmcv
from mmcv.fileio import FileClient
from mmcv.fileio import load as load_file
from mmcv.parallel import is_module_wrapper
from mmcv.utils import mkdir_or_exist
from mmcv.runner import get_dist_info
from scipy import interpolate
import numpy as np
import math
ENV_MMCV_HOME = 'MMCV_HOME'
ENV_XDG_CACHE_HOME = 'XDG_CACHE_HOME'
DEFAULT_CACHE_DIR = '~/.cache'
def _get_mmcv_home():
mmcv_home = os.path.expanduser(
os.getenv(
ENV_MMCV_HOME,
os.path.join(
os.getenv(ENV_XDG_CACHE_HOME, DEFAULT_CACHE_DIR), 'mmcv')))
mkdir_or_exist(mmcv_home)
return mmcv_home
def load_state_dict(module, state_dict, strict=False, logger=None):
"""Load state_dict to a module.
This method is modified from :meth:`torch.nn.Module.load_state_dict`.
Default value for ``strict`` is set to ``False`` and the message for
param mismatch will be shown even if strict is False.
Args:
module (Module): Module that receives the state_dict.
state_dict (OrderedDict): Weights.
strict (bool): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``False``.
logger (:obj:`logging.Logger`, optional): Logger to log the error
message. If not specified, print function will be used.
"""
unexpected_keys = []
all_missing_keys = []
err_msg = []
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# use _load_from_state_dict to enable checkpoint version control
def load(module, prefix=''):
# recursively check parallel module in case that the model has a
# complicated structure, e.g., nn.Module(nn.Module(DDP))
if is_module_wrapper(module):
module = module.module
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(state_dict, prefix, local_metadata, True,
all_missing_keys, unexpected_keys,
err_msg)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(module)
load = None # break load->load reference cycle
# ignore "num_batches_tracked" of BN layers
missing_keys = [
key for key in all_missing_keys if 'num_batches_tracked' not in key
]
if unexpected_keys:
err_msg.append('unexpected key in source '
f'state_dict: {", ".join(unexpected_keys)}\n')
if missing_keys:
err_msg.append(
f'missing keys in source state_dict: {", ".join(missing_keys)}\n')
rank, _ = get_dist_info()
if len(err_msg) > 0 and rank == 0:
err_msg.insert(
0, 'The model and loaded state dict do not match exactly\n')
err_msg = '\n'.join(err_msg)
if strict:
raise RuntimeError(err_msg)
elif logger is not None:
logger.warning(err_msg)
else:
print(err_msg)
def load_url_dist(url, model_dir=None, map_location="cpu"):
"""In distributed setting, this function only download checkpoint at local
rank 0."""
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
if rank == 0:
checkpoint = model_zoo.load_url(url, model_dir=model_dir, map_location=map_location)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
checkpoint = model_zoo.load_url(url, model_dir=model_dir, map_location=map_location)
return checkpoint
def load_pavimodel_dist(model_path, map_location=None):
"""In distributed setting, this function only download checkpoint at local
rank 0."""
try:
from pavi import modelcloud
except ImportError:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.')
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
if rank == 0:
model = modelcloud.get(model_path)
with TemporaryDirectory() as tmp_dir:
downloaded_file = osp.join(tmp_dir, model.name)
model.download(downloaded_file)
checkpoint = torch.load(downloaded_file, map_location=map_location)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
model = modelcloud.get(model_path)
with TemporaryDirectory() as tmp_dir:
downloaded_file = osp.join(tmp_dir, model.name)
model.download(downloaded_file)
checkpoint = torch.load(
downloaded_file, map_location=map_location)
return checkpoint
def load_fileclient_dist(filename, backend, map_location):
"""In distributed setting, this function only download checkpoint at local
rank 0."""
rank, world_size = get_dist_info()
rank = int(os.environ.get('LOCAL_RANK', rank))
allowed_backends = ['ceph']
if backend not in allowed_backends:
raise ValueError(f'Load from Backend {backend} is not supported.')
if rank == 0:
fileclient = FileClient(backend=backend)
buffer = io.BytesIO(fileclient.get(filename))
checkpoint = torch.load(buffer, map_location=map_location)
if world_size > 1:
torch.distributed.barrier()
if rank > 0:
fileclient = FileClient(backend=backend)
buffer = io.BytesIO(fileclient.get(filename))
checkpoint = torch.load(buffer, map_location=map_location)
return checkpoint
def get_torchvision_models():
model_urls = dict()
for _, name, ispkg in pkgutil.walk_packages(torchvision.models.__path__):
if ispkg:
continue
_zoo = import_module(f'torchvision.models.{name}')
if hasattr(_zoo, 'model_urls'):
_urls = getattr(_zoo, 'model_urls')
model_urls.update(_urls)
return model_urls
def get_external_models():
mmcv_home = _get_mmcv_home()
default_json_path = osp.join(mmcv.__path__[0], 'model_zoo/open_mmlab.json')
default_urls = load_file(default_json_path)
assert isinstance(default_urls, dict)
external_json_path = osp.join(mmcv_home, 'open_mmlab.json')
if osp.exists(external_json_path):
external_urls = load_file(external_json_path)
assert isinstance(external_urls, dict)
default_urls.update(external_urls)
return default_urls
def get_mmcls_models():
mmcls_json_path = osp.join(mmcv.__path__[0], 'model_zoo/mmcls.json')
mmcls_urls = load_file(mmcls_json_path)
return mmcls_urls
def get_deprecated_model_names():
deprecate_json_path = osp.join(mmcv.__path__[0],
'model_zoo/deprecated.json')
deprecate_urls = load_file(deprecate_json_path)
assert isinstance(deprecate_urls, dict)
return deprecate_urls
def _process_mmcls_checkpoint(checkpoint):
state_dict = checkpoint['state_dict']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
if k.startswith('backbone.'):
new_state_dict[k[9:]] = v
new_checkpoint = dict(state_dict=new_state_dict)
return new_checkpoint
def _load_checkpoint(filename, map_location=None):
"""Load checkpoint from somewhere (modelzoo, file, url).
Args:
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str | None): Same as :func:`torch.load`. Default: None.
Returns:
dict | OrderedDict: The loaded checkpoint. It can be either an
OrderedDict storing model weights or a dict containing other
information, which depends on the checkpoint.
"""
if filename.startswith('modelzoo://'):
warnings.warn('The URL scheme of "modelzoo://" is deprecated, please '
'use "torchvision://" instead')
model_urls = get_torchvision_models()
model_name = filename[11:]
checkpoint = load_url_dist(model_urls[model_name])
elif filename.startswith('torchvision://'):
model_urls = get_torchvision_models()
model_name = filename[14:]
checkpoint = load_url_dist(model_urls[model_name])
elif filename.startswith('open-mmlab://'):
model_urls = get_external_models()
model_name = filename[13:]
deprecated_urls = get_deprecated_model_names()
if model_name in deprecated_urls:
warnings.warn(f'open-mmlab://{model_name} is deprecated in favor '
f'of open-mmlab://{deprecated_urls[model_name]}')
model_name = deprecated_urls[model_name]
model_url = model_urls[model_name]
# check if is url
if model_url.startswith(('http://', 'https://')):
checkpoint = load_url_dist(model_url)
else:
filename = osp.join(_get_mmcv_home(), model_url)
if not osp.isfile(filename):
raise IOError(f'{filename} is not a checkpoint file')
checkpoint = torch.load(filename, map_location=map_location)
elif filename.startswith('mmcls://'):
model_urls = get_mmcls_models()
model_name = filename[8:]
checkpoint = load_url_dist(model_urls[model_name])
checkpoint = _process_mmcls_checkpoint(checkpoint)
elif filename.startswith(('http://', 'https://')):
checkpoint = load_url_dist(filename)
elif filename.startswith('pavi://'):
model_path = filename[7:]
checkpoint = load_pavimodel_dist(model_path, map_location=map_location)
elif filename.startswith('s3://'):
checkpoint = load_fileclient_dist(
filename, backend='ceph', map_location=map_location)
else:
if not osp.isfile(filename):
raise IOError(f'{filename} is not a checkpoint file')
checkpoint = torch.load(filename, map_location=map_location)
return checkpoint
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0,
start_warmup_value=0, warmup_steps=-1):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_steps > 0:
warmup_iters = warmup_steps
print("Set warmup steps = %d" % warmup_iters)
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = np.array(
[final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / (len(iters)))) for i in iters])
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
return schedule
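# A minimal sketch of the scheduler above (the values are illustrative):
#
#     sched = cosine_scheduler(base_value=1e-3, final_value=1e-6,
#                              epochs=10, niter_per_ep=100, warmup_epochs=1)
#     # len(sched) == 1000: the first 100 values ramp linearly from 0 to 1e-3,
#     # the remaining 900 decay along a half cosine down to 1e-6.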
def load_checkpoint(model,
filename,
map_location='cpu',
strict=False,
logger=None):
"""Load checkpoint from a file or URI.
Args:
model (Module): Module to load checkpoint.
filename (str): Accept local filepath, URL, ``torchvision://xxx``,
``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
details.
map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to strictly enforce that the keys in the
            checkpoint match the keys returned by the model's state_dict.
logger (:mod:`logging.Logger` or None): The logger for error message.
Returns:
dict or OrderedDict: The loaded checkpoint.
"""
checkpoint = _load_checkpoint(filename, map_location)
# OrderedDict is a subclass of dict
if not isinstance(checkpoint, dict):
raise RuntimeError(
f'No state_dict found in checkpoint file {filename}')
# get state_dict from checkpoint
if 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
elif 'model' in checkpoint:
state_dict = checkpoint['model']
elif 'module' in checkpoint:
state_dict = checkpoint['module']
else:
state_dict = checkpoint
# strip prefix of state_dict
if list(state_dict.keys())[0].startswith('module.'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
# for MoBY, load model of online branch
if sorted(list(state_dict.keys()))[0].startswith('encoder'):
state_dict = {k.replace('encoder.', ''): v for k, v in state_dict.items() if k.startswith('encoder.')}
# reshape absolute position embedding for Swin
if state_dict.get('absolute_pos_embed') is not None:
absolute_pos_embed = state_dict['absolute_pos_embed']
N1, L, C1 = absolute_pos_embed.size()
N2, C2, H, W = model.absolute_pos_embed.size()
if N1 != N2 or C1 != C2 or L != H*W:
logger.warning("Error in loading absolute_pos_embed, pass")
else:
state_dict['absolute_pos_embed'] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2)
rank, _ = get_dist_info()
all_keys = list(state_dict.keys())
for key in all_keys:
if "relative_position_index" in key:
state_dict.pop(key)
if "relative_position_bias_table" in key:
rel_pos_bias = state_dict[key]
src_num_pos, num_attn_heads = rel_pos_bias.size()
dst_num_pos, _ = model.state_dict()[key].size()
dst_patch_shape = model.patch_embed.patch_shape
if dst_patch_shape[0] != dst_patch_shape[1]:
raise NotImplementedError()
num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1)
src_size = int((src_num_pos - num_extra_tokens) ** 0.5)
dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5)
if src_size != dst_size:
if rank == 0:
print("Position interpolate for %s from %dx%d to %dx%d" % (
key, src_size, src_size, dst_size, dst_size))
extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
def geometric_progression(a, r, n):
return a * (1.0 - r ** n) / (1.0 - r)
left, right = 1.01, 1.5
while right - left > 1e-6:
q = (left + right) / 2.0
gp = geometric_progression(1, q, src_size // 2)
if gp > dst_size // 2:
right = q
else:
left = q
# if q > 1.13492:
# q = 1.13492
dis = []
cur = 1
for i in range(src_size // 2):
dis.append(cur)
cur += q ** (i + 1)
r_ids = [-_ for _ in reversed(dis)]
x = r_ids + [0] + dis
y = r_ids + [0] + dis
t = dst_size // 2.0
dx = np.arange(-t, t + 0.1, 1.0)
dy = np.arange(-t, t + 0.1, 1.0)
if rank == 0:
print("x = {}".format(x))
print("dx = {}".format(dx))
all_rel_pos_bias = []
for i in range(num_attn_heads):
z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy()
f = interpolate.interp2d(x, y, z, kind='cubic')
all_rel_pos_bias.append(
torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device))
rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1)
new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
state_dict[key] = new_rel_pos_bias
if 'pos_embed' in state_dict:
pos_embed_checkpoint = state_dict['pos_embed']
embedding_size = pos_embed_checkpoint.shape[-1]
num_patches = model.patch_embed.num_patches
num_extra_tokens = model.pos_embed.shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
if rank == 0:
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
state_dict['pos_embed'] = new_pos_embed
# interpolate position bias table if needed
relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k]
for table_key in relative_position_bias_table_keys:
table_pretrained = state_dict[table_key]
table_current = model.state_dict()[table_key]
L1, nH1 = table_pretrained.size()
L2, nH2 = table_current.size()
if nH1 != nH2:
logger.warning(f"Error in loading {table_key}, pass")
else:
if L1 != L2:
S1 = int(L1 ** 0.5)
S2 = int(L2 ** 0.5)
table_pretrained_resized = F.interpolate(
table_pretrained.permute(1, 0).view(1, nH1, S1, S1),
size=(S2, S2), mode='bicubic')
state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0)
# load state_dict
load_state_dict(model, state_dict, strict, logger)
return checkpoint
def weights_to_cpu(state_dict):
"""Copy a model state_dict to cpu.
Args:
state_dict (OrderedDict): Model weights on GPU.
Returns:
        OrderedDict: Model weights on CPU.
"""
state_dict_cpu = OrderedDict()
for key, val in state_dict.items():
state_dict_cpu[key] = val.cpu()
return state_dict_cpu
def _save_to_state_dict(module, destination, prefix, keep_vars):
"""Saves module state to `destination` dictionary.
This method is modified from :meth:`torch.nn.Module._save_to_state_dict`.
Args:
module (nn.Module): The module to generate state_dict.
destination (dict): A dict where state will be stored.
prefix (str): The prefix for parameters and buffers used in this
module.
"""
for name, param in module._parameters.items():
if param is not None:
destination[prefix + name] = param if keep_vars else param.detach()
for name, buf in module._buffers.items():
# remove check of _non_persistent_buffers_set to allow nn.BatchNorm2d
if buf is not None:
destination[prefix + name] = buf if keep_vars else buf.detach()
def get_state_dict(module, destination=None, prefix='', keep_vars=False):
"""Returns a dictionary containing a whole state of the module.
Both parameters and persistent buffers (e.g. running averages) are
included. Keys are corresponding parameter and buffer names.
This method is modified from :meth:`torch.nn.Module.state_dict` to
recursively check parallel module in case that the model has a complicated
structure, e.g., nn.Module(nn.Module(DDP)).
Args:
module (nn.Module): The module to generate state_dict.
destination (OrderedDict): Returned dict for the state of the
module.
prefix (str): Prefix of the key.
keep_vars (bool): Whether to keep the variable property of the
parameters. Default: False.
Returns:
dict: A dictionary containing a whole state of the module.
"""
# recursively check parallel module in case that the model has a
# complicated structure, e.g., nn.Module(nn.Module(DDP))
if is_module_wrapper(module):
module = module.module
# below is the same as torch.nn.Module.state_dict()
if destination is None:
destination = OrderedDict()
destination._metadata = OrderedDict()
destination._metadata[prefix[:-1]] = local_metadata = dict(
version=module._version)
_save_to_state_dict(module, destination, prefix, keep_vars)
for name, child in module._modules.items():
if child is not None:
get_state_dict(
child, destination, prefix + name + '.', keep_vars=keep_vars)
for hook in module._state_dict_hooks.values():
hook_result = hook(module, destination, prefix, local_metadata)
if hook_result is not None:
destination = hook_result
return destination
def save_checkpoint(model, filename, optimizer=None, meta=None):
"""Save checkpoint to file.
The checkpoint will have 3 fields: ``meta``, ``state_dict`` and
``optimizer``. By default ``meta`` will contain version and time info.
Args:
model (Module): Module whose params are to be saved.
filename (str): Checkpoint filename.
optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
meta (dict, optional): Metadata to be saved in checkpoint.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
if is_module_wrapper(model):
model = model.module
if hasattr(model, 'CLASSES') and model.CLASSES is not None:
# save class name to the meta
meta.update(CLASSES=model.CLASSES)
checkpoint = {
'meta': meta,
'state_dict': weights_to_cpu(get_state_dict(model))
}
# save optimizer state dict in the checkpoint
if isinstance(optimizer, Optimizer):
checkpoint['optimizer'] = optimizer.state_dict()
elif isinstance(optimizer, dict):
checkpoint['optimizer'] = {}
for name, optim in optimizer.items():
checkpoint['optimizer'][name] = optim.state_dict()
if filename.startswith('pavi://'):
try:
from pavi import modelcloud
from pavi.exception import NodeNotFoundError
except ImportError:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.')
model_path = filename[7:]
root = modelcloud.Folder()
model_dir, model_name = osp.split(model_path)
try:
model = modelcloud.get(model_dir)
except NodeNotFoundError:
model = root.create_training_model(model_dir)
with TemporaryDirectory() as tmp_dir:
checkpoint_file = osp.join(tmp_dir, model_name)
with open(checkpoint_file, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
model.create_file(checkpoint_file, name=model_name)
else:
mmcv.mkdir_or_exist(osp.dirname(filename))
# immediately flush buffer
with open(filename, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
| data2vec_vision-main | beit/semantic_segmentation/mmcv_custom/checkpoint.py |
# -*- coding: utf-8 -*-
from .checkpoint import load_checkpoint
from .layer_decay_optimizer_constructor import LayerDecayOptimizerConstructor
from .resize_transform import SETR_Resize
from .apex_runner.optimizer import DistOptimizerHook
from .train_api import train_segmentor
__all__ = ['load_checkpoint', 'LayerDecayOptimizerConstructor', 'SETR_Resize', 'DistOptimizerHook', 'train_segmentor']
| data2vec_vision-main | beit/semantic_segmentation/mmcv_custom/__init__.py |
# Copyright (c) Open-MMLab. All rights reserved.
import os.path as osp
import platform
import shutil
import torch
from torch.optim import Optimizer
import mmcv
from mmcv.runner import RUNNERS, IterBasedRunner
from .checkpoint import save_checkpoint
try:
import apex
except ImportError:
print('apex is not installed')
@RUNNERS.register_module()
class IterBasedRunnerAmp(IterBasedRunner):
"""Iteration-based Runner with AMP support.
    This runner trains models iteration by iteration.
"""
def save_checkpoint(self,
out_dir,
filename_tmpl='iter_{}.pth',
meta=None,
save_optimizer=True,
create_symlink=False):
"""Save checkpoint to file.
Args:
out_dir (str): Directory to save checkpoint files.
filename_tmpl (str, optional): Checkpoint file template.
Defaults to 'iter_{}.pth'.
meta (dict, optional): Metadata to be saved in checkpoint.
Defaults to None.
            save_optimizer (bool, optional): Whether to save the optimizer.
                Defaults to True.
            create_symlink (bool, optional): Whether to create a symlink to
                the latest checkpoint file. Defaults to False.
"""
if meta is None:
meta = dict(iter=self.iter + 1, epoch=self.epoch + 1)
elif isinstance(meta, dict):
meta.update(iter=self.iter + 1, epoch=self.epoch + 1)
else:
raise TypeError(
f'meta should be a dict or None, but got {type(meta)}')
if self.meta is not None:
meta.update(self.meta)
filename = filename_tmpl.format(self.iter + 1)
filepath = osp.join(out_dir, filename)
optimizer = self.optimizer if save_optimizer else None
save_checkpoint(self.model, filepath, optimizer=optimizer, meta=meta)
# in some environments, `os.symlink` is not supported, you may need to
# set `create_symlink` to False
# if create_symlink:
# dst_file = osp.join(out_dir, 'latest.pth')
# if platform.system() != 'Windows':
# mmcv.symlink(filename, dst_file)
# else:
# shutil.copy(filepath, dst_file)
def resume(self,
checkpoint,
resume_optimizer=True,
map_location='default'):
if map_location == 'default':
if torch.cuda.is_available():
device_id = torch.cuda.current_device()
checkpoint = self.load_checkpoint(
checkpoint,
map_location=lambda storage, loc: storage.cuda(device_id))
else:
checkpoint = self.load_checkpoint(checkpoint)
else:
checkpoint = self.load_checkpoint(
checkpoint, map_location=map_location)
self._epoch = checkpoint['meta']['epoch']
self._iter = checkpoint['meta']['iter']
self._inner_iter = checkpoint['meta']['iter']
if 'optimizer' in checkpoint and resume_optimizer:
if isinstance(self.optimizer, Optimizer):
self.optimizer.load_state_dict(checkpoint['optimizer'])
elif isinstance(self.optimizer, dict):
for k in self.optimizer.keys():
self.optimizer[k].load_state_dict(
checkpoint['optimizer'][k])
else:
raise TypeError(
'Optimizer should be dict or torch.optim.Optimizer '
f'but got {type(self.optimizer)}')
if 'amp' in checkpoint:
apex.amp.load_state_dict(checkpoint['amp'])
self.logger.info('load amp state dict')
self.logger.info(f'resumed from epoch: {self.epoch}, iter {self.iter}')
| data2vec_vision-main | beit/semantic_segmentation/mmcv_custom/apex_runner/apex_iter_based_runner.py |
# Copyright (c) Open-MMLab. All rights reserved.
import os.path as osp
import time
from tempfile import TemporaryDirectory
import torch
from torch.optim import Optimizer
import mmcv
from mmcv.parallel import is_module_wrapper
from mmcv.runner.checkpoint import weights_to_cpu, get_state_dict
try:
import apex
except ImportError:
print('apex is not installed')
def save_checkpoint(model, filename, optimizer=None, meta=None):
"""Save checkpoint to file.
    The checkpoint will have 4 fields: ``meta``, ``state_dict``,
    ``optimizer`` and ``amp``. By default ``meta`` will contain version
and time info.
Args:
model (Module): Module whose params are to be saved.
filename (str): Checkpoint filename.
optimizer (:obj:`Optimizer`, optional): Optimizer to be saved.
meta (dict, optional): Metadata to be saved in checkpoint.
"""
if meta is None:
meta = {}
elif not isinstance(meta, dict):
raise TypeError(f'meta must be a dict or None, but got {type(meta)}')
meta.update(mmcv_version=mmcv.__version__, time=time.asctime())
if is_module_wrapper(model):
model = model.module
if hasattr(model, 'CLASSES') and model.CLASSES is not None:
# save class name to the meta
meta.update(CLASSES=model.CLASSES)
checkpoint = {
'meta': meta,
'state_dict': weights_to_cpu(get_state_dict(model))
}
# save optimizer state dict in the checkpoint
if isinstance(optimizer, Optimizer):
checkpoint['optimizer'] = optimizer.state_dict()
elif isinstance(optimizer, dict):
checkpoint['optimizer'] = {}
for name, optim in optimizer.items():
checkpoint['optimizer'][name] = optim.state_dict()
# save amp state dict in the checkpoint
checkpoint['amp'] = apex.amp.state_dict()
if filename.startswith('pavi://'):
try:
from pavi import modelcloud
from pavi.exception import NodeNotFoundError
except ImportError:
raise ImportError(
'Please install pavi to load checkpoint from modelcloud.')
model_path = filename[7:]
root = modelcloud.Folder()
model_dir, model_name = osp.split(model_path)
try:
model = modelcloud.get(model_dir)
except NodeNotFoundError:
model = root.create_training_model(model_dir)
with TemporaryDirectory() as tmp_dir:
checkpoint_file = osp.join(tmp_dir, model_name)
with open(checkpoint_file, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
model.create_file(checkpoint_file, name=model_name)
else:
mmcv.mkdir_or_exist(osp.dirname(filename))
# immediately flush buffer
with open(filename, 'wb') as f:
torch.save(checkpoint, f)
f.flush()
| data2vec_vision-main | beit/semantic_segmentation/mmcv_custom/apex_runner/checkpoint.py |
# Copyright (c) Open-MMLab. All rights reserved.
from .checkpoint import save_checkpoint
from .apex_iter_based_runner import IterBasedRunnerAmp
__all__ = [
'save_checkpoint', 'IterBasedRunnerAmp',
]
| data2vec_vision-main | beit/semantic_segmentation/mmcv_custom/apex_runner/__init__.py |
from mmcv.runner import OptimizerHook, HOOKS
try:
import apex
except ImportError:
print('apex is not installed')
@HOOKS.register_module()
class DistOptimizerHook(OptimizerHook):
"""Optimizer hook for distributed training."""
def __init__(self, update_interval=1, grad_clip=None, coalesce=True, bucket_size_mb=-1, use_fp16=False):
self.grad_clip = grad_clip
self.coalesce = coalesce
self.bucket_size_mb = bucket_size_mb
self.update_interval = update_interval
self.use_fp16 = use_fp16
def before_run(self, runner):
runner.optimizer.zero_grad()
def after_train_iter(self, runner):
runner.outputs['loss'] /= self.update_interval
if self.use_fp16:
with apex.amp.scale_loss(runner.outputs['loss'], runner.optimizer) as scaled_loss:
scaled_loss.backward()
else:
runner.outputs['loss'].backward()
if self.every_n_iters(runner, self.update_interval):
if self.grad_clip is not None:
self.clip_grads(runner.model.parameters())
runner.optimizer.step()
runner.optimizer.zero_grad()
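# Effect of update_interval above (an illustrative walk-through): with
# update_interval=2 each iteration's loss is divided by 2, the backward passes
# of two consecutive iterations accumulate into the same .grad buffers, and
# step() / zero_grad() only run on every second iteration; this matches the
# optimizer_config (use_fp16=True, update_interval=2) used by the BEiT-large
# UPerNet config in this repo.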
| data2vec_vision-main | beit/semantic_segmentation/mmcv_custom/apex_runner/optimizer.py |
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
cudnn_benchmark = True
| data2vec_vision-main | beit/semantic_segmentation/configs/_base_/default_runtime.py |
# dataset settings
dataset_type = 'ADE20KDataset'
data_root = 'data/ade/ADEChallengeData2016'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (640, 640)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', reduce_zero_label=True),
dict(type='Resize', img_scale=(2560, 640), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2560, 640),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/training',
ann_dir='annotations/training',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline))
| data2vec_vision-main | beit/semantic_segmentation/configs/_base_/datasets/ade20k_640x640.py |
# dataset settings
dataset_type = 'ADE20KDataset'
data_root = 'data/ade/ADEChallengeData2016'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', reduce_zero_label=True),
dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', prob=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 512),
# img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
samples_per_gpu=4,
workers_per_gpu=4,
train=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/training',
ann_dir='annotations/training',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
data_root=data_root,
img_dir='images/validation',
ann_dir='annotations/validation',
pipeline=test_pipeline))
| data2vec_vision-main | beit/semantic_segmentation/configs/_base_/datasets/ade20k.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained=None,
backbone=dict(
type='XCiT',
patch_size=16,
embed_dim=384,
depth=12,
num_heads=8,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=True,
use_rel_pos_bias=False,
),
decode_head=dict(
type='UPerHead',
in_channels=[384, 384, 384, 384],
in_index=[0, 1, 2, 3],
pool_scales=(1, 2, 3, 6),
channels=512,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
auxiliary_head=dict(
type='FCNHead',
in_channels=384,
in_index=2,
channels=256,
num_convs=1,
concat_input=False,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
# model training and testing settings
train_cfg=dict(),
test_cfg=dict(mode='whole'))
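# Note: the BEiT configs that inherit from this base swap the XCiT backbone for
# type='BEiT' and override decode_head.in_channels / auxiliary_head.in_channels
# to match the backbone embed_dim (e.g. 1024 for BEiT-large), so the four
# feature maps selected by out_indices line up with what UPerHead and FCNHead
# expect.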
| data2vec_vision-main | beit/semantic_segmentation/configs/_base_/models/upernet_beit.py |
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=160000)
checkpoint_config = dict(by_epoch=False, interval=16000)
evaluation = dict(interval=16000, metric='mIoU')
| data2vec_vision-main | beit/semantic_segmentation/configs/_base_/schedules/schedule_160k.py |
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=320000)
checkpoint_config = dict(by_epoch=False, interval=32000)
evaluation = dict(interval=32000, metric='mIoU')
| data2vec_vision-main | beit/semantic_segmentation/configs/_base_/schedules/schedule_320k.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------
_base_ = [
'../../_base_/models/upernet_beit.py', '../../_base_/datasets/ade20k_640x640.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_320k.py'
]
# We set samples_per_gpu to 1 and optimizer_config.update_interval to 2, so the total number of effective updates stays at 160k.
crop_size = (640, 640)
model = dict(
backbone=dict(
type='BEiT',
img_size=640,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=1e-6,
drop_path_rate=0.2,
out_indices=[7, 11, 15, 23],
),
decode_head=dict(
in_channels=[1024, 1024, 1024, 1024],
num_classes=150,
channels=1024,
),
auxiliary_head=dict(
in_channels=1024,
num_classes=150
),
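    # Sliding-window inference over 640x640 crops with a 426-pixel stride (adjacent windows overlap by 214 px; overlapping logits are averaged)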
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(426, 426))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
# optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
# 'relative_position_bias_table': dict(decay_mult=0.),
# 'norm': dict(decay_mult=0.)}))
optimizer = dict(_delete_=True, type='AdamW', lr=2e-5, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.95))
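# Layer-wise lr decay: each backbone block's lr is scaled roughly by layer_decay_rate ** (num_layers - block_index), so earlier blocks of the pretrained backbone are updated more gently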
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=3000,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# Trained on 8 GPUs with 1 image per GPU; gradient accumulation (update_interval=2 below) keeps the effective batch size at 16
data = dict(samples_per_gpu=1)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
# We set samples_per_gpu to 1 and optimizer_config.update_interval to 2, so the total number of effective updates stays at 160k.
fp16 = None
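# DistOptimizerHook (apex-based, from the repo's custom runner code) accumulates gradients over update_interval forward passes before each optimizer step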
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=2,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
| data2vec_vision-main | beit/semantic_segmentation/configs/beit/upernet/upernet_beit_large_24_640_slide_160k_ade20k.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------
_base_ = [
'../../_base_/models/upernet_beit.py', '../../_base_/datasets/ade20k.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
model = dict(
backbone=dict(
type='BEiT',
img_size=512,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=1e-6,
drop_path_rate=0.2,
out_indices=[7, 11, 15, 23],
),
decode_head=dict(
in_channels=[1024, 1024, 1024, 1024],
num_classes=150,
channels=1024,
),
auxiliary_head=dict(
in_channels=1024,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
# optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
# 'relative_position_bias_table': dict(decay_mult=0.),
# 'norm': dict(decay_mult=0.)}))
optimizer = dict(_delete_=True, type='AdamW', lr=2e-5, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.95))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
| data2vec_vision-main | beit/semantic_segmentation/configs/beit/upernet/upernet_beit_large_24_512_slide_160k_ade20k.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------
_base_ = [
'../../_base_/models/upernet_beit.py', '../../_base_/datasets/ade20k.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
model = dict(
backbone=dict(
type='BEiT',
img_size=512,
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=0.1,
drop_path_rate=0.1,
out_indices=[3, 5, 7, 11]
),
decode_head=dict(
in_channels=[768, 768, 768, 768],
num_classes=150,
channels=768,
),
auxiliary_head=dict(
in_channels=768,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
# optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
# 'relative_position_bias_table': dict(decay_mult=0.),
# 'norm': dict(decay_mult=0.)}))
optimizer = dict(_delete_=True, type='AdamW', lr=3e-5, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.9))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
| data2vec_vision-main | beit/semantic_segmentation/configs/beit/upernet/upernet_beit_base_12_512_slide_160k_ade20k.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------
_base_ = [
'../../_base_/models/upernet_beit.py', '../../_base_/datasets/ade20k.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
model = dict(
backbone=dict(
type='BEiT',
img_size=512,
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=0.1,
drop_path_rate=0.1,
out_indices=[3, 5, 7, 11]
),
decode_head=dict(
in_channels=[768, 768, 768, 768],
num_classes=150,
channels=768,
),
auxiliary_head=dict(
in_channels=768,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
# optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
# 'relative_position_bias_table': dict(decay_mult=0.),
# 'norm': dict(decay_mult=0.)}))
optimizer = dict(_delete_=True, type='AdamW', lr=3e-5, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.9))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data = dict(samples_per_gpu=2)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
# test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341))
find_unused_parameters = True
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 512),
img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=True,
transforms=[
dict(type='SETR_Resize', keep_ratio=True,
crop_size=crop_size, setr_multi_scale=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
    samples_per_gpu=2,
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
| data2vec_vision-main | beit/semantic_segmentation/configs/beit/upernet/upernet_beit_base_12_512_slide_160k_ade20k_ms.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------
_base_ = [
'../../_base_/models/upernet_beit.py', '../../_base_/datasets/ade20k.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_160k.py'
]
crop_size = (512, 512)
model = dict(
backbone=dict(
type='BEiT',
img_size=512,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=1e-6,
drop_path_rate=0.2,
out_indices=[7, 11, 15, 23],
),
decode_head=dict(
in_channels=[1024, 1024, 1024, 1024],
num_classes=150,
channels=1024,
),
auxiliary_head=dict(
in_channels=1024,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
# optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
# 'relative_position_bias_table': dict(decay_mult=0.),
# 'norm': dict(decay_mult=0.)}))
optimizer = dict(_delete_=True, type='AdamW', lr=2e-5, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.95))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data = dict(samples_per_gpu=2)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
# test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341))
find_unused_parameters = True
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2048, 512),
img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=True,
transforms=[
dict(type='SETR_Resize', keep_ratio=True,
crop_size=crop_size, setr_multi_scale=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
    samples_per_gpu=2,
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
| data2vec_vision-main | beit/semantic_segmentation/configs/beit/upernet/upernet_beit_large_24_512_slide_160k_ade20k_ms.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------
_base_ = [
'../../_base_/models/upernet_beit.py', '../../_base_/datasets/ade20k_640x640.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_160k.py'
]
crop_size = (640, 640)
model = dict(
backbone=dict(
type='BEiT',
img_size=640,
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=0.1,
drop_path_rate=0.1,
out_indices=[3, 5, 7, 11]
),
decode_head=dict(
in_channels=[768, 768, 768, 768],
num_classes=150,
channels=768,
),
auxiliary_head=dict(
in_channels=768,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(426, 426))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
# optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
# 'relative_position_bias_table': dict(decay_mult=0.),
# 'norm': dict(decay_mult=0.)}))
optimizer = dict(_delete_=True, type='AdamW', lr=3e-5, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.9))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (640, 640)
# test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341))
find_unused_parameters = True
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2560, 640),
img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=True,
transforms=[
dict(type='SETR_Resize', keep_ratio=True,
crop_size=crop_size, setr_multi_scale=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline),
samples_per_gpu=2,
)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
| data2vec_vision-main | beit/semantic_segmentation/configs/beit/upernet/upernet_beit_base_12_640_slide_160k_ade20k_ms.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------
_base_ = [
'../../_base_/models/upernet_beit.py', '../../_base_/datasets/ade20k_640x640.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_320k.py'
]
crop_size = (640, 640)
model = dict(
backbone=dict(
type='BEiT',
img_size=640,
patch_size=16,
embed_dim=1024,
depth=24,
num_heads=16,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=1e-6,
drop_path_rate=0.2,
out_indices=[7, 11, 15, 23],
),
decode_head=dict(
in_channels=[1024, 1024, 1024, 1024],
num_classes=150,
channels=1024,
),
auxiliary_head=dict(
in_channels=1024,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(426, 426))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
# optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
# 'relative_position_bias_table': dict(decay_mult=0.),
# 'norm': dict(decay_mult=0.)}))
optimizer = dict(_delete_=True, type='AdamW', lr=2e-5, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.95))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=3000,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# Trained on 8 GPUs with 1 image per GPU; gradient accumulation (update_interval=2 below) keeps the effective batch size at 16
data = dict(samples_per_gpu=1)
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (640, 640)
# test_cfg = dict(mode='slide', crop_size=crop_size, stride=(341, 341))
find_unused_parameters = True
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(2560, 640),
img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
flip=True,
transforms=[
dict(type='SETR_Resize', keep_ratio=True,
crop_size=crop_size, setr_multi_scale=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
    samples_per_gpu=1,
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=2,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
| data2vec_vision-main | beit/semantic_segmentation/configs/beit/upernet/upernet_beit_large_24_640_slide_160k_ade20k_ms.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------
_base_ = [
'../../_base_/models/upernet_beit.py', '../../_base_/datasets/ade20k_640x640.py',
'../../_base_/default_runtime.py', '../../_base_/schedules/schedule_160k.py'
]
crop_size = (640, 640)
model = dict(
backbone=dict(
type='BEiT',
img_size=640,
patch_size=16,
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4,
qkv_bias=True,
use_abs_pos_emb=False,
use_rel_pos_bias=True,
init_values=0.1,
drop_path_rate=0.1,
out_indices=[3, 5, 7, 11]
),
decode_head=dict(
in_channels=[768, 768, 768, 768],
num_classes=150,
channels=768,
),
auxiliary_head=dict(
in_channels=768,
num_classes=150
),
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(426, 426))
)
# AdamW optimizer, no weight decay for position embedding & layer norm in backbone
# optimizer = dict(_delete_=True, type='AdamW', lr=0.00006, betas=(0.9, 0.999), weight_decay=0.01,
# paramwise_cfg=dict(custom_keys={'absolute_pos_embed': dict(decay_mult=0.),
# 'relative_position_bias_table': dict(decay_mult=0.),
# 'norm': dict(decay_mult=0.)}))
optimizer = dict(_delete_=True, type='AdamW', lr=3e-5, betas=(0.9, 0.999), weight_decay=0.05,
constructor='LayerDecayOptimizerConstructor',
paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.9))
lr_config = dict(_delete_=True, policy='poly',
warmup='linear',
warmup_iters=1500,
warmup_ratio=1e-6,
power=1.0, min_lr=0.0, by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data=dict(samples_per_gpu=2)
runner = dict(type='IterBasedRunnerAmp')
# do not use mmdet version fp16
fp16 = None
optimizer_config = dict(
type="DistOptimizerHook",
update_interval=1,
grad_clip=None,
coalesce=True,
bucket_size_mb=-1,
use_fp16=True,
)
| data2vec_vision-main | beit/semantic_segmentation/configs/beit/upernet/upernet_beit_base_12_640_slide_160k_ade20k.py |
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm, mmseg, setr, xcit and swin code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/fudan-zvg/SETR
# https://github.com/facebookresearch/xcit/
# https://github.com/microsoft/Swin-Transformer
# --------------------------------------------------------
import math
import torch
from functools import partial
import torch.nn as nn
import torch.nn.functional as F
from timm.models.layers import drop_path, to_2tuple, trunc_normal_
from mmcv_custom import load_checkpoint
from mmseg.utils import get_root_logger
from mmseg.models.builder import BACKBONES
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob=None):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
def forward(self, x):
return drop_path(x, self.drop_prob, self.training)
def extra_repr(self) -> str:
return 'p={}'.format(self.drop_prob)
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Linear(in_features, hidden_features)
self.act = act_layer()
self.fc2 = nn.Linear(hidden_features, out_features)
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.act(x)
        # x = self.drop(x)
        # (commented out to match the original BERT implementation)
x = self.fc2(x)
x = self.drop(x)
return x
class Attention(nn.Module):
def __init__(
self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0.,
proj_drop=0., window_size=None, attn_head_dim=None):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
# NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights
self.scale = qk_scale or head_dim ** -0.5
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.v_bias = None
if window_size:
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
            # cls to token, token to cls, and cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1, ) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
# trunc_normal_(self.relative_position_bias_table, std=.0)
else:
self.window_size = None
self.relative_position_bias_table = None
self.relative_position_index = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, rel_pos_bias=None):
B, N, C = x.shape
qkv_bias = None
if self.q_bias is not None:
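            # k has no learnable bias: the full qkv bias is q_bias, a constant all-zero k bias, then v_bias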
qkv_bias = torch.cat((self.q_bias, torch.zeros_like(self.v_bias, requires_grad=False), self.v_bias))
# qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple)
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
if self.relative_position_bias_table is not None:
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
attn = attn + relative_position_bias.unsqueeze(0)
if rel_pos_bias is not None:
attn = attn + rel_pos_bias
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., init_values=None, act_layer=nn.GELU, norm_layer=nn.LayerNorm,
window_size=None, attn_head_dim=None):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, window_size=window_size, attn_head_dim=attn_head_dim)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
if init_values is not None:
self.gamma_1 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
self.gamma_2 = nn.Parameter(init_values * torch.ones((dim)),requires_grad=True)
else:
self.gamma_1, self.gamma_2 = None, None
def forward(self, x, rel_pos_bias=None):
if self.gamma_1 is None:
x = x + self.drop_path(self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
x = x + self.drop_path(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), rel_pos_bias=rel_pos_bias))
x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
return x
class PatchEmbed(nn.Module):
""" Image to Patch Embedding
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768):
super().__init__()
img_size = to_2tuple(img_size)
patch_size = to_2tuple(patch_size)
num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0])
self.patch_shape = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
self.img_size = img_size
self.patch_size = patch_size
self.num_patches = num_patches
self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x, **kwargs):
B, C, H, W = x.shape
# FIXME look at relaxing size constraints
# assert H == self.img_size[0] and W == self.img_size[1], \
# f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
x = self.proj(x)
Hp, Wp = x.shape[2], x.shape[3]
x = x.flatten(2).transpose(1, 2)
return x, (Hp, Wp)
class HybridEmbed(nn.Module):
""" CNN Feature Map Embedding
Extract feature map from CNN, flatten, project to embedding dim.
"""
def __init__(self, backbone, img_size=224, feature_size=None, in_chans=3, embed_dim=768):
super().__init__()
assert isinstance(backbone, nn.Module)
img_size = to_2tuple(img_size)
self.img_size = img_size
self.backbone = backbone
if feature_size is None:
with torch.no_grad():
# FIXME this is hacky, but most reliable way of determining the exact dim of the output feature
# map for all networks, the feature metadata has reliable channel and stride info, but using
# stride to calc feature dim requires info about padding of each stage that isn't captured.
training = backbone.training
if training:
backbone.eval()
o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1]))[-1]
feature_size = o.shape[-2:]
feature_dim = o.shape[1]
backbone.train(training)
else:
feature_size = to_2tuple(feature_size)
feature_dim = self.backbone.feature_info.channels()[-1]
self.num_patches = feature_size[0] * feature_size[1]
self.proj = nn.Linear(feature_dim, embed_dim)
def forward(self, x):
x = self.backbone(x)[-1]
x = x.flatten(2).transpose(1, 2)
x = self.proj(x)
return x
class RelativePositionBias(nn.Module):
def __init__(self, window_size, num_heads):
super().__init__()
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
        # cls to token, token to cls, and cls to cls
# get pair-wise relative position index for each token inside the window
coords_h = torch.arange(window_size[0])
coords_w = torch.arange(window_size[1])
coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = \
torch.zeros(size=(window_size[0] * window_size[1] + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = self.num_relative_distance - 3
relative_position_index[0:, 0] = self.num_relative_distance - 2
relative_position_index[0, 0] = self.num_relative_distance - 1
self.register_buffer("relative_position_index", relative_position_index)
# trunc_normal_(self.relative_position_bias_table, std=.02)
def forward(self):
relative_position_bias = \
self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
@BACKBONES.register_module()
class BEiT(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=80, embed_dim=768, depth=12,
num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
drop_path_rate=0., hybrid_backbone=None, norm_layer=None, init_values=None,
use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False,
out_indices=[3, 5, 7, 11]):
super().__init__()
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
self.num_classes = num_classes
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
if hybrid_backbone is not None:
self.patch_embed = HybridEmbed(
hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim)
else:
self.patch_embed = PatchEmbed(
img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim)
num_patches = self.patch_embed.num_patches
self.out_indices = out_indices
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
# self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
if use_abs_pos_emb:
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim))
else:
self.pos_embed = None
self.pos_drop = nn.Dropout(p=drop_rate)
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads)
else:
self.rel_pos_bias = None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.use_rel_pos_bias = use_rel_pos_bias
self.blocks = nn.ModuleList([
Block(
dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer,
init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None)
for i in range(depth)])
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
# trunc_normal_(self.mask_token, std=.02)
self.out_indices = out_indices
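        # Convert the single-scale ViT output into a 4-level feature pyramid (upsample / identity / downsample) for the UPerNet decoder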
if patch_size == 16:
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
nn.SyncBatchNorm(embed_dim),
nn.GELU(),
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn3 = nn.Identity()
self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2)
elif patch_size == 8:
self.fpn1 = nn.Sequential(
nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2),
)
self.fpn2 = nn.Identity()
self.fpn3 = nn.Sequential(
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.fpn4 = nn.Sequential(
nn.MaxPool2d(kernel_size=4, stride=4),
)
self.apply(self._init_weights)
self.fix_init_weight()
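    # Rescale the residual-branch output projections (attn.proj and mlp.fc2) by 1/sqrt(2 * layer_id) to keep activations stable with depth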
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
def init_weights(self, pretrained=None):
"""Initialize the weights in backbone.
Args:
pretrained (str, optional): Path to pre-trained weights.
Defaults to None.
"""
def _init_weights(m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
if isinstance(pretrained, str):
self.apply(_init_weights)
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=False, logger=logger)
elif pretrained is None:
self.apply(_init_weights)
else:
raise TypeError('pretrained must be a str or None')
def get_num_layers(self):
return len(self.blocks)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token'}
def forward_features(self, x):
B, C, H, W = x.shape
x, (Hp, Wp) = self.patch_embed(x)
batch_size, seq_len, _ = x.size()
cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks
x = torch.cat((cls_tokens, x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
features = []
for i, blk in enumerate(self.blocks):
x = blk(x, rel_pos_bias=rel_pos_bias)
if i in self.out_indices:
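                # drop the [CLS] token and reshape the patch tokens back into a (B, C, Hp, Wp) feature map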
xp = x[:, 1:, :].permute(0, 2, 1).reshape(B, -1, Hp, Wp)
features.append(xp.contiguous())
ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4]
for i in range(len(features)):
features[i] = ops[i](features[i])
return tuple(features)
def forward(self, x):
x = self.forward_features(x)
return x
| data2vec_vision-main | beit/semantic_segmentation/backbone/beit.py |
#!/usr/bin/env python3
import torch
from setuptools import find_packages, setup
torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
assert torch_ver >= [1, 4], "Requires PyTorch >= 1.4"
setup(
name="layoutlm",
version="0.0",
author="Yiheng Xu",
url="https://github.com/microsoft/unilm/tree/master/layoutlm",
description="LayoutLM",
packages=find_packages(exclude=("configs", "tests")),
python_requires=">=3.6",
install_requires=[
"transformers==2.9.0",
"tensorboardX==2.0",
"lxml==4.5.1",
"seqeval==0.0.12",
"Pillow==7.1.2",
],
extras_require={
"dev": ["flake8==3.8.2", "isort==4.3.21", "black==19.10b0", "pre-commit==2.4.0"]
},
)
| data2vec_vision-main | layoutlm/deprecated/setup.py |
# coding=utf-8
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForSequenceClassification,
BertTokenizerFast,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
from layoutlm import LayoutlmConfig, LayoutlmForSequenceClassification
from layoutlm.data.rvl_cdip import CdipProcessor, load_and_cache_examples
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (BertConfig, RobertaConfig, LayoutlmConfig)
),
(),
)
MODEL_CLASSES = {
"bert": (BertConfig, BertForSequenceClassification, BertTokenizerFast),
"roberta": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
"layoutlm": (LayoutlmConfig, LayoutlmForSequenceClassification, BertTokenizerFast),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def train(args, train_dataset, model, tokenizer): # noqa C901
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter(comment="_" + os.path.basename(args.output_dir))
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = (
RandomSampler(train_dataset)
if args.local_rank == -1
else DistributedSampler(train_dataset)
)
train_dataloader = DataLoader(
train_dataset, sampler=train_sampler, batch_size=args.train_batch_size
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = (
args.max_steps
// (len(train_dataloader) // args.gradient_accumulation_steps)
+ 1
)
else:
t_total = (
len(train_dataloader)
// args.gradient_accumulation_steps
* args.num_train_epochs
)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
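    # biases and LayerNorm weights are excluded from weight decay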
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon
)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
)
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.fp16_opt_level
)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(
" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size
)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
)
    set_seed(args)  # Added here for reproducibility (even between Python 2 and 3)
for _ in train_iterator:
epoch_iterator = tqdm(
train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]
)
for step, batch in enumerate(epoch_iterator):
model.train()
if args.model_type != "layoutlm":
batch = batch[:4]
batch = tuple(t.to(args.device) for t in batch)
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
}
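            # LayoutLM additionally consumes the token bounding boxes (normalized to the 0-1000 range)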
if args.model_type == "layoutlm":
inputs["bbox"] = batch[4]
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "layoutlm"] else None
            ) # RoBERTa doesn't use segment_ids
outputs = model(**inputs)
loss = outputs[
0
] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), args.max_grad_norm
)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if (
args.local_rank in [-1, 0]
and args.logging_steps > 0
and global_step % args.logging_steps == 0
):
# Log metrics
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer, "val")
for key, value in results.items():
tb_writer.add_scalar(
"eval_{}".format(key), value, global_step
)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar(
"loss",
(tr_loss - logging_loss) / args.logging_steps,
global_step,
)
logging_loss = tr_loss
if (
args.local_rank in [-1, 0]
and args.save_steps > 0
and global_step % args.save_steps == 0
):
# Save model checkpoint
output_dir = os.path.join(
args.output_dir, "checkpoint-{}".format(global_step)
)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
tokenizer.save_pretrained(output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, mode, prefix=""):
results = {}
eval_dataset = load_and_cache_examples(args, tokenizer, mode=mode)
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(
eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size
)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
if args.model_type != "layoutlm":
batch = batch[:4]
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
}
if args.model_type == "layoutlm":
inputs["bbox"] = batch[4]
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "layoutlm"] else None
            ) # RoBERTa doesn't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(
out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0
)
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=1)
result = {"acc": simple_accuracy(preds=preds, labels=out_label_ids)}
results.update(result)
output_eval_file = os.path.join(
args.output_dir, prefix, "{}_results.txt".format(mode)
)
with open(output_eval_file, "w") as writer:
logger.info("***** {} results {} *****".format(mode, prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
output_eval_file = os.path.join(
args.output_dir, prefix, "{}_compare.txt".format(mode)
)
with open(output_eval_file, "w") as writer:
for p, l in zip(preds, out_label_ids):
writer.write("%s %s\n" % (p, l))
return results
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: "
+ ", ".join(ALL_MODELS),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
## Other parameters
parser.add_argument(
"--config_name",
default="",
type=str,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--do_train", action="store_true", help="Whether to run training."
)
parser.add_argument(
"--do_eval", action="store_true", help="Whether to run eval on the dev set."
)
parser.add_argument(
"--do_test", action="store_true", help="Whether to run test on the test set."
)
parser.add_argument(
"--evaluate_during_training",
action="store_true",
help="Rul evaluation during training at each logging step.",
)
parser.add_argument(
"--do_lower_case",
action="store_true",
help="Set this flag if you are using an uncased model.",
)
parser.add_argument(
"--per_gpu_train_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_gpu_eval_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--weight_decay", default=0.0, type=float, help="Weight deay if we apply some."
)
parser.add_argument(
"--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer."
)
parser.add_argument(
"--max_grad_norm", default=1.0, type=float, help="Max gradient norm."
)
parser.add_argument(
"--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument(
"--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps."
)
parser.add_argument(
"--logging_steps", type=int, default=50, help="Log every X updates steps."
)
parser.add_argument(
"--save_steps",
type=int,
default=50,
help="Save checkpoint every X updates steps.",
)
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument(
"--no_cuda", action="store_true", help="Avoid using CUDA when available"
)
parser.add_argument(
"--overwrite_output_dir",
action="store_true",
help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache",
action="store_true",
help="Overwrite the cached training and evaluation sets",
)
parser.add_argument(
"--seed", type=int, default=42, help="random seed for initialization"
)
parser.add_argument(
"--tpu",
action="store_true",
help="Whether to run on the TPU defined in the environment variables",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="For distributed training: local_rank",
)
parser.add_argument(
"--server_ip", type=str, default="", help="For distant debugging."
)
parser.add_argument(
"--server_port", type=str, default="", help="For distant debugging."
)
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(
address=(args.server_ip, args.server_port), redirect_output=True
)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda:0" if torch.cuda.is_available() and not args.no_cuda else "cpu"
)
if torch.cuda.is_available():
torch.cuda.set_device(device)
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
processor = CdipProcessor()
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, tokenizer, mode="train")
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Saving best-practices: if you use default names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(
args.output_dir, do_lower_case=args.do_lower_case
)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(
args.output_dir, do_lower_case=args.do_lower_case
)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c)
for c in sorted(
glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)
)
)
logging.getLogger("transformers.modeling_utils").setLevel(
logging.WARN
) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = (
checkpoint.split("/")[-1]
if checkpoint.find("checkpoint") != -1 and args.eval_all_checkpoints
else ""
)
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, mode="val", prefix=prefix)
result = dict(
("val_" + k + "_{}".format(global_step), v) for k, v in result.items()
)
results.update(result)
if args.do_test and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(
args.output_dir, do_lower_case=args.do_lower_case
)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c)
for c in sorted(
glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)
)
)
logging.getLogger("transformers.modeling_utils").setLevel(
logging.WARN
) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = (
checkpoint.split("/")[-1]
if checkpoint.find("checkpoint") != -1 and args.eval_all_checkpoints
else ""
)
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, mode="test", prefix=prefix)
result = dict(
("test_" + k + "_{}".format(global_step), v) for k, v in result.items()
)
results.update(result)
return results
if __name__ == "__main__":
main()
| data2vec_vision-main | layoutlm/deprecated/examples/classification/run_classification.py |
import argparse
import json
import os
from PIL import Image
from transformers import AutoTokenizer
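# bbox_string() scales absolute pixel coordinates into the 0-1000 integer range
# used by LayoutLM's 2D position embeddings.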
def bbox_string(box, width, length):
return (
str(int(1000 * (box[0] / width)))
+ " "
+ str(int(1000 * (box[1] / length)))
+ " "
+ str(int(1000 * (box[2] / width)))
+ " "
+ str(int(1000 * (box[3] / length)))
)
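# actual_bbox_string() keeps the raw pixel coordinates and appends the page size,
# which is later needed to recover image-level positions.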
def actual_bbox_string(box, width, length):
return (
str(box[0])
+ " "
+ str(box[1])
+ " "
+ str(box[2])
+ " "
+ str(box[3])
+ "\t"
+ str(width)
+ " "
+ str(length)
)
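# convert() walks the FUNSD annotation JSONs and writes three parallel .tmp files:
# tokens with IOBES-style labels (O/S-/B-/I-/E-), normalized boxes, and raw boxes
# together with the source image file name.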
def convert(args):
with open(
os.path.join(args.output_dir, args.data_split + ".txt.tmp"),
"w",
encoding="utf8",
) as fw, open(
os.path.join(args.output_dir, args.data_split + "_box.txt.tmp"),
"w",
encoding="utf8",
) as fbw, open(
os.path.join(args.output_dir, args.data_split + "_image.txt.tmp"),
"w",
encoding="utf8",
) as fiw:
for file in os.listdir(args.data_dir):
file_path = os.path.join(args.data_dir, file)
with open(file_path, "r", encoding="utf8") as f:
data = json.load(f)
image_path = file_path.replace("annotations", "images")
image_path = image_path.replace("json", "png")
file_name = os.path.basename(image_path)
image = Image.open(image_path)
width, length = image.size
for item in data["form"]:
words, label = item["words"], item["label"]
words = [w for w in words if w["text"].strip() != ""]
if len(words) == 0:
continue
if label == "other":
for w in words:
fw.write(w["text"] + "\tO\n")
fbw.write(
w["text"]
+ "\t"
+ bbox_string(w["box"], width, length)
+ "\n"
)
fiw.write(
w["text"]
+ "\t"
+ actual_bbox_string(w["box"], width, length)
+ "\t"
+ file_name
+ "\n"
)
else:
if len(words) == 1:
fw.write(words[0]["text"] + "\tS-" + label.upper() + "\n")
fbw.write(
words[0]["text"]
+ "\t"
+ bbox_string(words[0]["box"], width, length)
+ "\n"
)
fiw.write(
words[0]["text"]
+ "\t"
+ actual_bbox_string(words[0]["box"], width, length)
+ "\t"
+ file_name
+ "\n"
)
else:
fw.write(words[0]["text"] + "\tB-" + label.upper() + "\n")
fbw.write(
words[0]["text"]
+ "\t"
+ bbox_string(words[0]["box"], width, length)
+ "\n"
)
fiw.write(
words[0]["text"]
+ "\t"
+ actual_bbox_string(words[0]["box"], width, length)
+ "\t"
+ file_name
+ "\n"
)
for w in words[1:-1]:
fw.write(w["text"] + "\tI-" + label.upper() + "\n")
fbw.write(
w["text"]
+ "\t"
+ bbox_string(w["box"], width, length)
+ "\n"
)
fiw.write(
w["text"]
+ "\t"
+ actual_bbox_string(w["box"], width, length)
+ "\t"
+ file_name
+ "\n"
)
fw.write(words[-1]["text"] + "\tE-" + label.upper() + "\n")
fbw.write(
words[-1]["text"]
+ "\t"
+ bbox_string(words[-1]["box"], width, length)
+ "\n"
)
fiw.write(
words[-1]["text"]
+ "\t"
+ actual_bbox_string(words[-1]["box"], width, length)
+ "\t"
+ file_name
+ "\n"
)
fw.write("\n")
fbw.write("\n")
fiw.write("\n")
def seg_file(file_path, tokenizer, max_len):
subword_len_counter = 0
output_path = file_path[:-4]
with open(file_path, "r", encoding="utf8") as f_p, open(
output_path, "w", encoding="utf8"
) as fw_p:
for line in f_p:
line = line.rstrip()
if not line:
fw_p.write(line + "\n")
subword_len_counter = 0
continue
token = line.split("\t")[0]
current_subwords_len = len(tokenizer.tokenize(token))
# Token contains strange control characters like \x96 or \x95
# Just filter out the complete line
if current_subwords_len == 0:
continue
if (subword_len_counter + current_subwords_len) > max_len:
fw_p.write("\n" + line + "\n")
subword_len_counter = current_subwords_len
continue
subword_len_counter += current_subwords_len
fw_p.write(line + "\n")
def seg(args):
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path, do_lower_case=True
)
seg_file(
os.path.join(args.output_dir, args.data_split + ".txt.tmp"),
tokenizer,
args.max_len,
)
seg_file(
os.path.join(args.output_dir, args.data_split + "_box.txt.tmp"),
tokenizer,
args.max_len,
)
seg_file(
os.path.join(args.output_dir, args.data_split + "_image.txt.tmp"),
tokenizer,
args.max_len,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir", type=str, default="data/training_data/annotations"
)
parser.add_argument("--data_split", type=str, default="train")
parser.add_argument("--output_dir", type=str, default="data")
parser.add_argument("--model_name_or_path", type=str, default="bert-base-uncased")
parser.add_argument("--max_len", type=int, default=510)
args = parser.parse_args()
convert(args)
seg(args)
| data2vec_vision-main | layoutlm/deprecated/examples/seq_labeling/preprocess.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for named entity recognition on CoNLL-2003 (Bert or Roberta). """
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import shutil
import numpy as np
import torch
from seqeval.metrics import (
classification_report,
f1_score,
precision_score,
recall_score,
)
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForTokenClassification,
BertTokenizer,
RobertaConfig,
RobertaForTokenClassification,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
from layoutlm import FunsdDataset, LayoutlmConfig, LayoutlmForTokenClassification
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (BertConfig, RobertaConfig, LayoutlmConfig)
),
(),
)
MODEL_CLASSES = {
"bert": (BertConfig, BertForTokenClassification, BertTokenizer),
"roberta": (RobertaConfig, RobertaForTokenClassification, RobertaTokenizer),
"layoutlm": (LayoutlmConfig, LayoutlmForTokenClassification, BertTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
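# collate_fn stacks every tensor field of a batch except the last two entries,
# which are kept as plain Python lists.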
def collate_fn(data):
batch = [i for i in zip(*data)]
for i in range(len(batch)):
if i < len(batch) - 2:
batch[i] = torch.stack(batch[i], 0)
return tuple(batch)
def get_labels(path):
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
def train( # noqa C901
args, train_dataset, model, tokenizer, labels, pad_token_label_id
):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter(logdir="runs/" + os.path.basename(args.output_dir))
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = (
RandomSampler(train_dataset)
if args.local_rank == -1
else DistributedSampler(train_dataset)
)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=args.train_batch_size,
collate_fn=None,
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = (
args.max_steps
// (len(train_dataloader) // args.gradient_accumulation_steps)
+ 1
)
else:
t_total = (
len(train_dataloader)
// args.gradient_accumulation_steps
* args.num_train_epochs
)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon
)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
)
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.fp16_opt_level
)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(
" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size
)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
)
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
epoch_iterator = tqdm(
train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]
)
for step, batch in enumerate(epoch_iterator):
model.train()
inputs = {
"input_ids": batch[0].to(args.device),
"attention_mask": batch[1].to(args.device),
"labels": batch[3].to(args.device),
}
if args.model_type in ["layoutlm"]:
inputs["bbox"] = batch[4].to(args.device)
inputs["token_type_ids"] = (
batch[2].to(args.device) if args.model_type in ["bert", "layoutlm"] else None
            )  # RoBERTa doesn't use segment_ids
outputs = model(**inputs)
# model outputs are always tuple in pytorch-transformers (see doc)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), args.max_grad_norm
)
else:
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.max_grad_norm
)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if (
args.local_rank in [-1, 0]
and args.logging_steps > 0
and global_step % args.logging_steps == 0
):
# Log metrics
if (
args.local_rank in [-1, 0] and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results, _ = evaluate(
args,
model,
tokenizer,
labels,
pad_token_label_id,
mode="dev",
)
for key, value in results.items():
tb_writer.add_scalar(
"eval_{}".format(key), value, global_step
)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar(
"loss",
(tr_loss - logging_loss) / args.logging_steps,
global_step,
)
logging_loss = tr_loss
if (
args.local_rank in [-1, 0]
and args.save_steps > 0
and global_step % args.save_steps == 0
):
# Save model checkpoint
output_dir = os.path.join(
args.output_dir, "checkpoint-{}".format(global_step)
)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
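# evaluate() runs token classification on the FUNSD split selected by `mode` and
# returns seqeval precision/recall/F1 plus the average loss and the predictions.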
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
eval_dataset = FunsdDataset(args, tokenizer, labels, pad_token_label_id, mode=mode)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=args.eval_batch_size,
collate_fn=None,
)
# Eval!
logger.info("***** Running evaluation %s *****", prefix)
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
with torch.no_grad():
inputs = {
"input_ids": batch[0].to(args.device),
"attention_mask": batch[1].to(args.device),
"labels": batch[3].to(args.device),
}
if args.model_type in ["layoutlm"]:
inputs["bbox"] = batch[4].to(args.device)
inputs["token_type_ids"] = (
batch[2].to(args.device)
if args.model_type in ["bert", "layoutlm"]
else None
            )  # RoBERTa doesn't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
tmp_eval_loss = (
tmp_eval_loss.mean()
) # mean() to average on multi-gpu parallel evaluating
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(
out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0
)
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"loss": eval_loss,
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list),
}
report = classification_report(out_label_list, preds_list)
logger.info("\n" + report)
logger.info("***** Eval results %s *****", prefix)
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
return results, preds_list
def main(): # noqa C901
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: "
+ ", ".join(ALL_MODELS),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
## Other parameters
parser.add_argument(
"--labels",
default="",
type=str,
help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
)
parser.add_argument(
"--config_name",
default="",
type=str,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--do_train", action="store_true", help="Whether to run training."
)
parser.add_argument(
"--do_eval", action="store_true", help="Whether to run eval on the dev set."
)
parser.add_argument(
"--do_predict",
action="store_true",
help="Whether to run predictions on the test set.",
)
parser.add_argument(
"--evaluate_during_training",
action="store_true",
help="Whether to run evaluation during training at each logging step.",
)
parser.add_argument(
"--do_lower_case",
action="store_true",
help="Set this flag if you are using an uncased model.",
)
parser.add_argument(
"--per_gpu_train_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_gpu_eval_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--weight_decay", default=0.0, type=float, help="Weight decay if we apply some."
)
parser.add_argument(
"--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer."
)
parser.add_argument(
"--max_grad_norm", default=1.0, type=float, help="Max gradient norm."
)
parser.add_argument(
"--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument(
"--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps."
)
parser.add_argument(
"--logging_steps", type=int, default=50, help="Log every X updates steps."
)
parser.add_argument(
"--save_steps",
type=int,
default=50,
help="Save checkpoint every X updates steps.",
)
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument(
"--no_cuda", action="store_true", help="Avoid using CUDA when available"
)
parser.add_argument(
"--overwrite_output_dir",
action="store_true",
help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache",
action="store_true",
help="Overwrite the cached training and evaluation sets",
)
parser.add_argument(
"--seed", type=int, default=42, help="random seed for initialization"
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="For distributed training: local_rank",
)
parser.add_argument(
"--server_ip", type=str, default="", help="For distant debugging."
)
parser.add_argument(
"--server_port", type=str, default="", help="For distant debugging."
)
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
):
if not args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
else:
if args.local_rank in [-1, 0]:
shutil.rmtree(args.output_dir)
if not os.path.exists(args.output_dir) and (args.do_eval or args.do_predict):
raise ValueError(
"Output directory ({}) does not exist. Please train and save the model before inference stage.".format(
args.output_dir
)
)
if (
not os.path.exists(args.output_dir)
and args.do_train
and args.local_rank in [-1, 0]
):
os.makedirs(args.output_dir)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(
address=(args.server_ip, args.server_port), redirect_output=True
)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
)
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
filename=os.path.join(args.output_dir, "train.log")
if args.local_rank in [-1, 0]
else None,
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
labels = get_labels(args.labels)
num_labels = len(labels)
# Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later
pad_token_label_id = CrossEntropyLoss().ignore_index
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = FunsdDataset(
args, tokenizer, labels, pad_token_label_id, mode="train"
)
global_step, tr_loss = train(
args, train_dataset, model, tokenizer, labels, pad_token_label_id
)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
    # Saving best practices: if you use default names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(
args.output_dir, do_lower_case=args.do_lower_case
)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c)
for c in sorted(
glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)
)
)
logging.getLogger("pytorch_transformers.modeling_utils").setLevel(
logging.WARN
) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result, _ = evaluate(
args,
model,
tokenizer,
labels,
pad_token_label_id,
mode="test",
prefix=global_step,
)
if global_step:
result = {"{}_{}".format(global_step, k): v for k, v in result.items()}
results.update(result)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
if args.do_predict and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(
args.model_name_or_path, do_lower_case=args.do_lower_case
)
model = model_class.from_pretrained(args.output_dir)
model.to(args.device)
result, predictions = evaluate(
args, model, tokenizer, labels, pad_token_label_id, mode="test"
)
# Save results
output_test_results_file = os.path.join(args.output_dir, "test_results.txt")
with open(output_test_results_file, "w") as writer:
for key in sorted(result.keys()):
writer.write("{} = {}\n".format(key, str(result[key])))
# Save predictions
output_test_predictions_file = os.path.join(
args.output_dir, "test_predictions.txt"
)
with open(output_test_predictions_file, "w", encoding="utf8") as writer:
with open(
os.path.join(args.data_dir, "test.txt"), "r", encoding="utf8"
) as f:
example_id = 0
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
writer.write(line)
if not predictions[example_id]:
example_id += 1
elif predictions[example_id]:
output_line = (
line.split()[0]
+ " "
+ predictions[example_id].pop(0)
+ "\n"
)
writer.write(output_line)
else:
logger.warning(
"Maximum sequence length exceeded: No prediction for '%s'.",
line.split()[0],
)
return results
if __name__ == "__main__":
main()
| data2vec_vision-main | layoutlm/deprecated/examples/seq_labeling/run_seq_labeling.py |
# flake8: noqa
from .data.funsd import FunsdDataset
from .modeling.layoutlm import (
LayoutlmConfig,
LayoutlmForSequenceClassification,
LayoutlmForTokenClassification,
)
| data2vec_vision-main | layoutlm/deprecated/layoutlm/__init__.py |
| data2vec_vision-main | layoutlm/deprecated/layoutlm/modeling/__init__.py |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import BertConfig, BertModel, BertPreTrainedModel
from transformers.modeling_bert import BertLayerNorm
logger = logging.getLogger(__name__)
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_MAP = {}
LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LayoutlmConfig(BertConfig):
pretrained_config_archive_map = LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP
model_type = "bert"
def __init__(self, max_2d_position_embeddings=1024, **kwargs):
super().__init__(**kwargs)
self.max_2d_position_embeddings = max_2d_position_embeddings
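# LayoutlmEmbeddings extends the standard BERT embeddings with 2D position
# embeddings: separate lookups for the left/right x and upper/lower y coordinates
# plus the box height and width, all summed into the token embedding.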
class LayoutlmEmbeddings(nn.Module):
def __init__(self, config):
super(LayoutlmEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size, padding_idx=0
)
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size
)
self.x_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.y_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.h_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.w_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.token_type_embeddings = nn.Embedding(
config.type_vocab_size, config.hidden_size
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(
self,
input_ids,
bbox,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_ids.device
)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
h_position_embeddings = self.h_position_embeddings(
bbox[:, :, 3] - bbox[:, :, 1]
)
w_position_embeddings = self.w_position_embeddings(
bbox[:, :, 2] - bbox[:, :, 0]
)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = (
words_embeddings
+ position_embeddings
+ left_position_embeddings
+ upper_position_embeddings
+ right_position_embeddings
+ lower_position_embeddings
+ h_position_embeddings
+ w_position_embeddings
+ token_type_embeddings
)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
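# LayoutlmModel is a BertModel whose embedding layer is replaced by
# LayoutlmEmbeddings, so forward() additionally expects the `bbox` tensor.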
class LayoutlmModel(BertModel):
config_class = LayoutlmConfig
pretrained_model_archive_map = LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "bert"
def __init__(self, config):
super(LayoutlmModel, self).__init__(config)
self.embeddings = LayoutlmEmbeddings(config)
self.init_weights()
def forward(
self,
input_ids,
bbox,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = (
head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
)
head_mask = head_mask.expand(
self.config.num_hidden_layers, -1, -1, -1, -1
)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
                )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(
input_ids, bbox, position_ids=position_ids, token_type_ids=token_type_ids
)
encoder_outputs = self.encoder(
embedding_output, extended_attention_mask, head_mask=head_mask
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
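# Token-level head: dropout plus a linear classifier over the sequence output;
# when labels are given, the loss is computed only on positions where
# attention_mask == 1.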
class LayoutlmForTokenClassification(BertPreTrainedModel):
config_class = LayoutlmConfig
pretrained_model_archive_map = LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "bert"
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = LayoutlmModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(
self,
input_ids,
bbox,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
outputs = self.bert(
input_ids=input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[
2:
] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
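# Sequence-level head: classifies the pooled [CLS] representation and falls back
# to MSE (regression) when num_labels == 1.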
class LayoutlmForSequenceClassification(BertPreTrainedModel):
config_class = LayoutlmConfig
pretrained_model_archive_map = LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "bert"
def __init__(self, config):
super(LayoutlmForSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = LayoutlmModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
def forward(
self,
input_ids,
bbox,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
outputs = self.bert(
input_ids=input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[
2:
] # add hidden states and attention if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
| data2vec_vision-main | layoutlm/deprecated/layoutlm/modeling/layoutlm.py |
# coding=utf-8
import copy
import json
import logging
import os
import re
from multiprocessing import Pool
import torch
from lxml import html
from torch.utils.data import TensorDataset
from tqdm import tqdm
from transformers import DataProcessor
logger = logging.getLogger(__name__)
def get_text(node):
textnodes = node.xpath(".//text()")
s = "".join([text for text in textnodes])
return re.sub(r"\s+", " ", s).strip()
def get_prop(node, name):
title = node.get("title")
props = title.split(";")
for prop in props:
(key, args) = prop.split(None, 1)
args = args.strip('"')
if key == name:
return args
return None
class DocExample(object):
def __init__(self, guid, text_a, text_b=None, bbox=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.bbox = bbox
self.label = label
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
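# CdipProcessor reads the RVL-CDIP label files ("<file> <label>" per line), parses
# the corresponding hOCR XML to get words and boxes normalized to 0-1000, and
# exposes the 16 document classes as labels "0"-"15".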
class CdipProcessor(DataProcessor):
"""Processor for the CDIP data set."""
def worker(self, line):
file, label = line.split()
text, bbox = self.read_hocr_file(self.data_dir, file)
return [text, bbox, label]
def get_examples(self, data_dir, mode):
self.data_dir = data_dir
with open(os.path.join(data_dir, "labels", "{}.txt".format(mode))) as f:
lines = f.readlines()
examples = []
with tqdm(lines, desc="Gettting {} examples".format(mode)) as t, Pool(24) as p:
for example in p.imap(self.worker, lines):
examples.append(example)
t.update()
return self._create_examples(examples, mode)
def _get_examples(self, data_dir, mode):
with open(os.path.join(data_dir, "labels", "{}.txt".format(mode))) as f:
lines = []
for line in tqdm(f.readlines(), desc="Gettting {} examples".format(mode)):
file, label = line.split()
text, bbox = self.read_hocr_file(data_dir, file)
lines.append([text, bbox, label])
return self._create_examples(lines, mode)
def read_hocr_file(self, data_dir, file):
hocr_file = os.path.join(data_dir, "images", file[:-4] + ".xml")
text_buffer = []
bbox_buffer = []
try:
doc = html.parse(hocr_file)
except AssertionError:
logger.warning(
"%s is empty or its format is unacceptable. Skipped.", hocr_file
)
return [], []
for page in doc.xpath("//*[@class='ocr_page']"):
page_bbox = [int(x) for x in get_prop(page, "bbox").split()]
width, height = page_bbox[2], page_bbox[3]
for word in doc.xpath("//*[@class='ocrx_word']"):
textnodes = word.xpath(".//text()")
s = "".join([text for text in textnodes])
text = re.sub(r"\s+", " ", s).strip()
if text:
text_buffer.append(text)
bbox = [int(x) for x in get_prop(word, "bbox").split()]
bbox = [
bbox[0] / width,
bbox[1] / height,
bbox[2] / width,
bbox[3] / height,
]
bbox = [int(x * 1000) for x in bbox]
bbox_buffer.append(bbox)
return text_buffer, bbox_buffer
def get_labels(self):
return list(map(str, list(range(16))))
def _create_examples(self, lines, mode):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (mode, i)
text = line[0]
bbox = line[1]
label = line[2]
examples.append(
DocExample(guid=guid, text_a=text, text_b=None, bbox=bbox, label=label)
)
return examples
class DocFeature(object):
def __init__(self, input_ids, bboxes, attention_mask, token_type_ids, label):
        assert all(
            0 <= coord <= 1000 for box in bboxes for coord in box
        ), "Error with input bbox ({}): the coordinate value is not between 0 and 1000".format(
            bboxes
        )
self.input_ids = input_ids
self.bboxes = bboxes
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.label = label
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
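# Converts DocExamples into fixed-length features: sub-word tokenization, [CLS]/[SEP]
# insertion with sentinel boxes [0, 0, 0, 0] and [1000, 1000, 1000, 1000], and padding
# of ids, boxes, attention mask and segment ids up to max_length.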
def convert_examples_to_features(
examples,
tokenizer,
max_length=512,
label_list=None,
pad_on_left=False,
pad_token="[PAD]",
pad_token_id=0,
pad_token_segment_id=0,
mask_padding_with_zero=True,
):
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(tqdm(examples)):
tokens = []
bboxes = []
if len(example.text_a) == 0:
bboxes.append([0, 0, 0, 0])
tokens.append(pad_token)
for token, bbox in zip(example.text_a, example.bbox):
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
bboxes.append(bbox)
tokens.append(sub_token)
tokens = tokens[: max_length - 2]
bboxes = bboxes[: max_length - 2]
bboxes = [[0, 0, 0, 0]] + bboxes + [[1000, 1000, 1000, 1000]]
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = [tokenizer.cls_token_id] + input_ids + [tokenizer.sep_token_id]
token_type_ids = [0] * len(input_ids)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token_id] * padding_length) + input_ids
bboxes = ([[0, 0, 0, 0]] * padding_length) + bboxes
attention_mask = (
[0 if mask_padding_with_zero else 1] * padding_length
) + attention_mask
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token_id] * padding_length)
bboxes = bboxes + ([[0, 0, 0, 0]] * padding_length)
attention_mask = attention_mask + (
[0 if mask_padding_with_zero else 1] * padding_length
)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(
len(input_ids), max_length
)
assert len(bboxes) == max_length, "Error with input length {} vs {}".format(
len(bboxes), max_length
)
assert (
len(attention_mask) == max_length
), "Error with input length {} vs {}".format(len(attention_mask), max_length)
assert (
len(token_type_ids) == max_length
), "Error with input length {} vs {}".format(len(token_type_ids), max_length)
label = label_map[example.label]
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_ids: %s" % " ".join([str(x) for x in bboxes]))
logger.info(
"attention_mask: %s" % " ".join([str(x) for x in attention_mask])
)
logger.info(
"token_type_ids: %s" % " ".join([str(x) for x in token_type_ids])
)
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(
DocFeature(
input_ids=input_ids,
bboxes=bboxes,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=label,
)
)
return features
def load_and_cache_examples(args, tokenizer, mode="train"):
if args.local_rank not in [-1, 0] and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
processor = CdipProcessor()
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}".format(
mode,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
examples = processor.get_examples(args.data_dir, mode)
features = convert_examples_to_features(
examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
pad_on_left=bool(args.model_type in ["xlnet"]),
# pad on the left for xlnet
pad_token=tokenizer.pad_token,
pad_token_id=tokenizer.pad_token_id,
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_bboxes = torch.tensor([f.bboxes for f in features], dtype=torch.long)
all_attention_mask = torch.tensor(
[f.attention_mask for f in features], dtype=torch.long
)
all_token_type_ids = torch.tensor(
[f.token_type_ids for f in features], dtype=torch.long
)
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
dataset = TensorDataset(
all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_bboxes
)
return dataset
if __name__ == "__main__":
import argparse
from transformers import BertTokenizerFast
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.local_rank = -1
args.data_dir = "data"
args.model_name_or_path = "bert-base-uncased"
args.max_seq_length = 512
args.model_type = "bert"
args.overwrite_cache = True
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
dataset = load_and_cache_examples(args, tokenizer, mode="test")
print(len(dataset))
| data2vec_vision-main | layoutlm/deprecated/layoutlm/data/rvl_cdip.py |
# flake8: noqa
from .funsd import FunsdDataset
| data2vec_vision-main | layoutlm/deprecated/layoutlm/data/__init__.py |
import logging
import os
import torch
from torch.utils.data import Dataset
logger = logging.getLogger(__name__)
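# FunsdDataset builds (or loads from cache) the tensors for one FUNSD split; each
# item is a 5-tuple of input ids, attention mask, segment ids, label ids and boxes.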
class FunsdDataset(Dataset):
def __init__(self, args, tokenizer, labels, pad_token_label_id, mode):
if args.local_rank not in [-1, 0] and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}".format(
mode,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
examples = read_examples_from_file(args.data_dir, mode)
features = convert_examples_to_features(
examples,
labels,
args.max_seq_length,
tokenizer,
cls_token_at_end=bool(args.model_type in ["xlnet"]),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(args.model_type in ["roberta"]),
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left=bool(args.model_type in ["xlnet"]),
# pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
pad_token_label_id=pad_token_label_id,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
self.features = features
# Convert to Tensors and build dataset
self.all_input_ids = torch.tensor(
[f.input_ids for f in features], dtype=torch.long
)
self.all_input_mask = torch.tensor(
[f.input_mask for f in features], dtype=torch.long
)
self.all_segment_ids = torch.tensor(
[f.segment_ids for f in features], dtype=torch.long
)
self.all_label_ids = torch.tensor(
[f.label_ids for f in features], dtype=torch.long
)
self.all_bboxes = torch.tensor([f.boxes for f in features], dtype=torch.long)
def __len__(self):
return len(self.features)
def __getitem__(self, index):
return (
self.all_input_ids[index],
self.all_input_mask[index],
self.all_segment_ids[index],
self.all_label_ids[index],
self.all_bboxes[index],
)
class InputExample(object):
"""A single training/test example for token classification."""
def __init__(self, guid, words, labels, boxes, actual_bboxes, file_name, page_size):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
words: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.words = words
self.labels = labels
self.boxes = boxes
self.actual_bboxes = actual_bboxes
self.file_name = file_name
self.page_size = page_size
class InputFeatures(object):
"""A single set of features of data."""
def __init__(
self,
input_ids,
input_mask,
segment_ids,
label_ids,
boxes,
actual_bboxes,
file_name,
page_size,
):
        assert all(
            0 <= coord <= 1000 for box in boxes for coord in box
        ), "Error with input bbox ({}): the coordinate value is not between 0 and 1000".format(
            boxes
        )
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
self.boxes = boxes
self.actual_bboxes = actual_bboxes
self.file_name = file_name
self.page_size = page_size
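# read_examples_from_file() zips the {mode}.txt, {mode}_box.txt and {mode}_image.txt
# files produced by preprocess.py and groups their lines into one InputExample per
# document, using blank lines (or -DOCSTART-) as separators.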
def read_examples_from_file(data_dir, mode):
file_path = os.path.join(data_dir, "{}.txt".format(mode))
box_file_path = os.path.join(data_dir, "{}_box.txt".format(mode))
image_file_path = os.path.join(data_dir, "{}_image.txt".format(mode))
guid_index = 1
examples = []
with open(file_path, encoding="utf-8") as f, open(
box_file_path, encoding="utf-8"
) as fb, open(image_file_path, encoding="utf-8") as fi:
words = []
boxes = []
actual_bboxes = []
file_name = None
page_size = None
labels = []
for line, bline, iline in zip(f, fb, fi):
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(
InputExample(
guid="{}-{}".format(mode, guid_index),
words=words,
labels=labels,
boxes=boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
guid_index += 1
words = []
boxes = []
actual_bboxes = []
file_name = None
page_size = None
labels = []
else:
splits = line.split("\t")
bsplits = bline.split("\t")
isplits = iline.split("\t")
assert len(splits) == 2
assert len(bsplits) == 2
assert len(isplits) == 4
assert splits[0] == bsplits[0]
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
box = bsplits[-1].replace("\n", "")
box = [int(b) for b in box.split()]
boxes.append(box)
actual_bbox = [int(b) for b in isplits[1].split()]
actual_bboxes.append(actual_bbox)
page_size = [int(i) for i in isplits[2].split()]
file_name = isplits[3].strip()
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(
InputExample(
guid="%s-%d".format(mode, guid_index),
words=words,
labels=labels,
boxes=boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
return examples
def convert_examples_to_features(
examples,
label_list,
max_seq_length,
tokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
cls_token_box=[0, 0, 0, 0],
sep_token_box=[1000, 1000, 1000, 1000],
pad_token_box=[0, 0, 0, 0],
pad_token_segment_id=0,
pad_token_label_id=-1,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
file_name = example.file_name
page_size = example.page_size
width, height = page_size
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
tokens = []
token_boxes = []
actual_bboxes = []
label_ids = []
for word, label, box, actual_bbox in zip(
example.words, example.labels, example.boxes, example.actual_bboxes
):
word_tokens = tokenizer.tokenize(word)
tokens.extend(word_tokens)
token_boxes.extend([box] * len(word_tokens))
actual_bboxes.extend([actual_bbox] * len(word_tokens))
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend(
[label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1)
)
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
token_boxes = token_boxes[: (max_seq_length - special_tokens_count)]
actual_bboxes = actual_bboxes[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
token_boxes += [sep_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
token_boxes += [sep_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
token_boxes += [cls_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
token_boxes = [cls_token_box] + token_boxes
actual_bboxes = [[0, 0, width, height]] + actual_bboxes
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = (
[0 if mask_padding_with_zero else 1] * padding_length
) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
token_boxes = ([pad_token_box] * padding_length) + token_boxes
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
token_boxes += [pad_token_box] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(token_boxes) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s", example.guid)
logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
logger.info("boxes: %s", " ".join([str(x) for x in token_boxes]))
logger.info("actual_bboxes: %s", " ".join([str(x) for x in actual_bboxes]))
features.append(
InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_ids=label_ids,
boxes=token_boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
return features
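# --------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): how the InputFeatures returned above are
# typically packed into tensors for a DataLoader. The helper name is hypothetical and it is
# never called here; it only assumes `features` is the list produced by
# convert_examples_to_features.
def _features_to_tensors_sketch(features):
    import torch
    from torch.utils.data import TensorDataset

    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
    all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
    all_boxes = torch.tensor([f.boxes for f in features], dtype=torch.long)
    # every feature was padded/truncated to max_seq_length above, so stacking is safe
    return TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_boxes)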
| data2vec_vision-main | layoutlm/deprecated/layoutlm/data/funsd.py |
from setuptools import setup, find_packages
setup(
name = "adalm",
version = "0.0",
author = "Microsoft",
author_email = "",
description = "domain adaptation toolkit",
keywords = "domain adaptation with extended vocab",
license='Apache',
url = "https://github.com/littlefive5/AdaLM",
packages=find_packages(exclude=["*.tests", "*.tests.*",
"tests.*", "tests"]),
install_requires=['numpy',
'boto3',
'requests',
'tqdm',
'urllib3==1.25.4'],
python_requires='>=3.5.0',
tests_require=['pytest'],
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
)
| data2vec_vision-main | adalm/setup.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa)."""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import json
import numpy as np
import torch
from seqeval.metrics import f1_score, precision_score, recall_score
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from torch.nn import CrossEntropyLoss
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForTokenClassification,
BertTokenizer,
DistilBertConfig,
DistilBertForTokenClassification,
DistilBertTokenizer,
RobertaConfig,
RobertaForTokenClassification,
RobertaTokenizer,
XLMRobertaConfig,
XLMRobertaForTokenClassification,
XLMRobertaTokenizer
)
from transformers import AdamW, get_linear_schedule_with_warmup
from utils_ner import convert_examples_to_features, get_labels, read_examples_from_file
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (BertConfig, RobertaConfig, DistilBertConfig, XLMRobertaConfig)
),
(),
)
MODEL_CLASSES = {
"bert": (BertConfig, BertForTokenClassification, BertTokenizer),
"roberta": (RobertaConfig, RobertaForTokenClassification, RobertaTokenizer),
"distilbert": (DistilBertConfig, DistilBertForTokenClassification, DistilBertTokenizer),
"xlmroberta": (XLMRobertaConfig, XLMRobertaForTokenClassification, XLMRobertaTokenizer),
}
TOKENIZER_ARGS = ["do_lower_case", "strip_accents", "keep_accents", "use_fast"]
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
if args.warmup_ratio > 0:
args.warmup_steps = int(t_total*args.warmup_ratio)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
if args.disable_tqdm:
epoch_iterator = train_dataloader
else:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'unilm', 'adapterbert'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
epoch_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(json.dumps({**logs, **{'step': global_step}}))
if args.max_steps > 0 and global_step > args.max_steps:
if not args.disable_tqdm:
epoch_iterator.close()
break
if args.local_rank in [-1, 0]:
logs = {}
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
                results, _eval_preds = evaluate(
                    args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix='epoch-{}'.format(_ + 1)
                )
for key, value in results.items():
eval_key = 'eval_{}'.format(key)
logs[eval_key] = value
if metric_for_best is None:
metric_for_best = key
if best_epoch is None or best_performance[metric_for_best] < results[metric_for_best]:
best_epoch = 'epoch-{}'.format(_ + 1)
best_performance = results
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{'step': global_step}}))
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'epoch-{}'.format(_ + 1))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, choosed by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
return global_step, tr_loss / global_step
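# Illustrative sketch (not part of the original script): how the schedule lengths computed in
# train() relate to each other. The numbers below are made up; with 1000 batches per epoch,
# gradient accumulation of 2 and 3 epochs the optimizer takes 1000 // 2 * 3 = 1500 steps, and
# warmup_ratio=0.1 maps to the first 150 of them.
def _schedule_lengths_sketch(num_batches=1000, grad_accum=2, epochs=3, warmup_ratio=0.1):
    t_total = num_batches // grad_accum * epochs
    warmup_steps = int(t_total * warmup_ratio)
    return t_total, warmup_steps  # (1500, 150) for the defaults above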
def save_best_result(best_epoch, best_performance, output_dir):
best_performance["checkpoint"] = best_epoch
with open(os.path.join(output_dir, "best_performance.json"), mode="w") as writer:
writer.write(json.dumps(best_performance, indent=2))
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation %s *****", prefix)
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "adapterbert"] else None
) # XLM and RoBERTa don"t use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"loss": eval_loss,
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list),
}
logger.info("***** Eval results %s *****", prefix)
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
output_file = os.path.join(args.output_dir, "eval_out.txt")
with open(output_file, "w+", encoding="utf-8") as f:
for line in tqdm(preds_list):
line = " ".join(line) + "\n"
f.write(line)
return results, preds_list
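# Illustrative sketch (not part of the original script): the alignment loop in evaluate() keeps
# a prediction only where the gold label id differs from pad_token_label_id, so sub-word pieces
# and padding never reach seqeval. Toy example with a pad id of -100:
def _label_alignment_sketch():
    label_map = {0: "O", 1: "B-PER", 2: "I-PER"}
    out_label_ids = np.array([[1, -100, 2, -100]])  # one sentence: B-PER, pad, I-PER, pad
    preds = np.array([[1, 0, 2, 0]])                # argmax over the logits
    gold, hyp = [], []
    for i in range(out_label_ids.shape[0]):
        for j in range(out_label_ids.shape[1]):
            if out_label_ids[i, j] != -100:
                gold.append(label_map[out_label_ids[i][j]])
                hyp.append(label_map[preds[i][j]])
    return gold, hyp  # (["B-PER", "I-PER"], ["B-PER", "I-PER"])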
def test(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
test_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="test")
args.test_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
test_sampler = SequentialSampler(test_dataset) if args.local_rank == -1 else DistributedSampler(test_dataset)
test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=args.test_batch_size)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
logger.info("***** Running Prediction %s *****", prefix)
logger.info(" Num examples = %d", len(test_dataset))
logger.info(" Batch size = %d", args.test_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
for batch in tqdm(test_dataloader, desc="Prediction"):
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet","adapterbert"] else None
) # XLM and RoBERTa don"t use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"loss": eval_loss,
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list),
}
print(out_label_list[0])
print(preds_list[0])
out_file = os.path.join(args.output_dir, "predict.txt")
logger.info("write results into {}".format(out_file))
output_eval_file = os.path.join(args.output_dir, "predict_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Predict results {} *****".format(prefix))
writer.write(json.dumps(results, indent=2))
logger.info("Result = %s" % json.dumps(results, indent=2))
with open(out_file, "w+", encoding="utf-8") as f:
for line in preds_list:
line = " ".join(line) + "\n"
f.write(line)
return results, preds_list
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode):
    if args.local_rank not in [-1, 0] and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}".format(
mode, list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length)
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
examples = read_examples_from_file(args.data_dir, mode)
features = convert_examples_to_features(
examples,
labels,
args.max_seq_length,
tokenizer,
cls_token_at_end=bool(args.model_type in ["xlnet"]),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(args.model_type in ["roberta"]),
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left=bool(args.model_type in ["xlnet"]),
# pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
pad_token_label_id=pad_token_label_id,
mode=mode,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
    if args.local_rank == 0 and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
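# Illustrative sketch (not part of the original script): the cache file name produced by
# load_and_cache_examples() for hypothetical arguments, e.g. data_dir="data/ner",
# model_name_or_path="bert-base-cased", max_seq_length=128, mode="train" gives
# "data/ner/cached_train_bert-base-cased_128".
def _cache_path_sketch(data_dir="data/ner", model_name_or_path="bert-base-cased", max_seq_length=128, mode="train"):
    return os.path.join(
        data_dir,
        "cached_{}_{}_{}".format(mode, list(filter(None, model_name_or_path.split("/"))).pop(), str(max_seq_length)),
    )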
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--model_type", default="unilm", type=str,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument('--disable_tqdm', action='store_true',
help='Disable the tqdm bar. ')
## Other parameters
parser.add_argument("--labels", default="", type=str,
help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.")
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Rul evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument(
"--keep_accents", action="store_const", const=True, help="Set this flag if model is trained with accents."
)
parser.add_argument(
"--strip_accents", action="store_const", const=True, help="Set this flag if model is trained without accents."
)
parser.add_argument("--use_fast", action="store_const", const=True, help="Set this flag to use fast tokenization.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_ratio", default=0.1, type=float,
help="Linear warmup over warmup_ratio.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--metric_for_choose_best_checkpoint', type=str, default=None,
help="Set the metric to choose the best checkpoint")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Prepare CONLL-2003 task
labels = get_labels(args.labels)
num_labels = len(labels)
# Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later
pad_token_label_id = CrossEntropyLoss().ignore_index
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
id2label={str(i): label for i, label in enumerate(labels)},
label2id={label: i for i, label in enumerate(labels)},
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer_args = {k: v for k, v in vars(args).items() if v is not None and k in TOKENIZER_ARGS}
logger.info("Tokenizer arguments: %s", tokenizer_args)
tokenizer_name = args.tokenizer_name if args.tokenizer_name else args.model_name_or_path
tokenizer = tokenizer_class.from_pretrained(
tokenizer_name,
cache_dir=args.cache_dir if args.cache_dir else None,
**tokenizer_args,
)
if not hasattr(config, 'need_pooler') or config.need_pooler is not True:
setattr(config, 'need_pooler', True)
model = model_class.from_pretrained(
args.model_name_or_path, config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train")
global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
tokenizer.save_pretrained(args.output_dir)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Evaluation
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, **tokenizer_args)
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
for checkpoint in checkpoints:
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
checkpoint_config = config_class.from_pretrained(checkpoint)
model = model_class.from_pretrained(checkpoint, config=checkpoint_config)
model.to(args.device)
            result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=prefix)
if metric_for_best is None:
metric_for_best = list(result.keys())[-1]
if best_epoch is None:
best_epoch = checkpoint
best_performance = result
else:
if best_performance[metric_for_best] < result[metric_for_best]:
best_performance = result
best_epoch = checkpoint
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, choosed by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
checkpoint = best_epoch
checkpoint_config = config_class.from_pretrained(checkpoint)
model = model_class.from_pretrained(checkpoint, config=checkpoint_config)
model.to(args.device)
            result, _ = test(args, model, tokenizer, labels, pad_token_label_id, mode="test", prefix="best")
if __name__ == "__main__":
main()
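# Example invocation (illustrative only; the data paths and model name below are placeholders,
# not taken from the original repository):
#
#   python run_ner.py \
#       --data_dir ./data/my_ner_task \
#       --model_type bert \
#       --model_name_or_path bert-base-cased \
#       --labels ./data/my_ner_task/labels.txt \
#       --output_dir ./output/ner \
#       --max_seq_length 128 \
#       --do_train --do_eval --do_predict \
#       --evaluate_during_training \
#       --overwrite_output_dir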
| data2vec_vision-main | adalm/finetune/run_ner.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Named entity recognition fine-tuning: utilities to work with CoNLL-2003 task. """
import logging
import os
from tqdm import *
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for token classification."""
def __init__(self, guid, words, labels=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
words: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.words = words
self.labels = labels
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_ids = None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
def read_examples_from_file(data_dir, mode):
file_path = os.path.join(data_dir, "{}.txt".format(mode))
guid_index = 1
examples = []
with open(file_path, encoding="utf-8") as f:
        for line in f.readlines():
            line = line.strip()
            if not line:
                # skip blank lines so that stray empty rows do not break the tab split below
                continue
            columns = line.split("\t")
            words = columns[0].split()
            labels = columns[1].split()
            assert len(words) == len(labels)
            guid_index += 1
            examples.append(InputExample(guid=guid_index, words=words, labels=labels))
return examples
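# Illustrative sketch (not part of the original file): read_examples_from_file() expects one
# sentence per line in "<mode>.txt", with space-separated words and space-separated labels
# joined by a single tab, e.g. "EU rejects German call<TAB>B-ORG O B-MISC O". A minimal parse
# of one such line under that assumption:
def _parse_line_sketch(line="EU rejects German call\tB-ORG O B-MISC O"):
    words_part, labels_part = line.rstrip("\n").split("\t")
    words, labels = words_part.split(), labels_part.split()
    assert len(words) == len(labels)
    return words, labels  # (["EU", "rejects", "German", "call"], ["B-ORG", "O", "B-MISC", "O"])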
def convert_examples_to_features(
examples,
label_list,
max_seq_length,
tokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
pad_token_label_id=-100,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
mode="train",
):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` defines the segment id associated with the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(tqdm(examples)):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
tokens = []
label_ids = []
for word, label in zip(example.words, example.labels):
word_tokens = tokenizer.tokenize(word)
tokens.extend(word_tokens)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s", example.guid)
logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
features.append(
InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids)
)
return features
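# Illustrative sketch (not part of the original file): how the first-sub-token labelling in
# convert_examples_to_features() behaves. A fake "tokenizer" that splits words into character
# bigrams stands in for a real transformers tokenizer, purely to show the output shape.
def _subword_label_sketch(pad_token_label_id=-100):
    label_map = {"O": 0, "B-PER": 1}

    def fake_tokenize(word):
        return [word[:2]] + ["##" + word[i:i + 2] for i in range(2, len(word), 2)]

    tokens, label_ids = [], []
    for word, label in [("Johnson", "B-PER"), ("runs", "O")]:
        word_tokens = fake_tokenize(word)
        tokens.extend(word_tokens)
        # only the first sub-token keeps the real label; the rest are ignored by the loss
        label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
    return tokens, label_ids  # (['Jo', '##hn', '##so', '##n', 'ru', '##ns'], [1, -100, -100, -100, 0, -100])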
def get_labels(path):
if path:
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] | data2vec_vision-main | adalm/finetune/utils_ner.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa)."""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import json
import numpy as np
import torch
from sklearn.metrics import matthews_corrcoef, f1_score
from sklearn.metrics import cohen_kappa_score, precision_score, recall_score, precision_recall_fscore_support
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from torch.nn import CrossEntropyLoss
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForTokenClassification,
BertTokenizer,
DistilBertConfig,
DistilBertForTokenClassification,
DistilBertTokenizer,
RobertaConfig,
RobertaForTokenClassification,
RobertaTokenizer,
XLMRobertaConfig,
XLMRobertaForTokenClassification,
XLMRobertaTokenizer
)
from transformers import AdamW, get_linear_schedule_with_warmup
from utils_ner import convert_examples_to_features, get_labels, read_examples_from_file
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (BertConfig, RobertaConfig, DistilBertConfig, XLMRobertaConfig)
),
(),
)
MODEL_CLASSES = {
"bert": (BertConfig, BertForTokenClassification, BertTokenizer),
"roberta": (RobertaConfig, RobertaForTokenClassification, RobertaTokenizer),
"distilbert": (DistilBertConfig, DistilBertForTokenClassification, DistilBertTokenizer),
"xlmroberta": (XLMRobertaConfig, XLMRobertaForTokenClassification, XLMRobertaTokenizer),
}
TOKENIZER_ARGS = ["do_lower_case", "strip_accents", "keep_accents", "use_fast"]
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def get_f1(prec, rec):
    # guard against zero division when a class has zero precision and recall
    return 2 * prec * rec / (prec + rec) if (prec + rec) > 0 else 0.0
def token_f1(true, pred, labels):
print(true[:30])
print(pred[:30])
print(labels)
total_f1 = 0.0
    class_scores = zip(labels, precision_score(true, pred, labels=labels, average=None), recall_score(true, pred, labels=labels, average=None))
for label, prec, rec in class_scores:
print('Label: %s' %label)
if label != 'O':
total_f1 += get_f1(prec, rec)
print('\tf1 = %f' %get_f1(prec, rec))
print('\tprecision = %f' %prec)
print('\trecall = %f' %rec)
    return total_f1/3  # NOTE: assumes exactly three non-"O" entity classes in the label set
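# Illustrative sketch (not part of the original script): get_f1 is the usual harmonic mean, e.g.
# precision 0.8 and recall 0.6 give 2 * 0.8 * 0.6 / (0.8 + 0.6), which is about 0.686.
def _get_f1_sketch():
    return get_f1(0.8, 0.6)  # about 0.6857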
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
if args.warmup_ratio > 0:
args.warmup_steps = int(t_total*args.warmup_ratio)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
if args.disable_tqdm:
epoch_iterator = train_dataloader
else:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'unilm', 'adapterbert'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
epoch_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(json.dumps({**logs, **{'step': global_step}}))
if args.max_steps > 0 and global_step > args.max_steps:
if not args.disable_tqdm:
epoch_iterator.close()
break
if args.local_rank in [-1, 0]:
logs = {}
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
                results, _eval_preds = evaluate(
                    args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix='epoch-{}'.format(_ + 1)
                )
for key, value in results.items():
eval_key = 'eval_{}'.format(key)
logs[eval_key] = value
if metric_for_best is None:
metric_for_best = key
if best_epoch is None or best_performance[metric_for_best] < results[metric_for_best]:
best_epoch = 'epoch-{}'.format(_ + 1)
best_performance = results
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{'step': global_step}}))
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'epoch-{}'.format(_ + 1))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, choosed by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
return global_step, tr_loss / global_step
def save_best_result(best_epoch, best_performance, output_dir):
best_performance["checkpoint"] = best_epoch
with open(os.path.join(output_dir, "best_performance.json"), mode="w") as writer:
writer.write(json.dumps(best_performance, indent=2))
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation %s *****", prefix)
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "adapterbert"] else None
) # XLM and RoBERTa don"t use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
out_labels = [i for item in out_label_list for i in item]
preds_labels = [i for item in preds_list for i in item]
results = {
"loss": eval_loss,
"f1": token_f1(true = out_labels,pred = preds_labels, labels = labels),
}
logger.info("***** Eval results %s *****", prefix)
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
output_file = os.path.join(args.output_dir, "eval_out.txt")
with open(output_file, "w+", encoding="utf-8") as f:
for line in tqdm(preds_list):
line = " ".join(line) + "\n"
f.write(line)
return results, preds_list
def test(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
test_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="test")
args.test_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
test_sampler = SequentialSampler(test_dataset) if args.local_rank == -1 else DistributedSampler(test_dataset)
test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=args.test_batch_size)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
logger.info("***** Running Prediction %s *****", prefix)
logger.info(" Num examples = %d", len(test_dataset))
logger.info(" Batch size = %d", args.test_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
for batch in tqdm(test_dataloader, desc="Prediction"):
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "adapterbert"] else None
) # XLM and RoBERTa don"t use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
out_file = os.path.join(args.output_dir, "predict.txt")
out_labels = [i for item in out_label_list for i in item]
preds_labels = [i for item in preds_list for i in item]
results = {
"loss": eval_loss,
"f1": token_f1(true = out_labels,pred = preds_labels, labels = labels),
}
print(out_label_list[0])
print(preds_list[0])
logger.info("write results into {}".format(out_file))
output_eval_file = os.path.join(args.output_dir, "predict_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Predict results {} *****".format(prefix))
writer.write(json.dumps(results, indent=2))
logger.info("Result = %s" % json.dumps(results, indent=2))
with open(out_file, "w+", encoding="utf-8") as f:
for line in preds_list:
line = " ".join(line) + "\n"
f.write(line)
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
return results, preds_list
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode):
    if args.local_rank not in [-1, 0] and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}".format(
mode, list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length)
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
examples = read_examples_from_file(args.data_dir, mode)
features = convert_examples_to_features(
examples,
labels,
args.max_seq_length,
tokenizer,
cls_token_at_end=bool(args.model_type in ["xlnet"]),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(args.model_type in ["roberta"]),
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left=bool(args.model_type in ["xlnet"]),
# pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
pad_token_label_id=pad_token_label_id,
mode=mode,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
    if args.local_rank == 0 and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
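    # Each dataset item is an (input_ids, input_mask, segment_ids, label_ids) tuple, in that order.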
return dataset
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--model_type", default="unilm", type=str,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument('--disable_tqdm', action='store_true',
help='Disable the tqdm bar. ')
## Other parameters
parser.add_argument("--labels", default="", type=str,
help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.")
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Rul evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument(
"--keep_accents", action="store_const", const=True, help="Set this flag if model is trained with accents."
)
parser.add_argument(
"--strip_accents", action="store_const", const=True, help="Set this flag if model is trained without accents."
)
parser.add_argument("--use_fast", action="store_const", const=True, help="Set this flag to use fast tokenization.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_ratio", default=0.1, type=float,
help="Linear warmup over warmup_ratio.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--metric_for_choose_best_checkpoint', type=str, default=None,
help="Set the metric to choose the best checkpoint")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Prepare CONLL-2003 task
labels = get_labels(args.labels)
num_labels = len(labels)
# Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later
pad_token_label_id = CrossEntropyLoss().ignore_index
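    # With PyTorch's default CrossEntropyLoss this ignore_index is -100, so padded and
    # sub-word positions marked with it never contribute to the loss.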
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
id2label={str(i): label for i, label in enumerate(labels)},
label2id={label: i for i, label in enumerate(labels)},
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer_args = {k: v for k, v in vars(args).items() if v is not None and k in TOKENIZER_ARGS}
logger.info("Tokenizer arguments: %s", tokenizer_args)
tokenizer_name = args.tokenizer_name if args.tokenizer_name else args.model_name_or_path
    tokenizer = tokenizer_class.from_pretrained(
        tokenizer_name,
        cache_dir=args.cache_dir if args.cache_dir else None,
        **tokenizer_args,
    )
if not hasattr(config, 'need_pooler') or config.need_pooler is not True:
setattr(config, 'need_pooler', True)
model = model_class.from_pretrained(
args.model_name_or_path, config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train")
global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
tokenizer.save_pretrained(args.output_dir)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Evaluation
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, **tokenizer_args)
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
for checkpoint in checkpoints:
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
checkpoint_config = config_class.from_pretrained(checkpoint)
model = model_class.from_pretrained(checkpoint, config=checkpoint_config)
model.to(args.device)
            result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=prefix)
if metric_for_best is None:
metric_for_best = list(result.keys())[-1]
if best_epoch is None:
best_epoch = checkpoint
best_performance = result
else:
if best_performance[metric_for_best] < result[metric_for_best]:
best_performance = result
best_epoch = checkpoint
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, choosed by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
checkpoint = best_epoch
checkpoint_config = config_class.from_pretrained(checkpoint)
model = model_class.from_pretrained(checkpoint, config=checkpoint_config)
model.to(args.device)
            result, _ = test(args, model, tokenizer, labels, pad_token_label_id, mode="test", prefix="test")
if __name__ == "__main__":
main()
| data2vec_vision-main | adalm/finetune/run_pico.py |
| data2vec_vision-main | adalm/finetune/__init__.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa)."""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import json
import time
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (WEIGHTS_NAME, BertConfig,
BertForSequenceClassification, BertTokenizer,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
XLMConfig, XLMForSequenceClassification,
XLMTokenizer, XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizer,
)
from transformers import AdamW, get_linear_schedule_with_warmup
from nlu_finetune.utils_for_glue import glue_compute_metrics as compute_metrics
from nlu_finetune.utils_for_glue import glue_output_modes as output_modes
from nlu_finetune.utils_for_glue import glue_processors as processors
from nlu_finetune.utils_for_glue import glue_convert_examples_to_features as convert_examples_to_features
logger = logging.getLogger(__name__)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig,
RobertaConfig, DistilBertConfig)), ())
MODEL_CLASSES = {
'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
'albert': (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
'xlm-roberta': (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=1)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
warmup_steps = t_total * args.warmup_ratio
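    # Worked example (illustrative numbers): with 1,000 batches per epoch,
    # gradient_accumulation_steps=2 and num_train_epochs=3, t_total = 1000 // 2 * 3 = 1500
    # optimizer steps, and warmup_ratio=0.1 then gives warmup_steps = 150.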
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
if args.disable_tqdm:
epoch_iterator = train_dataloader
else:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'unilm', 'adapterbert'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(json.dumps({**logs, **{'step': global_step}}))
if args.max_steps > 0 and global_step > args.max_steps:
if not args.disable_tqdm:
epoch_iterator.close()
break
if args.local_rank in [-1, 0]:
logs = {}
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer, prefix='epoch-{}'.format(_ + 1))
for key, value in results.items():
eval_key = 'eval_{}'.format(key)
logs[eval_key] = value
if metric_for_best is None:
metric_for_best = key
if best_epoch is None or best_performance[metric_for_best] < results[metric_for_best]:
best_epoch = 'epoch-{}'.format(_ + 1)
best_performance = results
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{'step': global_step}}))
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'epoch-{}'.format(_ + 1))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, choosed by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
return global_step, tr_loss / global_step
def save_best_result(best_epoch, best_performance, output_dir):
best_performance["checkpoint"] = best_epoch
with open(os.path.join(output_dir, "best_performance.json"), mode="w") as writer:
writer.write(json.dumps(best_performance, indent=2))
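# The best_performance.json written above ends up looking like (illustrative values):
#   {"acc": 0.91, "f1": 0.89, "acc_and_f1": 0.90, "checkpoint": "epoch-3"}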
def evaluate(args, model, tokenizer, prefix=""):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
cached_dev_file = args.cached_dev_file
if cached_dev_file is not None:
cached_dev_file = cached_dev_file + '_' + eval_task
eval_dataset = load_and_cache_examples(
args, eval_task, tokenizer, cached_features_file=cached_dev_file, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
if args.disable_tqdm:
epoch_iterator = eval_dataloader
else:
epoch_iterator = tqdm(eval_dataloader, desc="Evaluating")
for batch in epoch_iterator:
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'adapterbert'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
processor = processors[eval_task]()
        result = compute_metrics(eval_task, preds, out_label_ids, processor.get_labels()[1:])
results[eval_task] = result
eval_output_dir = os.path.join(eval_output_dir, prefix)
if not os.path.exists(eval_output_dir):
os.makedirs(eval_output_dir)
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
writer.write(json.dumps(result, indent=2))
logger.info("Result = %s" % json.dumps(result, indent=2))
return results
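# For most tasks the returned dict has a single entry, e.g. {"rte": {...}}; for MNLI it
# contains both the matched and mismatched splits, e.g. {"mnli": {...}, "mnli-mm": {...}}.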
def load_and_cache_examples(args, task, tokenizer, cached_features_file=None, evaluate=False):
if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
processor = processors[task]()
output_mode = output_modes[task]
examples = None
if cached_features_file is None:
if args.disable_auto_cache and args.local_rank != -1:
logger.warning("Please cache the features in DDP mode !")
raise RuntimeError()
if not args.disable_auto_cache:
# Load data features from cache or dataset file
cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train',
list(filter(None, args.model_name_or_path.split('/'))).pop(),
str(args.max_seq_length),
str(task)))
if cached_features_file is not None and os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
if task in ['mnli', 'mnli-mm'] and args.model_type in ['roberta', 'xlmroberta']:
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
features = convert_examples_to_features(examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
output_mode=output_mode,
pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,
)
if args.local_rank in [-1, 0] and cached_features_file is not None:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--model_type", default="unilm", type=str,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--task_name", default=None, type=str, required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--cached_train_file", default=None, type=str,
help="Path to cache the train set features. ")
parser.add_argument("--cached_dev_file", default=None, type=str,
help="Path to cache the dev set features. ")
parser.add_argument('--disable_auto_cache', action='store_true',
help='Disable the function for automatic cache the training/dev features.')
parser.add_argument('--disable_tqdm', action='store_true',
help='Disable the tqdm bar. ')
## Other parameters
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--sentencepieces_model_path", default=None, type=str,
help="File path to the sentencepieces model, will repleace the default tokenizer and --tokenizer_name. ")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Rul evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_ratio", default=0.1, type=float,
help="Linear warmup over warmup_ratio.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--metric_for_choose_best_checkpoint', type=str, default=None,
help="Set the metric to choose the best checkpoint")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None)
tokenizer_name = args.tokenizer_name if args.tokenizer_name else args.model_name_or_path
tokenizer = tokenizer_class.from_pretrained(tokenizer_name,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
if not hasattr(config, 'need_pooler') or config.need_pooler is not True:
setattr(config, 'need_pooler', True)
model = model_class.from_pretrained(
args.model_name_or_path, config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(
args, args.task_name, tokenizer, cached_features_file=args.cached_train_file, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
tokenizer.save_pretrained(args.output_dir)
# Evaluation
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
for checkpoint in checkpoints:
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
checkpoint_config = config_class.from_pretrained(checkpoint)
model = model_class.from_pretrained(checkpoint, config=checkpoint_config)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix)
if metric_for_best is None:
metric_for_best = list(list(result.values())[0].keys())[0]
if best_epoch is None:
best_epoch = checkpoint
best_performance = result
else:
for eval_task in result:
if best_performance[eval_task][metric_for_best] < result[eval_task][metric_for_best]:
best_performance[eval_task] = result[eval_task]
best_epoch = checkpoint
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, choosed by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
if __name__ == "__main__":
main()
| data2vec_vision-main | adalm/finetune/run_classifier.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" GLUE processors and helpers """
import logging
import os
import csv
import sys
import copy
import json
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
from sklearn.preprocessing import MultiLabelBinarizer
logger = logging.getLogger(__name__)
class InputExample(object):
"""
A single training/test example for simple sequence classification.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
def __init__(self, guid, text_a, text_b=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
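# Example: InputExample(guid="train-1", text_a="A man is eating.", text_b="A person eats.",
# label="1").to_json_string() returns an indented JSON dump of the guid/text_a/text_b/label fields.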
class InputFeatures(object):
"""
A single set of features of data.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens.
token_type_ids: Segment token indices to indicate first and second portions of the inputs.
label: Label corresponding to the input
"""
def __init__(self, input_ids, attention_mask=None, token_type_ids=None, label=None):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.label = label
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8-sig") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
@classmethod
def _read_json(cls, input_file):
with open(input_file, "r", encoding="utf-8-sig") as f:
lines = json.loads(f.read())
return lines
@classmethod
def _read_jsonl(cls, input_file):
with open(input_file, "r", encoding="utf-8-sig") as f:
lines = f.readlines()
return lines
def glue_convert_examples_to_features(examples, tokenizer,
max_length=512,
task=None,
label_list=None,
output_mode=None,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
mask_padding_with_zero=True):
"""
Loads a data file into a list of ``InputFeatures``
Args:
examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
tokenizer: Instance of a tokenizer that will tokenize the examples
max_length: Maximum example length
task: GLUE task
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
output_mode: String indicating the output mode. Either ``regression`` or ``classification``
pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
pad_token: Padding token
pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
actual values)
Returns:
If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
containing the task-specific features. If the input is a list of ``InputExamples``, will return
a list of task-specific ``InputFeatures`` which can be fed to the model.
"""
is_tf_dataset = False
if task is not None:
processor = glue_processors[task]()
if label_list is None:
label_list = processor.get_labels()
logger.info("Using label list %s for task %s" % (label_list, task))
if output_mode is None:
output_mode = glue_output_modes[task]
logger.info("Using output mode %s for task %s" % (output_mode, task))
label_map = {label: i for i, label in enumerate(label_list)}
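    # e.g. for MRPC this is {"0": 0, "1": 1}; for MNLI it is
    # {"contradiction": 0, "entailment": 1, "neutral": 2}.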
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d" % (ex_index))
if is_tf_dataset:
example = processor.get_example_from_tensor_dict(example)
example = processor.tfds_map(example)
inputs = tokenizer.encode_plus(
example.text_a,
example.text_b,
add_special_tokens=True,
max_length=max_length,
)
input_ids = inputs["input_ids"]
if "token_type_ids" in inputs:
token_type_ids = inputs["token_type_ids"]
else:
token_type_ids = []
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
if len(token_type_ids) == 0:
padding_length = max_length
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
if len(token_type_ids) == 0:
padding_length = max_length
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(len(attention_mask), max_length)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(len(token_type_ids), max_length)
if output_mode == "classification":
label = label_map[example.label]
elif output_mode == "regression":
label = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_tokens: %s" % " ".join(tokenizer.convert_ids_to_tokens(input_ids)))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(
InputFeatures(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=label))
return features
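# Minimal usage sketch for glue_convert_examples_to_features (not called anywhere in this
# module). The checkpoint name, sentences and label below are assumptions chosen purely
# for illustration.
def _example_convert_to_features():
    from transformers import BertTokenizer
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    examples = [InputExample(guid="demo-0", text_a="He ate lunch.",
                             text_b="He had a meal.", label="1")]
    features = glue_convert_examples_to_features(
        examples, tokenizer, max_length=32,
        label_list=["0", "1"], output_mode="classification")
    # Every feature is padded/truncated to max_length.
    assert len(features[0].input_ids) == 32
    return features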
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence1'].numpy().decode('utf-8'),
tensor_dict['sentence2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
label = line[0]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['premise'].numpy().decode('utf-8'),
tensor_dict['hypothesis'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")),
"test_matched")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliMismatchedProcessor(MnliProcessor):
"""Processor for the MultiNLI Mismatched data set (GLUE version)."""
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")),
"dev_mismatched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_mismatched.tsv")),
"test_mismatched")
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence'].numpy().decode('utf-8'),
None,
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence'].numpy().decode('utf-8'),
None,
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class StsbProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence1'].numpy().decode('utf-8'),
tensor_dict['sentence2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return [None]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QqpProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['question1'].numpy().decode('utf-8'),
tensor_dict['question2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
try:
text_a = line[3]
text_b = line[4]
label = line[5]
except IndexError:
continue
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QnliProcessor(DataProcessor):
"""Processor for the QNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['question'].numpy().decode('utf-8'),
tensor_dict['sentence'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev_matched")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class RteProcessor(DataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence1'].numpy().decode('utf-8'),
tensor_dict['sentence2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WnliProcessor(DataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence1'].numpy().decode('utf-8'),
tensor_dict['sentence2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ChemProcessor(DataProcessor):
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["false","CPR:3", "CPR:4", "CPR:5", "CPR:6", "CPR:9"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, label=label))
return examples
class ARCProcessor(DataProcessor):
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_jsonl(os.path.join(data_dir, "train.jsonl")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_jsonl(os.path.join(data_dir, "dev.jsonl")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_jsonl(os.path.join(data_dir, "test.jsonl")), "test")
def get_labels(self):
"""See base class."""
return ["CompareOrContrast", "Background", "Uses", "Motivation", "Extends", "Future"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
line = json.loads(line)
guid = "%s-%s" % (set_type, i)
text_a = line["text"]
label = line["label"]
examples.append(
InputExample(guid=guid, text_a=text_a, label=label))
return examples
class SCIProcessor(DataProcessor):
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_jsonl(os.path.join(data_dir, "train.jsonl")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_jsonl(os.path.join(data_dir, "dev.jsonl")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_jsonl(os.path.join(data_dir, "test.jsonl")), "test")
def get_labels(self):
"""See base class."""
return ["COMPARE","CONJUNCTION","FEATURE-OF","HYPONYM-OF","USED-FOR","EVALUATE-FOR","PART-OF"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
line = json.loads(line)
guid = "%s-%s" % (set_type, i)
text_a = line["text"]
label = line["label"]
examples.append(
InputExample(guid=guid, text_a=text_a, label=label))
return examples
glue_tasks_num_labels = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
"chemprot": 6,
"arc": 6,
"sci": 7,
}
glue_processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mnli-mm": MnliMismatchedProcessor,
"mrpc": MrpcProcessor,
"sst-2": Sst2Processor,
"sts-b": StsbProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"rte": RteProcessor,
"wnli": WnliProcessor,
"chemprot": ChemProcessor,
"arc": ARCProcessor,
"sci": SCIProcessor,
}
glue_output_modes = {
"cola": "classification",
"mnli": "classification",
"mnli-mm": "classification",
"mrpc": "classification",
"sst-2": "classification",
"sts-b": "regression",
"qqp": "classification",
"qnli": "classification",
"rte": "classification",
"wnli": "classification",
"chemprot": "classification",
"arc": "classification",
"sci": "classification",
}
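# Hedged usage sketch (added for illustration; the data directory below is a
# placeholder, not a path from this repository):
#
#   processor = glue_processors["chemprot"]()
#   label_list = processor.get_labels()                       # 6 ChemProt relation labels
#   train_examples = processor.get_train_examples("/path/to/chemprot")
#   output_mode = glue_output_modes["chemprot"]               # "classification"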
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def acc_and_macro_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds,average="macro")
return {
"f1": f1,
"acc": acc,
"acc_and_f1": (acc + f1) / 2,
}
def acc_and_micro_f1(preds, labels, label_list):
acc = simple_accuracy(preds, labels)
print(label_list)
label_list = [str(i+1) for i in range(len(label_list))]
print(label_list)
mlb = MultiLabelBinarizer(classes = label_list)
labels = labels.tolist()
labels = [str(i) for i in labels]
print(labels[:20])
labels = mlb.fit_transform(labels)
preds = preds.tolist()
preds = [str(i) for i in preds]
print(preds[:20])
preds = mlb.fit_transform(preds)
f1 = f1_score(y_true=labels, y_pred=preds,average="micro")
return {
"f1": f1,
"acc": acc,
"f1_macro": f1_score(y_true=labels, y_pred=preds,average="macro"),
"acc_and_f1": (acc + f1) / 2,
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def glue_compute_metrics(task_name, preds, labels, label_list):
assert len(preds) == len(labels)
if task_name == "cola":
return {"mcc": matthews_corrcoef(labels, preds)}
elif task_name == "sst-2":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mrpc":
return acc_and_f1(preds, labels)
elif task_name == "sts-b":
return pearson_and_spearman(preds, labels)
elif task_name == "qqp":
return acc_and_f1(preds, labels)
elif task_name == "mnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mnli-mm":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "qnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "rte":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "wnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "chemprot":
return acc_and_micro_f1(preds, labels, label_list)
elif task_name == "arc" or task_name == "sci":
return acc_and_macro_f1(preds, labels)
else:
raise KeyError(task_name)
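if __name__ == "__main__":
    # Minimal smoke test added for illustration (not part of the original
    # module): the predictions and labels are invented, and "rte" is chosen
    # because its metric only needs accuracy.
    import numpy as np

    _preds = np.array([0, 1, 1, 0])
    _labels = np.array([0, 1, 0, 0])
    # Expected output: {'acc': 0.75}
    print(glue_compute_metrics("rte", _preds, _labels, label_list=None))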
| data2vec_vision-main | adalm/finetune/utils_for_glue.py |
from __future__ import absolute_import
from __future__ import division
from text_encoder import SubwordTextEncoder
import tokenizer
import tempfile
import argparse
from transformers import BertTokenizer
import random
import math
import numpy as np
def merge_output_file_with_bert_vocab(output_filename, bert_vocab, temp_path):
writer = open(output_filename, 'w', encoding='utf-8')
_set = set()
with open(bert_vocab, 'r', encoding='utf-8') as reader:
for line in reader:
writer.write(line)
_set.add(line.strip())
print(temp_path)
with open(temp_path, 'r', encoding='utf-8') as reader:
for line in reader:
if line.strip() not in _set:
writer.write(line)
writer.close()
def build_target_size_vocab(token_counts, reserved_tokens, target_size):
min_val = 1
max_val = len(token_counts) // (target_size ** 0.5)
encoder = SubwordTextEncoder.build_to_target_size(target_size,token_counts,min_val, max_val, num_iterations=5,
reserved_tokens=reserved_tokens, max_subtoken_length=None)
fd, temp_vocab = tempfile.mkstemp()
encoder.store_to_file(temp_vocab, add_single_quotes=False)
return encoder, temp_vocab
def compute_language_model(documents, vocab_file):
all_tokens = 0
tokenized_documents = []
    bert_tokenizer = BertTokenizer(vocab_file, do_lower_case=True)
words = bert_tokenizer.vocab
for word in words.keys():
words[word] = 0
for doc in documents:
tokens = bert_tokenizer.tokenize(doc)
all_tokens += len(tokens)
for token in tokens:
words[token] +=1
tokenized_documents.append(tokens)
for word in words.keys():
words[word] /= all_tokens
probs = []
for doc in tokenized_documents:
p = 0.0
for token in doc:
p += math.log(words[token])
probs.append(p)
return np.mean(probs)
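# Note added for clarity: compute_language_model tokenizes every document with
# the candidate vocabulary, estimates unigram probabilities p(t) from the
# corpus-wide subword counts, scores each document as sum_t log p(t), and
# returns the average of these document log-likelihoods. vocab_extend below
# keeps growing the vocabulary while this quantity still improves noticeably.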
def vocab_extend(corpus, raw_vocab, output_filename, interval=10000 , threshold = 0.01):
"""
@description : The function to get the incremental vocabulary for
@param :
@Returns :
"""
documents = []
for line in open(corpus, "r",encoding='utf-8'):
line = line.replace('\n','')
if len(line) < 5:
continue
documents.append(line)
print("docunments: "+str(len(documents)))
token_counts = tokenizer.corpus_token_counts(
corpus, corpus_max_lines = 4400000,
split_on_newlines = True, additional_chars="", do_lower_case=True)
lines = open(raw_vocab, 'r', encoding='utf-8').readlines()
lines = [s.strip() for s in lines if len(s) > 0]
reserved_tokens = lines
random.shuffle(documents)
origin_size = (len(reserved_tokens) // interval) * interval
pre_lm = compute_language_model(documents, raw_vocab)
print("origin_size: " + str(origin_size))
print("pre_lm: "+ str(pre_lm))
target_size = origin_size
while True:
target_size = target_size + interval
_, temp_vocab = build_target_size_vocab(token_counts, reserved_tokens, target_size)
now_lm = compute_language_model(documents, temp_vocab)
print('now_lm: '+ str(now_lm))
delta = (pre_lm - now_lm)/pre_lm
print('delta: ' + str(delta))
if delta <= threshold:
merge_output_file_with_bert_vocab(output_filename, raw_vocab, temp_vocab)
break
pre_lm = now_lm
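# Worked example of the stopping rule above (numbers are invented): with
# pre_lm = -250.0 and now_lm = -247.0, delta = (-250.0 - (-247.0)) / -250.0
# = 0.012 > threshold (0.01), so another `interval` of subwords is added;
# a smaller gain such as now_lm = -249.0 gives delta = 0.004 <= 0.01, the
# merged vocabulary is written out, and the loop stops.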
# Leftover hard-coded debug call from development; commented out so that
# importing this module or running main() does not trigger a run on files
# that may not exist.
# vocab_extend('cs_data.txt', 'vocab.txt', 'cs.vocab')
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--corpus", default=None, type=str, required=True,
help="the file of the corpus to train the vocabulary.")
parser.add_argument("--raw_vocab", default=None, type=str, required=True,
help="the path to the file of the origin vocabulary")
parser.add_argument("--output_file", default=None, type=str, required=True,
help="the output file of the final vocabulary")
parser.add_argument('--interval', type=int, default=10000,
help="The interval of the vocabulary size.")
    parser.add_argument('--threshold', type=float, default=0.01,
                        help="The final threshold of the relative increase of P(D)")
args = parser.parse_args()
return args
def main():
args = get_args()
vocab_extend(args.corpus, args.raw_vocab, args.output_file, args.interval, args.threshold)
if __name__ == '__main__':
    main()
| data2vec_vision-main | adalm/incr_bpe/vocab_extend.py
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoders for text data.
* TextEncoder: base class
* SubwordTextEncoder: invertible
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from itertools import chain
import re
import time
import logging
import six
from six.moves import range # pylint: disable=redefined-builtin
# from tensor2tensor.data_generators import tokenizer
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
# Reserved tokens for things like padding and EOS symbols.
PAD = "[PAD]"
EOS = "[EOS]"
UNK = "[UNK]"
CLS = "[CLS]"
SEP = "[SEP]"
MASK = "[MASK]"
RESERVED_TOKENS = [PAD, EOS, UNK, CLS, SEP, MASK]
NUM_RESERVED_TOKENS = len(RESERVED_TOKENS)
PAD_ID = RESERVED_TOKENS.index(PAD) # Normally 0
EOS_ID = RESERVED_TOKENS.index(EOS) # Normally 1
if six.PY2:
RESERVED_TOKENS_BYTES = RESERVED_TOKENS
else:
RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")]
# Regular expression for unescaping token strings.
# '\u' is converted to '_'
# '\\' is converted to '\'
# '\213;' is converted to unichr(213)
_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);")
_ESCAPE_CHARS = set(u"\\_u;0123456789")
_SPECIAL_CHARS = set(u"!\"\'#$%&*()`+,-./:;<=>?@[]^_{}~|")
# Unicode utility functions that work with Python 2 and 3
def native_to_unicode(s):
if is_unicode(s):
return s
try:
return to_unicode(s)
except UnicodeDecodeError:
res = to_unicode(s, ignore_errors=True)
logger.info("Ignoring Unicode error, outputting: %s" % res)
return res
def unicode_to_native(s):
if six.PY2:
return s.encode("utf-8") if is_unicode(s) else s
else:
return s
def is_unicode(s):
return isinstance(s, six.text_type)
def to_unicode(s, ignore_errors=False):
if is_unicode(s):
return s
error_mode = "ignore" if ignore_errors else "strict"
return s.decode("utf-8", errors=error_mode)
# def to_unicode_ignore_errors(s):
# return to_unicode(s, ignore_errors=True)
# def to_unicode_utf8(s):
# return unicode(s, "utf-8") if six.PY2 else s.decode("utf-8")
# def strip_ids(ids, ids_to_strip):
# """Strip ids_to_strip from the end ids."""
# ids = list(ids)
# while ids and ids[-1] in ids_to_strip:
# ids.pop()
# return ids
class TextEncoder(object):
"""Base class for converting from ints to/from human readable strings."""
def __init__(self, num_reserved_ids=NUM_RESERVED_TOKENS):
self._num_reserved_ids = num_reserved_ids
@property
def num_reserved_ids(self):
return self._num_reserved_ids
# def encode(self, s):
# """Transform a human-readable string into a sequence of int ids.
#
# The ids should be in the range [num_reserved_ids, vocab_size). Ids [0,
# num_reserved_ids) are reserved.
#
# EOS is not appended.
#
# Args:
# s: human-readable string to be converted.
#
# Returns:
# ids: list of integers
# """
# return [int(w) + self._num_reserved_ids for w in s.split()]
#
# def decode(self, ids, strip_extraneous=False):
# """Transform a sequence of int ids into a human-readable string.
#
# EOS is not expected in ids.
#
# Args:
# ids: list of integers to be converted.
# strip_extraneous: bool, whether to strip off extraneous tokens
# (EOS and PAD).
#
# Returns:
# s: human-readable string.
# """
# if strip_extraneous:
# ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
# return " ".join(self.decode_list(ids))
#
# def decode_list(self, ids):
# """Transform a sequence of int ids into a their string versions.
#
# This method supports transforming individual input/output ids to their
# string versions so that sequence to/from text conversions can be visualized
# in a human readable format.
#
# Args:
# ids: list of integers to be converted.
#
# Returns:
# strs: list of human-readable string.
# """
# decoded_ids = []
# for id_ in ids:
# if 0 <= id_ < self._num_reserved_ids:
# decoded_ids.append(RESERVED_TOKENS[int(id_)])
# else:
# decoded_ids.append(id_ - self._num_reserved_ids)
# return [str(d) for d in decoded_ids]
@property
def vocab_size(self):
raise NotImplementedError()
def _escape_token(token, alphabet):
"""Escape away underscores and OOV characters and append '_'.
This allows the token to be expressed as the concatenation of a list
of subtokens from the vocabulary. The underscore acts as a sentinel
which allows us to invertibly concatenate multiple such lists.
Args:
token: A unicode string to be escaped.
alphabet: A set of all characters in the vocabulary's alphabet.
Returns:
escaped_token: An escaped unicode string.
Raises:
ValueError: If the provided token is not unicode.
"""
if not isinstance(token, six.text_type):
raise ValueError("Expected string type for token, got %s" % type(token))
token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
return u"".join(ret) + "_"
def _my_escape_token(token, alphabet):
if not isinstance(token, six.text_type):
raise ValueError("Expected string type for token, got %s" % type(token))
token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
return "_" + u"".join(ret)
# def _unescape_token(escaped_token):
# """Inverse of _escape_token().
#
# Args:
# escaped_token: a unicode string
#
# Returns:
# token: a unicode string
# """
#
# def match(m):
# if m.group(1) is None:
# return u"_" if m.group(0) == u"\\u" else u"\\"
#
# try:
# return six.unichr(int(m.group(1)))
# except (ValueError, OverflowError) as _:
# return u"\u3013" # Unicode for undefined character.
#
# trimmed = escaped_token[:-1] if escaped_token.endswith("_") else escaped_token
# return _UNESCAPE_REGEX.sub(match, trimmed)
class SubwordTextEncoder(TextEncoder):
"""Class for invertibly encoding text using a limited vocabulary.
Invertibly encodes a native string as a sequence of subtokens from a limited
vocabulary.
A SubwordTextEncoder is built from a corpus (so it is tailored to the text in
the corpus), and stored to a file. See text_encoder_build_subword.py.
It can then be loaded and used to encode/decode any text.
Encoding has four phases:
1. Tokenize into a list of tokens. Each token is a unicode string of either
all alphanumeric characters or all non-alphanumeric characters. We drop
tokens consisting of a single space that are between two alphanumeric
tokens.
2. Escape each token. This escapes away special and out-of-vocabulary
characters, and makes sure that each token ends with an underscore, and
has no other underscores.
  3. Represent each escaped token as the concatenation of a list of subtokens
from the limited vocabulary. Subtoken selection is done greedily from
beginning to end. That is, we construct the list in order, always picking
the longest subtoken in our vocabulary that matches a prefix of the
remaining portion of the encoded token.
4. Concatenate these lists. This concatenation is invertible due to the
fact that the trailing underscores indicate when one list is finished.
"""
def __init__(self, filename=None):
"""Initialize and read from a file, if provided.
Args:
filename: filename from which to read vocab. If None, do not load a
vocab
"""
self._alphabet = set()
# self.filename = filename
# if filename is not None:
# self._load_from_file(filename)
super(SubwordTextEncoder, self).__init__()
# def encode(self, s):
# """Converts a native string to a list of subtoken ids.
#
# Args:
# s: a native string.
# Returns:
# a list of integers in the range [0, vocab_size)
# """
# return self._tokens_to_subtoken_ids(
# tokenizer.encode(native_to_unicode(s)))
#
# def encode_without_tokenizing(self, token_text):
# """Converts string to list of subtoken ids without calling tokenizer.
#
# This treats `token_text` as a single token and directly converts it
# to subtoken ids. This may be useful when the default tokenizer doesn't
# do what we want (e.g., when encoding text with tokens composed of lots of
# nonalphanumeric characters). It is then up to the caller to make sure that
# raw text is consistently converted into tokens. Only use this if you are
# sure that `encode` doesn't suit your needs.
#
# Args:
# token_text: A native string representation of a single token.
# Returns:
# A list of subword token ids; i.e., integers in the range [0, vocab_size).
# """
# return self._tokens_to_subtoken_ids([native_to_unicode(token_text)])
# def decode(self, ids, strip_extraneous=False):
# """Converts a sequence of subtoken ids to a native string.
#
# Args:
# ids: a list of integers in the range [0, vocab_size)
# strip_extraneous: bool, whether to strip off extraneous tokens
# (EOS and PAD).
#
# Returns:
# a native string
# """
# if strip_extraneous:
# ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
# return unicode_to_native(
# tokenizer.decode(self._subtoken_ids_to_tokens(ids)))
# def decode_list(self, ids):
# return [self._subtoken_id_to_subtoken_string(s) for s in ids]
@property
def vocab_size(self):
"""The subtoken vocabulary size."""
return len(self._all_subtoken_strings)
# def _tokens_to_subtoken_ids(self, tokens):
# """Converts a list of tokens to a list of subtoken ids.
#
# Args:
# tokens: a list of strings.
# Returns:
# a list of integers in the range [0, vocab_size)
# """
# ret = []
# for token in tokens:
# ret.extend(self._token_to_subtoken_ids(token))
# return ret
# def _token_to_subtoken_ids(self, token):
# """Converts token to a list of subtoken ids.
#
# Args:
# token: a string.
# Returns:
# a list of integers in the range [0, vocab_size)
# """
# cache_location = hash(token) % self._cache_size
# cache_key, cache_value = self._cache[cache_location]
# if cache_key == token:
# return cache_value
# ret = self._escaped_token_to_subtoken_ids(
# _escape_token(token, self._alphabet))
# self._cache[cache_location] = (token, ret)
# return ret
# def _subtoken_ids_to_tokens(self, subtokens):
# """Converts a list of subtoken ids to a list of tokens.
#
# Args:
# subtokens: a list of integers in the range [0, vocab_size)
# Returns:
# a list of strings.
# """
# concatenated = "".join(
# [self._subtoken_id_to_subtoken_string(s) for s in subtokens])
# split = concatenated.split("_")
# ret = []
# for t in split:
# if t:
# unescaped = _unescape_token(t + "_")
# if unescaped:
# ret.append(unescaped)
# return ret
# def _subtoken_id_to_subtoken_string(self, subtoken):
# """Converts a subtoken integer ID to a subtoken string."""
# if 0 <= subtoken < self.vocab_size:
# return self._all_subtoken_strings[subtoken]
# return u""
def _escaped_token_to_subtoken_strings(self, escaped_token):
"""Converts an escaped token string to a list of subtoken strings.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtokens as unicode strings.
"""
# NOTE: This algorithm is greedy; it won't necessarily produce the "best"
# list of subtokens.
ret = []
start = 0
token_len = len(escaped_token)
while start < token_len:
for end in range(
min(token_len, start + self._max_subtoken_len), start, -1):
subtoken = escaped_token[start:end]
if subtoken in self._subtoken_string_to_id:
ret.append(subtoken)
start = end
break
else: # Did not break
# If there is no possible encoding of the escaped token then one of the
# characters in the token is not in the alphabet. This should be
# impossible and would be indicative of a bug.
assert False, "Token substring not found in subtoken vocabulary."
return ret
# def _escaped_token_to_subtoken_ids(self, escaped_token):
# """Converts an escaped token string to a list of subtoken IDs.
#
# Args:
# escaped_token: An escaped token as a unicode string.
# Returns:
# A list of subtoken IDs as integers.
# """
# return [
# self._subtoken_string_to_id[subtoken]
# for subtoken in self._escaped_token_to_subtoken_strings(escaped_token)
# ]
# @classmethod
# def build_from_generator(cls,
# generator,
# target_size,
# max_subtoken_length=None,
# reserved_tokens=None):
# """Builds a SubwordTextEncoder from the generated text.
#
# Args:
# generator: yields text.
# target_size: int, approximate vocabulary size to create.
# max_subtoken_length: Maximum length of a subtoken. If this is not set,
# then the runtime and memory use of creating the vocab is quadratic in
# the length of the longest token. If this is set, then it is instead
# O(max_subtoken_length * length of longest token).
# reserved_tokens: List of reserved tokens. The global variable
# `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
# argument is `None`, it will use `RESERVED_TOKENS`.
#
# Returns:
# SubwordTextEncoder with `vocab_size` approximately `target_size`.
# """
# token_counts = collections.defaultdict(int)
# for item in generator:
# for tok in tokenizer.encode(native_to_unicode(item)):
# token_counts[tok] += 1
# encoder = cls.build_to_target_size(
# target_size, token_counts, 1, 1e3,
# max_subtoken_length=max_subtoken_length,
# reserved_tokens=reserved_tokens)
# return encoder
#
@classmethod
def build_to_target_size(cls,
target_size,
token_counts,
min_val,
max_val,
max_subtoken_length=None,
reserved_tokens=None,
num_iterations=4):
"""Builds a SubwordTextEncoder that has `vocab_size` near `target_size`.
Uses simple recursive binary search to find a minimum token count that most
closely matches the `target_size`.
Args:
target_size: Desired vocab_size to approximate.
token_counts: A dictionary of token counts, mapping string to int.
min_val: An integer; lower bound for the minimum token count.
max_val: An integer; upper bound for the minimum token count.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
num_iterations: An integer; how many iterations of refinement.
Returns:
A SubwordTextEncoder instance.
Raises:
ValueError: If `min_val` is greater than `max_val`.
"""
if min_val > max_val:
raise ValueError("Lower bound for the minimum token count "
"is greater than the upper bound.")
if target_size < 1:
raise ValueError("Target size must be positive.")
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
def bisect(min_val, max_val):
"""Bisection to find the right size."""
present_count = (max_val + min_val) // 2
logger.info("Trying min_count %d" % present_count)
subtokenizer = cls()
subtokenizer.build_from_token_counts(
token_counts, present_count, num_iterations,
max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
# Being within 1% of the target size is ok.
is_ok = abs(subtokenizer.vocab_size - target_size) * 100 < target_size
# If min_val == max_val, we can't do any better than this.
if is_ok or min_val >= max_val or present_count < 2:
return subtokenizer
if subtokenizer.vocab_size > target_size:
other_subtokenizer = bisect(present_count + 1, max_val)
else:
other_subtokenizer = bisect(min_val, present_count - 1)
if other_subtokenizer is None:
return subtokenizer
if (abs(other_subtokenizer.vocab_size - target_size) <
abs(subtokenizer.vocab_size - target_size)):
return other_subtokenizer
return subtokenizer
return bisect(min_val, max_val)
def build_from_token_counts(self,
token_counts,
min_count,
num_iterations=4,
reserved_tokens=None,
max_subtoken_length=None):
"""Train a SubwordTextEncoder based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer. how many iterations of refinement.
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
Raises:
ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
is not clear what the space is being reserved for, or when it will be
filled in.
"""
# import pudb; pu.db
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
else:
# There is not complete freedom in replacing RESERVED_TOKENS.
new_reserved_tokens = RESERVED_TOKENS
for token in reserved_tokens:
if token in new_reserved_tokens:
continue
new_reserved_tokens.append(token)
reserved_tokens = new_reserved_tokens
for default, proposed in zip(RESERVED_TOKENS, reserved_tokens):
if default != proposed:
raise ValueError("RESERVED_TOKENS must be a prefix of "
"reserved_tokens.")
start_time = time.time()
#import pudb; pu.db
    # Initialize the alphabet. Note, this must include reserved tokens or it
    # can result in encoding failures. The standard RESERVED_TOKENS prefix is
    # skipped below, so only user-supplied reserved tokens contribute extra
    # characters to the alphabet.
alphabet_tokens = chain(six.iterkeys(token_counts),
[native_to_unicode(t) for t in reserved_tokens[len(RESERVED_TOKENS):]])
# all alphabets in tokens
self._init_alphabet_from_tokens(alphabet_tokens)
# Bootstrap the initial list of subtokens with the characters from the
# alphabet plus the escaping characters.
self._init_subtokens_from_list(list(self._alphabet),
reserved_tokens=reserved_tokens)
# We build iteratively. On each iteration, we segment all the words,
# then count the resulting potential subtokens, keeping the ones
# with high enough counts for our new vocabulary.
if min_count < 1:
min_count = 1
for i in range(num_iterations):
#logger.info("Iteration {0}".format(i))
# Collect all substrings of the encoded token that break along current
# subtoken boundaries.
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
iter_start_time = time.time()
# escaped_token = _escape_token(token, self._alphabet) # added "_" at the end
escaped_token = _my_escape_token(token, self._alphabet)
subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
# print(escaped_token)
# print(subtokens)
        # Example: escaped_token '_1234' -> subtokens ['_12', '34'], and with
        # count 100 every prefix substring ('_', '_1', '_12', '_123', '_1234',
        # '3', '34') gets its count increased by 100.
start = 0
for subtoken in subtokens:
last_position = len(escaped_token) + 1
if max_subtoken_length is not None:
last_position = min(last_position, start + max_subtoken_length)
for end in range(start + 1, last_position):
new_subtoken = escaped_token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
iter_time_secs = time.time() - iter_start_time
if iter_time_secs > 0.1:
logger.info(u"Processing token [{0}] took {1} seconds, consider "
"setting Text2TextProblem.max_subtoken_length to a "
"smaller value.".format(token, iter_time_secs))
# print(len(subtoken_counts))
# Array of sets of candidate subtoken strings, by length.
len_to_subtoken_strings = []
for subtoken_string, count in six.iteritems(subtoken_counts):
lsub = len(subtoken_string)
if count >= min_count:
while len(len_to_subtoken_strings) <= lsub:
len_to_subtoken_strings.append(set())
len_to_subtoken_strings[lsub].add(subtoken_string)
# Consider the candidates longest to shortest, so that if we accept
# a longer subtoken string, we can decrement the counts of its prefixes.
new_subtoken_strings_with_count = []
for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1):
subtoken_strings = len_to_subtoken_strings[lsub]
for subtoken_string in subtoken_strings:
count = subtoken_counts[subtoken_string]
if count >= min_count:
# Exclude alphabet tokens here, as they must be included later,
# explicitly, regardless of count.
if subtoken_string not in self._alphabet:
new_subtoken_strings_with_count.append((count, subtoken_string))
for l in range(1, lsub):
subtoken_counts[subtoken_string[:l]] -= count
# Include the alphabet explicitly to guarantee all strings are encodable.
new_subtoken_strings_with_count.extend((subtoken_counts.get(a, 0), a)
for a in self._alphabet)
new_subtoken_strings_with_count.sort(reverse=True)
# Reinitialize to the candidate vocabulary.
new_subtoken_strings = [subtoken for _, subtoken in new_subtoken_strings_with_count]
if reserved_tokens:
# escaped_reserved_tokens = [
# _escape_token(native_to_unicode(t), self._alphabet)
# for t in reserved_tokens
# ]
# new_subtoken_strings = escaped_reserved_tokens + new_subtoken_strings
new_subtoken_strings = reserved_tokens + new_subtoken_strings
new_subtoken_strings = list(set(new_subtoken_strings))
self._init_subtokens_from_list(new_subtoken_strings)
#logger.info("vocab_size = %d" % self.vocab_size)
# print("vocab_size = %d" % self.vocab_size)
# print(self.vocab_size)
self.subtokens_with_counts = new_subtoken_strings_with_count
# Frequency of "_" is high.
# So remove from current position and add to the last.
new_subtoken_strings.remove("_")
new_subtoken_strings.insert(len(new_subtoken_strings), "_")
oov_list = []
for idx, subtoken in enumerate(new_subtoken_strings):
if subtoken.startswith("_") and subtoken != "_":
new_subtoken_strings[idx] = subtoken[1:]
elif subtoken[0] in self._alphabet and subtoken not in reserved_tokens:
new_subtoken_strings[idx] = "##" + subtoken
else:
oov_list.append(subtoken)
new_subtoken_strings.extend(char for char in self._alphabet
if char not in new_subtoken_strings)
# print(new_subtoken_strings)
# print(oov_list)
new_subtoken_strings = list(set(new_subtoken_strings))
self._init_subtokens_from_list(new_subtoken_strings)
#logger.info("vocab_size = %d" % self.vocab_size)
logger.info("total vocab size : {}, {} seconds elapsed ".format(self.vocab_size, time.time() - start_time))
# @property
# def all_subtoken_strings(self):
# return tuple(self._all_subtoken_strings)
#
# def dump(self):
# """Debugging dump of the current subtoken vocabulary."""
# subtoken_strings = [(i, s)
# for s, i in six.iteritems(self._subtoken_string_to_id)]
# print(u", ".join(u"{0} : '{1}'".format(i, s)
# for i, s in sorted(subtoken_strings)))
def _init_subtokens_from_list(self, subtoken_strings, reserved_tokens=None):
"""Initialize token information from a list of subtoken strings.
Args:
subtoken_strings: a list of subtokens
reserved_tokens: List of reserved tokens. We must have `reserved_tokens`
as None or the empty list, or else the global variable `RESERVED_TOKENS`
must be a prefix of `reserved_tokens`.
Raises:
ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
is not clear what the space is being reserved for, or when it will be
filled in.
"""
if reserved_tokens is None:
reserved_tokens = []
if reserved_tokens:
self._all_subtoken_strings = reserved_tokens + subtoken_strings
else:
self._all_subtoken_strings = subtoken_strings
# we remember the maximum length of any subtoken to avoid having to
# check arbitrarily long strings.
self._max_subtoken_len = max([len(s) for s in subtoken_strings])
self._subtoken_string_to_id = {
s: i + len(reserved_tokens)
for i, s in enumerate(subtoken_strings) if s
}
# Initialize the cache to empty.
self._cache_size = 2 ** 20
self._cache = [(None, None)] * self._cache_size
def _init_alphabet_from_tokens(self, tokens):
"""Initialize alphabet from an iterable of token or subtoken strings."""
# Include all characters from all tokens in the alphabet to guarantee that
# any token can be encoded. Additionally, include all escaping characters.
self._alphabet = {c for token in tokens for c in token}
self._alphabet |= _ESCAPE_CHARS
self._alphabet |= _SPECIAL_CHARS
# def _load_from_file_object(self, f):
# """Load from a file object.
#
# Args:
# f: File object to load vocabulary from
# """
# subtoken_strings = []
# for line in f:
# s = line.strip()
# # Some vocab files wrap words in single quotes, but others don't
# if ((s.startswith("'") and s.endswith("'")) or
# (s.startswith("\"") and s.endswith("\""))):
# s = s[1:-1]
# subtoken_strings.append(native_to_unicode(s))
# self._init_subtokens_from_list(subtoken_strings)
# self._init_alphabet_from_tokens(subtoken_strings)
#
# def _load_from_file(self, filename):
# """Load from a vocab file."""
# if not tf.gfile.Exists(filename):
# raise ValueError("File %s not found" % filename)
# with tf.gfile.Open(filename) as f:
# self._load_from_file_object(f)
def store_to_file(self, filename, add_single_quotes=True):
#with tf.gfile.Open(filename, "w") as f:
with open(filename, "w") as f:
for subtoken_string in self._all_subtoken_strings:
if add_single_quotes:
f.write("'" + unicode_to_native(subtoken_string) + "'\n")
else:
f.write(unicode_to_native(subtoken_string) + "\n")
def store_to_file_with_counts(self, filename):
# with tf.gfile.Open(filename, "w") as f:
with open(filename, "w") as f:
for subtoken_string, count in self.subtokens_with_counts:
f.write(unicode_to_native(subtoken_string + "\t" + str(count)) + "\n")
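if __name__ == "__main__":
    # Hedged usage sketch added for illustration (not part of the original
    # module): the token counts are invented and the target size is tiny, so
    # the resulting vocabulary is mostly reserved tokens plus single characters.
    _demo_counts = {u"hello": 10, u"help": 7, u"held": 5, u"world": 9}
    _demo_encoder = SubwordTextEncoder.build_to_target_size(
        20, _demo_counts, 1, 10, num_iterations=2)
    print("demo vocab size:", _demo_encoder.vocab_size)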
| data2vec_vision-main | adalm/incr_bpe/text_encoder.py |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple invertible tokenizer.
Converts from a unicode string to a list of tokens
(represented as Unicode strings).
This tokenizer has the following desirable properties:
- It is invertible.
- Alphanumeric characters are broken away from non-alphanumeric characters.
- A single space between words does not produce an extra token.
- The full Unicode punctuation and separator set is recognized.
The tokenization algorithm is as follows:
1. Split the text into a list of tokens, splitting at every boundary of an
alphanumeric character and a non-alphanumeric character. This produces
a list which alternates between "alphanumeric tokens"
(strings of alphanumeric characters) and "non-alphanumeric tokens"
(strings of non-alphanumeric characters).
2. Remove every token consisting of a single space, unless it is
the very first or very last token in the list. These tokens are now
implied by the fact that there are two adjacent alphanumeric tokens.
e.g. u"Dude - that's so cool."
-> [u"Dude", u" - ", u"that", u"'", u"s", u"so", u"cool", u"."]
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import sys
import unicodedata
import six
import logging
from six.moves import range # pylint: disable=redefined-builtin
# from tensor2tensor.utils import mlperf_log
import time
import glob
# Conversion between Unicode and UTF-8, if required (on Python2)
_native_to_unicode = (lambda s: s.decode("utf-8")) if six.PY2 else (lambda s: s)
logger = logging.getLogger(__name__)
# This set contains all letter and number characters.
_ALPHANUMERIC_CHAR_SET = set(
six.unichr(i) for i in range(sys.maxunicode)
if (unicodedata.category(six.unichr(i)).startswith("L") or
unicodedata.category(six.unichr(i)).startswith("N") or
unicodedata.category(six.unichr(i)).startswith("P")))
# unicodedata.category(six.unichr(i)).startswith("S")
def encode(text):
"""Encode a unicode string as a list of tokens.
Args:
text: a unicode string
Returns:
a list of tokens as Unicode strings
"""
if not text:
return []
ret = []
token_start = 0
# Classify each character in the input string
is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text]
add_remaining = False
for pos in range(1, len(text)):
add_remaining = False
if is_alnum[pos] != is_alnum[pos - 1]:
if not is_alnum[pos]:
token = text[token_start:pos]
if token != u" " or token_start == 0:
add_remaining = False
ret.append(token)
else:
add_remaining = True
token_start = pos
final_token = text[token_start:] if text[-1] in _ALPHANUMERIC_CHAR_SET else text[token_start:-1]
if add_remaining:
ret.append(final_token)
# split on punctuation
final_tokens = []
for token in ret:
splitted_token = _run_split_on_punc(token)
final_tokens.extend(splitted_token)
return final_tokens
def _run_split_on_punc(text, never_split=None):
"""Splits punctuation on a piece of text."""
if never_split is not None and text in never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def decode(tokens):
"""Decode a list of tokens to a unicode string.
Args:
tokens: a list of Unicode strings
Returns:
a unicode string
"""
token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
ret = []
for i, token in enumerate(tokens):
if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
ret.append(u" ")
ret.append(token)
return "".join(ret)
def _read_filepattern(filepattern, max_lines=None, split_on_newlines=True, do_lower_case=False):
"""Reads files matching a wildcard pattern, yielding the contents.
Args:
filepattern: A wildcard pattern matching one or more files.
max_lines: If set, stop reading after reading this many lines.
split_on_newlines: A boolean. If true, then split files by lines and strip
leading and trailing whitespace from each line. Otherwise, treat each
file as a single string.
Yields:
The contents of the files as lines, if split_on_newlines is True, or
the entire contents of each file if False.
"""
filenames = sorted(glob.glob(filepattern))
print(filenames, 'do lower case:', do_lower_case)
lines_read = 0
for filename in filenames:
start = time.time()
with open(filename) as f:
if split_on_newlines:
for line in f:
if do_lower_case:
line = line.lower()
yield line.strip()
lines_read += 1
if max_lines and lines_read >= max_lines:
return
if lines_read % 100000 == 0:
print("read", lines_read, "lines,", time.time() - start, "secs elapsed")
else:
if max_lines:
doc = []
for line in f:
if do_lower_case:
line = line.lower()
doc.append(line)
lines_read += 1
if max_lines and lines_read >= max_lines:
yield "".join(doc)
return
yield "".join(doc)
else:
yield f.read()
print(time.time() - start, "for reading read file :", filename)
def corpus_token_counts(
text_filepattern, corpus_max_lines, split_on_newlines=True, additional_chars="", do_lower_case=False):
"""Read the corpus and compute a dictionary of token counts.
Args:
text_filepattern: A pattern matching one or more files.
corpus_max_lines: An integer; maximum total lines to read.
split_on_newlines: A boolean. If true, then split files by lines and strip
leading and trailing whitespace from each line. Otherwise, treat each
file as a single string.
    additional_chars: A string. Each of its characters is treated as a normal
      alphabet character, so it is always included in the vocabulary.
Returns:
a dictionary mapping token to count.
"""
if additional_chars:
    _ALPHANUMERIC_CHAR_SET.update(additional_chars)  # add each character, not the whole string
counts = collections.Counter()
for doc in _read_filepattern(
text_filepattern,
max_lines=corpus_max_lines,
split_on_newlines=split_on_newlines,
do_lower_case=do_lower_case):
counts.update(encode(_native_to_unicode(doc)))
print("read all files")
return counts
def vocab_token_counts(text_filepattern, max_lines, do_lower_case=False):
"""Read a vocab file and return a dictionary of token counts.
Reads a two-column CSV file of tokens and their frequency in a dataset. The
tokens are presumed to be generated by encode() or the equivalent.
Args:
text_filepattern: A pattern matching one or more files.
max_lines: An integer; maximum total lines to read.
Returns:
a dictionary mapping token to count.
"""
ret = {}
for i, line in enumerate(
_read_filepattern(text_filepattern, max_lines=max_lines)):
if "," not in line:
logger.warning("Malformed vocab line #%d '%s'", i, line)
continue
if do_lower_case:
line = line.lower()
token, count = line.rsplit(",", 1)
ret[_native_to_unicode(token)] = int(count)
return ret
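if __name__ == "__main__":
    # Small demo added for illustration: tokenize the sentence from the module
    # docstring. With the punctuation splitting above, the output should be
    # roughly ['Dude', '-', 'that', "'", 's', 'so', 'cool', '.'].
    print(encode(u"Dude - that's so cool."))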
| data2vec_vision-main | adalm/incr_bpe/tokenizer.py |
#-*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from text_encoder import SubwordTextEncoder
import tokenizer
import os
import tempfile
import tensorflow as tf
tf.flags.DEFINE_string('output_filename', '/tmp/my.subword_text_encoder',
'where to store the SubwordTextEncoder')
tf.flags.DEFINE_string('corpus_filepattern', '',
'Corpus of one or more text files')
tf.flags.DEFINE_string('vocab_filepattern', '', 'One or more vocabulary files '
'(one word per line as "word,count")')
tf.flags.DEFINE_integer('min_count', 5, 'Minimum subtoken count in corpus')
tf.flags.DEFINE_integer('vocab_size', 30000, 'The target vocab size. The builder produces a vocab whose size is close to this value.')
tf.flags.DEFINE_integer('corpus_max_lines', None,
'How many lines of corpus to read')
tf.flags.DEFINE_integer('num_iterations', 5, 'Number of iterations')
tf.flags.DEFINE_bool('split_on_newlines', True, 'Break corpus into lines.')
tf.flags.DEFINE_string('additional_chars', "", 'Set special characters to be included in vocab. ex : "~", "/".')
tf.flags.DEFINE_integer('max_subtoken_length', None, 'Max subtoken length')
tf.flags.DEFINE_string('raw_vocab', None, 'Raw BERT vocab file to extend')
tf.flags.DEFINE_bool('do_lower_case', False, 'Whether or not to lowercase the input corpus')
FLAGS = tf.flags.FLAGS
def merge_output_file_with_bert_vocab(output_filename, bert_vocab, temp_path):
writer = open(output_filename, 'w', encoding='utf-8')
_set = set()
with open(bert_vocab, 'r', encoding='utf-8') as reader:
for line in reader:
writer.write(line)
_set.add(line.strip())
print(temp_path)
with open(temp_path, 'r', encoding='utf-8') as reader:
for line in reader:
if line.strip() not in _set:
writer.write(line)
writer.close()
# os.remove(temp_path)
def main(unused_argv):
if FLAGS.corpus_filepattern and FLAGS.vocab_filepattern:
raise ValueError(
'Must only provide one of --corpus_filepattern or --vocab_filepattern')
elif FLAGS.corpus_filepattern:
token_counts = tokenizer.corpus_token_counts(
FLAGS.corpus_filepattern,
FLAGS.corpus_max_lines,
split_on_newlines=FLAGS.split_on_newlines, additional_chars=FLAGS.additional_chars, do_lower_case=FLAGS.do_lower_case)
elif FLAGS.vocab_filepattern:
token_counts = tokenizer.vocab_token_counts(FLAGS.vocab_filepattern,
FLAGS.corpus_max_lines, FLAGS.do_lower_case)
else:
raise ValueError(
'Must provide one of --corpus_filepattern or --vocab_filepattern')
reserved_tokens = None
if FLAGS.raw_vocab:
lines = open(FLAGS.raw_vocab, 'r', encoding='utf-8').readlines()
lines = [s.strip() for s in lines if len(s) > 0]
reserved_tokens = lines
print(len(token_counts))
print(len(reserved_tokens))
target_size = FLAGS.vocab_size
if target_size <= len(reserved_tokens):
raise ValueError("The vocab_size must be larger than the origin vocab's size ")
if target_size >= len(token_counts):
raise ValueError("The vocab_size is too large. Please set it smaller or prepare more corpus.")
min_val = 1
max_val = len(token_counts) // (target_size ** 0.5)
fd, temp_path = tempfile.mkstemp()
encoder = SubwordTextEncoder.build_to_target_size(target_size,token_counts,min_val, max_val, num_iterations=FLAGS.num_iterations,
reserved_tokens=reserved_tokens, max_subtoken_length=FLAGS.max_subtoken_length)
# encoder = SubwordTextEncoder()
# encoder.build_from_token_counts(token_counts, FLAGS.min_count,
# FLAGS.num_iterations, reserved_tokens=reserved_tokens, max_subtoken_length=FLAGS.max_subtoken_length)
encoder.store_to_file(temp_path, add_single_quotes=False)
merge_output_file_with_bert_vocab(FLAGS.output_filename, FLAGS.raw_vocab, temp_path)
if __name__ == '__main__':
tf.app.run()
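# Hedged usage sketch (added for illustration; the file names are placeholders,
# not paths from this repository):
#
#   python subword_builder.py \
#     --corpus_filepattern "corpus/*.txt" \
#     --raw_vocab bert_vocab.txt \
#     --output_filename extended_vocab.txt \
#     --vocab_size 40000 \
#     --do_lower_case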
| data2vec_vision-main | adalm/incr_bpe/subword_builder.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from typing import List, Optional
import numpy as np
from base import BaseModel
from stats import DirichletMultinomial, DirichletPrior, NormalInverseGammaNormal
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class ClaraGibbs(BaseModel):
def __init__(
self,
burn_in: int = 1000,
num_samples: int = 1000,
sample_lag: int = 1,
theta_scale: Optional[float] = None,
theta_mean: Optional[List[float]] = None,
psi_scale: Optional[List[float]] = None,
psi_mean: Optional[List[List[float]]] = None,
):
super().__init__("ClaraGibbs")
self.burn_in = burn_in
self.num_samples = num_samples
self.sample_lag = sample_lag
self.theta_scale = theta_scale
self.theta_mean = theta_mean
self.psi_scale = psi_scale
self.psi_mean = psi_mean
def _increment(
self,
z: int,
item_ratings: List[int],
item_labelers: List[int],
item_scores: Optional[List[List[float]]] = None,
):
self.theta.increment(z)
for j in range(len(item_ratings)):
self.psi[item_labelers[j]][z].increment(item_ratings[j])
if item_scores is not None:
for c in range(self.C):
self.phi[c][z].add_observation(item_scores[c][z], False)
def _decrement(
self,
z: int,
item_ratings: List[int],
item_labelers: List[int],
item_scores: Optional[List[List[float]]] = None,
):
self.theta.decrement(z)
for j in range(len(item_ratings)):
self.psi[item_labelers[j]][z].decrement(item_ratings[j])
if item_scores is not None:
for c in range(self.C):
self.phi[c][z].remove_observation(item_scores[c][z], False)
def _sample(
self,
item_ratings: List[int],
item_labelers: List[int],
item_scores: Optional[List[List[float]]] = None,
):
probs = np.array([self.theta.get_posterior_prob(r) for r in range(self.R)])
for k in range(self.R):
for j in range(len(item_ratings)):
labeler = item_labelers[j]
rating = item_ratings[j]
probs[k] *= self.psi[labeler][k].get_posterior_prob(rating)
if item_scores is not None:
for c in range(self.C):
probs[k] *= self.phi[c][k].get_posterior_prob(item_scores[c][k])
norm_probs = probs / np.sum(probs)
return np.random.choice(self.R, p=norm_probs)
def _update_gaussians(self):
for c in range(self.C):
for r in range(self.R):
self.phi[c][r].estimate_parameters()
def _get_log_likelihood(self) -> float:
llh = 0.0
llh += self.theta.get_log_likelihood()
for l_psi in self.psi:
for r_psi in l_psi:
llh += r_psi.get_log_likelihood()
if self.C > 0:
for c_phi in self.phi:
for r_phi in c_phi:
llh += r_phi.get_log_likelihood()
return llh
def _get_priors(
self, ratings: np.array, labelers: np.array, scores: Optional[np.array] = None
):
logger.info("Getting priors ...")
# theta
if self.theta_scale is None:
self.theta_scale = 1.0
logger.info(f" theta_scale = {self.theta_scale}")
if self.theta_mean is None:
flatten_ratings = np.hstack(ratings)
obs_ratings, obs_counts = np.unique(flatten_ratings, return_counts=True)
theta_counts = np.zeros(self.R, dtype=float)
for i in range(len(obs_ratings)):
theta_counts[obs_ratings[i]] = 1.0 + obs_counts[i]
self.theta_mean = theta_counts / np.sum(theta_counts)
logger.info(f" theta_mean = {self.theta_mean}")
theta_prior = DirichletPrior.from_scale_mean(self.theta_scale, self.theta_mean)
logger.info(f" theta_prior = {theta_prior}")
# psi
if self.psi_scale is None:
self.psi_scale = [1.0] * self.R
logger.info(f" psi_scale = {self.psi_scale}")
if self.psi_mean is None:
diag_value = 0.75
off_diag_value = (1.0 - diag_value) / (self.R - 1)
self.psi_mean = np.zeros((self.R, self.R), dtype=float)
for r in range(self.R):
for o in range(self.R):
self.psi_mean[r][o] = diag_value if r == o else off_diag_value
logger.info(f" psi_mean = {self.psi_mean.tolist()}")
psi_prior = [
DirichletPrior.from_scale_mean(self.psi_scale[r], self.psi_mean[r])
for r in range(self.R)
]
logger.info(f" psi_prior = {psi_prior}")
# phi
phi_prior = None
if scores is not None:
phi_prior = [[None for r in range(self.R)] for c in range(self.C)]
logger.info(f" phi_prior = {phi_prior}")
return theta_prior, psi_prior, phi_prior
def _init(
self,
ratings: np.array,
labelers: np.array,
true_ratings: np.array,
scores: Optional[np.array] = None,
):
logger.info("Initializing ...")
N = len(ratings)
# process priors
theta_prior, psi_prior, phi_prior = self._get_priors(ratings, labelers, scores)
self.theta = DirichletMultinomial(prior=theta_prior)
self.psi: List[List[DirichletMultinomial]] = [
[DirichletMultinomial(psi_prior[r]) for r in range(self.R)]
for j in range(self.A)
]
if scores is not None:
self.phi: List[List[NormalInverseGammaNormal]] = [
[NormalInverseGammaNormal(phi_prior[c][r]) for r in range(self.R)]
for c in range(self.C)
]
# initialize assignments
self.zs = np.empty(N, dtype=int)
for n in range(N):
if true_ratings[n] == -1:
(values, counts) = np.unique(ratings[n], return_counts=True)
z = values[np.argmax(counts)]
else:
z = true_ratings[n]
self._increment(
z=z,
item_ratings=ratings[n],
item_labelers=labelers[n],
item_scores=None if scores is None else scores[n].tolist(),
)
self.zs[n] = z
# update Gaussians
if self.C > 0:
self._update_gaussians()
self._log_status()
def _log_status(self):
logger.info(f" llh = {self._get_log_likelihood()}")
logger.info(f" theta = {self.theta}")
for l in range(self.A):
for r in range(self.R):
logger.info(f" psi[{l}][{r}] = {self.psi[l][r]}")
if self.C != 0:
for c in range(self.C):
for r in range(self.R):
logger.info(f" phi[{c}][{r}] = {self.phi[c][r]}")
def _iterate(
self,
ratings: np.array,
labelers: np.array,
true_ratings: np.array,
scores: Optional[np.array] = None,
):
logger.info("Sampling ...")
N = len(ratings)
max_iters = self.burn_in + self.num_samples * self.sample_lag
num_logs = 10
        log_step = max(1, max_iters // num_logs)  # guard against modulo-by-zero when max_iters < num_logs
indices = np.array(range(N))
for iter in range(max_iters):
n_changes = 0
np.random.shuffle(indices)
for n in indices:
self._decrement(
z=self.zs[n],
item_ratings=ratings[n],
item_labelers=labelers[n],
item_scores=None if scores is None else scores[n].tolist(),
)
z = self._sample(
item_ratings=ratings[n],
item_labelers=labelers[n],
item_scores=None if scores is None else scores[n].tolist(),
)
if self.zs[n] != z:
n_changes += 1
self.zs[n] = z
self._increment(
z=self.zs[n],
item_ratings=ratings[n],
item_labelers=labelers[n],
item_scores=None if scores is None else scores[n].tolist(),
)
if self.C > 0:
self._update_gaussians()
# collect samples
is_stored = iter >= self.burn_in and iter % self.sample_lag == 0
if is_stored:
self.theta.add_posterior_estimate()
for l_psi in self.psi:
for r_psi in l_psi:
r_psi.add_posterior_estimate()
if iter % log_step == 0:
logger.info(f" Iter {iter} / {max_iters}")
logger.info(f" n_changes = {n_changes} / {N}")
self._log_status()
logger.info(f"Done sampling!")
def fit(
self,
R: int, # num. unique ratings
A: int, # num. unique labelers
ratings: np.array,
labelers: Optional[np.array] = None,
scores: Optional[np.array] = None, # (N x C x R)-shaped array
true_ratings: Optional[np.array] = None,
):
logger.info("Fitting ...")
N = len(ratings)
self.R = R
self.A = A
self.C = 0 if scores is None else scores.shape[1]
logger.info(f" N = {N}")
logger.info(f" R = {self.R}")
logger.info(f" A = {self.A}")
logger.info(f" C = {self.C}")
# standardize observed variables
if labelers is None: # use a single confusion matrix if None
labelers = np.array(
[np.zeros(len(ratings[n]), dtype=int) for n in range(N)]
)
if true_ratings is None:
true_ratings = np.repeat(-1, N)
# initialize latent variables
self._init(ratings, labelers, true_ratings, scores)
# sample
self._iterate(ratings, labelers, true_ratings, scores)
def predict(self, **kwargs):
pass
def get_prevalence(self):
mean_est, ci_est = self.theta.summarize_posterior_estimate()
return {"mean": mean_est, "ci": ci_est}
def get_confusion_matrix(self, labeler_id: int):
labeler_psi = self.psi[labeler_id]
estimates = []
for r in range(self.R):
mean_est, ci_est = labeler_psi[r].summarize_posterior_estimate()
estimates.append({"mean": mean_est, "ci": ci_est})
return estimates
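# Hedged usage sketch (added for illustration; the ratings are invented and
# `base.BaseModel` / `stats` must be importable exactly as in this repo):
#
#   import numpy as np
#   model = ClaraGibbs(burn_in=100, num_samples=100)
#   ratings = np.array([[0, 0, 1], [1, 1, 1], [0, 0, 0]])  # 3 items, 3 ratings each
#   model.fit(R=2, A=1, ratings=ratings)
#   print(model.get_prevalence())           # posterior mean and CI of class prevalence
#   print(model.get_confusion_matrix(0))    # per-class confusion rows for labeler 0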
| clara-main | gibbs.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import math
from collections import defaultdict
from typing import Dict, Generic, List, Optional, TypeVar
import numpy as np
from scipy.special import loggamma
from scipy.stats import norm
logger = logging.getLogger(__name__)
T = TypeVar("T")
class Counter(Generic[T]):
def __init__(self) -> None:
self.counts: Dict[T, int] = defaultdict(int)
self.count_sum: int = 0
def __repr__(self):
return f"counts: {dict(self.counts)}. count_sum = {self.count_sum}."
def increment(self, observation: T) -> None:
self.counts[observation] += 1
self.count_sum += 1
def decrement(self, observation: T) -> None:
if observation not in self.counts or self.counts[observation] < 1:
raise RuntimeError(
f"Trying to decrement {observation}, but was never observed"
)
self.counts[observation] -= 1
self.count_sum -= 1
def get_count(self, observation: T) -> int:
return self.counts[observation]
def get_count_sum(self) -> int:
return self.count_sum
class DirichletPrior:
def __init__(self, dimension: int, scale: float, mean_vals: List[float]) -> None:
self.dimension = dimension
self.scale = scale
self.mean_vals = mean_vals
self.vals = [self.scale * mean_val for mean_val in self.mean_vals]
self._validate()
def __repr__(self):
return (
f"dimension = {self.dimension}. "
f"scale = {self.scale}. "
f"mean = {self.mean_vals}."
)
@classmethod
def from_dim_scale(cls, dimension: int, scale: float) -> "DirichletPrior":
prior = cls(dimension, scale, mean_vals=[(1.0 / dimension)] * dimension)
return prior
@classmethod
def from_scale_mean(cls, scale: float, mean_vals: List[float]) -> "DirichletPrior":
prior = cls(len(mean_vals), scale, mean_vals=mean_vals)
return prior
def _validate(self):
if abs(sum(self.mean_vals) - 1.0) > 1e-6:
raise RuntimeError(f"Invalid DirichletPrior {self.mean_vals}")
class DirichletMultinomial:
def __init__(self, prior: DirichletPrior, data: Counter = None) -> None:
self.prior: DirichletPrior = prior
self.data: Counter = data if data is not None else Counter()
self.posteriors: List[List[float]] = []
def __repr__(self):
return (
f"prior: {self.prior}. data: {self.data}. "
f"posterior: {self.get_posterior_dist()}"
)
@classmethod
def from_prior(cls, prior) -> "DirichletMultinomial":
return DirichletMultinomial(prior)
@classmethod
def from_dim_alpha(cls, dim, alpha) -> "DirichletMultinomial":
prior = DirichletPrior.from_dim_scale(dim, alpha)
return DirichletMultinomial(prior)
@classmethod
def from_scale_mean(cls, scale, mean) -> "DirichletMultinomial":
prior = DirichletPrior.from_scale_mean(scale, mean)
return DirichletMultinomial(prior)
def add_posterior_estimate(self) -> None:
self.posteriors.append(self.get_posterior_dist())
def summarize_posterior_estimate(self, lb: float = 2.5, ub: float = 97.5):
mean_est = np.mean(self.posteriors, axis=0)
ci_est = np.percentile(self.posteriors, [lb, ub], axis=0)
return mean_est.tolist(), ci_est.tolist()
def increment(self, observation: int) -> None:
self.data.increment(observation)
def decrement(self, observation: int) -> None:
self.data.decrement(observation)
def get_posterior_count(self, observation: int) -> float:
return self.prior.vals[observation] + self.data.get_count(observation)
def get_posterior_parameter(self) -> List[float]:
return [
self.get_posterior_count(observation)
for observation in range(self.prior.dimension)
]
def get_posterior_count_sum(self) -> float:
return self.prior.scale + self.data.get_count_sum()
def get_posterior_prob(self, observation: int) -> float:
return self.get_posterior_count(observation) / self.get_posterior_count_sum()
def get_posterior_dist(self) -> List[float]:
return [
self.get_posterior_prob(observation)
for observation in range(self.prior.dimension)
]
def sample_from_posterior(self) -> List[float]:
return np.random.dirichlet(self.get_posterior_parameter()).tolist()
def get_log_likelihood(self) -> float:
llh = loggamma(self.prior.scale)
for i_dim in range(self.prior.dimension):
prior_val = self.prior.vals[i_dim]
llh -= loggamma(prior_val)
llh += loggamma(prior_val + self.data.get_count(i_dim))
llh -= loggamma(self.prior.scale + self.data.get_count_sum())
return llh
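# Minimal sketch of the posterior bookkeeping above: with a symmetric prior of
# total scale 2 over two outcomes, observing outcome 0 three times gives
# posterior probabilities (1 + 3) / (2 + 3) and (1 + 0) / (2 + 3).
#
#   dm = DirichletMultinomial.from_dim_alpha(dim=2, alpha=2.0)
#   for _ in range(3):
#       dm.increment(0)
#   dm.get_posterior_dist()  # -> [0.8, 0.2]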
class NormalInverseGammaPrior:
__slots__ = ["mu", "sigma", "alpha", "beta"]
def __init__(self, mu: float, sigma: float, alpha: float, beta: float) -> None:
self.mu = mu
self.sigma = sigma
self.alpha = alpha
self.beta = beta
def __repr__(self):
return (
f"mu = {self.mu}. sigma = {self.sigma}. "
f"alpha = {self.alpha}. beta = {self.beta}."
)
@classmethod
def from_hyperparameter(
cls, mu: float, sigma: float, alpha: float, beta: float
) -> "NormalInverseGammaPrior":
prior = cls(mu, sigma, alpha, beta)
return prior
class Normal:
def __init__(self) -> None:
self.observations: List[float] = []
self.count = 0
self.sum = 0.0
self.sum_squared = 0.0
def __repr__(self):
return (
f"count = {self.count}. sum = {self.sum}. "
f"sum_squared = {self.sum_squared}"
)
def add_observation(self, observation: float) -> None:
self.observations.append(observation)
self.count += 1
self.sum += observation
self.sum_squared += observation * observation
def remove_observation(self, observation: float) -> None:
self.observations.remove(observation)
self.count -= 1
self.sum -= observation
self.sum_squared -= observation * observation
def get_count(self) -> int:
return self.count
def get_sum(self) -> float:
return self.sum
def get_sum_squared(self) -> float:
return self.sum_squared
class NormalInverseGammaNormal:
def __init__(
self,
prior: NormalInverseGammaPrior = None, # MLE if prior is None
data: Normal = None,
) -> None:
self.prior: NormalInverseGammaPrior = prior
self.data: Normal = data if data is not None else Normal()
self.mean: float = 0.0
self.variance: float = 0.0
if data is not None:
self.estimate_parameters()
def __repr__(self):
return (
f"prior: {self.prior}. data: {self.data}. "
f"mean: {self.mean}. variance: {self.variance}"
)
@classmethod
def from_prior_hyperparameters(
cls, mu, sigma, alpha, beta
) -> "NormalInverseGammaNormal":
prior = NormalInverseGammaPrior.from_hyperparameter(mu, sigma, alpha, beta)
return NormalInverseGammaNormal(prior)
def add_observation(self, observation: float, estimate: bool = True) -> None:
self.data.add_observation(observation)
if estimate:
self.estimate_parameters()
def remove_observation(self, observation: float, estimate: bool = True) -> None:
self.data.remove_observation(observation)
if estimate:
self.estimate_parameters()
def estimate_parameters(self) -> None:
        # MLE: sample mean and unbiased sample variance (requires at least two observations)
self.mean = self.data.sum / self.data.count
self.variance = (
self.data.sum_squared - self.data.count * self.mean * self.mean
) / (self.data.count - 1)
def get_posterior_log_prob(self, observation: float) -> float:
return norm.logpdf(observation, self.mean, math.sqrt(self.variance))
def get_posterior_prob(self, observation: float) -> float:
return norm.pdf(observation, self.mean, math.sqrt(self.variance))
def get_log_likelihood(self) -> float:
return sum(self.get_posterior_log_prob(x) for x in self.data.observations)
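# Sketch of the MLE fit above: parameters are re-estimated from the running
# sums, so at least two observations are needed before the variance is defined.
#
#   nign = NormalInverseGammaNormal()
#   for x in (1.0, 2.0, 3.0):
#       nign.add_observation(x, estimate=False)
#   nign.estimate_parameters()
#   nign.mean      # -> 2.0
#   nign.variance  # -> ((1 + 4 + 9) - 3 * 2 ** 2) / (3 - 1) = 1.0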
| clara-main | stats.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
import pandas as pd
logger = logging.getLogger(__name__)
def generate_score(
true_ratings: np.array, score_means: np.array, score_stdvs: np.array
):
num_items = len(true_ratings)
num_ones = np.sum(true_ratings)
num_zeros = num_items - num_ones
logger.info(f"num_items = {num_items}")
logger.info(f"num_ones = {num_ones}")
logger.info(f"num_zeros = {num_zeros}")
logger.info(f"score_means = {score_means}")
logger.info(f"score_stdvs = {score_stdvs}")
score_zeros = np.random.normal(score_means[0], score_stdvs[0], num_zeros)
score_ones = np.random.normal(score_means[1], score_stdvs[1], num_ones)
df = pd.DataFrame({"true_rating": true_ratings.tolist()})
df["score"] = 0
df["score"] = df["score"].astype(float)
df.loc[df.true_rating == 0, "score"] = score_zeros
df.loc[df.true_rating == 1, "score"] = score_ones
scores = np.empty((num_items, 2), dtype=float)
scores[:, 1] = np.exp(df["score"]) / (1 + np.exp(df["score"]))
scores[:, 0] = 1.0 - scores[:, 1]
return scores
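# Sketch of the scheme above: items with true rating 1 get raw scores drawn
# around score_means[1], items with true rating 0 around score_means[0]; the
# raw score is squashed through a sigmoid to give P(class 1), and column 0
# holds the complementary probability.
#
#   scores = generate_score(
#       true_ratings=np.array([0, 1, 1]),
#       score_means=np.array([-4, 4]),
#       score_stdvs=np.array([1, 1]),
#   )
#   scores.shape  # -> (3, 2); each row sums to 1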
def generate_dataset_tiebreaking(
dataset_id: int, theta: np.array, psi: np.array, num_items: int
) -> pd.DataFrame:
"""
Function to generate one dataset for a particular dataset_id which has
- 2 labels for each item where these labels agree,
and 3 labels if there is a disagreement
- one single confusion matrix
Args:
dataset_id: a numeric id for this dataset
theta: the true prevalence
psi: the true confusion matrix shared by all labelers
num_items: number of items in the dataset
"""
# Set dataset_ids and item ids to be {dataset_id}_{item_num}
ids = ["{}_{}".format(str(dataset_id), str(x)) for x in range(num_items)]
dids = [str(dataset_id) for i in range(num_items)]
# Randomly choose item labels and set labelers to be the same for each dataset_id
ys = np.random.choice(len(theta), size=num_items, p=theta)
# For each item generate random labels based on the psi confusion matrix
ratings = []
labelers = []
for i in range(num_items):
item_rating = np.random.choice(len(theta), size=3, p=psi[ys[i]])
if item_rating[0] == item_rating[1]:
ratings.append([item_rating[0], item_rating[1]])
else:
ratings.append(list(item_rating))
labelers.append([dataset_id] * len(ratings[i]))
data = [dids, ids, labelers, ratings, ys.tolist()]
columns = ["dataset", "id", "labelers", "ratings", "true_rating"]
df = pd.DataFrame(data=data).transpose()
df.columns = columns
return df
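# Hedged usage sketch: 100 items with 90% prevalence of label 0 and one shared
# confusion matrix; each item gets 2 labels, or 3 when the first two disagree.
#
#   theta = np.array([0.9, 0.1])
#   psi = np.array([[0.8, 0.2], [0.3, 0.7]])
#   df = generate_dataset_tiebreaking(dataset_id=1, theta=theta, psi=psi, num_items=100)
#   list(df.columns)  # -> ["dataset", "id", "labelers", "ratings", "true_rating"]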
def generate_dataset_tiebreaking_with_scores(
dataset_id: int, theta: np.array, psi: np.array, num_items: int
) -> pd.DataFrame:
df = generate_dataset_tiebreaking(dataset_id, theta, psi, num_items)
C = 2 # number of classifiers
R = 2 # binary labels
scores = np.empty((num_items, C, R), dtype=float)
    # generate classifier scores
scores[:, 0, :] = generate_score(
true_ratings=df.true_rating,
score_means=np.array([-4, 4]),
score_stdvs=np.array([1, 1]),
)
scores[:, 1, :] = generate_score(
true_ratings=df.true_rating,
score_means=np.array([-1, 1]),
score_stdvs=np.array([1, 1]),
)
logger.info(f"scores.shape = {scores.shape}")
df["scores"] = scores.tolist()
return df
def generate_labeler_confusion_matrix(num_labelers: int, psi_mean: np.array,
psi_std: np.array):
    '''
    Function to generate a separate confusion matrix for each labeler
    Args:
        num_labelers: number of labelers, i.e. the number of confusion matrices to generate
        psi_mean: mean value of the true positive rate and true negative rate
        psi_std: standard deviation of the true positive rate and true negative rate
    Return: a list of 2x2 confusion matrices with length equal to num_labelers
    '''
num_classes = 2
psi = np.zeros((num_labelers, num_classes, num_classes), dtype=float)
for i in range(num_labelers):
for j in range(0, num_classes):
# Generate true positive/negative probability between 0.5 and 1.0
psi[i][j][j] = min(
max(np.random.normal(psi_mean[j], psi_std[j]), 0.5), 1.0)
# Fill in false positive/negative value as needed
for k in range(num_classes):
if j != k:
psi[i][j][k] = 1.0 - psi[i][j][j]
return psi
def generate_dataset_tiebreaking_different_labeler_cm(
dataset_id: int, theta: np.array, psi: np.array, num_items: int
) -> pd.DataFrame:
"""
Function to generate one dataset for a particular dataset_id which has
- 2 labels for each item where these labels agree,
and 3 labels if there is a disagreement
    - a list of confusion matrices (one per labeler)
Args:
dataset_id: a numeric id for this dataset
theta: the true prevalence
        psi: the list of 2x2 confusion matrices, one per labeler
num_items: number of items in the dataset
"""
num_labelers = len(psi)
if num_labelers < 3:
raise Exception("Sorry, number of labelers need to be larger than 3!")
# set dataset_ids and item ids to be {dataset_id}_{item_num}
ids = ["{}_{}".format(str(dataset_id), str(x)) for x in range(num_items)]
dids = [str(dataset_id) for i in range(num_items)]
# randomly choose item labels and set labelers to be the same for each dataset_id
ys = np.random.choice(len(theta), size=num_items, p=theta)
item_ratings_all = []
labelers_all = []
for i in range(num_items):
# randomly select labelers for each item
labelers = np.random.choice(range(0, num_labelers), size=3, replace=False)
item_ratings = []
for j in range(3):
labeler = labelers[j]
rating = np.random.choice(len(theta), p=psi[labeler][ys[i]])
item_ratings.append(rating)
if item_ratings[0] == item_ratings[1]:
item_ratings_all.append([item_ratings[0], item_ratings[1]])
labelers_all.append([labelers[0], labelers[1]])
else:
item_ratings_all.append(item_ratings)
labelers_all.append(labelers)
data = [dids, ids, labelers_all, item_ratings_all, ys.tolist()]
columns = ["dataset", "id", "labelers", "ratings", "true_rating"]
df = pd.DataFrame(data=data).transpose()
df.columns = columns
return df
| clara-main | simulator.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from abc import ABC, abstractmethod
class BaseModel(ABC):
def __init__(self, name: str, **kwargs):
self.name = name
@abstractmethod
def fit(self, **kwargs):
pass
@abstractmethod
def predict(self, **kwargs):
pass
| clara-main | base.py |
from collections import defaultdict
from typing import Dict, List, NamedTuple
import numpy as np
import pandas as pd
def generate_common_cm(L: int, h: float, gamma: float) -> np.ndarray:
"""
    Generates the L x L common confusion matrix using the heterogeneity factor, h, and
    the lower bound on accuracy, gamma. The first L/2 labels map to the first decision, while the
    remaining L/2 labels map to the second decision.
    We generate the common error matrix M_error, and then mix it with the identity matrix
    using gamma.
    When h = 0, every row of M_error is [1, 1, ..., 1] normalized. When h = 1, every row of M_error
    is [1^1.5, 2^1.5, ..., (L/2)^1.5, 1^1.5, 2^1.5, ..., (L/2)^1.5] normalized. For any h in
    between, we simply take a convex combination of the two.
    For example: suppose that L = 4. Then:
    no_heterogeneity = [0.25, 0.25, 0.25, 0.25]
    max_heterogeneity ~= [0.13, 0.37, 0.13, 0.37]
"""
no_heterogeneity = np.ones(int(L))
no_heterogeneity = no_heterogeneity / sum(no_heterogeneity)
max_heterogeneity = np.array(
[x**1.5 for x in range(1, int(L / 2) + 1)]
+ [x**1.5 for x in range(1, int(L / 2) + 1)]
)
max_heterogeneity = max_heterogeneity / sum(max_heterogeneity)
M_error_row = no_heterogeneity * (1 - h) + max_heterogeneity * h
M_error = np.array([M_error_row for _ in range(L)])
return np.array(gamma * np.identity(L) + (1 - gamma) * M_error)
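# Worked example of the mixing above: with no heterogeneity (h = 0) and
# gamma = 0.8 on L = 4 labels, every row of M_error is uniform, so
#
#   M = generate_common_cm(L=4, h=0.0, gamma=0.8)
#   # diagonal entries:     0.8 + 0.2 * 0.25 = 0.85
#   # off-diagonal entries:       0.2 * 0.25 = 0.05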
def generate_random_matrix_with_lb(L: int, gamma: float) -> np.ndarray:
M = np.random.rand(L, L)
M = M / M.sum(axis=1)[:, np.newaxis]
return np.array(gamma * np.identity(L) + (1 - gamma) * M)
def generate_reviewer_cm(
a: float, # a = 0 is the first reviewer, a = 1 is the last.
L: int, # size of confusion matrix
gamma: float, # lower bound on diagonal entries
) -> np.ndarray:
"""
    For each of the A reviewers, the diagonal (accuracy) entries of their confusion matrix are
    drawn around mean_acc, and the remaining probability mass in each row is spread uniformly
    over the off-diagonal entries.
"""
mean_acc = gamma * (1 - a) + 1 * a
accuracy = [
np.random.uniform(mean_acc - 0.1, min(1, mean_acc + 0.1)) for _ in range(L)
]
matrix = []
for idx in range(L): # constructing each row of the matrix
row = [
(1 - accuracy[idx]) / (L - 1) for _ in range(L)
] # non-diagonal entries are uniform
        row[idx] = accuracy[idx]  # diagonal entry given by the sampled accuracy
matrix.append(row)
return np.array(matrix)
def generate_data(
L: int, # Number of labels
I: int, # Number of items
A: int, # Number of reviewers
M_final: List[np.ndarray], # LxL confusion matrix for each of the A reviewers.
label_to_decision: Dict[str, str], # map from labels to decision
num_reviews_per_content=3, # number of reviews to draw for each of the I items
):
data = defaultdict(list)
for _ in range(I):
y = np.random.choice(
range(L),
)
reviews = []
labels = []
labels_binary = []
for _ in range(num_reviews_per_content):
reviewer = np.random.choice(range(A))
rating = np.random.choice(range(L), p=M_final[reviewer][y])
reviews.append(str(reviewer))
labels.append(str(rating))
labels_binary.append(label_to_decision[str(rating)])
data["labelers"].append(reviews)
data["ratings"].append(labels)
data["ratings_binary"].append(labels_binary)
data["first_rating_binary"].append(labels_binary[0])
data["true_label"].append(y)
data["true_decision"].append(label_to_decision[str(y)])
data["first_decision_correct"].append(
labels_binary[0] == label_to_decision[str(y)]
)
return pd.DataFrame(data)
class SimulationDataInstance(NamedTuple):
df_train: pd.DataFrame
df_test: pd.DataFrame
decision_to_ratings_map: Dict[str, List[str]]
common_confusion: np.ndarray
reviewer_confusions: List[np.ndarray]
def generate_simulation_data(
I: int, # number of items
L: int, # number of labels, the first int(L/2) map to decision A and the remaining to decision B.
A: int, # number of annotators/reviewers
gamma: float, # lower bound on the any reviewer's accuracy
h: float, # heterogeneity factor, how different L/2 labels in each decision vary in terms of reliability
mixing_factor: float, # weight between individual CM and population CM
) -> SimulationDataInstance:
assert (
L % 2 == 0
), "L must be even so it can be partioned into two equal sets of decisions"
M_common = generate_common_cm(L, h, gamma)
M_final = []
for a in range(A):
M_personal = generate_reviewer_cm((float(a) + 1) / (A + 1), L, gamma)
M_final.append(M_personal * mixing_factor + M_common * (1 - mixing_factor))
labels_to_decisions = {str(i): "A" if i < L / 2 else "B" for i in range(L)}
decision_to_ratings_map = {
"A": [str(i) for i in range(L) if i < L / 2],
"B": [str(i) for i in range(L) if i >= L / 2],
}
train_data = generate_data(L, I, A, M_final, labels_to_decisions, 3)
test_data = generate_data(L, max(I, 20000), A, M_final, labels_to_decisions, 3)
return SimulationDataInstance(
df_train=train_data,
df_test=test_data,
decision_to_ratings_map=decision_to_ratings_map,
common_confusion=M_common,
reviewer_confusions=M_final,
)
| clara-main | mapping-aware-model/simulator.py |
stan_code="""
data {
int<lower=1> A; // number of annotators
int<lower=2> K; // number of categories
int<lower=1> N; // number of annotations
int<lower=1> I; // number of items
int<lower=1> L; // total number of flat labels (L in the overleaf)
int<lower=1> D[K]; // number of ratings for each decision (D_k in the overleaf)
int<lower=1> D_max; // max number of ratings for any decision (not in overleaf, implementation only)
// the label decision of the l-th flat label
int<lower=1, upper=K> c[L];
// the index within the label decision of the l-th flat label
int<lower=1, upper=D_max> ell[L];
// the item the n-th annotation belongs to
int<lower=1, upper=I> ii[N];
// the annotator which produced the n-th annotation
int<lower=1, upper=A> aa[N];
// the flat index of the label of the n-th annotation
int<lower=1, upper=L> x[N];
vector<lower=0>[K] alpha; // class prevalence prior
// weight for each item
vector<lower=0>[I] weights;
// lower bound on the diagonal of the decision confusion matrix
real<lower=0.5, upper=1.0> gamma;
}
parameters {
simplex[K] theta; // prevalence in the categories
vector<lower=0, upper=1>[K] psi_diag_unconstrained[A];
simplex[K-1] psi_cond_error[A, K];
// shared parameters across all reviewers
simplex[D_max] eta[K];
vector<lower=1, upper=gamma/(1-gamma)>[D_max] rho[K, K];
}
transformed parameters {
// shared parameters
vector[D_max] log_pi[K, K];
vector<lower=0>[D_max] pi_unnormalized[K, K];
// per reviewer decision confusion matrix (K by K)
vector[K] log_psi_diag[A];
vector[K] log1m_psi_diag[A];
vector[K-1] log_psi_cond_error[A, K];
vector[K] log_psi[A, K];
// rectangular confusion matrix (K by L)
vector[L] log_psi_rectangular[A, K];
vector[K] log_item_probs[I];
// constructing log_psi (K x K) - copied from CLARAStanConstrainedConfusion
for (a in 1:A) {
for (i in 1:K) {
log_psi_diag[a, i] = log(gamma + (1 - gamma) * psi_diag_unconstrained[a, i]);
}
}
log1m_psi_diag = log1m_exp(log_psi_diag);
log_psi_cond_error = log(psi_cond_error);
for (a in 1:A) {
for (i in 1:K) {
log_psi[a, i, i] = log_psi_diag[a, i];
for (j in 1:(i-1)) {
log_psi[a, i, j] = log1m_psi_diag[a, i] + log_psi_cond_error[a, i, j];
}
for (j in (i+1):K) {
log_psi[a, i, j] = log1m_psi_diag[a, i] + log_psi_cond_error[a, i, j-1];
}
}
}
// construct beta from eta and rho
for (k_true in 1:K) {
for (k_predicted in 1:K) {
for (l in 1:D[k_predicted]) {
if (k_true == k_predicted) {
pi_unnormalized[k_true, k_predicted, l] = eta[k_predicted, l];
} else {
pi_unnormalized[k_true, k_predicted, l] = eta[k_predicted, l] * rho[k_true, k_predicted, l];
}
}
for (l in (D[k_predicted] + 1):D_max) {
pi_unnormalized[k_true, k_predicted, l] = 0.000001;
}
log_pi[k_true, k_predicted] = log(pi_unnormalized[k_true, k_predicted] / sum(pi_unnormalized[k_true, k_predicted]) );
}
}
// constructing log_psi_rectangular (K x L)
for (a in 1:A) {
for (k in 1:K) {
for (l in 1:L) {
log_psi_rectangular[a, k, l] = log_psi[a, k, c[l]] + log_pi[k, c[l], ell[l]];
}
}
}
for (i in 1:I) {
for (k in 1:K) {
log_item_probs[i, k] = log(theta[k]);
}
}
for (n in 1:N) {
for (k in 1:K) {
log_item_probs[ii[n], k] = log_item_probs[ii[n], k] + log_psi_rectangular[aa[n], k, x[n]];
}
}
}
model {
theta ~ dirichlet(alpha);
for (a in 1:A) {
for (k in 1:K) {
psi_diag_unconstrained[a, k] ~ beta(5, 5);
psi_cond_error[a, k] ~ dirichlet(rep_vector(1, K-1));
}
}
for (k_true in 1:K) {
eta[k_true] ~ dirichlet(append_row(rep_vector(10, D[k_true]), rep_vector(0.001, D_max-D[k_true])));
for (k_predicted in 1:K) {
for (l in 1:D_max) {
rho[k_true, k_predicted, l] ~ uniform(1, gamma/(1-gamma));
}
}
}
for (i in 1:I) {
target += weights[i] * log_sum_exp(log_item_probs[i]);
}
}
generated quantities {
vector[K] item_probs[I]; // the true class distribution of each item
for(i in 1:I)
item_probs[i] = softmax(log_item_probs[i]);
}
"""
| clara-main | mapping-aware-model/mapping_aware_model.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
import os
from utils import file_tqdm
logging.basicConfig(level=logging.INFO)
def convert(ast):
increase_by = {} # count of how many idx to increase the new idx by:
# each time there is a value node
cur = 0
for i, node in enumerate(ast):
increase_by[i] = cur
if "value" in node:
cur += 1
new_dp = []
for i, node in enumerate(ast):
inc = increase_by[i]
if "value" in node:
child = [i + inc + 1]
if "children" in node:
child += [n + increase_by[n] for n in node["children"]]
new_dp.append({"type": node["type"], "children": child})
new_dp.append({"value": node["value"]})
else:
if "children" in node:
node["children"] = [n + increase_by[n] for n in node["children"]]
new_dp.append(node)
# sanity check
children = []
for node in new_dp:
if "children" in node:
children += node["children"]
assert len(children) == len(set(children))
return new_dp
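# Worked example of the conversion above: a node that carries both a type and a
# value is split into a type node whose first child is a new value-only node,
# and every child index is shifted to account for the inserted nodes.
#
#   convert([{"type": "Module", "children": [1]},
#            {"type": "NameLoad", "value": "x"}])
#   # -> [{"type": "Module", "children": [1]},
#   #     {"type": "NameLoad", "children": [2]},
#   #     {"value": "x"}]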
def main():
parser = argparse.ArgumentParser(description="Generate datapoints from AST")
parser.add_argument("--input_fp", "-i", help="Filepath with the ASTs to be parsed")
parser.add_argument(
"--out_fp",
"-o",
default="/tmp/new_trees.json",
help="Filepath with the output dps",
)
args = parser.parse_args()
if os.path.exists(args.out_fp):
os.remove(args.out_fp)
logging.info("Loading asts from: {}".format(args.input_fp))
with open(args.input_fp, "r") as f, open(args.out_fp, "w") as fout:
for line in file_tqdm(f):
dp = json.loads(line.strip())
print(json.dumps(convert(dp)), file=fout)
logging.info("Wrote dps to: {}".format(args.out_fp))
if __name__ == "__main__":
main()
| code-prediction-transformer-main | generate_new_trees.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
import pickle
from collections import Counter
from utils import file_tqdm, get_dfs
logging.basicConfig(level=logging.INFO)
UNK = "<unk_token>"
PAD = "<pad_token>"
def get_value(line, input_type):
if input_type == "ast":
return get_dfs(line)
elif input_type == "leaf":
return get_dfs(line, only_leaf=True)
elif input_type == "source_code":
return line[0]
def main():
parser = argparse.ArgumentParser(description="Create vocab for py150 dataset")
parser.add_argument("--n_vocab", "-n", type=int, default=100000)
parser.add_argument("--input_fp", "-i")
parser.add_argument("--out_fp", "-o", default="/tmp/vocab.pkl")
parser.add_argument(
"--input_type",
"-t",
choices=["ast", "leaf", "source_code"],
help="Where to get the input from (all AST nodes, leaf nodes, or source code",
)
args = parser.parse_args()
logging.info("Reading from: {}".format(args.input_fp))
logging.info("Input type: {}".format(args.input_type))
vocab = Counter()
with open(args.input_fp, "r") as f:
for line in file_tqdm(f):
vocab.update(get_value(json.loads(line.strip()), args.input_type))
vocab_to_keep = [i[0] for i in vocab.most_common(args.n_vocab)]
top_total = sum(i[1] for i in vocab.most_common(args.n_vocab))
total = sum(vocab.values())
logging.info("Total # of vocab: {}".format(len(vocab)))
logging.info(
"Using {} top vocab covers: {:.2f}% of the entire dataset".format(
args.n_vocab, 100 * top_total / total
)
)
logging.info("Top 10 most common vocab:")
for v, i in vocab.most_common(10):
print(v, i)
# add unk and pad tokens
vocab_to_keep.append(UNK)
vocab_to_keep.append(PAD)
logging.info("Added {} and {}".format(UNK, PAD))
# dump vocab to file
with open(args.out_fp, "wb") as fout:
pickle.dump(vocab_to_keep, fout)
logging.info("Wrote {} vocab to: {}".format(len(vocab_to_keep), args.out_fp))
if __name__ == "__main__":
main()
| code-prediction-transformer-main | generate_vocab.py |
#!/usr/bin/env python3
# Copyright (c) 2019 OpenAI, HuggingFace Inc. team. and TaeHwan Jung
# Copyright (c) Facebook, Inc. and its affiliates.
# ----------------------------------------------------------------------------
# MIT LICENSE
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ----------------------------------------------------------------------------
"""
Transformer model is adapted from: https://github.com/graykode/gpt-2-Pytorch
(Commit: 46ae886391a94c6683be438269252c4afd5ba762)
Original Paper and repository here: https://github.com/openai/gpt-2
RNN implementation is adapted from: https://github.com/pytorch/examples/tree/master/word_language_model
"""
import copy
import math
import torch
import torch.nn as nn
def gelu(x):
return (
0.5
* x
* (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
)
class PathLSTM(nn.Module):
def __init__(self, vocab_size, n_embd):
super(PathLSTM, self).__init__()
self.embedding = nn.Embedding(vocab_size, n_embd)
self.LSTM = nn.LSTM(n_embd, n_embd, batch_first=True)
def forward(self, paths):
embed = self.embedding(paths) # bs, max_len, max_path_len, n_embd
batch_size, bag_size, path_len, n_embd = embed.shape
_, (h_n, _) = self.LSTM(embed.view(batch_size * bag_size, path_len, n_embd))
return h_n.permute((1, 0, 2)).view((batch_size, bag_size, -1))
class LayerNorm(nn.Module):
def __init__(self, hidden_size, std_eps=1e-6):
"""Construct a layernorm module in the TF style.
"""
super(LayerNorm, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.std_eps = std_eps
def forward(self, x):
u = x.mean(-1, keepdim=True)
s = (x - u).std(-1, keepdim=True)
x = (x - u) / (s + self.std_eps)
return self.weight * x + self.bias
class Attention(nn.Module):
def __init__(
self, nx, n_ctx, n_head, scale=False
):
super(Attention, self).__init__()
n_state = nx
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % n_head == 0
self.register_buffer(
"bias", torch.tril(torch.ones(n_ctx, n_ctx)).view(1, 1, n_ctx, n_ctx)
)
self.n_head = n_head
self.split_size = n_state
self.scale = scale
self.c_attn = nn.Linear(nx, n_state * 3)
self.c_proj = nn.Linear(nx, n_state)
def _attn(self, q, k, v):
w = torch.matmul(q, k)
if self.scale:
w = w / math.sqrt(v.size(-1))
nd, ns = w.size(-2), w.size(-1)
b = self.bias[:, :, ns - nd : ns, :ns]
w = w * b - 1e10 * (1 - b)
w = nn.Softmax(dim=-1)(w)
return torch.matmul(w, v)
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
else:
return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def forward(self, x):
x = self.c_attn(x)
query, key, value = x.split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
# self attention component
a = self._attn(query, key, value)
a = self.merge_heads(a)
a = self.c_proj(a)
return a
class MLP(nn.Module):
def __init__(self, n_state, n_embd):
super(MLP, self).__init__()
self.c_fc = nn.Linear(n_embd, n_state)
self.c_proj = nn.Linear(n_state, n_embd)
self.act = gelu
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return h2
class Block(nn.Module):
def __init__(
self,
n_ctx,
n_head,
n_embd,
layer_norm_epsilon,
scale=False,
):
super(Block, self).__init__()
self.ln_1 = LayerNorm(n_embd, std_eps=layer_norm_epsilon)
self.attn = Attention(
n_embd, n_ctx, n_head, scale
)
self.ln_2 = LayerNorm(n_embd, std_eps=layer_norm_epsilon)
self.mlp = MLP(4 * n_embd, n_embd)
def forward(self, x):
a = self.attn(self.ln_1(x))
x = x + a
m = self.mlp(self.ln_2(x))
x = x + m
return x
class GPT2Model(nn.Module):
def __init__(
self,
vocab_size,
n_layer,
n_embd,
n_ctx,
n_head,
layer_norm_epsilon,
root_paths,
):
super(GPT2Model, self).__init__()
self.n_layer = n_layer
self.n_embd = n_embd
self.n_vocab = vocab_size
self.wte = nn.Embedding(vocab_size, n_embd)
if root_paths:
self.path_lstm = PathLSTM(vocab_size, n_embd)
block = Block(
n_ctx,
n_head,
n_embd,
layer_norm_epsilon,
scale=True,
)
self.h = nn.ModuleList([copy.deepcopy(block) for _ in range(n_layer)])
self.ln_f = LayerNorm(n_embd, std_eps=layer_norm_epsilon)
def forward(self, input_ids, paths=None):
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_ids.size(-1))
inputs_embeds = self.wte(input_ids)
path_embeds = self.path_lstm(paths) if paths is not None else 0
hidden_states = inputs_embeds + path_embeds
for block in self.h:
hidden_states = block(hidden_states)
hidden_states = self.ln_f(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
return hidden_states.view(*output_shape)
class GPT2LMHead(nn.Module):
def __init__(self, model_embeddings_weights, n_embd):
super(GPT2LMHead, self).__init__()
self.n_embd = n_embd
self.set_embeddings_weights(model_embeddings_weights)
def set_embeddings_weights(self, model_embeddings_weights):
embed_shape = model_embeddings_weights.shape
self.decoder = nn.Linear(embed_shape[1], embed_shape[0], bias=False)
self.decoder.weight = model_embeddings_weights # Tied weights
def forward(self, hidden_state):
lm_logits = self.decoder(hidden_state)
return lm_logits
class TransformerModel(nn.Module):
def __init__(
self,
vocab_size,
loss_fn,
n_layer,
n_embd,
n_ctx,
n_head,
layer_norm_epsilon,
root_paths=False,
):
super(TransformerModel, self).__init__()
self.transformer = GPT2Model(
vocab_size,
n_layer,
n_embd,
n_ctx,
n_head,
layer_norm_epsilon,
root_paths,
)
self.lm_head = GPT2LMHead(self.transformer.wte.weight, n_embd)
self.loss_fn = loss_fn
def reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(
self, x, y, ext=None, paths=None, return_loss=False
):
hidden_states = self.transformer(x, paths)
y_pred = self.lm_head(hidden_states)
if not return_loss:
return y_pred
# ext contains a list of idx of where to take the loss from
# we linearize it first
ids = []
max_len = y.size(-1)
for i, ext_i in enumerate(ext):
ids += [i * max_len + j for j in range(ext_i, max_len)]
loss = self.loss_fn(y_pred.view(-1, y_pred.size(-1))[ids], y.view(-1)[ids])
return loss
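# Hedged usage sketch of the loss masking above: with the sliding-window data
# produced by separate_dps, ext[i] is the first index of sequence i that has not
# been scored in a previous chunk, so only positions ext[i]..max_len-1 of that
# sequence contribute to the loss. Shapes and hyperparameters below are
# illustrative only.
#
#   model = TransformerModel(
#       vocab_size=100, loss_fn=nn.CrossEntropyLoss(), n_layer=2, n_embd=64,
#       n_ctx=128, n_head=4, layer_norm_epsilon=1e-6,
#   )
#   x = torch.randint(0, 100, (2, 128))
#   loss = model(x, x, ext=[0, 64], return_loss=True)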
# base RNN model
class LSTMModel(torch.nn.Module):
def __init__(self, vocab_size, n_embd, loss_fn, n_ctx):
super(LSTMModel, self).__init__()
self.embedding = nn.Embedding(vocab_size, n_embd)
self.lstm = nn.LSTM(n_embd, n_embd, num_layers=1, dropout=0.5, batch_first=True)
self.decoder = nn.Linear(n_embd, vocab_size)
self.loss_fn = loss_fn
self.half_ctx = int(n_ctx / 2)
def reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(
self, x, y, ext=None, paths=None, return_loss=False
):
embed = self.embedding(x) # bs, max_len, n_embd
self.lstm.flatten_parameters()
lstm_out, _ = self.lstm(embed) # bs, max_len, n_embd
y_pred = self.decoder(lstm_out) # bs, max_len, vocab_size
if not return_loss:
return y_pred
# ext contains a list of idx of where to take the loss from
# we linearize it first
ids = []
max_len = y.size(-1)
for i, ext_i in enumerate(ext):
ids += [i * max_len + j for j in range(ext_i, max_len)]
loss = self.loss_fn(y_pred.view(-1, y_pred.size(-1))[ids], y.view(-1)[ids])
return loss
| code-prediction-transformer-main | model.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import os
import pickle
import torch
import utils
logging.basicConfig(level=logging.INFO)
UNK = "<unk_token>"
PAD = "<pad_token>"
class BaseSetup(object):
def __init__(
self, base_dir, fp, ids_fp, max_vocab=100000, mode="train"
):
super().__init__()
if mode not in {"train", "test"}:
raise Exception("Mode must be either train or test")
self.mode = mode
self.fp = fp
self.max_vocab = max_vocab
# get all the relevant filepaths
self.filepaths = {
"vocab": os.path.join(base_dir, "vocab.pkl"),
"metrics": os.path.join(base_dir, "{}_metrics.txt".format(mode)),
"conv": os.path.join(base_dir, "{}_converted.txt".format(mode)),
}
self._add_extra_filepaths(base_dir)
logging.info("Writing metrics to: {}".format(self.filepaths["metrics"]))
# filter dataset
filtered_fp = self._filter_dataset()
# set up vocab
self.vocab = self._create_vocab()
# convert
if not os.path.exists(self.filepaths["conv"]):
with open(filtered_fp, "r") as fin, open(
self.filepaths["conv"], "w"
) as fout:
for line in utils.file_tqdm(fin):
line = json.loads(line.strip())
print(json.dumps(self.vocab.convert(line)), file=fout)
logging.info(
"Converted dataset to idx and saved to: {}".format(
self.filepaths["conv"]
)
)
# return dataset
self.dataset = self._create_dataset(self.filepaths["conv"], ids_fp)
logging.info("Loaded dataset from {}".format(self.filepaths["conv"]))
def return_data(self):
return self.vocab, self.dataset, self.filepaths["metrics"]
def _add_extra_filepaths(self, base_dir):
return
def _filter_dataset(self):
return self.fp
def _create_vocab(self):
raise NotImplementedError("method must be implemented by a subclass.")
def _create_dataset(self, fp, ids_fp):
raise NotImplementedError("method must be implemented by a subclass.")
class BaseVocab(object):
def __init__(self, vocab_fp):
super().__init__()
self.unk_token = UNK
self.pad_token = PAD
self.pad_idx = None
self.unk_idx = None
if not os.path.exists(vocab_fp):
raise Exception("Get the vocab from generate_vocab.py")
with open(vocab_fp, "rb") as fin:
self.idx2vocab = pickle.load(fin)
logging.info("Loaded vocab from: {}".format(vocab_fp))
self.vocab2idx = {token: i for i, token in enumerate(self.idx2vocab)}
self.unk_idx = self.vocab2idx[self.unk_token]
self.pad_idx = self.vocab2idx[self.pad_token]
logging.info("Vocab size: {}".format(len(self.idx2vocab)))
def __len__(self):
return len(self.idx2vocab)
def convert(self, line):
raise NotImplementedError("method must be implemented by a subclass.")
class BaseDataset(torch.utils.data.Dataset):
def __init__(self, fp, ids_fp):
super().__init__()
self.fp = fp
self.ids_fp = ids_fp
self._line_pos_dp = list(utils.line_positions(fp))
self._line_pos_ids = list(utils.line_positions(ids_fp))
assert (len(self._line_pos_dp) == len(self._line_pos_ids))
def __len__(self):
return len(self._line_pos_dp)
def __getitem__(self, idx):
line_pos = self._line_pos_dp[idx]
with open(self.fp) as f:
f.seek(line_pos)
dp_line = f.readline().strip()
line_pos = self._line_pos_ids[idx]
with open(self.ids_fp) as f:
f.seek(line_pos)
ids_line = f.readline().strip()
return (json.loads(dp_line), json.loads(ids_line))
@staticmethod
def collate(seqs, pad_idx=None):
raise NotImplementedError("method must be implemented by a subclass.")
| code-prediction-transformer-main | dataset.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import multiprocessing as mp
import re
from tqdm import tqdm
def line_positions(file_path):
with open(file_path) as f:
while True:
pos = f.tell()
if f.readline():
yield pos
else:
break
def get_number_of_lines(fobj):
nol = sum(1 for _ in fobj)
fobj.seek(0)
return nol
def file_tqdm(f):
return tqdm(f, total=get_number_of_lines(f))
def parallelize(iterable, f, f_args=(), worker_init=None, n_cores=None):
if n_cores == 1:
return _mp_iterate_over(f, iterable, f_args)
if n_cores is None:
n_cores = int(mp.cpu_count())
lst = list(iterable)
chunksize = math.ceil(len(lst) / n_cores)
with mp.Pool(processes=n_cores, initializer=worker_init) as pool:
jobs = [
pool.apply_async(
_mp_iterate_over, (f, lst[i * chunksize : (i + 1) * chunksize], f_args)
)
for i in range(n_cores)
]
multiple_results = [job.get() for job in jobs]
results = flatten(multiple_results)
return results
def _mp_iterate_over(f, lst, f_args):
return [f(x, *f_args) for x in lst]
def flatten(list_of_lists):
return [x for xs in list_of_lists for x in xs]
########################################################################
# generating dataset utils
def get_dfs(ast, only_leaf=False):
dp = []
for node in ast:
if "value" in node:
dp.append(node["value"])
else:
if not only_leaf:
dp.append(node["type"])
return dp
def separate_dps(ast, max_len):
"""
Handles training / evaluation on long ASTs by splitting
them into smaller ASTs of length max_len, with a sliding
window of max_len / 2.
Example: for an AST ast with length 1700, and max_len = 1000,
the output will be:
    [[ast[0:1000], 0], [ast[500:1500], 500], [ast[700:1700], 800]]
Input:
ast : List[Dictionary]
List of nodes in pre-order traversal.
max_len : int
Output:
aug_asts : List[List[List, int]]
List of (ast, beginning idx of unseen nodes)
"""
half_len = int(max_len / 2)
if len(ast) <= max_len:
return [[ast, 0]]
aug_asts = [[ast[:max_len], 0]]
i = half_len
while i < len(ast) - max_len:
aug_asts.append([ast[i : i + max_len], half_len])
i += half_len
idx = max_len - (len(ast) - (i + half_len))
aug_asts.append([ast[-max_len:], idx])
return aug_asts
def get_ancestors(ast):
ancestors = {0: []}
node2parent = {0: 0}
for i, node in enumerate(ast):
if "children" in node:
for child in node["children"]:
node2parent[child] = i
ancestors[i] = [i] + ancestors[node2parent[i]]
return ancestors
def get_terminal_nodes(ast):
terminal_nodes = [i for i, node in enumerate(ast) if "children" not in node]
return terminal_nodes
def tokenize(s):
pattern = re.compile(r"(?<!^)(?=[A-Z])")
tokenized = pattern.sub("_", s).lower().split("_")
return list(filter(None, tokenized))[:5]
| code-prediction-transformer-main | utils.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
from functools import partial
from typing import Set
import torch
torch.manual_seed(0)
logging.getLogger().setLevel(logging.INFO)
def prepare_data(batch, device):
x = batch["input_seq"].to(device)
y = batch["target_seq"].to(device)
ext = batch["extended"]
rel = batch["rel_mask"].to(device) if "rel_mask" in batch else None
child = batch["child_mask"].to(device) if "child_mask" in batch else None
paths = batch["root_paths"].to(device) if "root_paths" in batch else None
return x, y, ext, rel, child, paths
def build_dataloader(dataset, batch_size, collate_fn, train_split=0.90):
train_len = int(train_split * len(dataset))
train_dataset, val_dataset = torch.utils.data.random_split(
dataset, lengths=([train_len, len(dataset) - train_len])
)
logging.info("Batch size: {}".format(batch_size))
logging.info(
"Train / val split ({}%): {} / {}".format(
100 * train_split, len(train_dataset), len(val_dataset)
)
)
train_dataloader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
collate_fn=collate_fn,
num_workers=16,
shuffle=True,
drop_last=True,
pin_memory=False,
)
logging.info("len(train_dataloader) = {}".format(len(train_dataloader)))
val_dataloader = torch.utils.data.DataLoader(
val_dataset,
batch_size=int(batch_size / 4),
collate_fn=collate_fn,
num_workers=16,
shuffle=True,
drop_last=True,
pin_memory=False,
)
logging.info("len(val_dataloader) = {}".format(len(val_dataloader)))
return train_dataloader, val_dataloader
def build_test_dataloader(test_dataset, batch_size, collate_fn):
test_dataloader = torch.utils.data.DataLoader(
test_dataset,
batch_size=batch_size,
collate_fn=collate_fn,
num_workers=16,
shuffle=False,
drop_last=True,
pin_memory=True,
)
logging.info("len(test_dataloader) = {}".format(len(test_dataloader)))
return test_dataloader
def build_metrics(loss_fn, unk_idx_set: Set[int], pad_idx, ids_str):
from ignite.metrics import Loss, TopKCategoricalAccuracy
def strip(out, id_str="all"):
if id_str != "all":
ids = out[id_str]
y_pred = out["y_pred"][ids]
y = out["y"][ids]
else:
y_pred = out["y_pred"]
y = out["y"]
idx = y != pad_idx
return y_pred[idx], y[idx]
def topk_trans(id_str):
def wrapped(out):
y_pred, y = strip(out, id_str)
for idx in unk_idx_set: # non-existing tokens
y[y == idx] = -2
return y_pred, y
return wrapped
def topk_ex_unk_trans(id_str):
def wrapped(out):
y_pred, y = strip(out, id_str)
idx_tensor = torch.ones(y.shape, dtype=torch.bool)
for idx in unk_idx_set:
idx_tensor[y == idx] = False
return y_pred[idx_tensor], y[idx_tensor]
return wrapped
def loss_trans():
def wrapped(out):
return strip(out)
return wrapped
metrics = {"_loss": Loss(loss_fn, loss_trans())}
for id_str in ["attr_ids", "leaf_ids"] + ["all"]:
        # reporting top-1 accuracy for attr ids, leaf ids, and all tokens
metrics["{}_acc".format(id_str)] = TopKCategoricalAccuracy(
1, topk_trans(id_str)
)
return metrics
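# Note on the transforms above (sketch): padded positions are dropped first; for
# the plain top-k accuracy, gold ids of unknown tokens are remapped to -2 so
# they can never match a prediction (counted as wrong), while topk_ex_unk_trans
# (not wired into the metrics dict here) would exclude those positions from the
# metric entirely.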
def build_evaluator(model, metrics, metrics_fp, device, pad_idx):
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
@Engine
@torch.no_grad()
def evaluator(engine, batch):
model.eval()
x, y, ext, rel, child, paths = prepare_data(batch, device)
y_pred = model(x, y, ext, rel, child, paths)
# here we pad out the indices that have been evaluated before
for i, ext_i in enumerate(ext):
y[i][:ext_i] = pad_idx
res = {"y_pred": y_pred.view(-1, y_pred.size(-1)), "y": y.view(-1)}
res.update(batch["ids"])
return res
for name, metric in metrics.items():
metric.attach(evaluator, name)
ProgressBar(bar_format="").attach(evaluator, metric_names=[])
@evaluator.on(Events.COMPLETED)
def log_val_metrics(engine):
metrics = engine.state.metrics
metrics = {name: "{:.4f}".format(num) for name, num in metrics.items()}
# mrr
metrics_str = json.dumps(metrics, indent=2, sort_keys=True)
logging.info("val metrics: {}".format(metrics_str))
with open(metrics_fp, "a") as fout:
fout.write(metrics_str)
fout.write("\n")
return evaluator
def build_trainer(
model,
loss_fn,
optimizer,
train_dataloader,
val_dataloader,
run_dir,
validator,
device,
score_fn=lambda engine: engine.state.metrics["all_acc"],
):
from ignite.contrib.handlers import ProgressBar
from ignite.engine import Engine, Events
from ignite.handlers import EarlyStopping, ModelCheckpoint, TerminateOnNan
from ignite.metrics import RunningAverage
@Engine
def trainer(engine, batch):
model.train()
x, y, ext, rel, child, paths = prepare_data(batch, device)
loss = model(x, y, ext, rel, child, paths, return_loss=True)
loss = loss.sum()
optimizer.zero_grad()
loss.backward()
optimizer.step()
return {"batchloss": loss.item()}
# # validation first
# @trainer.on(Events.STARTED)
# def validate(engine):
# validator.run(val_dataloader)
RunningAverage(output_transform=lambda out: out["batchloss"]).attach(
trainer, "batchloss"
)
ProgressBar(bar_format="").attach(trainer, metric_names=["batchloss"])
# store the model before validation
pre_model_handler = ModelCheckpoint(
dirname=run_dir,
filename_prefix="pre",
n_saved=100, # save all bests
save_interval=1,
require_empty=False,
)
trainer.add_event_handler(
Events.EPOCH_COMPLETED, pre_model_handler, {"model": model}
)
# validation
@trainer.on(Events.EPOCH_COMPLETED)
def validate(engine):
validator.run(val_dataloader)
# terminate on NaN
trainer.add_event_handler(Events.ITERATION_COMPLETED, TerminateOnNan())
# store the best model
best_model_handler = ModelCheckpoint(
dirname=run_dir,
filename_prefix="best",
n_saved=100, # save all bests
score_name="val_acc",
score_function=score_fn,
require_empty=False,
)
validator.add_event_handler(Events.COMPLETED, best_model_handler, {"model": model})
# Early stopping
es_handler = EarlyStopping(patience=5, score_function=score_fn, trainer=trainer)
validator.add_event_handler(Events.COMPLETED, es_handler)
return trainer
def train(
model,
vocab,
dataset,
metrics_fp,
loss_fn,
lr,
run_dir,
batch_size,
max_epochs,
device,
ids_str,
):
collate_fn = partial(dataset.collate, pad_idx=vocab.pad_idx)
train_dataloader, val_dataloader = build_dataloader(
dataset, batch_size=batch_size, collate_fn=collate_fn
)
metrics = build_metrics(loss_fn, vocab.unk_idx_set, vocab.pad_idx, ids_str)
# run the trainer and validator
validator = build_evaluator(
model=model,
metrics=metrics,
metrics_fp=metrics_fp,
device=device,
pad_idx=vocab.pad_idx,
)
trainer = build_trainer(
model=model,
loss_fn=loss_fn,
optimizer=torch.optim.Adam(model.parameters(), lr=lr),
train_dataloader=train_dataloader,
val_dataloader=val_dataloader,
run_dir=run_dir,
validator=validator,
device=device,
)
trainer.run(train_dataloader, max_epochs=max_epochs)
def eval_model(
model, vocab, test_dataset, metrics_fp, loss_fn, batch_size, device, ids_str
):
collate_fn = partial(test_dataset.collate, pad_idx=vocab.pad_idx)
test_dataloader = build_test_dataloader(
test_dataset, batch_size=batch_size, collate_fn=collate_fn
)
metrics = build_metrics(loss_fn, vocab.unk_idx_set, vocab.pad_idx, ids_str)
# run the evaluator
evaluator = build_evaluator(
model=model,
metrics=metrics,
metrics_fp=metrics_fp,
device=device,
pad_idx=vocab.pad_idx,
)
evaluator.run(test_dataloader)
| code-prediction-transformer-main | train.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from model import LSTMModel
# Dataset / Vocab are assumed to come from the project's dataset module for the
# trained model being demoed; adjust the import path if needed.
from dataset import Dataset, Vocab
import json
import os
import torch
import argparse
import logging
def predict_with_seq(seq, converted=False):
rel = None
if not converted:
seq = vocab.convert([seq, 0]) # [data, ext] is the format expected
seqs = [[seq, {}]] # the {} is a mapping of attr/leaf locations, not needed here
batch = Dataset.collate(seqs, pad_idx)
x = batch["input_seq"]
y = batch["target_seq"]
ext = batch["extended"]
y_pred = model(x, y, ext, rel=rel, return_loss=False)
return y_pred.squeeze()
def get_top_pred(pred, k=10, print_results=True):
    softmax = torch.nn.Softmax(dim=-1)  # explicit dim avoids the implicit-dimension deprecation warning
top_perc, top_idx = torch.topk(softmax(pred), k)
top_perc = top_perc.tolist()
top_tokens = [vocab.idx2vocab[i] for i in top_idx]
if print_results:
print('Top {} predictions:'.format(k))
for i, (perc, token) in enumerate(zip(top_perc, top_tokens)):
print('{}) {:<12} ({:.2f}%)'.format(i + 1, token, 100 * perc))
return top_perc, top_tokens
def predict_next(input_seq, k=10, print_results=False):
y_pred = predict_with_seq(input_seq + ['<pad_token>'])
top_perc, top_tokens = get_top_pred(y_pred[-1], k, print_results)
return top_perc, top_tokens
def demo_sequence(input_seq):
print(' '.join(input_seq))
top_perc, top_tokens = predict_next(input_seq, print_results=True)
def demo_datapoint(data, dp_raw, idxs, converted=False, print_results=True):
k = 10
# predict for the whole sequence in one shot
y_pred = predict_with_seq(data, converted)
for i in idxs:
context = dp_raw[max(0, i-5): i]
target = dp_raw[i]
print('Context: {}'.format('<before>...' + ' '.join(context)))
print('Target : {}'.format(target))
top_perc, top_tokens = get_top_pred(y_pred[i-1], k, print_results)
rank = top_tokens.index(target) if target in top_tokens else -2
print('Rank : {}'.format(rank + 1))
print()
def parse_args():
parser = argparse.ArgumentParser(description="Demo for a trained model")
parser.add_argument("--base_dir", "-b", default="/tmp/gpt2")
parser.add_argument("--model_fp", "-m", default="rnn.pth", help="Relative fp to best_model")
parser.add_argument("--vocab_fp", "-v", default="vocab.pkl", help="Relative fp to vocab pkl")
parser.add_argument("--dps_fp", help="Test filepath with raw data points")
parser.add_argument("--conv_fp", help="Test filepath with converted data points")
parser.add_argument(
"--ids_fp", help="Filepath with the ids that describe locations of various attrs/leaf/etc"
)
args = parser.parse_args()
logging.info("Base dir: {}".format(args.base_dir))
return args
def main():
global vocab
global model
global pad_idx
args = parse_args()
base_dir = args.base_dir
model_fp = os.path.join(base_dir, args.model_fp)
vocab = Vocab(os.path.join(base_dir, args.vocab_fp))
pad_idx = vocab.pad_idx
loss_fn = torch.nn.CrossEntropyLoss(ignore_index=vocab.pad_idx)
    n_ctx = 100
model = LSTMModel(
vocab_size=len(vocab),
n_embd=300,
loss_fn=loss_fn,
n_ctx=n_ctx,
)
print('Created {} model!'.format(model_fp))
# load model
new_checkpoint = {}
checkpoint = torch.load(model_fp, map_location=torch.device('cpu'))
for name, weights in checkpoint.items():
name = name.replace('module.', '')
new_checkpoint[name] = weights
del checkpoint
model.load_state_dict(new_checkpoint)
model.eval()
print('Loaded model from:', model_fp)
# 1. Try prediction with some made up sequence
input_seq = ['with', 'open', '(', 'raw_fp', ',', '"r"', ')', 'as', 'fin', ':', 'data_raw', '=', '[', 'json', '.', ]
demo_sequence(input_seq)
demo_sequence(input_seq + ['loads'])
# 2. Prediction on a sample from our dataset
# read dataset
if (args.dps_fp is not None):
raw_fp = os.path.join(base_dir, args.dps_fp)
with open(raw_fp, 'r') as fin:
data_raw = [json.loads(line) for line in fin.readlines()]
print('Read {} datapoints!'.format(len(data_raw)))
# TODO make these random
dp_i = 231
idx = 50
print('Raw data point [data, ext] = ', data_raw[dp_i])
dp_raw = data_raw[dp_i][0] # data_raw[dp_i][1] is an ext, we don't need it
demo_datapoint(dp_raw, dp_raw, {idx}, converted=False)
else:
return
# we can also predict from pred-converted data points
if (args.conv_fp is not None):
conv_fp = os.path.join(base_dir, args.conv_fp)
with open(conv_fp, 'r') as fin:
data_conv = [json.loads(line) for line in fin.readlines()]
print('Converted data point [data, ext] = ', data_conv[dp_i])
demo_datapoint(data_conv[dp_i], dp_raw, {idx}, converted=True)
# let's focus on the attrs in this data point
if (args.ids_fp is not None):
ids_fp = os.path.join(base_dir, args.ids_fp)
with open(ids_fp, 'r') as fin:
data_ids = [json.loads(line) for line in fin.readlines()]
print('Datapoint:\n{} .... <continued>'.format(' '.join(dp_raw[:100])))
print('# of value predictions:')
for name, lst in data_ids[dp_i].items():
print('{}: {}'.format(name, len(lst)))
attrs = data_ids[dp_i]["attr_ids"]
demo_datapoint(dp_raw, dp_raw, attrs, converted=False, print_results=False)
if __name__ == "__main__":
main()
| code-prediction-transformer-main | demo.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
import pickle
import re
from collections import Counter
from utils import file_tqdm, get_dfs, get_terminal_nodes, tokenize
logging.basicConfig(level=logging.INFO)
UNK = "<unk_token>"
PAD = "<pad_token>"
PLACEHOLDER = "<placeholder_token>"
def get_value(line, vocab_type):
if vocab_type == "token":
return get_dfs(line)
elif vocab_type == "subtoken":
lst = []
for node in get_terminal_nodes(line):
lst += tokenize(node)
return lst
elif vocab_type == "output":
return get_terminal_nodes(line)
def main():
parser = argparse.ArgumentParser(
description="Create vocab for code2seq model for py150 dataset"
)
parser.add_argument("--n_vocab", "-n", type=int, default=100000)
parser.add_argument("--input_fp", "-i")
parser.add_argument("--out_fp", "-o", default="/tmp/vocab.pkl")
parser.add_argument(
"--vocab_type",
"-v",
choices=["token", "subtoken", "output"],
help="What type of vocab to get",
)
args = parser.parse_args()
logging.info("Reading from: {}".format(args.input_fp))
logging.info("Vocab type: {}".format(args.vocab_type))
vocab = Counter()
with open(args.input_fp, "r") as f:
for line in file_tqdm(f):
vocab.update(get_value(json.loads(line.strip()), args.vocab_type))
vocab_to_keep = [i[0] for i in vocab.most_common(args.n_vocab)]
top_total = sum(i[1] for i in vocab.most_common(args.n_vocab))
total = sum(vocab.values())
logging.info("Total # of vocab: {}".format(len(vocab)))
logging.info(
"Using {} top vocab covers: {:.2f}% of the entire dataset".format(
args.n_vocab, 100 * top_total / total
)
)
logging.info("Top 10 most common vocab:")
for v, i in vocab.most_common(10):
print(v, i)
# add unk and pad tokens
vocab_to_keep.append(UNK)
vocab_to_keep.append(PAD)
vocab_to_keep.append(PLACEHOLDER)
logging.info("Added {} and {} and {}".format(UNK, PAD, PLACEHOLDER))
# dump vocab to file
with open(args.out_fp, "wb") as fout:
pickle.dump(vocab_to_keep, fout)
logging.info("Wrote {} vocab to: {}".format(len(vocab_to_keep), args.out_fp))
if __name__ == "__main__":
main() | code-prediction-transformer-main | code2seq/generate_vocab.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import json
import logging
import torch
import utils
logging.basicConfig(level=logging.INFO)
UNK = "<unk_token>"
PAD = "<pad_token>"
PLACEHOLDER = "<placeholder_token>"
class Dataset(torch.utils.data.Dataset):
def __init__(self, fp):
super().__init__()
self.fp = fp
self._line_pos_dp = list(utils.line_positions(fp))
def __len__(self):
return len(self._line_pos_dp)
def __getitem__(self, idx):
line_pos = self._line_pos_dp[idx]
with open(self.fp) as f:
f.seek(line_pos)
dp_line = json.loads(f.readline().strip())
return dp_line
@staticmethod
def collate(batch, token_pad_idx, subtoken_pad_idx):
def combine(seqs, max_len, max_path_len, pad_idx):
if not seqs:
return torch.ones((max_len, max_path_len)).long() * pad_idx
paths = []
for path in seqs:
paths.append(path + [pad_idx] * (max_path_len - len(path)))
len_pad = torch.ones((max_len - len(paths), max_path_len)).long()
return torch.cat((torch.tensor(paths), len_pad))
max_len = max(len(i[1]) for i in batch)
max_start_len = max(
max([len(start) for start in seq[1]], default=0) for seq in batch
)
max_path_len = max(
max([len(path) for path in seq[2]], default=0) for seq in batch
)
max_end_len = max(
max([len(start) for start in seq[3]], default=0) for seq in batch
)
all_targets = []
all_starts = []
all_paths = []
all_ends = []
for (target, starts, paths, ends) in batch:
all_targets.append(target)
starts = combine(starts, max_len, max_start_len, subtoken_pad_idx)
paths = combine(paths, max_len, max_path_len, token_pad_idx)
ends = combine(ends, max_len, max_end_len, subtoken_pad_idx)
all_starts.append(starts)
all_ends.append(ends)
all_paths.append(paths)
results = {
"targets": torch.tensor(all_targets),
"starts": torch.stack(all_starts),
"paths": torch.stack(all_paths),
"ends": torch.stack(all_ends),
}
return results
| code-prediction-transformer-main | code2seq/dataset.py |
import argparse
import json
import os
import pickle
import random
import re
from collections import defaultdict
from itertools import chain, combinations, product
from utils import get_ancestors, get_terminal_nodes, parallelize, tokenize
from tqdm import tqdm
PLACEHOLDER = "<placeholder_token>"
UNK = "<unk_token>"
def get_leaf_nodes(ast, id_type):
# get ids for special leaf types: attr, num, name, param
if id_type == "attr":
types_ = {"attr"}
elif id_type == "num":
types_ = {"Num"}
elif id_type == "name":
types_ = {"NameLoad", "NameStore"}
elif id_type == "param":
types_ = {"NameParam"}
nodes = []
for i, node in enumerate(ast):
if "type" in node and node["type"] in types_:
nodes.append(i + 1)
return nodes
def get_value(d):
return d["value"] if "value" in d else d["type"]
def extract_paths(ast, max_length):
def dfs(i):
node = ast[i]
if "children" not in node:
full_paths = []
half_paths = [[i]]
else:
children = node["children"]
child_to_full_paths, child_to_half_paths = zip(
*(dfs(child_id) for child_id in children)
)
full_paths = list(chain.from_iterable(child_to_full_paths))
for i_child in range(len(children) - 1):
for j_child in range(i_child + 1, len(children)):
i_child_half_paths = child_to_half_paths[i_child]
j_child_half_paths = child_to_half_paths[j_child]
for i_half_path, j_half_path in product(
i_child_half_paths, j_child_half_paths
):
path_len = len(i_half_path) + len(j_half_path) + 1
if path_len > max_length:
continue
path = list(chain(i_half_path, [i], reversed(j_half_path)))
full_paths.append(path)
half_paths = [
half_path + [i]
for half_path in chain.from_iterable(child_to_half_paths)
if len(half_path) + 1 < max_length
]
return full_paths, half_paths
return dfs(0)[0]
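# Note on the recursion above (sketch): for each node, "half paths" are chains
# from a terminal in its subtree up to the node itself; joining a half path from
# child i with one from child j (i < j) through the current node yields a
# terminal-to-terminal "full path", kept only when its total length is within
# max_length.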
def get_all_paths(ast, id_type, max_path_len, max_num_paths):
if id_type == "leaves":
nodes = get_terminal_nodes(ast)
else:
nodes = get_leaf_nodes(ast, id_type)
if not nodes:
return []
all_paths = extract_paths(ast, max_path_len)
ast_values = [get_value(i) for i in ast]
terminal_words = [get_value(ast[i]) for i in get_terminal_nodes(ast)]
tokenized_words = {word: tokenize(word) for word in terminal_words}
node_to_path_idx = {i: [] for i in range(len(ast))}
for i, path in enumerate(all_paths):
node_to_path_idx[path[-1]].append(i)
dps = []
paths_to_choose_from = []
prev_node = 0
for node in nodes:
for j in range(prev_node, node):
paths_to_choose_from += [
all_paths[path_i] for path_i in node_to_path_idx[j]
]
prev_node = node
paths_to_here = [all_paths[path_i] for path_i in node_to_path_idx[node]]
if len(paths_to_choose_from) + len(paths_to_here) <= max_num_paths:
paths = paths_to_choose_from.copy() + paths_to_here
else:
if len(paths_to_here) > max_num_paths:
paths = random.sample(paths_to_here, max_num_paths)
else:
paths = paths_to_here + random.sample(
paths_to_choose_from, max_num_paths - len(paths_to_here)
)
        # convert node ids to their string values; the target node becomes PLACEHOLDER
target = ast_values[node]
paths = [
[ast_values[i] if i != node else PLACEHOLDER for i in p] for p in paths
]
lefts = [tokenized_words[p[0]] for p in paths]
rights = [
tokenized_words[p[-1]] if p[-1] != PLACEHOLDER else [PLACEHOLDER]
for p in paths
]
dps.append([target, lefts, paths, rights])
return dps
def get_word2idx(out_fp):
with open(out_fp, "rb") as fin:
vocab = pickle.load(fin)
word2idx = {word: i for i, word in enumerate(vocab)}
word2idx = defaultdict(lambda: word2idx[UNK], word2idx)
print("Read vocab from: {}".format(out_fp))
return word2idx
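
# Hedged sketch (editorial addition): get_word2idx expects a pickled list of
# vocabulary words that already contains UNK; unseen words fall back to the
# UNK index through the defaultdict. The temp file name is an assumption.
def _example_word2idx(tmp_fp="/tmp/toy_vocab.pkl"):
    with open(tmp_fp, "wb") as fout:
        pickle.dump([UNK, PLACEHOLDER, "self", "print"], fout)
    word2idx = get_word2idx(tmp_fp)
    assert word2idx["print"] == 3
    assert word2idx["never_seen_token"] == word2idx[UNK] == 0
    return word2idx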
def main():
parser = argparse.ArgumentParser(
description="Generate terminal to terminal paths from AST"
)
parser.add_argument("--ast_fp", "-a", help="Filepath with the ASTs to be parsed")
parser.add_argument(
"--out_fp", "-o", default="/tmp/dps.txt", help="Filepath for the output dps"
)
parser.add_argument("--max_path_len", type=int, default=9, help="Max path len.")
parser.add_argument("--max_num_paths", type=int, default=200)
parser.add_argument("--base_dir", "-b", type=str)
parser.add_argument(
"id_type",
choices=["attr", "num", "name", "param", "leaves"],
default="attr",
help="Which ids to generate. Default = attr",
)
args = parser.parse_args()
print("Max path len: {}".format(args.max_path_len))
print("Max num paths: {}".format(args.max_num_paths))
print("Writing to {}".format(args.out_fp))
# read the vocabs
base_dir = args.base_dir
token_vocab = get_word2idx(os.path.join(base_dir, "token_vocab.pkl"))
subtoken_vocab = get_word2idx(os.path.join(base_dir, "subtoken_vocab.pkl"))
output_vocab = get_word2idx(os.path.join(base_dir, "output_vocab.pkl"))
data = []
i = 0
c = 0
with open(args.ast_fp, "r") as f, open(args.out_fp, "w") as fout:
for _ in range(20):
i += 1
print("Starting {} / 50".format(i))
for _ in range(5000):
dp = json.loads(f.readline().strip())
if len(dp) <= 1:
continue
data.append(dp)
print(" > Finished reading: {}".format(len(data)))
for ast in tqdm(data):
dp = get_all_paths(ast, args.id_type, args.max_path_len, args.max_num_paths)
for target, lefts, paths, rights in dp:
target = output_vocab[target]
lefts = [[subtoken_vocab[t] for t in lst] for lst in lefts]
paths = [[token_vocab[t] for t in lst] for lst in paths]
rights = [[subtoken_vocab[t] for t in lst] for lst in rights]
json.dump([target, lefts, paths, rights], fout)
fout.write("\n")
c += 1
data = []
print(" > Finished writing to file")
print("Wrote {} datapoints to {}".format(c, args.out_fp))
if __name__ == "__main__":
main()
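
# Hedged invocation sketch (editorial addition): the paths below are
# assumptions. The script expects token_vocab.pkl, subtoken_vocab.pkl and
# output_vocab.pkl (pickled word lists) inside --base_dir.
#
#   python generate_data.py attr \
#       --ast_fp /path/to/asts.jsonl \
#       --out_fp /tmp/dps.txt \
#       --base_dir /path/to/vocabs \
#       --max_path_len 9 --max_num_paths 200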
| code-prediction-transformer-main | code2seq/generate_data.py |
#!/usr/bin/env python3
# Copyright (c) 2019 Technion
# Copyright (c) Facebook, Inc. and its affiliates.
# ----------------------------------------------------------------------------
# MIT License
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ----------------------------------------------------------------------------
"""
Code2seq model is adapted from: https://github.com/tech-srl/code2seq
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class EmbeddingAttentionLayer(nn.Module):
def __init__(self, dim: int) -> None:
super().__init__()
self.dim = dim
self.attention = torch.randn(1, dim)
self.attention = nn.Parameter(self.attention)
def compute_weights(self, embedded: torch.Tensor) -> torch.Tensor:
unnormalized_weights = embedded.matmul(self.attention.t())
attention_weights = F.softmax(unnormalized_weights, dim=1)
return attention_weights
def forward(self, embedded: torch.Tensor) -> torch.Tensor:
attention_weights = self.compute_weights(embedded)
weighted = torch.bmm(attention_weights.transpose(1, 2), embedded)
return weighted
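
# Hedged shape check (editorial addition): the attention layer collapses a bag
# of path encodings into a single code vector per example. The sizes below are
# arbitrary and chosen only for illustration.
def _example_attention_shapes():
    layer = EmbeddingAttentionLayer(dim=128)
    embedded = torch.randn(4, 200, 128)  # (batch, num_paths, dim)
    weighted = layer(embedded)           # (batch, 1, dim)
    assert weighted.shape == (4, 1, 128)
    return weighted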
class Code2SeqModel(nn.Module):
def __init__(
self,
token_vocab_size: int,
subtoken_vocab_size: int,
output_vocab_size: int,
token_pad_idx: int,
subtoken_pad_idx: int,
loss_fn: nn.Module,
n_embd: int = 128,
rnn_dropout: float = 0.5,
embed_dropout: float = 0.25,
):
super().__init__()
self.subtoken_embedding = nn.Embedding(
subtoken_vocab_size, n_embd, padding_idx=subtoken_pad_idx
)
self.node_embedding = nn.Embedding(
token_vocab_size, n_embd, padding_idx=token_pad_idx
)
self.path_lstm = nn.LSTM(
n_embd, n_embd, bidirectional=True, dropout=rnn_dropout, batch_first=True
)
self.combined_layer = nn.Linear(n_embd * 4, n_embd)
self.dropout = nn.Dropout(embed_dropout)
self.attn_layer = EmbeddingAttentionLayer(n_embd)
self.out_layer = nn.Linear(n_embd, output_vocab_size)
self.loss_fn = loss_fn
def reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def embed_paths(self, paths):
path_tokens_embedded = self.node_embedding(paths)
batch_size, bag_size, path_len, _ = path_tokens_embedded.shape
path_tokens_embedded = path_tokens_embedded.view(
(batch_size * bag_size, path_len, -1)
)
out, (h, c) = self.path_lstm(path_tokens_embedded)
paths_embedded = h.permute((1, 0, 2)).reshape(batch_size, bag_size, -1)
return paths_embedded
def embed_subtokens(self, subtokens):
tokens_embedded = self.subtoken_embedding(subtokens)
return tokens_embedded.sum(2)
def forward(self, starts, paths, ends, targets, return_loss=False):
# embed individual parts
starts_embedded = self.embed_subtokens(starts)
paths_embedded = self.embed_paths(paths)
ends_embedded = self.embed_subtokens(ends)
        # combine by concatenating
combined_embedded = torch.cat(
(starts_embedded, paths_embedded, ends_embedded), dim=2
)
combined_embedded = self.dropout(combined_embedded)
combined_embedded = torch.tanh(self.combined_layer(combined_embedded))
# combine paths by simple attention
        code_embedded = self.attn_layer(combined_embedded).squeeze(1)
y_pred = self.out_layer(code_embedded)
if not return_loss:
return y_pred
return self.loss_fn(y_pred, targets)
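
# Hedged end-to-end sketch (editorial addition): builds a small Code2SeqModel
# and runs a forward pass on random integer tensors. All sizes, pad indices
# and the loss function are assumptions made only for illustration.
def _example_forward():
    model = Code2SeqModel(
        token_vocab_size=100,
        subtoken_vocab_size=80,
        output_vocab_size=50,
        token_pad_idx=0,
        subtoken_pad_idx=0,
        loss_fn=nn.CrossEntropyLoss(),
    )
    batch, num_paths, path_len, subtok_len = 4, 10, 9, 5
    starts = torch.randint(0, 80, (batch, num_paths, subtok_len))
    paths = torch.randint(0, 100, (batch, num_paths, path_len))
    ends = torch.randint(0, 80, (batch, num_paths, subtok_len))
    targets = torch.randint(0, 50, (batch,))
    logits = model(starts, paths, ends, targets)  # (batch, output_vocab_size)
    loss = model(starts, paths, ends, targets, return_loss=True)
    return logits.shape, loss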
| code-prediction-transformer-main | code2seq/code2seq_model.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from dataset.dataset import BaseDataset, BaseSetup, BaseVocab
class Setup(BaseSetup):
def _create_vocab(self):
return Vocab(self.filepaths["vocab"])
def _create_dataset(self, fp, ids_fp):
return Dataset(fp, ids_fp)
class Vocab(BaseVocab):
def convert(self, line):
dp, ext = line
dp_conv = [
self.vocab2idx[token] if token in self.vocab2idx else self.unk_idx
for token in dp
]
return [dp_conv, ext]
class Dataset(BaseDataset):
@staticmethod
def collate(seqs, pad_idx):
max_len = max(len(seq[0][0]) for seq in seqs)
max_len = max(max_len, 2)
input_seqs = []
target_seqs = []
extended = []
ids = {name: [] for name in seqs[0][1].keys()}
for i, ((seq, ext), ids_lst) in enumerate(seqs):
padding = [pad_idx] * (max_len - len(seq))
input_seqs.append(seq[:-1] + padding)
target_seqs.append(seq[1:] + padding)
extended.append(ext)
for name, lst in ids_lst.items():
ids[name] += [j - 1 + (max_len - 1) * i for j in lst]
return {
"input_seq": torch.tensor(input_seqs),
"target_seq": torch.tensor(target_seqs),
"extended": torch.tensor(extended),
"ids": ids,
}
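
# Hedged sketch (editorial addition): collate pads the already-converted token
# id sequences and remaps each example's node ids to positions in the flattened
# (batch * (max_len - 1)) prediction matrix. All values, including the
# "leaf_ids" key, are made up for illustration.
def _example_collate(pad_idx=0):
    seqs = [
        (([5, 6, 7], 0), {"leaf_ids": [1, 2]}),
        (([8, 9], 1), {"leaf_ids": [1]}),
    ]
    batch = Dataset.collate(seqs, pad_idx)
    # input_seq -> [[5, 6], [8, 0]]; target_seq -> [[6, 7], [9, 0]];
    # ids["leaf_ids"] -> [0, 1, 2]
    return batch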
| code-prediction-transformer-main | models/path_trans_variation/dataset.py |