Dataset columns: python_code (string, lengths 0-992k), repo_name (string, lengths 8-46), file_path (string, lengths 5-162).
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from dataclasses import dataclass import hydra from hydra.core.config_store import ConfigStore import logging from omegaconf import MISSING, OmegaConf import os import os.path as osp from pathlib import Path import subprocess from typing import Optional from fairseq.data.dictionary import Dictionary from fairseq.dataclass import FairseqDataclass script_dir = Path(__file__).resolve().parent config_path = script_dir / "config" logger = logging.getLogger(__name__) @dataclass class KaldiInitializerConfig(FairseqDataclass): data_dir: str = MISSING fst_dir: Optional[str] = None in_labels: str = MISSING out_labels: Optional[str] = None wav2letter_lexicon: Optional[str] = None lm_arpa: str = MISSING kaldi_root: str = MISSING blank_symbol: str = "<s>" silence_symbol: Optional[str] = None def create_units(fst_dir: Path, in_labels: str, vocab: Dictionary) -> Path: in_units_file = fst_dir / f"kaldi_dict.{in_labels}.txt" if not in_units_file.exists(): logger.info(f"Creating {in_units_file}") with open(in_units_file, "w") as f: print("<eps> 0", file=f) i = 1 for symb in vocab.symbols[vocab.nspecial :]: if not symb.startswith("madeupword"): print(f"{symb} {i}", file=f) i += 1 return in_units_file def create_lexicon( cfg: KaldiInitializerConfig, fst_dir: Path, unique_label: str, in_units_file: Path, out_words_file: Path, ) -> (Path, Path): disambig_in_units_file = fst_dir / f"kaldi_dict.{cfg.in_labels}_disambig.txt" lexicon_file = fst_dir / f"kaldi_lexicon.{unique_label}.txt" disambig_lexicon_file = fst_dir / f"kaldi_lexicon.{unique_label}_disambig.txt" if ( not lexicon_file.exists() or not disambig_lexicon_file.exists() or not disambig_in_units_file.exists() ): logger.info(f"Creating {lexicon_file} (in units file: {in_units_file})") assert cfg.wav2letter_lexicon is not None or cfg.in_labels == cfg.out_labels if cfg.wav2letter_lexicon is not None: lm_words = set() with open(out_words_file, "r") as lm_dict_f: for line in lm_dict_f: lm_words.add(line.split()[0]) num_skipped = 0 total = 0 with open(cfg.wav2letter_lexicon, "r") as w2l_lex_f, open( lexicon_file, "w" ) as out_f: for line in w2l_lex_f: items = line.rstrip().split("\t") assert len(items) == 2, items if items[0] in lm_words: print(items[0], items[1], file=out_f) else: num_skipped += 1 logger.debug( f"Skipping word {items[0]} as it was not found in LM" ) total += 1 if num_skipped > 0: logger.warning( f"Skipped {num_skipped} out of {total} words as they were not found in LM" ) else: with open(in_units_file, "r") as in_f, open(lexicon_file, "w") as out_f: for line in in_f: symb = line.split()[0] if symb != "<eps>" and symb != "<ctc_blank>" and symb != "<SIL>": print(symb, symb, file=out_f) lex_disambig_path = ( Path(cfg.kaldi_root) / "egs/wsj/s5/utils/add_lex_disambig.pl" ) res = subprocess.run( [lex_disambig_path, lexicon_file, disambig_lexicon_file], check=True, capture_output=True, ) ndisambig = int(res.stdout) disamib_path = Path(cfg.kaldi_root) / "egs/wsj/s5/utils/add_disambig.pl" res = subprocess.run( [disamib_path, "--include-zero", in_units_file, str(ndisambig)], check=True, capture_output=True, ) with open(disambig_in_units_file, "wb") as f: f.write(res.stdout) return disambig_lexicon_file, disambig_in_units_file def create_G( kaldi_root: Path, fst_dir: Path, lm_arpa: Path, arpa_base: str ) -> (Path, Path): out_words_file = fst_dir / 
f"kaldi_dict.{arpa_base}.txt" grammar_graph = fst_dir / f"G_{arpa_base}.fst" if not grammar_graph.exists() or not out_words_file.exists(): logger.info(f"Creating {grammar_graph}") arpa2fst = kaldi_root / "src/lmbin/arpa2fst" subprocess.run( [ arpa2fst, "--disambig-symbol=#0", f"--write-symbol-table={out_words_file}", lm_arpa, grammar_graph, ], check=True, ) return grammar_graph, out_words_file def create_L( kaldi_root: Path, fst_dir: Path, unique_label: str, lexicon_file: Path, in_units_file: Path, out_words_file: Path, ) -> Path: lexicon_graph = fst_dir / f"L.{unique_label}.fst" if not lexicon_graph.exists(): logger.info(f"Creating {lexicon_graph} (in units: {in_units_file})") make_lex = kaldi_root / "egs/wsj/s5/utils/make_lexicon_fst.pl" fstcompile = kaldi_root / "tools/openfst-1.6.7/bin/fstcompile" fstaddselfloops = kaldi_root / "src/fstbin/fstaddselfloops" fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort" def write_disambig_symbol(file): with open(file, "r") as f: for line in f: items = line.rstrip().split() if items[0] == "#0": out_path = str(file) + "_disamig" with open(out_path, "w") as out_f: print(items[1], file=out_f) return out_path return None in_disambig_sym = write_disambig_symbol(in_units_file) assert in_disambig_sym is not None out_disambig_sym = write_disambig_symbol(out_words_file) assert out_disambig_sym is not None try: with open(lexicon_graph, "wb") as out_f: res = subprocess.run( [make_lex, lexicon_file], capture_output=True, check=True ) assert len(res.stderr) == 0, res.stderr.decode("utf-8") res = subprocess.run( [ fstcompile, f"--isymbols={in_units_file}", f"--osymbols={out_words_file}", "--keep_isymbols=false", "--keep_osymbols=false", ], input=res.stdout, capture_output=True, ) assert len(res.stderr) == 0, res.stderr.decode("utf-8") res = subprocess.run( [fstaddselfloops, in_disambig_sym, out_disambig_sym], input=res.stdout, capture_output=True, check=True, ) res = subprocess.run( [fstarcsort, "--sort_type=olabel"], input=res.stdout, capture_output=True, check=True, ) out_f.write(res.stdout) except subprocess.CalledProcessError as e: logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") os.remove(lexicon_graph) raise except AssertionError: os.remove(lexicon_graph) raise return lexicon_graph def create_LG( kaldi_root: Path, fst_dir: Path, unique_label: str, lexicon_graph: Path, grammar_graph: Path, ) -> Path: lg_graph = fst_dir / f"LG.{unique_label}.fst" if not lg_graph.exists(): logger.info(f"Creating {lg_graph}") fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose" fstdeterminizestar = kaldi_root / "src/fstbin/fstdeterminizestar" fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded" fstpushspecial = kaldi_root / "src/fstbin/fstpushspecial" fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort" try: with open(lg_graph, "wb") as out_f: res = subprocess.run( [fsttablecompose, lexicon_graph, grammar_graph], capture_output=True, check=True, ) res = subprocess.run( [ fstdeterminizestar, "--use-log=true", ], input=res.stdout, capture_output=True, ) res = subprocess.run( [fstminimizeencoded], input=res.stdout, capture_output=True, check=True, ) res = subprocess.run( [fstpushspecial], input=res.stdout, capture_output=True, check=True, ) res = subprocess.run( [fstarcsort, "--sort_type=ilabel"], input=res.stdout, capture_output=True, check=True, ) out_f.write(res.stdout) except subprocess.CalledProcessError as e: logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") os.remove(lg_graph) raise return lg_graph def 
create_H( kaldi_root: Path, fst_dir: Path, disambig_out_units_file: Path, in_labels: str, vocab: Dictionary, blk_sym: str, silence_symbol: Optional[str], ) -> (Path, Path, Path): h_graph = ( fst_dir / f"H.{in_labels}{'_' + silence_symbol if silence_symbol else ''}.fst" ) h_out_units_file = fst_dir / f"kaldi_dict.h_out.{in_labels}.txt" disambig_in_units_file_int = Path(str(h_graph) + "isym_disambig.int") disambig_out_units_file_int = Path(str(disambig_out_units_file) + ".int") if ( not h_graph.exists() or not h_out_units_file.exists() or not disambig_in_units_file_int.exists() ): logger.info(f"Creating {h_graph}") eps_sym = "<eps>" num_disambig = 0 osymbols = [] with open(disambig_out_units_file, "r") as f, open( disambig_out_units_file_int, "w" ) as out_f: for line in f: symb, id = line.rstrip().split() if line.startswith("#"): num_disambig += 1 print(id, file=out_f) else: if len(osymbols) == 0: assert symb == eps_sym, symb osymbols.append((symb, id)) i_idx = 0 isymbols = [(eps_sym, 0)] imap = {} for i, s in enumerate(vocab.symbols): i_idx += 1 isymbols.append((s, i_idx)) imap[s] = i_idx fst_str = [] node_idx = 0 root_node = node_idx special_symbols = [blk_sym] if silence_symbol is not None: special_symbols.append(silence_symbol) for ss in special_symbols: fst_str.append("{} {} {} {}".format(root_node, root_node, ss, eps_sym)) for symbol, _ in osymbols: if symbol == eps_sym or symbol.startswith("#"): continue node_idx += 1 # 1. from root to emitting state fst_str.append("{} {} {} {}".format(root_node, node_idx, symbol, symbol)) # 2. from emitting state back to root fst_str.append("{} {} {} {}".format(node_idx, root_node, eps_sym, eps_sym)) # 3. from emitting state to optional blank state pre_node = node_idx node_idx += 1 for ss in special_symbols: fst_str.append("{} {} {} {}".format(pre_node, node_idx, ss, eps_sym)) # 4. 
from blank state back to root fst_str.append("{} {} {} {}".format(node_idx, root_node, eps_sym, eps_sym)) fst_str.append("{}".format(root_node)) fst_str = "\n".join(fst_str) h_str = str(h_graph) isym_file = h_str + ".isym" with open(isym_file, "w") as f: for sym, id in isymbols: f.write("{} {}\n".format(sym, id)) with open(h_out_units_file, "w") as f: for sym, id in osymbols: f.write("{} {}\n".format(sym, id)) with open(disambig_in_units_file_int, "w") as f: disam_sym_id = len(isymbols) for _ in range(num_disambig): f.write("{}\n".format(disam_sym_id)) disam_sym_id += 1 fstcompile = kaldi_root / "tools/openfst-1.6.7/bin/fstcompile" fstaddselfloops = kaldi_root / "src/fstbin/fstaddselfloops" fstarcsort = kaldi_root / "tools/openfst-1.6.7/bin/fstarcsort" try: with open(h_graph, "wb") as out_f: res = subprocess.run( [ fstcompile, f"--isymbols={isym_file}", f"--osymbols={h_out_units_file}", "--keep_isymbols=false", "--keep_osymbols=false", ], input=str.encode(fst_str), capture_output=True, check=True, ) res = subprocess.run( [ fstaddselfloops, disambig_in_units_file_int, disambig_out_units_file_int, ], input=res.stdout, capture_output=True, check=True, ) res = subprocess.run( [fstarcsort, "--sort_type=olabel"], input=res.stdout, capture_output=True, check=True, ) out_f.write(res.stdout) except subprocess.CalledProcessError as e: logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") os.remove(h_graph) raise return h_graph, h_out_units_file, disambig_in_units_file_int def create_HLGa( kaldi_root: Path, fst_dir: Path, unique_label: str, h_graph: Path, lg_graph: Path, disambig_in_words_file_int: Path, ) -> Path: hlga_graph = fst_dir / f"HLGa.{unique_label}.fst" if not hlga_graph.exists(): logger.info(f"Creating {hlga_graph}") fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose" fstdeterminizestar = kaldi_root / "src/fstbin/fstdeterminizestar" fstrmsymbols = kaldi_root / "src/fstbin/fstrmsymbols" fstrmepslocal = kaldi_root / "src/fstbin/fstrmepslocal" fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded" try: with open(hlga_graph, "wb") as out_f: res = subprocess.run( [ fsttablecompose, h_graph, lg_graph, ], capture_output=True, check=True, ) res = subprocess.run( [fstdeterminizestar, "--use-log=true"], input=res.stdout, capture_output=True, check=True, ) res = subprocess.run( [fstrmsymbols, disambig_in_words_file_int], input=res.stdout, capture_output=True, check=True, ) res = subprocess.run( [fstrmepslocal], input=res.stdout, capture_output=True, check=True, ) res = subprocess.run( [fstminimizeencoded], input=res.stdout, capture_output=True, check=True, ) out_f.write(res.stdout) except subprocess.CalledProcessError as e: logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") os.remove(hlga_graph) raise return hlga_graph def create_HLa( kaldi_root: Path, fst_dir: Path, unique_label: str, h_graph: Path, l_graph: Path, disambig_in_words_file_int: Path, ) -> Path: hla_graph = fst_dir / f"HLa.{unique_label}.fst" if not hla_graph.exists(): logger.info(f"Creating {hla_graph}") fsttablecompose = kaldi_root / "src/fstbin/fsttablecompose" fstdeterminizestar = kaldi_root / "src/fstbin/fstdeterminizestar" fstrmsymbols = kaldi_root / "src/fstbin/fstrmsymbols" fstrmepslocal = kaldi_root / "src/fstbin/fstrmepslocal" fstminimizeencoded = kaldi_root / "src/fstbin/fstminimizeencoded" try: with open(hla_graph, "wb") as out_f: res = subprocess.run( [ fsttablecompose, h_graph, l_graph, ], capture_output=True, check=True, ) res = subprocess.run( [fstdeterminizestar, 
"--use-log=true"], input=res.stdout, capture_output=True, check=True, ) res = subprocess.run( [fstrmsymbols, disambig_in_words_file_int], input=res.stdout, capture_output=True, check=True, ) res = subprocess.run( [fstrmepslocal], input=res.stdout, capture_output=True, check=True, ) res = subprocess.run( [fstminimizeencoded], input=res.stdout, capture_output=True, check=True, ) out_f.write(res.stdout) except subprocess.CalledProcessError as e: logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") os.remove(hla_graph) raise return hla_graph def create_HLG( kaldi_root: Path, fst_dir: Path, unique_label: str, hlga_graph: Path, prefix: str = "HLG", ) -> Path: hlg_graph = fst_dir / f"{prefix}.{unique_label}.fst" if not hlg_graph.exists(): logger.info(f"Creating {hlg_graph}") add_self_loop = script_dir / "add-self-loop-simple" kaldi_src = kaldi_root / "src" kaldi_lib = kaldi_src / "lib" try: if not add_self_loop.exists(): fst_include = kaldi_root / "tools/openfst-1.6.7/include" add_self_loop_src = script_dir / "add-self-loop-simple.cc" subprocess.run( [ "c++", f"-I{kaldi_src}", f"-I{fst_include}", f"-L{kaldi_lib}", add_self_loop_src, "-lkaldi-base", "-lkaldi-fstext", "-o", add_self_loop, ], check=True, ) my_env = os.environ.copy() my_env["LD_LIBRARY_PATH"] = f"{kaldi_lib}:{my_env['LD_LIBRARY_PATH']}" subprocess.run( [ add_self_loop, hlga_graph, hlg_graph, ], check=True, capture_output=True, env=my_env, ) except subprocess.CalledProcessError as e: logger.error(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}") raise return hlg_graph def initalize_kaldi(cfg: KaldiInitializerConfig) -> Path: if cfg.fst_dir is None: cfg.fst_dir = osp.join(cfg.data_dir, "kaldi") if cfg.out_labels is None: cfg.out_labels = cfg.in_labels kaldi_root = Path(cfg.kaldi_root) data_dir = Path(cfg.data_dir) fst_dir = Path(cfg.fst_dir) fst_dir.mkdir(parents=True, exist_ok=True) arpa_base = osp.splitext(osp.basename(cfg.lm_arpa))[0] unique_label = f"{cfg.in_labels}.{arpa_base}" with open(data_dir / f"dict.{cfg.in_labels}.txt", "r") as f: vocab = Dictionary.load(f) in_units_file = create_units(fst_dir, cfg.in_labels, vocab) grammar_graph, out_words_file = create_G( kaldi_root, fst_dir, Path(cfg.lm_arpa), arpa_base ) disambig_lexicon_file, disambig_L_in_units_file = create_lexicon( cfg, fst_dir, unique_label, in_units_file, out_words_file ) h_graph, h_out_units_file, disambig_in_units_file_int = create_H( kaldi_root, fst_dir, disambig_L_in_units_file, cfg.in_labels, vocab, cfg.blank_symbol, cfg.silence_symbol, ) lexicon_graph = create_L( kaldi_root, fst_dir, unique_label, disambig_lexicon_file, disambig_L_in_units_file, out_words_file, ) lg_graph = create_LG( kaldi_root, fst_dir, unique_label, lexicon_graph, grammar_graph ) hlga_graph = create_HLGa( kaldi_root, fst_dir, unique_label, h_graph, lg_graph, disambig_in_units_file_int ) hlg_graph = create_HLG(kaldi_root, fst_dir, unique_label, hlga_graph) # for debugging # hla_graph = create_HLa(kaldi_root, fst_dir, unique_label, h_graph, lexicon_graph, disambig_in_units_file_int) # hl_graph = create_HLG(kaldi_root, fst_dir, unique_label, hla_graph, prefix="HL_looped") # create_HLG(kaldi_root, fst_dir, "phnc", h_graph, prefix="H_looped") return hlg_graph @hydra.main(config_path=config_path, config_name="kaldi_initializer") def cli_main(cfg: KaldiInitializerConfig) -> None: container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True) cfg = OmegaConf.create(container) OmegaConf.set_struct(cfg, True) initalize_kaldi(cfg) if __name__ == "__main__": 
logging.root.setLevel(logging.INFO) logging.basicConfig(level=logging.INFO) try: from hydra._internal.utils import ( get_args, ) # pylint: disable=import-outside-toplevel cfg_name = get_args().config_name or "kaldi_initializer" except ImportError: logger.warning("Failed to get config name from hydra args") cfg_name = "kaldi_initializer" cs = ConfigStore.instance() cs.store(name=cfg_name, node=KaldiInitializerConfig) cli_main()
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/kaldi/kaldi_initializer.py
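The graph-building helpers above (create_L, create_LG, create_H, create_HLGa) all follow the same pattern: run one Kaldi or OpenFst binary with subprocess.run, feed its stdout into the next binary's stdin through the input argument, and write the final stdout to the target FST file, removing the partial file if any stage fails. A minimal, generic sketch of that pattern follows; the helper name run_chain and the example pipeline are illustrative only and are not part of the file above.

import subprocess
from pathlib import Path
from typing import List, Sequence


def run_chain(commands: List[Sequence[str]], out_path: Path) -> None:
    # Feed each command's stdout into the next command's stdin and write the
    # final stdout to out_path. check=True raises CalledProcessError on a
    # non-zero exit, mirroring the error handling in create_L / create_LG.
    data = None
    try:
        for cmd in commands:
            res = subprocess.run(
                [str(c) for c in cmd],
                input=data,
                capture_output=True,
                check=True,
            )
            data = res.stdout
        with open(out_path, "wb") as f:
            f.write(data)
    except subprocess.CalledProcessError as e:
        print(f"cmd: {e.cmd}, err: {e.stderr.decode('utf-8')}")
        raise

create_LG, for example, is essentially run_chain([[fsttablecompose, L_fst, G_fst], [fstdeterminizestar, "--use-log=true"], [fstminimizeencoded], [fstpushspecial], [fstarcsort, "--sort_type=ilabel"]], lg_graph), plus deletion of the partially written graph when a stage fails.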
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/kaldi/__init__.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from concurrent.futures import ThreadPoolExecutor import logging from omegaconf import MISSING import os from typing import Optional import warnings from dataclasses import dataclass from fairseq.dataclass import FairseqDataclass from .kaldi_initializer import KaldiInitializerConfig, initalize_kaldi logger = logging.getLogger(__name__) @dataclass class KaldiDecoderConfig(FairseqDataclass): hlg_graph_path: Optional[str] = None output_dict: str = MISSING kaldi_initializer_config: Optional[KaldiInitializerConfig] = None acoustic_scale: float = 0.5 max_active: int = 10000 beam_delta: float = 0.5 hash_ratio: float = 2.0 is_lattice: bool = False lattice_beam: float = 10.0 prune_interval: int = 25 determinize_lattice: bool = True prune_scale: float = 0.1 max_mem: int = 0 phone_determinize: bool = True word_determinize: bool = True minimize: bool = True num_threads: int = 1 class KaldiDecoder(object): def __init__( self, cfg: KaldiDecoderConfig, beam: int, nbest: int = 1, ): try: from kaldi.asr import FasterRecognizer, LatticeFasterRecognizer from kaldi.decoder import ( FasterDecoder, FasterDecoderOptions, LatticeFasterDecoder, LatticeFasterDecoderOptions, ) from kaldi.lat.functions import DeterminizeLatticePhonePrunedOptions from kaldi.fstext import read_fst_kaldi, SymbolTable except: warnings.warn( "pykaldi is required for this functionality. Please install from https://github.com/pykaldi/pykaldi" ) # set_verbose_level(2) self.acoustic_scale = cfg.acoustic_scale self.nbest = nbest if cfg.hlg_graph_path is None: assert ( cfg.kaldi_initializer_config is not None ), "Must provide hlg graph path or kaldi initializer config" cfg.hlg_graph_path = initalize_kaldi(cfg.kaldi_initializer_config) assert os.path.exists(cfg.hlg_graph_path), cfg.hlg_graph_path if cfg.is_lattice: self.dec_cls = LatticeFasterDecoder opt_cls = LatticeFasterDecoderOptions self.rec_cls = LatticeFasterRecognizer else: assert self.nbest == 1, "nbest > 1 requires lattice decoder" self.dec_cls = FasterDecoder opt_cls = FasterDecoderOptions self.rec_cls = FasterRecognizer self.decoder_options = opt_cls() self.decoder_options.beam = beam self.decoder_options.max_active = cfg.max_active self.decoder_options.beam_delta = cfg.beam_delta self.decoder_options.hash_ratio = cfg.hash_ratio if cfg.is_lattice: self.decoder_options.lattice_beam = cfg.lattice_beam self.decoder_options.prune_interval = cfg.prune_interval self.decoder_options.determinize_lattice = cfg.determinize_lattice self.decoder_options.prune_scale = cfg.prune_scale det_opts = DeterminizeLatticePhonePrunedOptions() det_opts.max_mem = cfg.max_mem det_opts.phone_determinize = cfg.phone_determinize det_opts.word_determinize = cfg.word_determinize det_opts.minimize = cfg.minimize self.decoder_options.det_opts = det_opts self.output_symbols = {} with open(cfg.output_dict, "r") as f: for line in f: items = line.rstrip().split() assert len(items) == 2 self.output_symbols[int(items[1])] = items[0] logger.info(f"Loading FST from {cfg.hlg_graph_path}") self.fst = read_fst_kaldi(cfg.hlg_graph_path) self.symbol_table = SymbolTable.read_text(cfg.output_dict) self.executor = ThreadPoolExecutor(max_workers=cfg.num_threads) def generate(self, models, sample, **unused): """Generate a batch of inferences.""" # model.forward normally channels prev_output_tokens into the decoder # separately, but 
SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens" } emissions, padding = self.get_emissions(models, encoder_input) return self.decode(emissions, padding) def get_emissions(self, models, encoder_input): """Run encoder and normalize emissions""" model = models[0] all_encoder_out = [m(**encoder_input) for m in models] if len(all_encoder_out) > 1: if "encoder_out" in all_encoder_out[0]: encoder_out = { "encoder_out": sum(e["encoder_out"] for e in all_encoder_out) / len(all_encoder_out), "encoder_padding_mask": all_encoder_out[0]["encoder_padding_mask"], } padding = encoder_out["encoder_padding_mask"] else: encoder_out = { "logits": sum(e["logits"] for e in all_encoder_out) / len(all_encoder_out), "padding_mask": all_encoder_out[0]["padding_mask"], } padding = encoder_out["padding_mask"] else: encoder_out = all_encoder_out[0] padding = ( encoder_out["padding_mask"] if "padding_mask" in encoder_out else encoder_out["encoder_padding_mask"] ) if hasattr(model, "get_logits"): emissions = model.get_logits(encoder_out, normalize=True) else: emissions = model.get_normalized_probs(encoder_out, log_probs=True) return ( emissions.cpu().float().transpose(0, 1), padding.cpu() if padding is not None and padding.any() else None, ) def decode_one(self, logits, padding): from kaldi.matrix import Matrix decoder = self.dec_cls(self.fst, self.decoder_options) asr = self.rec_cls( decoder, self.symbol_table, acoustic_scale=self.acoustic_scale ) if padding is not None: logits = logits[~padding] mat = Matrix(logits.numpy()) out = asr.decode(mat) if self.nbest > 1: from kaldi.fstext import shortestpath from kaldi.fstext.utils import ( convert_compact_lattice_to_lattice, convert_lattice_to_std, convert_nbest_to_list, get_linear_symbol_sequence, ) lat = out["lattice"] sp = shortestpath(lat, nshortest=self.nbest) sp = convert_compact_lattice_to_lattice(sp) sp = convert_lattice_to_std(sp) seq = convert_nbest_to_list(sp) results = [] for s in seq: _, o, w = get_linear_symbol_sequence(s) words = list(self.output_symbols[z] for z in o) results.append( { "tokens": words, "words": words, "score": w.value, "emissions": logits, } ) return results else: words = out["text"].split() return [ { "tokens": words, "words": words, "score": out["likelihood"], "emissions": logits, } ] def decode(self, emissions, padding): if padding is None: padding = [None] * len(emissions) ret = list( map( lambda e, p: self.executor.submit(self.decode_one, e, p), emissions, padding, ) ) return ret
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/kaldi/kaldi_decoder.py
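KaldiDecoder.decode does not return hypotheses directly: it submits one decode_one call per utterance to its ThreadPoolExecutor and returns the resulting concurrent.futures.Future objects, so the caller resolves them explicitly. A hypothetical usage sketch is below; it assumes pykaldi is installed and an HLG graph has already been built, and the paths, models, and sample objects are placeholders rather than values taken from this file.

cfg = KaldiDecoderConfig(
    hlg_graph_path="/path/to/HLG.fst",       # placeholder path
    output_dict="/path/to/kaldi_dict.txt",   # placeholder path
)
decoder = KaldiDecoder(cfg, beam=15)

# models: list of acoustic models, sample: a fairseq batch with "net_input"
futures = decoder.generate(models, sample)   # list of Future objects
hypos = [f.result() for f in futures]        # one list of result dicts per utterance
best = hypos[0][0]                           # top hypothesis for the first utterance
print(best["words"], best["score"])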
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function, unicode_literals import argparse import concurrent.futures import json import multiprocessing import os from collections import namedtuple from itertools import chain import sentencepiece as spm from fairseq.data import Dictionary MILLISECONDS_TO_SECONDS = 0.001 def process_sample(aud_path, lable, utt_id, sp, tgt_dict): import torchaudio input = {} output = {} si, ei = torchaudio.info(aud_path) input["length_ms"] = int( si.length / si.channels / si.rate / MILLISECONDS_TO_SECONDS ) input["path"] = aud_path token = " ".join(sp.EncodeAsPieces(lable)) ids = tgt_dict.encode_line(token, append_eos=False) output["text"] = lable output["token"] = token output["tokenid"] = ", ".join(map(str, [t.tolist() for t in ids])) return {utt_id: {"input": input, "output": output}} def main(): parser = argparse.ArgumentParser() parser.add_argument( "--audio-dirs", nargs="+", default=["-"], required=True, help="input directories with audio files", ) parser.add_argument( "--labels", required=True, help="aggregated input labels with format <ID LABEL> per line", type=argparse.FileType("r", encoding="UTF-8"), ) parser.add_argument( "--spm-model", required=True, help="sentencepiece model to use for encoding", type=argparse.FileType("r", encoding="UTF-8"), ) parser.add_argument( "--dictionary", required=True, help="file to load fairseq dictionary from", type=argparse.FileType("r", encoding="UTF-8"), ) parser.add_argument("--audio-format", choices=["flac", "wav"], default="wav") parser.add_argument( "--output", required=True, type=argparse.FileType("w"), help="path to save json output", ) args = parser.parse_args() sp = spm.SentencePieceProcessor() sp.Load(args.spm_model.name) tgt_dict = Dictionary.load(args.dictionary) labels = {} for line in args.labels: (utt_id, label) = line.split(" ", 1) labels[utt_id] = label if len(labels) == 0: raise Exception("No labels found in ", args.labels_path) Sample = namedtuple("Sample", "aud_path utt_id") samples = [] for path, _, files in chain.from_iterable( os.walk(path) for path in args.audio_dirs ): for f in files: if f.endswith(args.audio_format): if len(os.path.splitext(f)) != 2: raise Exception("Expect <utt_id.extension> file name. Got: ", f) utt_id = os.path.splitext(f)[0] if utt_id not in labels: continue samples.append(Sample(os.path.join(path, f), utt_id)) utts = {} num_cpu = multiprocessing.cpu_count() with concurrent.futures.ThreadPoolExecutor(max_workers=num_cpu) as executor: future_to_sample = { executor.submit( process_sample, s.aud_path, labels[s.utt_id], s.utt_id, sp, tgt_dict ): s for s in samples } for future in concurrent.futures.as_completed(future_to_sample): try: data = future.result() except Exception as exc: print("generated an exception: ", exc) else: utts.update(data) json.dump({"utts": utts}, args.output, indent=4) if __name__ == "__main__": main()
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/datasets/asr_prep_json.py
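For reference, the script above writes a single JSON manifest keyed by utterance id, with one "input"/"output" pair per utterance as built in process_sample. The snippet below shows the rough shape of that file; the utterance id, path, duration, transcript, and token ids are made-up example values.

{
    "utts": {
        "utt_0001": {
            "input": {
                "length_ms": 2030,
                "path": "/data/audio/utt_0001.wav",
            },
            "output": {
                "text": "hello world",
                "token": "▁hello ▁world",
                "tokenid": "42, 137",
            },
        }
    }
}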
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function, unicode_literals import re from collections import deque from enum import Enum import numpy as np """ Utility modules for computation of Word Error Rate, Alignments, as well as more granular metrics like deletion, insersion and substitutions. """ class Code(Enum): match = 1 substitution = 2 insertion = 3 deletion = 4 class Token(object): def __init__(self, lbl="", st=np.nan, en=np.nan): if np.isnan(st): self.label, self.start, self.end = "", 0.0, 0.0 else: self.label, self.start, self.end = lbl, st, en class AlignmentResult(object): def __init__(self, refs, hyps, codes, score): self.refs = refs # std::deque<int> self.hyps = hyps # std::deque<int> self.codes = codes # std::deque<Code> self.score = score # float def coordinate_to_offset(row, col, ncols): return int(row * ncols + col) def offset_to_row(offset, ncols): return int(offset / ncols) def offset_to_col(offset, ncols): return int(offset % ncols) def trimWhitespace(str): return re.sub(" +", " ", re.sub(" *$", "", re.sub("^ *", "", str))) def str2toks(str): pieces = trimWhitespace(str).split(" ") toks = [] for p in pieces: toks.append(Token(p, 0.0, 0.0)) return toks class EditDistance(object): def __init__(self, time_mediated): self.time_mediated_ = time_mediated self.scores_ = np.nan # Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic> self.backtraces_ = ( np.nan ) # Eigen::Matrix<size_t, Eigen::Dynamic, Eigen::Dynamic> backtraces_; self.confusion_pairs_ = {} def cost(self, ref, hyp, code): if self.time_mediated_: if code == Code.match: return abs(ref.start - hyp.start) + abs(ref.end - hyp.end) elif code == Code.insertion: return hyp.end - hyp.start elif code == Code.deletion: return ref.end - ref.start else: # substitution return abs(ref.start - hyp.start) + abs(ref.end - hyp.end) + 0.1 else: if code == Code.match: return 0 elif code == Code.insertion or code == Code.deletion: return 3 else: # substitution return 4 def get_result(self, refs, hyps): res = AlignmentResult(refs=deque(), hyps=deque(), codes=deque(), score=np.nan) num_rows, num_cols = self.scores_.shape res.score = self.scores_[num_rows - 1, num_cols - 1] curr_offset = coordinate_to_offset(num_rows - 1, num_cols - 1, num_cols) while curr_offset != 0: curr_row = offset_to_row(curr_offset, num_cols) curr_col = offset_to_col(curr_offset, num_cols) prev_offset = self.backtraces_[curr_row, curr_col] prev_row = offset_to_row(prev_offset, num_cols) prev_col = offset_to_col(prev_offset, num_cols) res.refs.appendleft(curr_row - 1) # Note: this was .push_front() in C++ res.hyps.appendleft(curr_col - 1) if curr_row - 1 == prev_row and curr_col == prev_col: res.codes.appendleft(Code.deletion) elif curr_row == prev_row and curr_col - 1 == prev_col: res.codes.appendleft(Code.insertion) else: # assert(curr_row - 1 == prev_row and curr_col - 1 == prev_col) ref_str = refs[res.refs[0]].label hyp_str = hyps[res.hyps[0]].label if ref_str == hyp_str: res.codes.appendleft(Code.match) else: res.codes.appendleft(Code.substitution) confusion_pair = "%s -> %s" % (ref_str, hyp_str) if confusion_pair not in self.confusion_pairs_: self.confusion_pairs_[confusion_pair] = 1 else: self.confusion_pairs_[confusion_pair] += 1 curr_offset = prev_offset return res def align(self, refs, hyps): if len(refs) == 0 and len(hyps) == 0: return np.nan # 
NOTE: we're not resetting the values in these matrices because every value # will be overridden in the loop below. If this assumption doesn't hold, # be sure to set all entries in self.scores_ and self.backtraces_ to 0. self.scores_ = np.zeros((len(refs) + 1, len(hyps) + 1)) self.backtraces_ = np.zeros((len(refs) + 1, len(hyps) + 1)) num_rows, num_cols = self.scores_.shape for i in range(num_rows): for j in range(num_cols): if i == 0 and j == 0: self.scores_[i, j] = 0.0 self.backtraces_[i, j] = 0 continue if i == 0: self.scores_[i, j] = self.scores_[i, j - 1] + self.cost( None, hyps[j - 1], Code.insertion ) self.backtraces_[i, j] = coordinate_to_offset(i, j - 1, num_cols) continue if j == 0: self.scores_[i, j] = self.scores_[i - 1, j] + self.cost( refs[i - 1], None, Code.deletion ) self.backtraces_[i, j] = coordinate_to_offset(i - 1, j, num_cols) continue # Below here both i and j are greater than 0 ref = refs[i - 1] hyp = hyps[j - 1] best_score = self.scores_[i - 1, j - 1] + ( self.cost(ref, hyp, Code.match) if (ref.label == hyp.label) else self.cost(ref, hyp, Code.substitution) ) prev_row = i - 1 prev_col = j - 1 ins = self.scores_[i, j - 1] + self.cost(None, hyp, Code.insertion) if ins < best_score: best_score = ins prev_row = i prev_col = j - 1 delt = self.scores_[i - 1, j] + self.cost(ref, None, Code.deletion) if delt < best_score: best_score = delt prev_row = i - 1 prev_col = j self.scores_[i, j] = best_score self.backtraces_[i, j] = coordinate_to_offset( prev_row, prev_col, num_cols ) return self.get_result(refs, hyps) class WERTransformer(object): def __init__(self, hyp_str, ref_str, verbose=True): self.ed_ = EditDistance(False) self.id2oracle_errs_ = {} self.utts_ = 0 self.words_ = 0 self.insertions_ = 0 self.deletions_ = 0 self.substitutions_ = 0 self.process(["dummy_str", hyp_str, ref_str]) if verbose: print("'%s' vs '%s'" % (hyp_str, ref_str)) self.report_result() def process(self, input): # std::vector<std::string>&& input if len(input) < 3: print( "Input must be of the form <id> ... 
<hypo> <ref> , got ", len(input), " inputs:", ) return None # Align # std::vector<Token> hyps; # std::vector<Token> refs; hyps = str2toks(input[-2]) refs = str2toks(input[-1]) alignment = self.ed_.align(refs, hyps) if alignment is None: print("Alignment is null") return np.nan # Tally errors ins = 0 dels = 0 subs = 0 for code in alignment.codes: if code == Code.substitution: subs += 1 elif code == Code.insertion: ins += 1 elif code == Code.deletion: dels += 1 # Output row = input row.append(str(len(refs))) row.append(str(ins)) row.append(str(dels)) row.append(str(subs)) # print(row) # Accumulate kIdIndex = 0 kNBestSep = "/" pieces = input[kIdIndex].split(kNBestSep) if len(pieces) == 0: print( "Error splitting ", input[kIdIndex], " on '", kNBestSep, "', got empty list", ) return np.nan id = pieces[0] if id not in self.id2oracle_errs_: self.utts_ += 1 self.words_ += len(refs) self.insertions_ += ins self.deletions_ += dels self.substitutions_ += subs self.id2oracle_errs_[id] = [ins, dels, subs] else: curr_err = ins + dels + subs prev_err = np.sum(self.id2oracle_errs_[id]) if curr_err < prev_err: self.id2oracle_errs_[id] = [ins, dels, subs] return 0 def report_result(self): # print("---------- Summary ---------------") if self.words_ == 0: print("No words counted") return # 1-best best_wer = ( 100.0 * (self.insertions_ + self.deletions_ + self.substitutions_) / self.words_ ) print( "\tWER = %0.2f%% (%i utts, %i words, %0.2f%% ins, " "%0.2f%% dels, %0.2f%% subs)" % ( best_wer, self.utts_, self.words_, 100.0 * self.insertions_ / self.words_, 100.0 * self.deletions_ / self.words_, 100.0 * self.substitutions_ / self.words_, ) ) def wer(self): if self.words_ == 0: wer = np.nan else: wer = ( 100.0 * (self.insertions_ + self.deletions_ + self.substitutions_) / self.words_ ) return wer def stats(self): if self.words_ == 0: stats = {} else: wer = ( 100.0 * (self.insertions_ + self.deletions_ + self.substitutions_) / self.words_ ) stats = dict( { "wer": wer, "utts": self.utts_, "numwords": self.words_, "ins": self.insertions_, "dels": self.deletions_, "subs": self.substitutions_, "confusion_pairs": self.ed_.confusion_pairs_, } ) return stats def calc_wer(hyp_str, ref_str): t = WERTransformer(hyp_str, ref_str, verbose=0) return t.wer() def calc_wer_stats(hyp_str, ref_str): t = WERTransformer(hyp_str, ref_str, verbose=0) return t.stats() def get_wer_alignment_codes(hyp_str, ref_str): """ INPUT: hypothesis string, reference string OUTPUT: List of alignment codes (intermediate results from WER computation) """ t = WERTransformer(hyp_str, ref_str, verbose=0) return t.ed_.align(str2toks(ref_str), str2toks(hyp_str)).codes def merge_counts(x, y): # Merge two hashes which have 'counts' as their values # This can be used for example to merge confusion pair counts # conf_pairs = merge_counts(conf_pairs, stats['confusion_pairs']) for k, v in y.items(): if k not in x: x[k] = 0 x[k] += v return x
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/utils/wer_utils.py
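calc_wer and calc_wer_stats are the intended entry points of the module above. A small worked example with made-up strings: the hypothesis below has one substituted word and one inserted word against a 4-word reference, so WER = (1 substitution + 1 insertion) / 4 words = 50%.

ref = "the big cat sat"
hyp = "the bag very cat sat"

print(calc_wer(hyp, ref))                           # 50.0
stats = calc_wer_stats(hyp, ref)
print(stats["ins"], stats["dels"], stats["subs"])   # 1 0 1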
import importlib
import os

for file in sorted(os.listdir(os.path.dirname(__file__))):
    if file.endswith(".py") and not file.startswith("_"):
        model_name = file[: file.find(".py")]
        importlib.import_module("examples.speech_recognition.models." + model_name)
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/models/__init__.py
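Because of the import loop in this __init__.py, any module dropped into examples/speech_recognition/models is imported automatically, which is what makes the @register_model / @register_model_architecture decorators in files like vggtransformer.py (below) run at startup. A hypothetical minimal model file, sketched along the same lines as the encoder-only model in vggtransformer.py, would only need to define and register itself; every name here is made up.

# my_toy_model.py -- hypothetical file; dropping it in this directory is enough
# for the import loop above to register it with fairseq.
import torch.nn as nn

from fairseq.models import (
    FairseqEncoder,
    FairseqEncoderModel,
    register_model,
    register_model_architecture,
)


class ToyEncoder(FairseqEncoder):
    def __init__(self, input_dim, vocab_size):
        super().__init__(None)
        self.proj = nn.Linear(input_dim, vocab_size)

    def forward(self, src_tokens, src_lengths, **kwargs):
        # src_tokens: (B, T, input_dim) acoustic features
        x = self.proj(src_tokens).transpose(0, 1)  # (T, B, vocab)
        return {"encoder_out": x, "encoder_padding_mask": None}


@register_model("toy_asr_encoder")
class ToyASRModel(FairseqEncoderModel):
    @staticmethod
    def add_args(parser):
        parser.add_argument("--input-feat-per-channel", type=int, metavar="N")

    @classmethod
    def build_model(cls, args, task):
        toy_asr_base(args)
        return cls(
            ToyEncoder(args.input_feat_per_channel, len(task.target_dictionary))
        )


@register_model_architecture("toy_asr_encoder", "toy_asr_base")
def toy_asr_base(args):
    args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80)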
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import math from collections.abc import Iterable import torch import torch.nn as nn from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqEncoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture, ) from fairseq.modules import ( LinearizedConvolution, TransformerDecoderLayer, TransformerEncoderLayer, VGGBlock, ) @register_model("asr_vggtransformer") class VGGTransformerModel(FairseqEncoderDecoderModel): """ Transformers with convolutional context for ASR https://arxiv.org/abs/1904.11660 """ def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--input-feat-per-channel", type=int, metavar="N", help="encoder input dimension per input channel", ) parser.add_argument( "--vggblock-enc-config", type=str, metavar="EXPR", help=""" an array of tuples each containing the configuration of one vggblock: [(out_channels, conv_kernel_size, pooling_kernel_size, num_conv_layers, use_layer_norm), ...]) """, ) parser.add_argument( "--transformer-enc-config", type=str, metavar="EXPR", help="""" a tuple containing the configuration of the encoder transformer layers configurations: [(input_dim, num_heads, ffn_dim, normalize_before, dropout, attention_dropout, relu_dropout), ...]') """, ) parser.add_argument( "--enc-output-dim", type=int, metavar="N", help=""" encoder output dimension, can be None. If specified, projecting the transformer output to the specified dimension""", ) parser.add_argument( "--in-channels", type=int, metavar="N", help="number of encoder input channels", ) parser.add_argument( "--tgt-embed-dim", type=int, metavar="N", help="embedding dimension of the decoder target tokens", ) parser.add_argument( "--transformer-dec-config", type=str, metavar="EXPR", help=""" a tuple containing the configuration of the decoder transformer layers configurations: [(input_dim, num_heads, ffn_dim, normalize_before, dropout, attention_dropout, relu_dropout), ...] 
""", ) parser.add_argument( "--conv-dec-config", type=str, metavar="EXPR", help=""" an array of tuples for the decoder 1-D convolution config [(out_channels, conv_kernel_size, use_layer_norm), ...]""", ) @classmethod def build_encoder(cls, args, task): return VGGTransformerEncoder( input_feat_per_channel=args.input_feat_per_channel, vggblock_config=eval(args.vggblock_enc_config), transformer_config=eval(args.transformer_enc_config), encoder_output_dim=args.enc_output_dim, in_channels=args.in_channels, ) @classmethod def build_decoder(cls, args, task): return TransformerDecoder( dictionary=task.target_dictionary, embed_dim=args.tgt_embed_dim, transformer_config=eval(args.transformer_dec_config), conv_config=eval(args.conv_dec_config), encoder_output_dim=args.enc_output_dim, ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure that all args are properly defaulted # (in case there are any new ones) base_architecture(args) encoder = cls.build_encoder(args, task) decoder = cls.build_decoder(args, task) return cls(encoder, decoder) def get_normalized_probs(self, net_output, log_probs, sample=None): # net_output['encoder_out'] is a (B, T, D) tensor lprobs = super().get_normalized_probs(net_output, log_probs, sample) lprobs.batch_first = True return lprobs DEFAULT_ENC_VGGBLOCK_CONFIG = ((32, 3, 2, 2, False),) * 2 DEFAULT_ENC_TRANSFORMER_CONFIG = ((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2 # 256: embedding dimension # 4: number of heads # 1024: FFN # True: apply layerNorm before (dropout + resiaul) instead of after # 0.2 (dropout): dropout after MultiheadAttention and second FC # 0.2 (attention_dropout): dropout in MultiheadAttention # 0.2 (relu_dropout): dropout after ReLu DEFAULT_DEC_TRANSFORMER_CONFIG = ((256, 2, 1024, True, 0.2, 0.2, 0.2),) * 2 DEFAULT_DEC_CONV_CONFIG = ((256, 3, True),) * 2 # TODO: repace transformer encoder config from one liner # to explicit args to get rid of this transformation def prepare_transformer_encoder_params( input_dim, num_heads, ffn_dim, normalize_before, dropout, attention_dropout, relu_dropout, ): args = argparse.Namespace() args.encoder_embed_dim = input_dim args.encoder_attention_heads = num_heads args.attention_dropout = attention_dropout args.dropout = dropout args.activation_dropout = relu_dropout args.encoder_normalize_before = normalize_before args.encoder_ffn_embed_dim = ffn_dim return args def prepare_transformer_decoder_params( input_dim, num_heads, ffn_dim, normalize_before, dropout, attention_dropout, relu_dropout, ): args = argparse.Namespace() args.encoder_embed_dim = None args.decoder_embed_dim = input_dim args.decoder_attention_heads = num_heads args.attention_dropout = attention_dropout args.dropout = dropout args.activation_dropout = relu_dropout args.decoder_normalize_before = normalize_before args.decoder_ffn_embed_dim = ffn_dim return args class VGGTransformerEncoder(FairseqEncoder): """VGG + Transformer encoder""" def __init__( self, input_feat_per_channel, vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG, transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG, encoder_output_dim=512, in_channels=1, transformer_context=None, transformer_sampling=None, ): """constructor for VGGTransformerEncoder Args: - input_feat_per_channel: feature dim (not including stacked, just base feature) - in_channel: # input channels (e.g., if stack 8 feature vector together, this is 8) - vggblock_config: configuration of vggblock, see comments on DEFAULT_ENC_VGGBLOCK_CONFIG - transformer_config: configuration of transformer layer, 
see comments on DEFAULT_ENC_TRANSFORMER_CONFIG - encoder_output_dim: final transformer output embedding dimension - transformer_context: (left, right) if set, self-attention will be focused on (t-left, t+right) - transformer_sampling: an iterable of int, must match with len(transformer_config), transformer_sampling[i] indicates sampling factor for i-th transformer layer, after multihead att and feedfoward part """ super().__init__(None) self.num_vggblocks = 0 if vggblock_config is not None: if not isinstance(vggblock_config, Iterable): raise ValueError("vggblock_config is not iterable") self.num_vggblocks = len(vggblock_config) self.conv_layers = nn.ModuleList() self.in_channels = in_channels self.input_dim = input_feat_per_channel self.pooling_kernel_sizes = [] if vggblock_config is not None: for _, config in enumerate(vggblock_config): ( out_channels, conv_kernel_size, pooling_kernel_size, num_conv_layers, layer_norm, ) = config self.conv_layers.append( VGGBlock( in_channels, out_channels, conv_kernel_size, pooling_kernel_size, num_conv_layers, input_dim=input_feat_per_channel, layer_norm=layer_norm, ) ) self.pooling_kernel_sizes.append(pooling_kernel_size) in_channels = out_channels input_feat_per_channel = self.conv_layers[-1].output_dim transformer_input_dim = self.infer_conv_output_dim( self.in_channels, self.input_dim ) # transformer_input_dim is the output dimension of VGG part self.validate_transformer_config(transformer_config) self.transformer_context = self.parse_transformer_context(transformer_context) self.transformer_sampling = self.parse_transformer_sampling( transformer_sampling, len(transformer_config) ) self.transformer_layers = nn.ModuleList() if transformer_input_dim != transformer_config[0][0]: self.transformer_layers.append( Linear(transformer_input_dim, transformer_config[0][0]) ) self.transformer_layers.append( TransformerEncoderLayer( prepare_transformer_encoder_params(*transformer_config[0]) ) ) for i in range(1, len(transformer_config)): if transformer_config[i - 1][0] != transformer_config[i][0]: self.transformer_layers.append( Linear(transformer_config[i - 1][0], transformer_config[i][0]) ) self.transformer_layers.append( TransformerEncoderLayer( prepare_transformer_encoder_params(*transformer_config[i]) ) ) self.encoder_output_dim = encoder_output_dim self.transformer_layers.extend( [ Linear(transformer_config[-1][0], encoder_output_dim), LayerNorm(encoder_output_dim), ] ) def forward(self, src_tokens, src_lengths, **kwargs): """ src_tokens: padded tensor (B, T, C * feat) src_lengths: tensor of original lengths of input utterances (B,) """ bsz, max_seq_len, _ = src_tokens.size() x = src_tokens.view(bsz, max_seq_len, self.in_channels, self.input_dim) x = x.transpose(1, 2).contiguous() # (B, C, T, feat) for layer_idx in range(len(self.conv_layers)): x = self.conv_layers[layer_idx](x) bsz, _, output_seq_len, _ = x.size() # (B, C, T, feat) -> (B, T, C, feat) -> (T, B, C, feat) -> (T, B, C * feat) x = x.transpose(1, 2).transpose(0, 1) x = x.contiguous().view(output_seq_len, bsz, -1) input_lengths = src_lengths.clone() for s in self.pooling_kernel_sizes: input_lengths = (input_lengths.float() / s).ceil().long() encoder_padding_mask, _ = lengths_to_encoder_padding_mask( input_lengths, batch_first=True ) if not encoder_padding_mask.any(): encoder_padding_mask = None subsampling_factor = int(max_seq_len * 1.0 / output_seq_len + 0.5) attn_mask = self.lengths_to_attn_mask(input_lengths, subsampling_factor) transformer_layer_idx = 0 for layer_idx in 
range(len(self.transformer_layers)): if isinstance(self.transformer_layers[layer_idx], TransformerEncoderLayer): x = self.transformer_layers[layer_idx]( x, encoder_padding_mask, attn_mask ) if self.transformer_sampling[transformer_layer_idx] != 1: sampling_factor = self.transformer_sampling[transformer_layer_idx] x, encoder_padding_mask, attn_mask = self.slice( x, encoder_padding_mask, attn_mask, sampling_factor ) transformer_layer_idx += 1 else: x = self.transformer_layers[layer_idx](x) # encoder_padding_maks is a (T x B) tensor, its [t, b] elements indicate # whether encoder_output[t, b] is valid or not (valid=0, invalid=1) return { "encoder_out": x, # (T, B, C) "encoder_padding_mask": encoder_padding_mask.t() if encoder_padding_mask is not None else None, # (B, T) --> (T, B) } def infer_conv_output_dim(self, in_channels, input_dim): sample_seq_len = 200 sample_bsz = 10 x = torch.randn(sample_bsz, in_channels, sample_seq_len, input_dim) for i, _ in enumerate(self.conv_layers): x = self.conv_layers[i](x) x = x.transpose(1, 2) mb, seq = x.size()[:2] return x.contiguous().view(mb, seq, -1).size(-1) def validate_transformer_config(self, transformer_config): for config in transformer_config: input_dim, num_heads = config[:2] if input_dim % num_heads != 0: msg = ( "ERROR in transformer config {}: ".format(config) + "input dimension {} ".format(input_dim) + "not dividable by number of heads {}".format(num_heads) ) raise ValueError(msg) def parse_transformer_context(self, transformer_context): """ transformer_context can be the following: - None; indicates no context is used, i.e., transformer can access full context - a tuple/list of two int; indicates left and right context, any number <0 indicates infinite context * e.g., (5, 6) indicates that for query at x_t, transformer can access [t-5, t+6] (inclusive) * e.g., (-1, 6) indicates that for query at x_t, transformer can access [0, t+6] (inclusive) """ if transformer_context is None: return None if not isinstance(transformer_context, Iterable): raise ValueError("transformer context must be Iterable if it is not None") if len(transformer_context) != 2: raise ValueError("transformer context must have length 2") left_context = transformer_context[0] if left_context < 0: left_context = None right_context = transformer_context[1] if right_context < 0: right_context = None if left_context is None and right_context is None: return None return (left_context, right_context) def parse_transformer_sampling(self, transformer_sampling, num_layers): """ parsing transformer sampling configuration Args: - transformer_sampling, accepted input: * None, indicating no sampling * an Iterable with int (>0) as element - num_layers, expected number of transformer layers, must match with the length of transformer_sampling if it is not None Returns: - A tuple with length num_layers """ if transformer_sampling is None: return (1,) * num_layers if not isinstance(transformer_sampling, Iterable): raise ValueError( "transformer_sampling must be an iterable if it is not None" ) if len(transformer_sampling) != num_layers: raise ValueError( "transformer_sampling {} does not match with the number " "of layers {}".format(transformer_sampling, num_layers) ) for layer, value in enumerate(transformer_sampling): if not isinstance(value, int): raise ValueError("Invalid value in transformer_sampling: ") if value < 1: raise ValueError( "{} layer's subsampling is {}.".format(layer, value) + " This is not allowed! 
" ) return transformer_sampling def slice(self, embedding, padding_mask, attn_mask, sampling_factor): """ embedding is a (T, B, D) tensor padding_mask is a (B, T) tensor or None attn_mask is a (T, T) tensor or None """ embedding = embedding[::sampling_factor, :, :] if padding_mask is not None: padding_mask = padding_mask[:, ::sampling_factor] if attn_mask is not None: attn_mask = attn_mask[::sampling_factor, ::sampling_factor] return embedding, padding_mask, attn_mask def lengths_to_attn_mask(self, input_lengths, subsampling_factor=1): """ create attention mask according to sequence lengths and transformer context Args: - input_lengths: (B, )-shape Int/Long tensor; input_lengths[b] is the length of b-th sequence - subsampling_factor: int * Note that the left_context and right_context is specified in the input frame-level while input to transformer may already go through subsampling (e.g., the use of striding in vggblock) we use subsampling_factor to scale the left/right context Return: - a (T, T) binary tensor or None, where T is max(input_lengths) * if self.transformer_context is None, None * if left_context is None, * attn_mask[t, t + right_context + 1:] = 1 * others = 0 * if right_context is None, * attn_mask[t, 0:t - left_context] = 1 * others = 0 * elsif * attn_mask[t, t - left_context: t + right_context + 1] = 0 * others = 1 """ if self.transformer_context is None: return None maxT = torch.max(input_lengths).item() attn_mask = torch.zeros(maxT, maxT) left_context = self.transformer_context[0] right_context = self.transformer_context[1] if left_context is not None: left_context = math.ceil(self.transformer_context[0] / subsampling_factor) if right_context is not None: right_context = math.ceil(self.transformer_context[1] / subsampling_factor) for t in range(maxT): if left_context is not None: st = 0 en = max(st, t - left_context) attn_mask[t, st:en] = 1 if right_context is not None: st = t + right_context + 1 st = min(st, maxT - 1) attn_mask[t, st:] = 1 return attn_mask.to(input_lengths.device) def reorder_encoder_out(self, encoder_out, new_order): encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( 1, new_order ) if encoder_out["encoder_padding_mask"] is not None: encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(1, new_order) return encoder_out class TransformerDecoder(FairseqIncrementalDecoder): """ Transformer decoder consisting of *args.decoder_layers* layers. Each layer is a :class:`TransformerDecoderLayer`. Args: args (argparse.Namespace): parsed command-line arguments dictionary (~fairseq.data.Dictionary): decoding dictionary embed_tokens (torch.nn.Embedding): output embedding no_encoder_attn (bool, optional): whether to attend to encoder outputs. Default: ``False`` left_pad (bool, optional): whether the input is left-padded. 
Default: ``False`` """ def __init__( self, dictionary, embed_dim=512, transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG, conv_config=DEFAULT_DEC_CONV_CONFIG, encoder_output_dim=512, ): super().__init__(dictionary) vocab_size = len(dictionary) self.padding_idx = dictionary.pad() self.embed_tokens = Embedding(vocab_size, embed_dim, self.padding_idx) self.conv_layers = nn.ModuleList() for i in range(len(conv_config)): out_channels, kernel_size, layer_norm = conv_config[i] if i == 0: conv_layer = LinearizedConv1d( embed_dim, out_channels, kernel_size, padding=kernel_size - 1 ) else: conv_layer = LinearizedConv1d( conv_config[i - 1][0], out_channels, kernel_size, padding=kernel_size - 1, ) self.conv_layers.append(conv_layer) if layer_norm: self.conv_layers.append(nn.LayerNorm(out_channels)) self.conv_layers.append(nn.ReLU()) self.layers = nn.ModuleList() if conv_config[-1][0] != transformer_config[0][0]: self.layers.append(Linear(conv_config[-1][0], transformer_config[0][0])) self.layers.append( TransformerDecoderLayer( prepare_transformer_decoder_params(*transformer_config[0]) ) ) for i in range(1, len(transformer_config)): if transformer_config[i - 1][0] != transformer_config[i][0]: self.layers.append( Linear(transformer_config[i - 1][0], transformer_config[i][0]) ) self.layers.append( TransformerDecoderLayer( prepare_transformer_decoder_params(*transformer_config[i]) ) ) self.fc_out = Linear(transformer_config[-1][0], vocab_size) def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for input feeding/teacher forcing encoder_out (Tensor, optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` Returns: tuple: - the last decoder layer's output of shape `(batch, tgt_len, vocab)` - the last decoder layer's attention weights of shape `(batch, tgt_len, src_len)` """ target_padding_mask = ( (prev_output_tokens == self.padding_idx).to(prev_output_tokens.device) if incremental_state is None else None ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] # embed tokens x = self.embed_tokens(prev_output_tokens) # B x T x C -> T x B x C x = self._transpose_if_training(x, incremental_state) for layer in self.conv_layers: if isinstance(layer, LinearizedConvolution): x = layer(x, incremental_state) else: x = layer(x) # B x T x C -> T x B x C x = self._transpose_if_inference(x, incremental_state) # decoder layers for layer in self.layers: if isinstance(layer, TransformerDecoderLayer): x, *_ = layer( x, (encoder_out["encoder_out"] if encoder_out is not None else None), ( encoder_out["encoder_padding_mask"].t() if encoder_out["encoder_padding_mask"] is not None else None ), incremental_state, self_attn_mask=( self.buffered_future_mask(x) if incremental_state is None else None ), self_attn_padding_mask=( target_padding_mask if incremental_state is None else None ), ) else: x = layer(x) # T x B x C -> B x T x C x = x.transpose(0, 1) x = self.fc_out(x) return x, None def buffered_future_mask(self, tensor): dim = tensor.size(0) if ( not hasattr(self, "_future_mask") or self._future_mask is None or self._future_mask.device != tensor.device ): self._future_mask = torch.triu( utils.fill_with_neg_inf(tensor.new(dim, dim)), 1 ) if self._future_mask.size(0) < dim: self._future_mask = torch.triu( utils.fill_with_neg_inf(self._future_mask.resize_(dim, dim)), 1 ) 
return self._future_mask[:dim, :dim] def _transpose_if_training(self, x, incremental_state): if incremental_state is None: x = x.transpose(0, 1) return x def _transpose_if_inference(self, x, incremental_state): if incremental_state: x = x.transpose(0, 1) return x @register_model("asr_vggtransformer_encoder") class VGGTransformerEncoderModel(FairseqEncoderModel): def __init__(self, encoder): super().__init__(encoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--input-feat-per-channel", type=int, metavar="N", help="encoder input dimension per input channel", ) parser.add_argument( "--vggblock-enc-config", type=str, metavar="EXPR", help=""" an array of tuples each containing the configuration of one vggblock [(out_channels, conv_kernel_size, pooling_kernel_size,num_conv_layers), ...] """, ) parser.add_argument( "--transformer-enc-config", type=str, metavar="EXPR", help=""" a tuple containing the configuration of the Transformer layers configurations: [(input_dim, num_heads, ffn_dim, normalize_before, dropout, attention_dropout, relu_dropout), ]""", ) parser.add_argument( "--enc-output-dim", type=int, metavar="N", help="encoder output dimension, projecting the LSTM output", ) parser.add_argument( "--in-channels", type=int, metavar="N", help="number of encoder input channels", ) parser.add_argument( "--transformer-context", type=str, metavar="EXPR", help=""" either None or a tuple of two ints, indicating left/right context a transformer can have access to""", ) parser.add_argument( "--transformer-sampling", type=str, metavar="EXPR", help=""" either None or a tuple of ints, indicating sampling factor in each layer""", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" base_architecture_enconly(args) encoder = VGGTransformerEncoderOnly( vocab_size=len(task.target_dictionary), input_feat_per_channel=args.input_feat_per_channel, vggblock_config=eval(args.vggblock_enc_config), transformer_config=eval(args.transformer_enc_config), encoder_output_dim=args.enc_output_dim, in_channels=args.in_channels, transformer_context=eval(args.transformer_context), transformer_sampling=eval(args.transformer_sampling), ) return cls(encoder) def get_normalized_probs(self, net_output, log_probs, sample=None): # net_output['encoder_out'] is a (T, B, D) tensor lprobs = super().get_normalized_probs(net_output, log_probs, sample) # lprobs is a (T, B, D) tensor # we need to transoose to get (B, T, D) tensor lprobs = lprobs.transpose(0, 1).contiguous() lprobs.batch_first = True return lprobs class VGGTransformerEncoderOnly(VGGTransformerEncoder): def __init__( self, vocab_size, input_feat_per_channel, vggblock_config=DEFAULT_ENC_VGGBLOCK_CONFIG, transformer_config=DEFAULT_ENC_TRANSFORMER_CONFIG, encoder_output_dim=512, in_channels=1, transformer_context=None, transformer_sampling=None, ): super().__init__( input_feat_per_channel=input_feat_per_channel, vggblock_config=vggblock_config, transformer_config=transformer_config, encoder_output_dim=encoder_output_dim, in_channels=in_channels, transformer_context=transformer_context, transformer_sampling=transformer_sampling, ) self.fc_out = Linear(self.encoder_output_dim, vocab_size) def forward(self, src_tokens, src_lengths, **kwargs): """ src_tokens: padded tensor (B, T, C * feat) src_lengths: tensor of original lengths of input utterances (B,) """ enc_out = super().forward(src_tokens, src_lengths) x = self.fc_out(enc_out["encoder_out"]) # x = F.log_softmax(x, dim=-1) # Note: no need 
this line, because model.get_normalized_prob will call # log_softmax return { "encoder_out": x, # (T, B, C) "encoder_padding_mask": enc_out["encoder_padding_mask"], # (T, B) } def max_positions(self): """Maximum input length supported by the encoder.""" return (1e6, 1e6) # an arbitrary large number def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) # nn.init.uniform_(m.weight, -0.1, 0.1) # nn.init.constant_(m.weight[padding_idx], 0) return m def Linear(in_features, out_features, bias=True, dropout=0): """Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features, bias=bias) # m.weight.data.uniform_(-0.1, 0.1) # if bias: # m.bias.data.uniform_(-0.1, 0.1) return m def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0, **kwargs): """Weight-normalized Conv1d layer optimized for decoding""" m = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs) std = math.sqrt((4 * (1.0 - dropout)) / (m.kernel_size[0] * in_channels)) nn.init.normal_(m.weight, mean=0, std=std) nn.init.constant_(m.bias, 0) return nn.utils.weight_norm(m, dim=2) def LayerNorm(embedding_dim): m = nn.LayerNorm(embedding_dim) return m # seq2seq models def base_architecture(args): args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40) args.vggblock_enc_config = getattr( args, "vggblock_enc_config", DEFAULT_ENC_VGGBLOCK_CONFIG ) args.transformer_enc_config = getattr( args, "transformer_enc_config", DEFAULT_ENC_TRANSFORMER_CONFIG ) args.enc_output_dim = getattr(args, "enc_output_dim", 512) args.in_channels = getattr(args, "in_channels", 1) args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128) args.transformer_dec_config = getattr( args, "transformer_dec_config", DEFAULT_ENC_TRANSFORMER_CONFIG ) args.conv_dec_config = getattr(args, "conv_dec_config", DEFAULT_DEC_CONV_CONFIG) args.transformer_context = getattr(args, "transformer_context", "None") @register_model_architecture("asr_vggtransformer", "vggtransformer_1") def vggtransformer_1(args): args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) args.vggblock_enc_config = getattr( args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]" ) args.transformer_enc_config = getattr( args, "transformer_enc_config", "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 14", ) args.enc_output_dim = getattr(args, "enc_output_dim", 1024) args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 128) args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4") args.transformer_dec_config = getattr( args, "transformer_dec_config", "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 4", ) @register_model_architecture("asr_vggtransformer", "vggtransformer_2") def vggtransformer_2(args): args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) args.vggblock_enc_config = getattr( args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]" ) args.transformer_enc_config = getattr( args, "transformer_enc_config", "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16", ) args.enc_output_dim = getattr(args, "enc_output_dim", 1024) args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512) args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4") args.transformer_dec_config = getattr( args, "transformer_dec_config", "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 6", ) @register_model_architecture("asr_vggtransformer", "vggtransformer_base") def 
vggtransformer_base(args): args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) args.vggblock_enc_config = getattr( args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]" ) args.transformer_enc_config = getattr( args, "transformer_enc_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 12" ) args.enc_output_dim = getattr(args, "enc_output_dim", 512) args.tgt_embed_dim = getattr(args, "tgt_embed_dim", 512) args.conv_dec_config = getattr(args, "conv_dec_config", "((256, 3, True),) * 4") args.transformer_dec_config = getattr( args, "transformer_dec_config", "((512, 8, 2048, True, 0.15, 0.15, 0.15),) * 6" ) # Size estimations: # Encoder: # - vggblock param: 64*1*3*3 + 64*64*3*3 + 128*64*3*3 + 128*128*3 = 258K # Transformer: # - input dimension adapter: 2560 x 512 -> 1.31M # - transformer_layers (x12) --> 37.74M # * MultiheadAttention: 512*512*3 (in_proj) + 512*512 (out_proj) = 1.048M # * FFN weight: 512*2048*2 = 2.097M # - output dimension adapter: 512 x 512 -> 0.26 M # Decoder: # - LinearizedConv1d: 512 * 256 * 3 + 256 * 256 * 3 * 3 # - transformer_layer: (x6) --> 25.16M # * MultiheadAttention (self-attention): 512*512*3 + 512*512 = 1.048M # * MultiheadAttention (encoder-attention): 512*512*3 + 512*512 = 1.048M # * FFN: 512*2048*2 = 2.097M # Final FC: # - FC: 512*5000 = 256K (assuming vocab size 5K) # In total: # ~65 M # CTC models def base_architecture_enconly(args): args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 40) args.vggblock_enc_config = getattr( args, "vggblock_enc_config", "[(32, 3, 2, 2, True)] * 2" ) args.transformer_enc_config = getattr( args, "transformer_enc_config", "((256, 4, 1024, True, 0.2, 0.2, 0.2),) * 2" ) args.enc_output_dim = getattr(args, "enc_output_dim", 512) args.in_channels = getattr(args, "in_channels", 1) args.transformer_context = getattr(args, "transformer_context", "None") args.transformer_sampling = getattr(args, "transformer_sampling", "None") @register_model_architecture("asr_vggtransformer_encoder", "vggtransformer_enc_1") def vggtransformer_enc_1(args): # vggtransformer_1 is the same as vggtransformer_enc_big, except the number # of layers is increased to 16 # keep it here for backward compatiablity purpose args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) args.vggblock_enc_config = getattr( args, "vggblock_enc_config", "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]" ) args.transformer_enc_config = getattr( args, "transformer_enc_config", "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16", ) args.enc_output_dim = getattr(args, "enc_output_dim", 1024)
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/models/vggtransformer.py
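Note: the encoder configuration strings used by the registered architectures above are parsed with eval() inside build_model, so they are ordinary Python literals. A minimal sketch of that convention follows; the fifth element of each vggblock tuple is presumably a layer-norm flag (an assumption -- the --vggblock-enc-config help string only documents four fields).

# Toy parse of the config strings used by vggtransformer_enc_1 above.
vggblock_enc_config = "[(64, 3, 2, 2, True), (128, 3, 2, 2, True)]"
transformer_enc_config = "((1024, 16, 4096, True, 0.15, 0.15, 0.15),) * 16"

vgg_blocks = eval(vggblock_enc_config)             # [(out_channels, conv_kernel, pool_kernel, n_conv_layers, <flag>), ...]
transformer_layers = eval(transformer_enc_config)  # 16 identical (dim, heads, ffn_dim, normalize_before, dropouts...) tuples
assert len(vgg_blocks) == 2 and len(transformer_layers) == 16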
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, register_model, register_model_architecture, ) from fairseq.modules.fairseq_dropout import FairseqDropout default_conv_enc_config = """[ (400, 13, 170, 0.2), (440, 14, 0, 0.214), (484, 15, 0, 0.22898), (532, 16, 0, 0.2450086), (584, 17, 0, 0.262159202), (642, 18, 0, 0.28051034614), (706, 19, 0, 0.30014607037), (776, 20, 0, 0.321156295296), (852, 21, 0, 0.343637235966), (936, 22, 0, 0.367691842484), (1028, 23, 0, 0.393430271458), (1130, 24, 0, 0.42097039046), (1242, 25, 0, 0.450438317792), (1366, 26, 0, 0.481969000038), (1502, 27, 0, 0.51570683004), (1652, 28, 0, 0.551806308143), (1816, 29, 0, 0.590432749713), ]""" @register_model("asr_w2l_conv_glu_encoder") class W2lConvGluEncoderModel(FairseqEncoderModel): def __init__(self, encoder): super().__init__(encoder) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--input-feat-per-channel", type=int, metavar="N", help="encoder input dimension per input channel", ) parser.add_argument( "--in-channels", type=int, metavar="N", help="number of encoder input channels", ) parser.add_argument( "--conv-enc-config", type=str, metavar="EXPR", help=""" an array of tuples each containing the configuration of one conv layer [(out_channels, kernel_size, padding, dropout), ...] """, ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config) encoder = W2lConvGluEncoder( vocab_size=len(task.target_dictionary), input_feat_per_channel=args.input_feat_per_channel, in_channels=args.in_channels, conv_enc_config=eval(conv_enc_config), ) return cls(encoder) def get_normalized_probs(self, net_output, log_probs, sample=None): lprobs = super().get_normalized_probs(net_output, log_probs, sample) lprobs.batch_first = False return lprobs class W2lConvGluEncoder(FairseqEncoder): def __init__( self, vocab_size, input_feat_per_channel, in_channels, conv_enc_config ): super().__init__(None) self.input_dim = input_feat_per_channel if in_channels != 1: raise ValueError("only 1 input channel is currently supported") self.conv_layers = nn.ModuleList() self.linear_layers = nn.ModuleList() self.dropouts = [] cur_channels = input_feat_per_channel for out_channels, kernel_size, padding, dropout in conv_enc_config: layer = nn.Conv1d(cur_channels, out_channels, kernel_size, padding=padding) layer.weight.data.mul_(math.sqrt(3)) # match wav2letter init self.conv_layers.append(nn.utils.weight_norm(layer)) self.dropouts.append( FairseqDropout(dropout, module_name=self.__class__.__name__) ) if out_channels % 2 != 0: raise ValueError("odd # of out_channels is incompatible with GLU") cur_channels = out_channels // 2 # halved by GLU for out_channels in [2 * cur_channels, vocab_size]: layer = nn.Linear(cur_channels, out_channels) layer.weight.data.mul_(math.sqrt(3)) self.linear_layers.append(nn.utils.weight_norm(layer)) cur_channels = out_channels // 2 def forward(self, src_tokens, src_lengths, **kwargs): """ src_tokens: padded tensor (B, T, C * feat) src_lengths: tensor of original lengths of input utterances (B,) """ B, T, _ = src_tokens.size() x = src_tokens.transpose(1, 2).contiguous() # (B, feat, 
T) assuming C == 1 for layer_idx in range(len(self.conv_layers)): x = self.conv_layers[layer_idx](x) x = F.glu(x, dim=1) x = self.dropouts[layer_idx](x) x = x.transpose(1, 2).contiguous() # (B, T, 908) x = self.linear_layers[0](x) x = F.glu(x, dim=2) x = self.dropouts[-1](x) x = self.linear_layers[1](x) assert x.size(0) == B assert x.size(1) == T encoder_out = x.transpose(0, 1) # (T, B, vocab_size) # need to debug this -- find a simpler/elegant way in pytorch APIs encoder_padding_mask = ( torch.arange(T).view(1, T).expand(B, -1).to(x.device) >= src_lengths.view(B, 1).expand(-1, T) ).t() # (B x T) -> (T x B) return { "encoder_out": encoder_out, # (T, B, vocab_size) "encoder_padding_mask": encoder_padding_mask, # (T, B) } def reorder_encoder_out(self, encoder_out, new_order): encoder_out["encoder_out"] = encoder_out["encoder_out"].index_select( 1, new_order ) encoder_out["encoder_padding_mask"] = encoder_out[ "encoder_padding_mask" ].index_select(1, new_order) return encoder_out def max_positions(self): """Maximum input length supported by the encoder.""" return (1e6, 1e6) # an arbitrary large number @register_model_architecture("asr_w2l_conv_glu_encoder", "w2l_conv_glu_enc") def w2l_conv_glu_enc(args): args.input_feat_per_channel = getattr(args, "input_feat_per_channel", 80) args.in_channels = getattr(args, "in_channels", 1) args.conv_enc_config = getattr(args, "conv_enc_config", default_conv_enc_config)
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/models/w2l_conv_glu_enc.py
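Note: each conv layer in W2lConvGluEncoder above is followed by a GLU, which halves the channel dimension -- that is why an odd out_channels is rejected. A runnable sketch using the first entry of default_conv_enc_config (batch size and sequence length are arbitrary toy values):

import torch
import torch.nn as nn
import torch.nn.functional as F

conv = nn.Conv1d(80, 400, kernel_size=13, padding=170)  # (out_channels, kernel, padding) = (400, 13, 170)
x = torch.randn(2, 80, 50)                              # (B, feat, T), feat = input_feat_per_channel
y = F.glu(conv(x), dim=1)                               # GLU halves the channels: 400 -> 200
assert y.size(1) == 200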
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/new/__init__.py
#!/usr/bin/env python -u # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import ast import hashlib import logging import os import shutil import sys from dataclasses import dataclass, field, is_dataclass from pathlib import Path from typing import Any, Dict, List, Optional, Tuple, Union import editdistance import torch import torch.distributed as dist from examples.speech_recognition.new.decoders.decoder_config import ( DecoderConfig, FlashlightDecoderConfig, ) from examples.speech_recognition.new.decoders.decoder import Decoder from fairseq import checkpoint_utils, distributed_utils, progress_bar, tasks, utils from fairseq.data.data_utils import post_process from fairseq.dataclass.configs import ( CheckpointConfig, CommonConfig, CommonEvalConfig, DatasetConfig, DistributedTrainingConfig, FairseqDataclass, ) from fairseq.logging.meters import StopwatchMeter, TimeMeter from fairseq.logging.progress_bar import BaseProgressBar from fairseq.models.fairseq_model import FairseqModel from omegaconf import OmegaConf import hydra from hydra.core.config_store import ConfigStore logging.root.setLevel(logging.INFO) logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) config_path = Path(__file__).resolve().parent / "conf" @dataclass class DecodingConfig(DecoderConfig, FlashlightDecoderConfig): unique_wer_file: bool = field( default=False, metadata={"help": "If set, use a unique file for storing WER"}, ) results_path: Optional[str] = field( default=None, metadata={ "help": "If set, write hypothesis and reference sentences into this directory" }, ) @dataclass class InferConfig(FairseqDataclass): task: Any = None decoding: DecodingConfig = DecodingConfig() common: CommonConfig = CommonConfig() common_eval: CommonEvalConfig = CommonEvalConfig() checkpoint: CheckpointConfig = CheckpointConfig() distributed_training: DistributedTrainingConfig = DistributedTrainingConfig() dataset: DatasetConfig = DatasetConfig() is_ax: bool = field( default=False, metadata={ "help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume" }, ) def reset_logging(): root = logging.getLogger() for handler in root.handlers: root.removeHandler(handler) root.setLevel(os.environ.get("LOGLEVEL", "INFO").upper()) handler = logging.StreamHandler(sys.stdout) handler.setFormatter( logging.Formatter( fmt="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", ) ) root.addHandler(handler) class InferenceProcessor: cfg: InferConfig def __init__(self, cfg: InferConfig) -> None: self.cfg = cfg self.task = tasks.setup_task(cfg.task) models, saved_cfg = self.load_model_ensemble() self.models = models self.saved_cfg = saved_cfg self.tgt_dict = self.task.target_dictionary self.task.load_dataset( self.cfg.dataset.gen_subset, task_cfg=saved_cfg.task, ) self.generator = Decoder(cfg.decoding, self.tgt_dict) self.gen_timer = StopwatchMeter() self.wps_meter = TimeMeter() self.num_sentences = 0 self.total_errors = 0 self.total_length = 0 self.hypo_words_file = None self.hypo_units_file = None self.ref_words_file = None self.ref_units_file = None self.progress_bar = self.build_progress_bar() def __enter__(self) -> "InferenceProcessor": if self.cfg.decoding.results_path is not None: self.hypo_words_file = self.get_res_file("hypo.word") self.hypo_units_file = self.get_res_file("hypo.units") self.ref_words_file = self.get_res_file("ref.word") 
self.ref_units_file = self.get_res_file("ref.units") return self def __exit__(self, *exc) -> bool: if self.cfg.decoding.results_path is not None: self.hypo_words_file.close() self.hypo_units_file.close() self.ref_words_file.close() self.ref_units_file.close() return False def __iter__(self) -> Any: for sample in self.progress_bar: if not self.cfg.common.cpu: sample = utils.move_to_cuda(sample) # Happens on the last batch. if "net_input" not in sample: continue yield sample def log(self, *args, **kwargs): self.progress_bar.log(*args, **kwargs) def print(self, *args, **kwargs): self.progress_bar.print(*args, **kwargs) def get_res_file(self, fname: str) -> None: fname = os.path.join(self.cfg.decoding.results_path, fname) if self.data_parallel_world_size > 1: fname = f"{fname}.{self.data_parallel_rank}" return open(fname, "w", buffering=1) def merge_shards(self) -> None: """Merges all shard files into shard 0, then removes shard suffix.""" shard_id = self.data_parallel_rank num_shards = self.data_parallel_world_size if self.data_parallel_world_size > 1: def merge_shards_with_root(fname: str) -> None: fname = os.path.join(self.cfg.decoding.results_path, fname) logger.info("Merging %s on shard %d", fname, shard_id) base_fpath = Path(f"{fname}.0") with open(base_fpath, "a") as out_file: for s in range(1, num_shards): shard_fpath = Path(f"{fname}.{s}") with open(shard_fpath, "r") as in_file: for line in in_file: out_file.write(line) shard_fpath.unlink() shutil.move(f"{fname}.0", fname) dist.barrier() # ensure all shards finished writing if shard_id == (0 % num_shards): merge_shards_with_root("hypo.word") if shard_id == (1 % num_shards): merge_shards_with_root("hypo.units") if shard_id == (2 % num_shards): merge_shards_with_root("ref.word") if shard_id == (3 % num_shards): merge_shards_with_root("ref.units") dist.barrier() def optimize_model(self, model: FairseqModel) -> None: model.make_generation_fast_() if self.cfg.common.fp16: model.half() if not self.cfg.common.cpu: model.cuda() def load_model_ensemble(self) -> Tuple[List[FairseqModel], FairseqDataclass]: arg_overrides = ast.literal_eval(self.cfg.common_eval.model_overrides) models, saved_cfg = checkpoint_utils.load_model_ensemble( utils.split_paths(self.cfg.common_eval.path, separator="\\"), arg_overrides=arg_overrides, task=self.task, suffix=self.cfg.checkpoint.checkpoint_suffix, strict=(self.cfg.checkpoint.checkpoint_shard_count == 1), num_shards=self.cfg.checkpoint.checkpoint_shard_count, ) for model in models: self.optimize_model(model) return models, saved_cfg def get_dataset_itr(self, disable_iterator_cache: bool = False) -> None: return self.task.get_batch_iterator( dataset=self.task.dataset(self.cfg.dataset.gen_subset), max_tokens=self.cfg.dataset.max_tokens, max_sentences=self.cfg.dataset.batch_size, max_positions=(sys.maxsize, sys.maxsize), ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=self.data_parallel_world_size, shard_id=self.data_parallel_rank, num_workers=self.cfg.dataset.num_workers, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache, ).next_epoch_itr(shuffle=False) def build_progress_bar( self, epoch: Optional[int] = None, prefix: Optional[str] = None, default_log_format: str = "tqdm", ) -> BaseProgressBar: return progress_bar.progress_bar( iterator=self.get_dataset_itr(), log_format=self.cfg.common.log_format, 
log_interval=self.cfg.common.log_interval, epoch=epoch, prefix=prefix, tensorboard_logdir=self.cfg.common.tensorboard_logdir, default_log_format=default_log_format, ) @property def data_parallel_world_size(self): if self.cfg.distributed_training.distributed_world_size == 1: return 1 return distributed_utils.get_data_parallel_world_size() @property def data_parallel_rank(self): if self.cfg.distributed_training.distributed_world_size == 1: return 0 return distributed_utils.get_data_parallel_rank() def process_sentence( self, sample: Dict[str, Any], hypo: Dict[str, Any], sid: int, batch_id: int, ) -> Tuple[int, int]: speaker = None # Speaker can't be parsed from dataset. if "target_label" in sample: toks = sample["target_label"] else: toks = sample["target"] toks = toks[batch_id, :] # Processes hypothesis. hyp_pieces = self.tgt_dict.string(hypo["tokens"].int().cpu()) if "words" in hypo: hyp_words = " ".join(hypo["words"]) else: hyp_words = post_process(hyp_pieces, self.cfg.common_eval.post_process) # Processes target. target_tokens = utils.strip_pad(toks, self.tgt_dict.pad()) tgt_pieces = self.tgt_dict.string(target_tokens.int().cpu()) tgt_words = post_process(tgt_pieces, self.cfg.common_eval.post_process) if self.cfg.decoding.results_path is not None: print(f"{hyp_pieces} ({speaker}-{sid})", file=self.hypo_units_file) print(f"{hyp_words} ({speaker}-{sid})", file=self.hypo_words_file) print(f"{tgt_pieces} ({speaker}-{sid})", file=self.ref_units_file) print(f"{tgt_words} ({speaker}-{sid})", file=self.ref_words_file) if not self.cfg.common_eval.quiet: logger.info(f"HYPO: {hyp_words}") logger.info(f"REF: {tgt_words}") logger.info("---------------------") hyp_words, tgt_words = hyp_words.split(), tgt_words.split() return editdistance.eval(hyp_words, tgt_words), len(tgt_words) def process_sample(self, sample: Dict[str, Any]) -> None: self.gen_timer.start() hypos = self.task.inference_step( generator=self.generator, models=self.models, sample=sample, ) num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos) self.gen_timer.stop(num_generated_tokens) self.wps_meter.update(num_generated_tokens) for batch_id, sample_id in enumerate(sample["id"].tolist()): errs, length = self.process_sentence( sample=sample, sid=sample_id, batch_id=batch_id, hypo=hypos[batch_id][0], ) self.total_errors += errs self.total_length += length self.log({"wps": round(self.wps_meter.avg)}) if "nsentences" in sample: self.num_sentences += sample["nsentences"] else: self.num_sentences += sample["id"].numel() def log_generation_time(self) -> None: logger.info( "Processed %d sentences (%d tokens) in %.1fs %.2f " "sentences per second, %.2f tokens per second)", self.num_sentences, self.gen_timer.n, self.gen_timer.sum, self.num_sentences / self.gen_timer.sum, 1.0 / self.gen_timer.avg, ) def parse_wer(wer_file: Path) -> float: with open(wer_file, "r") as f: return float(f.readline().strip().split(" ")[1]) def get_wer_file(cfg: InferConfig) -> Path: """Hashes the decoding parameters to a unique file ID.""" base_path = "wer" if cfg.decoding.results_path is not None: base_path = os.path.join(cfg.decoding.results_path, base_path) if cfg.decoding.unique_wer_file: yaml_str = OmegaConf.to_yaml(cfg.decoding) fid = int(hashlib.md5(yaml_str.encode("utf-8")).hexdigest(), 16) return Path(f"{base_path}.{fid % 1000000}") else: return Path(base_path) def main(cfg: InferConfig) -> float: """Entry point for main processing logic. Args: cfg: The inferance configuration to use. wer: Optional shared memory pointer for returning the WER. 
If not None, the final WER value will be written here instead of being returned. Returns: The final WER if `wer` is None, otherwise None. """ yaml_str, wer_file = OmegaConf.to_yaml(cfg.decoding), get_wer_file(cfg) # Validates the provided configuration. if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None: cfg.dataset.max_tokens = 4000000 if not cfg.common.cpu and not torch.cuda.is_available(): raise ValueError("CUDA not found; set `cpu=True` to run without CUDA") logger.info(cfg.common_eval.path) with InferenceProcessor(cfg) as processor: for sample in processor: processor.process_sample(sample) processor.log_generation_time() if cfg.decoding.results_path is not None: processor.merge_shards() errs_t, leng_t = processor.total_errors, processor.total_length if cfg.common.cpu: logger.warning("Merging WER requires CUDA.") elif processor.data_parallel_world_size > 1: stats = torch.LongTensor([errs_t, leng_t]).cuda() dist.all_reduce(stats, op=dist.ReduceOp.SUM) errs_t, leng_t = stats[0].item(), stats[1].item() wer = errs_t * 100.0 / leng_t if distributed_utils.is_master(cfg.distributed_training): with open(wer_file, "w") as f: f.write( ( f"WER: {wer}\n" f"err / num_ref_words = {errs_t} / {leng_t}\n\n" f"{yaml_str}" ) ) return wer @hydra.main(config_path=config_path, config_name="infer") def hydra_main(cfg: InferConfig) -> Union[float, Tuple[float, Optional[float]]]: container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True) cfg = OmegaConf.create(container) OmegaConf.set_struct(cfg, True) if cfg.common.reset_logging: reset_logging() # logger.info("Config:\n%s", OmegaConf.to_yaml(cfg)) wer = float("inf") try: if cfg.common.profile: with torch.cuda.profiler.profile(): with torch.autograd.profiler.emit_nvtx(): distributed_utils.call_main(cfg, main) else: distributed_utils.call_main(cfg, main) wer = parse_wer(get_wer_file(cfg)) except BaseException as e: # pylint: disable=broad-except if not cfg.common.suppress_crashes: raise else: logger.error("Crashed! %s", str(e)) logger.info("Word error rate: %.4f", wer) if cfg.is_ax: return wer, None return wer def cli_main() -> None: try: from hydra._internal.utils import ( get_args, ) # pylint: disable=import-outside-toplevel cfg_name = get_args().config_name or "infer" except ImportError: logger.warning("Failed to get config name from hydra args") cfg_name = "infer" cs = ConfigStore.instance() cs.store(name=cfg_name, node=InferConfig) for k in InferConfig.__dataclass_fields__: if is_dataclass(InferConfig.__dataclass_fields__[k].type): v = InferConfig.__dataclass_fields__[k].default cs.store(name=k, node=v) hydra_main() # pylint: disable=no-value-for-parameter if __name__ == "__main__": cli_main()
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/new/infer.py
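Note: get_wer_file in infer.py derives a per-configuration suffix by hashing the YAML dump of the decoding config. A small sketch of the same naming scheme, using a toy YAML string in place of OmegaConf.to_yaml(cfg.decoding):

import hashlib

yaml_str = "type: viterbi\nnbest: 1\n"                            # stand-in for the real decoding config dump
fid = int(hashlib.md5(yaml_str.encode("utf-8")).hexdigest(), 16)
wer_file = f"wer.{fid % 1000000}"                                 # stable across runs that share the same decoding config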
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from typing import Union

from fairseq.data.dictionary import Dictionary

from .decoder_config import DecoderConfig, FlashlightDecoderConfig
from .base_decoder import BaseDecoder


def Decoder(
    cfg: Union[DecoderConfig, FlashlightDecoderConfig], tgt_dict: Dictionary
) -> BaseDecoder:
    if cfg.type == "viterbi":
        from .viterbi_decoder import ViterbiDecoder

        return ViterbiDecoder(tgt_dict)
    if cfg.type == "kenlm":
        from .flashlight_decoder import KenLMDecoder

        return KenLMDecoder(cfg, tgt_dict)
    if cfg.type == "fairseqlm":
        from .flashlight_decoder import FairseqLMDecoder

        return FairseqLMDecoder(cfg, tgt_dict)
    # report the unknown value itself (the config exposes "type", not "name")
    raise NotImplementedError(f"Invalid decoder type: {cfg.type}")
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/new/decoders/decoder.py
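Note: the Decoder factory dispatches purely on cfg.type. A usage sketch with a toy dictionary; the imports assume the fairseq repository root is on the Python path so that the examples package is importable:

from fairseq.data.dictionary import Dictionary
from examples.speech_recognition.new.decoders.decoder_config import DecoderConfig
from examples.speech_recognition.new.decoders.decoder import Decoder

tgt_dict = Dictionary()                    # toy target dictionary
for sym in ("a", "b", "c"):
    tgt_dict.add_symbol(sym)

cfg = DecoderConfig(type="viterbi")        # "kenlm" / "fairseqlm" route to flashlight_decoder instead
decoder = Decoder(cfg, tgt_dict)           # -> ViterbiDecoder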
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch
from typing import List, Dict

from .base_decoder import BaseDecoder


class ViterbiDecoder(BaseDecoder):
    def decode(
        self,
        emissions: torch.FloatTensor,
    ) -> List[List[Dict[str, torch.LongTensor]]]:
        def get_pred(e):
            toks = e.argmax(dim=-1).unique_consecutive()
            return toks[toks != self.blank]

        return [[{"tokens": get_pred(x), "score": 0}] for x in emissions]
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/new/decoders/viterbi_decoder.py
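Note: get_pred above is plain greedy CTC decoding -- frame-wise argmax, collapse repeats, drop blanks. The same three steps on a toy emission matrix (blank index 0 is assumed here; BaseDecoder actually picks it from <ctc_blank> or bos()):

import torch

emissions = torch.tensor([[0.9, 0.0, 0.1, 0.0],   # T = 5 frames, vocab size 4
                          [0.1, 0.8, 0.1, 0.0],
                          [0.1, 0.8, 0.1, 0.0],
                          [0.9, 0.0, 0.1, 0.0],
                          [0.0, 0.1, 0.0, 0.9]])
blank = 0
toks = emissions.argmax(dim=-1).unique_consecutive()  # tensor([0, 1, 0, 3])
pred = toks[toks != blank]                            # tensor([1, 3])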
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/new/decoders/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import itertools as it from typing import Any, Dict, List import torch from fairseq.data.dictionary import Dictionary from fairseq.models.fairseq_model import FairseqModel class BaseDecoder: def __init__(self, tgt_dict: Dictionary) -> None: self.tgt_dict = tgt_dict self.vocab_size = len(tgt_dict) self.blank = ( tgt_dict.index("<ctc_blank>") if "<ctc_blank>" in tgt_dict.indices else tgt_dict.bos() ) if "<sep>" in tgt_dict.indices: self.silence = tgt_dict.index("<sep>") elif "|" in tgt_dict.indices: self.silence = tgt_dict.index("|") else: self.silence = tgt_dict.eos() def generate( self, models: List[FairseqModel], sample: Dict[str, Any], **unused ) -> List[List[Dict[str, torch.LongTensor]]]: encoder_input = { k: v for k, v in sample["net_input"].items() if k != "prev_output_tokens" } emissions = self.get_emissions(models, encoder_input) return self.decode(emissions) def get_emissions( self, models: List[FairseqModel], encoder_input: Dict[str, Any], ) -> torch.FloatTensor: model = models[0] encoder_out = model(**encoder_input) if hasattr(model, "get_logits"): emissions = model.get_logits(encoder_out) else: emissions = model.get_normalized_probs(encoder_out, log_probs=True) return emissions.transpose(0, 1).float().cpu().contiguous() def get_tokens(self, idxs: torch.IntTensor) -> torch.LongTensor: idxs = (g[0] for g in it.groupby(idxs)) idxs = filter(lambda x: x != self.blank, idxs) return torch.LongTensor(list(idxs)) def decode( self, emissions: torch.FloatTensor, ) -> List[List[Dict[str, torch.LongTensor]]]: raise NotImplementedError
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/new/decoders/base_decoder.py
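Note: BaseDecoder.get_tokens performs the same collapse with itertools.groupby, which also works on the plain Python index lists returned by the flashlight decoders. A toy run (blank id 0 is an arbitrary choice for illustration):

import itertools as it
import torch

blank = 0
idxs = [0, 5, 5, 0, 0, 7, 7, 7, 2]
idxs = (g[0] for g in it.groupby(idxs))     # collapse runs: 0, 5, 0, 7, 2
idxs = filter(lambda x: x != blank, idxs)   # drop blanks:   5, 7, 2
tokens = torch.LongTensor(list(idxs))       # tensor([5, 7, 2])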
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from dataclasses import dataclass, field from typing import Optional from fairseq.dataclass.configs import FairseqDataclass from fairseq.dataclass.constants import ChoiceEnum from omegaconf import MISSING DECODER_CHOICES = ChoiceEnum(["viterbi", "kenlm", "fairseqlm"]) @dataclass class DecoderConfig(FairseqDataclass): type: DECODER_CHOICES = field( default="viterbi", metadata={"help": "The type of decoder to use"}, ) @dataclass class FlashlightDecoderConfig(FairseqDataclass): nbest: int = field( default=1, metadata={"help": "Number of decodings to return"}, ) unitlm: bool = field( default=False, metadata={"help": "If set, use unit language model"}, ) lmpath: str = field( default=MISSING, metadata={"help": "Language model for KenLM decoder"}, ) lexicon: Optional[str] = field( default=None, metadata={"help": "Lexicon for Flashlight decoder"}, ) beam: int = field( default=50, metadata={"help": "Number of beams to use for decoding"}, ) beamthreshold: float = field( default=50.0, metadata={"help": "Threshold for beam search decoding"}, ) beamsizetoken: Optional[int] = field( default=None, metadata={"help": "Beam size to use"} ) wordscore: float = field( default=-1, metadata={"help": "Word score for KenLM decoder"}, ) unkweight: float = field( default=-math.inf, metadata={"help": "Unknown weight for KenLM decoder"}, ) silweight: float = field( default=0, metadata={"help": "Silence weight for KenLM decoder"}, ) lmweight: float = field( default=2, metadata={"help": "Weight for LM while interpolating score"}, )
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/new/decoders/decoder_config.py
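Note: since both configs are plain dataclasses, they can be instantiated directly for quick experiments; infer.py instead composes them into DecodingConfig and lets Hydra fill the fields. The paths below are hypothetical placeholders:

from examples.speech_recognition.new.decoders.decoder_config import FlashlightDecoderConfig

cfg = FlashlightDecoderConfig(
    lmpath="/path/to/lm.bin",          # hypothetical KenLM binary
    lexicon="/path/to/lexicon.txt",    # hypothetical lexicon file
)
assert cfg.beam == 50 and cfg.nbest == 1 and cfg.lmweight == 2   # defaults from the fields above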
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import gc import os.path as osp import warnings from collections import deque, namedtuple from typing import Any, Dict, Tuple import numpy as np import torch from fairseq import tasks from fairseq.data.dictionary import Dictionary from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.models.fairseq_model import FairseqModel from fairseq.utils import apply_to_sample from omegaconf import open_dict, OmegaConf from typing import List from .decoder_config import FlashlightDecoderConfig from .base_decoder import BaseDecoder try: from flashlight.lib.text.decoder import ( LM, CriterionType, DecodeResult, KenLM, LexiconDecoder, LexiconDecoderOptions, LexiconFreeDecoder, LexiconFreeDecoderOptions, LMState, SmearingMode, Trie, ) from flashlight.lib.text.dictionary import create_word_dict, load_words except ImportError: warnings.warn( "flashlight python bindings are required to use this functionality. " "Please install from " "https://github.com/facebookresearch/flashlight/tree/master/bindings/python" ) LM = object LMState = object class KenLMDecoder(BaseDecoder): def __init__(self, cfg: FlashlightDecoderConfig, tgt_dict: Dictionary) -> None: super().__init__(tgt_dict) self.nbest = cfg.nbest self.unitlm = cfg.unitlm if cfg.lexicon: self.lexicon = load_words(cfg.lexicon) self.word_dict = create_word_dict(self.lexicon) self.unk_word = self.word_dict.get_index("<unk>") self.lm = KenLM(cfg.lmpath, self.word_dict) self.trie = Trie(self.vocab_size, self.silence) start_state = self.lm.start(False) for word, spellings in self.lexicon.items(): word_idx = self.word_dict.get_index(word) _, score = self.lm.score(start_state, word_idx) for spelling in spellings: spelling_idxs = [tgt_dict.index(token) for token in spelling] assert ( tgt_dict.unk() not in spelling_idxs ), f"{word} {spelling} {spelling_idxs}" self.trie.insert(spelling_idxs, word_idx, score) self.trie.smear(SmearingMode.MAX) self.decoder_opts = LexiconDecoderOptions( beam_size=cfg.beam, beam_size_token=cfg.beamsizetoken or len(tgt_dict), beam_threshold=cfg.beamthreshold, lm_weight=cfg.lmweight, word_score=cfg.wordscore, unk_score=cfg.unkweight, sil_score=cfg.silweight, log_add=False, criterion_type=CriterionType.CTC, ) self.decoder = LexiconDecoder( self.decoder_opts, self.trie, self.lm, self.silence, self.blank, self.unk_word, [], self.unitlm, ) else: assert self.unitlm, "Lexicon-free decoding requires unit LM" d = {w: [[w]] for w in tgt_dict.symbols} self.word_dict = create_word_dict(d) self.lm = KenLM(cfg.lmpath, self.word_dict) self.decoder_opts = LexiconFreeDecoderOptions( beam_size=cfg.beam, beam_size_token=cfg.beamsizetoken or len(tgt_dict), beam_threshold=cfg.beamthreshold, lm_weight=cfg.lmweight, sil_score=cfg.silweight, log_add=False, criterion_type=CriterionType.CTC, ) self.decoder = LexiconFreeDecoder( self.decoder_opts, self.lm, self.silence, self.blank, [] ) def get_timesteps(self, token_idxs: List[int]) -> List[int]: """Returns frame numbers corresponding to every non-blank token. Parameters ---------- token_idxs : List[int] IDs of decoded tokens. Returns ------- List[int] Frame numbers corresponding to every non-blank token. 
""" timesteps = [] for i, token_idx in enumerate(token_idxs): if token_idx == self.blank: continue if i == 0 or token_idx != token_idxs[i-1]: timesteps.append(i) return timesteps def decode( self, emissions: torch.FloatTensor, ) -> List[List[Dict[str, torch.LongTensor]]]: B, T, N = emissions.size() hypos = [] for b in range(B): emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) results = self.decoder.decode(emissions_ptr, T, N) nbest_results = results[: self.nbest] hypos.append( [ { "tokens": self.get_tokens(result.tokens), "score": result.score, "timesteps": self.get_timesteps(result.tokens), "words": [ self.word_dict.get_entry(x) for x in result.words if x >= 0 ], } for result in nbest_results ] ) return hypos FairseqLMState = namedtuple( "FairseqLMState", [ "prefix", "incremental_state", "probs", ], ) class FairseqLM(LM): def __init__(self, dictionary: Dictionary, model: FairseqModel) -> None: super().__init__() self.dictionary = dictionary self.model = model self.unk = self.dictionary.unk() self.save_incremental = False # this currently does not work properly self.max_cache = 20_000 if torch.cuda.is_available(): model.cuda() model.eval() model.make_generation_fast_() self.states = {} self.stateq = deque() def start(self, start_with_nothing: bool) -> LMState: state = LMState() prefix = torch.LongTensor([[self.dictionary.eos()]]) incremental_state = {} if self.save_incremental else None with torch.no_grad(): res = self.model(prefix.cuda(), incremental_state=incremental_state) probs = self.model.get_normalized_probs(res, log_probs=True, sample=None) if incremental_state is not None: incremental_state = apply_to_sample(lambda x: x.cpu(), incremental_state) self.states[state] = FairseqLMState( prefix.numpy(), incremental_state, probs[0, -1].cpu().numpy() ) self.stateq.append(state) return state def score( self, state: LMState, token_index: int, no_cache: bool = False, ) -> Tuple[LMState, int]: """ Evaluate language model based on the current lm state and new word Parameters: ----------- state: current lm state token_index: index of the word (can be lexicon index then you should store inside LM the mapping between indices of lexicon and lm, or lm index of a word) Returns: -------- (LMState, float): pair of (new state, score for the current word) """ curr_state = self.states[state] def trim_cache(targ_size: int) -> None: while len(self.stateq) > targ_size: rem_k = self.stateq.popleft() rem_st = self.states[rem_k] rem_st = FairseqLMState(rem_st.prefix, None, None) self.states[rem_k] = rem_st if curr_state.probs is None: new_incremental_state = ( curr_state.incremental_state.copy() if curr_state.incremental_state is not None else None ) with torch.no_grad(): if new_incremental_state is not None: new_incremental_state = apply_to_sample( lambda x: x.cuda(), new_incremental_state ) elif self.save_incremental: new_incremental_state = {} res = self.model( torch.from_numpy(curr_state.prefix).cuda(), incremental_state=new_incremental_state, ) probs = self.model.get_normalized_probs( res, log_probs=True, sample=None ) if new_incremental_state is not None: new_incremental_state = apply_to_sample( lambda x: x.cpu(), new_incremental_state ) curr_state = FairseqLMState( curr_state.prefix, new_incremental_state, probs[0, -1].cpu().numpy() ) if not no_cache: self.states[state] = curr_state self.stateq.append(state) score = curr_state.probs[token_index].item() trim_cache(self.max_cache) outstate = state.child(token_index) if outstate not in self.states and not no_cache: prefix = 
np.concatenate( [curr_state.prefix, torch.LongTensor([[token_index]])], -1 ) incr_state = curr_state.incremental_state self.states[outstate] = FairseqLMState(prefix, incr_state, None) if token_index == self.unk: score = float("-inf") return outstate, score def finish(self, state: LMState) -> Tuple[LMState, int]: """ Evaluate eos for language model based on the current lm state Returns: -------- (LMState, float): pair of (new state, score for the current word) """ return self.score(state, self.dictionary.eos()) def empty_cache(self) -> None: self.states = {} self.stateq = deque() gc.collect() class FairseqLMDecoder(BaseDecoder): def __init__(self, cfg: FlashlightDecoderConfig, tgt_dict: Dictionary) -> None: super().__init__(tgt_dict) self.nbest = cfg.nbest self.unitlm = cfg.unitlm self.lexicon = load_words(cfg.lexicon) if cfg.lexicon else None self.idx_to_wrd = {} checkpoint = torch.load(cfg.lmpath, map_location="cpu") if "cfg" in checkpoint and checkpoint["cfg"] is not None: lm_args = checkpoint["cfg"] else: lm_args = convert_namespace_to_omegaconf(checkpoint["args"]) if not OmegaConf.is_dict(lm_args): lm_args = OmegaConf.create(lm_args) with open_dict(lm_args.task): lm_args.task.data = osp.dirname(cfg.lmpath) task = tasks.setup_task(lm_args.task) model = task.build_model(lm_args.model) model.load_state_dict(checkpoint["model"], strict=False) self.trie = Trie(self.vocab_size, self.silence) self.word_dict = task.dictionary self.unk_word = self.word_dict.unk() self.lm = FairseqLM(self.word_dict, model) if self.lexicon: start_state = self.lm.start(False) for i, (word, spellings) in enumerate(self.lexicon.items()): if self.unitlm: word_idx = i self.idx_to_wrd[i] = word score = 0 else: word_idx = self.word_dict.index(word) _, score = self.lm.score(start_state, word_idx, no_cache=True) for spelling in spellings: spelling_idxs = [tgt_dict.index(token) for token in spelling] assert ( tgt_dict.unk() not in spelling_idxs ), f"{spelling} {spelling_idxs}" self.trie.insert(spelling_idxs, word_idx, score) self.trie.smear(SmearingMode.MAX) self.decoder_opts = LexiconDecoderOptions( beam_size=cfg.beam, beam_size_token=cfg.beamsizetoken or len(tgt_dict), beam_threshold=cfg.beamthreshold, lm_weight=cfg.lmweight, word_score=cfg.wordscore, unk_score=cfg.unkweight, sil_score=cfg.silweight, log_add=False, criterion_type=CriterionType.CTC, ) self.decoder = LexiconDecoder( self.decoder_opts, self.trie, self.lm, self.silence, self.blank, self.unk_word, [], self.unitlm, ) else: assert self.unitlm, "Lexicon-free decoding requires unit LM" d = {w: [[w]] for w in tgt_dict.symbols} self.word_dict = create_word_dict(d) self.lm = KenLM(cfg.lmpath, self.word_dict) self.decoder_opts = LexiconFreeDecoderOptions( beam_size=cfg.beam, beam_size_token=cfg.beamsizetoken or len(tgt_dict), beam_threshold=cfg.beamthreshold, lm_weight=cfg.lmweight, sil_score=cfg.silweight, log_add=False, criterion_type=CriterionType.CTC, ) self.decoder = LexiconFreeDecoder( self.decoder_opts, self.lm, self.silence, self.blank, [] ) def decode( self, emissions: torch.FloatTensor, ) -> List[List[Dict[str, torch.LongTensor]]]: B, T, N = emissions.size() hypos = [] def make_hypo(result: DecodeResult) -> Dict[str, Any]: hypo = { "tokens": self.get_tokens(result.tokens), "score": result.score, } if self.lexicon: hypo["words"] = [ self.idx_to_wrd[x] if self.unitlm else self.word_dict[x] for x in result.words if x >= 0 ] return hypo for b in range(B): emissions_ptr = emissions.data_ptr() + 4 * b * emissions.stride(0) results = 
self.decoder.decode(emissions_ptr, T, N) nbest_results = results[: self.nbest] hypos.append([make_hypo(result) for result in nbest_results]) self.lm.empty_cache() return hypos
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/new/decoders/flashlight_decoder.py
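Note: both decode() methods hand flashlight a raw pointer offset by 4 * b * stride(0) bytes, which implicitly assumes float32, contiguous emissions. A quick sanity check of that assumption with plain PyTorch:

import torch

emissions = torch.randn(3, 10, 5, dtype=torch.float32).contiguous()  # (B, T, N)
b = 1
ptr = emissions.data_ptr() + 4 * b * emissions.stride(0)  # 4 bytes per float32 element
assert ptr == emissions[b].data_ptr()                     # the address decode() receives for batch item b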
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Replabel transforms for use with flashlight's ASG criterion. """ def replabel_symbol(i): """ Replabel symbols used in flashlight, currently just "1", "2", ... This prevents training with numeral tokens, so this might change in the future """ return str(i) def pack_replabels(tokens, dictionary, max_reps): """ Pack a token sequence so that repeated symbols are replaced by replabels """ if len(tokens) == 0 or max_reps <= 0: return tokens replabel_value_to_idx = [0] * (max_reps + 1) for i in range(1, max_reps + 1): replabel_value_to_idx[i] = dictionary.index(replabel_symbol(i)) result = [] prev_token = -1 num_reps = 0 for token in tokens: if token == prev_token and num_reps < max_reps: num_reps += 1 else: if num_reps > 0: result.append(replabel_value_to_idx[num_reps]) num_reps = 0 result.append(token) prev_token = token if num_reps > 0: result.append(replabel_value_to_idx[num_reps]) return result def unpack_replabels(tokens, dictionary, max_reps): """ Unpack a token sequence so that replabels are replaced by repeated symbols """ if len(tokens) == 0 or max_reps <= 0: return tokens replabel_idx_to_value = {} for i in range(1, max_reps + 1): replabel_idx_to_value[dictionary.index(replabel_symbol(i))] = i result = [] prev_token = -1 for token in tokens: try: for _ in range(replabel_idx_to_value[token]): result.append(prev_token) prev_token = -1 except KeyError: result.append(token) prev_token = token return result
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/data/replabels.py
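Note: pack_replabels and unpack_replabels are inverses as long as the dictionary contains the replabel symbols "1" ... str(max_reps). A round-trip sketch with a toy dictionary:

from fairseq.data.dictionary import Dictionary
from examples.speech_recognition.data.replabels import pack_replabels, unpack_replabels

d = Dictionary()
for sym in ("a", "b", "1", "2"):               # replabel symbols must be in the dictionary
    d.add_symbol(sym)

tokens = [d.index("a")] * 3 + [d.index("b")]   # "a a a b"
packed = pack_replabels(tokens, d, max_reps=2) # "a <2> b"
assert unpack_replabels(packed, d, max_reps=2) == tokens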
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .asr_dataset import AsrDataset

__all__ = [
    "AsrDataset",
]
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/data/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ This module contains collection of classes which implement collate functionalities for various tasks. Collaters should know what data to expect for each sample and they should pack / collate them into batches """ from __future__ import absolute_import, division, print_function, unicode_literals import numpy as np import torch from fairseq.data import data_utils as fairseq_data_utils class Seq2SeqCollater(object): """ Implements collate function mainly for seq2seq tasks This expects each sample to contain feature (src_tokens) and targets. This collator is also used for aligned training task. """ def __init__( self, feature_index=0, label_index=1, pad_index=1, eos_index=2, move_eos_to_beginning=True, ): self.feature_index = feature_index self.label_index = label_index self.pad_index = pad_index self.eos_index = eos_index self.move_eos_to_beginning = move_eos_to_beginning def _collate_frames(self, frames): """Convert a list of 2d frames into a padded 3d tensor Args: frames (list): list of 2d frames of size L[i]*f_dim. Where L[i] is length of i-th frame and f_dim is static dimension of features Returns: 3d tensor of size len(frames)*len_max*f_dim where len_max is max of L[i] """ len_max = max(frame.size(0) for frame in frames) f_dim = frames[0].size(1) res = frames[0].new(len(frames), len_max, f_dim).fill_(0.0) for i, v in enumerate(frames): res[i, : v.size(0)] = v return res def collate(self, samples): """ utility function to collate samples into batch for speech recognition. """ if len(samples) == 0: return {} # parse samples into torch tensors parsed_samples = [] for s in samples: # skip invalid samples if s["data"][self.feature_index] is None: continue source = s["data"][self.feature_index] if isinstance(source, (np.ndarray, np.generic)): source = torch.from_numpy(source) target = s["data"][self.label_index] if isinstance(target, (np.ndarray, np.generic)): target = torch.from_numpy(target).long() elif isinstance(target, list): target = torch.LongTensor(target) parsed_sample = {"id": s["id"], "source": source, "target": target} parsed_samples.append(parsed_sample) samples = parsed_samples id = torch.LongTensor([s["id"] for s in samples]) frames = self._collate_frames([s["source"] for s in samples]) # sort samples by descending number of frames frames_lengths = torch.LongTensor([s["source"].size(0) for s in samples]) frames_lengths, sort_order = frames_lengths.sort(descending=True) id = id.index_select(0, sort_order) frames = frames.index_select(0, sort_order) target = None target_lengths = None prev_output_tokens = None if samples[0].get("target", None) is not None: ntokens = sum(len(s["target"]) for s in samples) target = fairseq_data_utils.collate_tokens( [s["target"] for s in samples], self.pad_index, self.eos_index, left_pad=False, move_eos_to_beginning=False, ) target = target.index_select(0, sort_order) target_lengths = torch.LongTensor( [s["target"].size(0) for s in samples] ).index_select(0, sort_order) prev_output_tokens = fairseq_data_utils.collate_tokens( [s["target"] for s in samples], self.pad_index, self.eos_index, left_pad=False, move_eos_to_beginning=self.move_eos_to_beginning, ) prev_output_tokens = prev_output_tokens.index_select(0, sort_order) else: ntokens = sum(len(s["source"]) for s in samples) batch = { "id": id, "ntokens": ntokens, "net_input": {"src_tokens": frames, "src_lengths": 
frames_lengths}, "target": target, "target_lengths": target_lengths, "nsentences": len(samples), } if prev_output_tokens is not None: batch["net_input"]["prev_output_tokens"] = prev_output_tokens return batch
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/data/collaters.py
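Note: _collate_frames zero-pads variable-length utterances up to the longest one in the batch. A toy call (the index arguments below simply restate the constructor defaults):

import torch
from examples.speech_recognition.data.collaters import Seq2SeqCollater

collater = Seq2SeqCollater(feature_index=0, label_index=1, pad_index=1, eos_index=2)
frames = [torch.ones(3, 4), torch.ones(5, 4)]   # two utterances: lengths 3 and 5, feature dim 4
padded = collater._collate_frames(frames)
assert padded.shape == (2, 5, 4)                # shorter utterance is zero-padded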
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch def calc_mean_invstddev(feature): if len(feature.size()) != 2: raise ValueError("We expect the input feature to be 2-D tensor") mean = feature.mean(0) var = feature.var(0) # avoid division by ~zero eps = 1e-8 if (var < eps).any(): return mean, 1.0 / (torch.sqrt(var) + eps) return mean, 1.0 / torch.sqrt(var) def apply_mv_norm(features): # If there is less than 2 spectrograms, the variance cannot be computed (is NaN) # and normalization is not possible, so return the item as it is if features.size(0) < 2: return features mean, invstddev = calc_mean_invstddev(features) res = (features - mean) * invstddev return res def lengths_to_encoder_padding_mask(lengths, batch_first=False): """ convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor Args: lengths: a (B, )-shaped tensor Return: max_length: maximum length of B sequences encoder_padding_mask: a (max_length, B) binary mask, where [t, b] = 0 for t < lengths[b] and 1 otherwise TODO: kernelize this function if benchmarking shows this function is slow """ max_lengths = torch.max(lengths).item() bsz = lengths.size(0) encoder_padding_mask = torch.arange( max_lengths ).to( # a (T, ) tensor with [0, ..., T-1] lengths.device ).view( # move to the right device 1, max_lengths ).expand( # reshape to (1, T)-shaped tensor bsz, -1 ) >= lengths.view( # expand to (B, T)-shaped tensor bsz, 1 ).expand( -1, max_lengths ) if not batch_first: return encoder_padding_mask.t(), max_lengths else: return encoder_padding_mask, max_lengths def encoder_padding_mask_to_lengths( encoder_padding_mask, max_lengths, batch_size, device ): """ convert encoder_padding_mask (2-D binary tensor) to a 1-D tensor Conventionally, encoder output contains a encoder_padding_mask, which is a 2-D mask in a shape (T, B), whose (t, b) element indicate whether encoder_out[t, b] is a valid output (=0) or not (=1). Occasionally, we need to convert this mask tensor to a 1-D tensor in shape (B, ), where [b] denotes the valid length of b-th sequence Args: encoder_padding_mask: a (T, B)-shaped binary tensor or None; if None, indicating all are valid Return: seq_lengths: a (B,)-shaped tensor, where its (b, )-th element is the number of valid elements of b-th sequence max_lengths: maximum length of all sequence, if encoder_padding_mask is not None, max_lengths must equal to encoder_padding_mask.size(0) batch_size: batch size; if encoder_padding_mask is not None, max_lengths must equal to encoder_padding_mask.size(1) device: which device to put the result on """ if encoder_padding_mask is None: return torch.Tensor([max_lengths] * batch_size).to(torch.int32).to(device) assert encoder_padding_mask.size(0) == max_lengths, "max_lengths does not match" assert encoder_padding_mask.size(1) == batch_size, "batch_size does not match" return max_lengths - torch.sum(encoder_padding_mask, dim=0)
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/data/data_utils.py
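Note: lengths_to_encoder_padding_mask marks padded positions with True. For a toy batch of lengths [4, 2, 3]:

import torch
from examples.speech_recognition.data.data_utils import lengths_to_encoder_padding_mask

lengths = torch.LongTensor([4, 2, 3])
mask, max_len = lengths_to_encoder_padding_mask(lengths, batch_first=True)
# max_len == 4; mask[b, t] is True exactly where t >= lengths[b]:
# [[0, 0, 0, 0],
#  [0, 0, 1, 1],
#  [0, 0, 0, 1]]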
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import numpy as np from fairseq.data import FairseqDataset from . import data_utils from .collaters import Seq2SeqCollater class AsrDataset(FairseqDataset): """ A dataset representing speech and corresponding transcription. Args: aud_paths: (List[str]): A list of str with paths to audio files. aud_durations_ms (List[int]): A list of int containing the durations of audio files. tgt (List[torch.LongTensor]): A list of LongTensors containing the indices of target transcriptions. tgt_dict (~fairseq.data.Dictionary): target vocabulary. ids (List[str]): A list of utterance IDs. speakers (List[str]): A list of speakers corresponding to utterances. num_mel_bins (int): Number of triangular mel-frequency bins (default: 80) frame_length (float): Frame length in milliseconds (default: 25.0) frame_shift (float): Frame shift in milliseconds (default: 10.0) """ def __init__( self, aud_paths, aud_durations_ms, tgt, tgt_dict, ids, speakers, num_mel_bins=80, frame_length=25.0, frame_shift=10.0, ): assert frame_length > 0 assert frame_shift > 0 assert all(x > frame_length for x in aud_durations_ms) self.frame_sizes = [ int(1 + (d - frame_length) / frame_shift) for d in aud_durations_ms ] assert len(aud_paths) > 0 assert len(aud_paths) == len(aud_durations_ms) assert len(aud_paths) == len(tgt) assert len(aud_paths) == len(ids) assert len(aud_paths) == len(speakers) self.aud_paths = aud_paths self.tgt_dict = tgt_dict self.tgt = tgt self.ids = ids self.speakers = speakers self.num_mel_bins = num_mel_bins self.frame_length = frame_length self.frame_shift = frame_shift self.s2s_collater = Seq2SeqCollater( 0, 1, pad_index=self.tgt_dict.pad(), eos_index=self.tgt_dict.eos(), move_eos_to_beginning=True, ) def __getitem__(self, index): import torchaudio import torchaudio.compliance.kaldi as kaldi tgt_item = self.tgt[index] if self.tgt is not None else None path = self.aud_paths[index] if not os.path.exists(path): raise FileNotFoundError("Audio file not found: {}".format(path)) sound, sample_rate = torchaudio.load_wav(path) output = kaldi.fbank( sound, num_mel_bins=self.num_mel_bins, frame_length=self.frame_length, frame_shift=self.frame_shift, ) output_cmvn = data_utils.apply_mv_norm(output) return {"id": index, "data": [output_cmvn.detach(), tgt_item]} def __len__(self): return len(self.aud_paths) def collater(self, samples): """Merge a list of samples to form a mini-batch. Args: samples (List[int]): sample indices to collate Returns: dict: a mini-batch suitable for forwarding with a Model """ return self.s2s_collater.collate(samples) def num_tokens(self, index): return self.frame_sizes[index] def size(self, index): """Return an example's size as a float or tuple. This value is used when filtering a dataset with ``--max-positions``.""" return ( self.frame_sizes[index], len(self.tgt[index]) if self.tgt is not None else 0, ) def ordered_indices(self): """Return an ordered list of indices. Batches will be constructed based on this order.""" return np.arange(len(self))
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/data/asr_dataset.py
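Note: the frame_sizes computed in AsrDataset.__init__ follow the usual framing formula; for example, a 1000 ms utterance with the default 25 ms window and 10 ms shift gives:

duration_ms, frame_length, frame_shift = 1000, 25.0, 10.0
n_frames = int(1 + (duration_ms - frame_length) / frame_shift)  # int(98.5) == 98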
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from __future__ import absolute_import, division, print_function, unicode_literals import logging import math import torch import torch.nn.functional as F from fairseq import utils from fairseq.criterions import FairseqCriterion, register_criterion @register_criterion("cross_entropy_acc") class CrossEntropyWithAccCriterion(FairseqCriterion): def __init__(self, task, sentence_avg): super().__init__(task) self.sentence_avg = sentence_avg def compute_loss(self, model, net_output, target, reduction, log_probs): # N, T -> N * T target = target.view(-1) lprobs = model.get_normalized_probs(net_output, log_probs=log_probs) if not hasattr(lprobs, "batch_first"): logging.warning( "ERROR: we need to know whether " "batch first for the net output; " "you need to set batch_first attribute for the return value of " "model.get_normalized_probs. Now, we assume this is true, but " "in the future, we will raise exception instead. " ) batch_first = getattr(lprobs, "batch_first", True) if not batch_first: lprobs = lprobs.transpose(0, 1) # N, T, D -> N * T, D lprobs = lprobs.view(-1, lprobs.size(-1)) loss = F.nll_loss( lprobs, target, ignore_index=self.padding_idx, reduction=reduction ) return lprobs, loss def get_logging_output(self, sample, target, lprobs, loss): target = target.view(-1) mask = target != self.padding_idx correct = torch.sum( lprobs.argmax(1).masked_select(mask) == target.masked_select(mask) ) total = torch.sum(mask) sample_size = ( sample["target"].size(0) if self.sentence_avg else sample["ntokens"] ) logging_output = { "loss": utils.item(loss.data), # * sample['ntokens'], "ntokens": sample["ntokens"], "nsentences": sample["target"].size(0), "sample_size": sample_size, "correct": utils.item(correct.data), "total": utils.item(total.data), "nframes": torch.sum(sample["net_input"]["src_lengths"]).item(), } return sample_size, logging_output def forward(self, model, sample, reduction="sum", log_probs=True): """Computes the cross entropy with accuracy metric for the given sample. This is similar to CrossEntropyCriterion in fairseq, but also computes accuracy metrics as part of logging Args: logprobs (Torch.tensor) of shape N, T, D i.e. batchsize, timesteps, dimensions targets (Torch.tensor) of shape N, T i.e batchsize, timesteps Returns: tuple: With three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training TODO: * Currently this Criterion will only work with LSTMEncoderModels or FairseqModels which have decoder, or Models which return TorchTensor as net_output. We need to make a change to support all FairseqEncoder models. 
""" net_output = model(**sample["net_input"]) target = model.get_targets(sample, net_output) lprobs, loss = self.compute_loss( model, net_output, target, reduction, log_probs ) sample_size, logging_output = self.get_logging_output( sample, target, lprobs, loss ) return loss, sample_size, logging_output @staticmethod def aggregate_logging_outputs(logging_outputs): """Aggregate logging outputs from data parallel training.""" correct_sum = sum(log.get("correct", 0) for log in logging_outputs) total_sum = sum(log.get("total", 0) for log in logging_outputs) loss_sum = sum(log.get("loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) nframes = sum(log.get("nframes", 0) for log in logging_outputs) agg_output = { "loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0, # if args.sentence_avg, then sample_size is nsentences, then loss # is per-sentence loss; else sample_size is ntokens, the loss # becomes per-output token loss "ntokens": ntokens, "nsentences": nsentences, "nframes": nframes, "sample_size": sample_size, "acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0, "correct": correct_sum, "total": total_sum, # total is the number of validate tokens } if sample_size != ntokens: agg_output["nll_loss"] = loss_sum / ntokens / math.log(2) # loss: per output token loss # nll_loss: per sentence loss return agg_output
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/criterions/cross_entropy_acc.py
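A minimal standalone sketch of the masked-accuracy bookkeeping in get_logging_output() above, assuming fairseq's usual padding index of 1; the tensors are toy data, not taken from the file.

import torch

padding_idx = 1                                            # assumed pad id
lprobs = torch.log_softmax(torch.randn(6, 5), dim=-1)      # flattened N*T x V log-probs
target = torch.tensor([4, 2, 1, 3, 1, 0])                  # flattened targets, 1 = pad

mask = target != padding_idx                               # ignore padded positions
correct = (lprobs.argmax(1).masked_select(mask) == target.masked_select(mask)).sum()
total = mask.sum()
print(f"acc = {100.0 * correct.item() / total.item():.1f}% over {total.item()} tokens")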
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from examples.speech_recognition.data.replabels import pack_replabels from fairseq import utils from fairseq.criterions import FairseqCriterion, register_criterion @register_criterion("asg_loss") class ASGCriterion(FairseqCriterion): @staticmethod def add_args(parser): group = parser.add_argument_group("ASG Loss") group.add_argument( "--asg-transitions-init", help="initial diagonal value of transition matrix", type=float, default=0.0, ) group.add_argument( "--max-replabel", help="maximum # of replabels", type=int, default=2 ) group.add_argument( "--linseg-updates", help="# of training updates to use LinSeg initialization", type=int, default=0, ) group.add_argument( "--hide-linseg-messages", help="hide messages about LinSeg initialization", action="store_true", ) def __init__( self, task, silence_token, asg_transitions_init, max_replabel, linseg_updates, hide_linseg_messages, ): from flashlight.lib.sequence.criterion import ASGLoss, CriterionScaleMode super().__init__(task) self.tgt_dict = task.target_dictionary self.eos = self.tgt_dict.eos() self.silence = ( self.tgt_dict.index(silence_token) if silence_token in self.tgt_dict else None ) self.max_replabel = max_replabel num_labels = len(self.tgt_dict) self.asg = ASGLoss(num_labels, scale_mode=CriterionScaleMode.TARGET_SZ_SQRT) self.asg.trans = torch.nn.Parameter( asg_transitions_init * torch.eye(num_labels), requires_grad=True ) self.linseg_progress = torch.nn.Parameter( torch.tensor([0], dtype=torch.int), requires_grad=False ) self.linseg_maximum = linseg_updates self.linseg_message_state = "none" if hide_linseg_messages else "start" @classmethod def build_criterion(cls, args, task): return cls( task, args.silence_token, args.asg_transitions_init, args.max_replabel, args.linseg_updates, args.hide_linseg_messages, ) def linseg_step(self): if not self.training: return False if self.linseg_progress.item() < self.linseg_maximum: if self.linseg_message_state == "start": print("| using LinSeg to initialize ASG") self.linseg_message_state = "finish" self.linseg_progress.add_(1) return True elif self.linseg_message_state == "finish": print("| finished LinSeg initialization") self.linseg_message_state = "none" return False def replace_eos_with_silence(self, tgt): if tgt[-1] != self.eos: return tgt elif self.silence is None or (len(tgt) > 1 and tgt[-2] == self.silence): return tgt[:-1] else: return tgt[:-1] + [self.silence] def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. 
Returns a tuple with three elements: 1) the loss 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ net_output = model(**sample["net_input"]) emissions = net_output["encoder_out"].transpose(0, 1).contiguous() B = emissions.size(0) T = emissions.size(1) device = emissions.device target = torch.IntTensor(B, T) target_size = torch.IntTensor(B) using_linseg = self.linseg_step() for b in range(B): initial_target_size = sample["target_lengths"][b].item() if initial_target_size == 0: raise ValueError("target size cannot be zero") tgt = sample["target"][b, :initial_target_size].tolist() tgt = self.replace_eos_with_silence(tgt) tgt = pack_replabels(tgt, self.tgt_dict, self.max_replabel) tgt = tgt[:T] if using_linseg: tgt = [tgt[t * len(tgt) // T] for t in range(T)] target[b][: len(tgt)] = torch.IntTensor(tgt) target_size[b] = len(tgt) loss = self.asg.forward(emissions, target.to(device), target_size.to(device)) if reduce: loss = torch.sum(loss) sample_size = ( sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"] ) logging_output = { "loss": utils.item(loss.data) if reduce else loss.data, "ntokens": sample["ntokens"], "nsentences": sample["target"].size(0), "sample_size": sample_size, } return loss, sample_size, logging_output @staticmethod def aggregate_logging_outputs(logging_outputs): """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) agg_output = { "loss": loss_sum / nsentences, "ntokens": ntokens, "nsentences": nsentences, "sample_size": sample_size, } return agg_output
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/criterions/ASG_loss.py
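A toy illustration of the LinSeg target expansion used in forward() above: during the first linseg_updates steps, a short packed label sequence is stretched evenly across the T emission frames. The label ids below are made up.

tgt = [7, 7, 3, 9]      # packed target label ids (hypothetical)
T = 10                  # number of emission frames
linseg_tgt = [tgt[t * len(tgt) // T] for t in range(T)]
print(linseg_tgt)       # [7, 7, 7, 7, 7, 3, 3, 3, 9, 9]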
import importlib import os # ASG loss requires flashlight bindings files_to_skip = set() try: import flashlight.lib.sequence.criterion except ImportError: files_to_skip.add("ASG_loss.py") for file in sorted(os.listdir(os.path.dirname(__file__))): if file.endswith(".py") and not file.startswith("_") and file not in files_to_skip: criterion_name = file[: file.find(".py")] importlib.import_module( "examples.speech_recognition.criterions." + criterion_name )
KosmosX-API-main
kosmosX/fairseq/examples/speech_recognition/criterions/__init__.py
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import sys

from sacremoses.normalize import MosesPunctNormalizer


def main(args):
    normalizer = MosesPunctNormalizer(lang=args.lang, penn=args.penn)
    for line in sys.stdin:
        print(normalizer.normalize(line.rstrip()), flush=True)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--lang", "-l", default="en")
    parser.add_argument("--penn", "-p", action="store_true")

    args = parser.parse_args()

    main(args)
KosmosX-API-main
kosmosX/fairseq/examples/constrained_decoding/normalize.py
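A quick programmatic check of the normalizer used above, assuming sacremoses is installed; the input string is toy data and the exact normalized output depends on the sacremoses version.

from sacremoses.normalize import MosesPunctNormalizer

normalizer = MosesPunctNormalizer(lang="en", penn=False)
print(normalizer.normalize("«Quote» – and some   extra   spaces"))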
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import sys

import sacremoses


def main(args):
    """Tokenizes, preserving tabs"""
    mt = sacremoses.MosesTokenizer(lang=args.lang)

    def tok(s):
        return mt.tokenize(s, return_str=True)

    for line in sys.stdin:
        parts = list(map(tok, line.split("\t")))
        print(*parts, sep="\t", flush=True)


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--lang", "-l", default="en")
    parser.add_argument("--penn", "-p", action="store_true")
    parser.add_argument("--fields", "-f", help="fields to tokenize")

    args = parser.parse_args()

    main(args)
KosmosX-API-main
kosmosX/fairseq/examples/constrained_decoding/tok.py
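A short sketch of the tab-preserving tokenization above, assuming sacremoses is installed; the input line is toy data.

import sacremoses

mt = sacremoses.MosesTokenizer(lang="en")
line = "Hello, world!\tA second field, also tokenized."
parts = [mt.tokenize(p, return_str=True) for p in line.split("\t")]
print("\t".join(parts))   # fields stay tab-separated, tokens are space-separated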
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from dataclasses import dataclass from typing import Dict, List, Optional import torch from fairseq.dataclass import FairseqDataclass from fairseq.models import ( FairseqIncrementalDecoder, FairseqLanguageModel, register_model, ) from .adaptive_span_model import TransformerSeq as AdaptiveSpanTransformerModel logger = logging.getLogger(__name__) @dataclass class AdaptiveSpanSmallConfig(FairseqDataclass): # defaults come from https://github.com/facebookresearch/adaptive-span/blob/master/experiments/enwik8_small.sh vocab_size: int = 50 d_model: int = 256 n_head: int = 4 d_inner: int = 1024 n_layer: int = 8 attn_span: int = 1024 dropout: float = 0.0 emb_dropout: float = 0.0 adapt_span_ramp: int = 32 adapt_span_init: float = 0.0 aux_loss_scaler: float = 0.000002 adapt_span_layer: bool = False @register_model("adaptive_span", dataclass=AdaptiveSpanSmallConfig) class AdaptiveSpanTransformer(FairseqLanguageModel): @classmethod def build_model(cls, cfg: AdaptiveSpanSmallConfig, task): return cls(AdaptiveSpanDecoder(cfg, task)) def get_aux_loss(self): return self.decoder.get_aux_loss() def get_current_max_span(self): return self.decoder.get_current_max_span() def get_current_avg_span(self): return self.decoder.get_current_avg_span() class AdaptiveSpanDecoder(FairseqIncrementalDecoder): def __init__(self, cfg, task): super().__init__(task.target_dictionary) self.config = cfg config = AdaptiveSpanSmallConfig( vocab_size=len(task.target_dictionary), d_model=cfg.d_model, n_head=cfg.n_head, d_inner=cfg.d_inner, n_layer=cfg.n_layer, attn_span=cfg.attn_span, dropout=cfg.dropout, emb_dropout=cfg.emb_dropout, adapt_span_ramp=cfg.adapt_span_ramp, adapt_span_init=cfg.adapt_span_init, aux_loss_scaler=cfg.aux_loss_scaler, adapt_span_layer=cfg.adapt_span_layer, ) logger.info(config) self.model = AdaptiveSpanTransformerModel(**config.__dict__) self._mems = None def forward( self, src_tokens, incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None, encoder_out=None, ): bsz = src_tokens.size(0) if incremental_state is not None: # used during inference mems = self.get_incremental_state("mems") src_tokens = src_tokens[:, -1:] # only keep the most recent token else: mems = self._mems if mems is None: # first time init mems = self.init_hid_cache(bsz) output = self.model(x=src_tokens, h_cache=mems,) if incremental_state is not None: self.set_incremental_state(incremental_state, "mems", output[1]) else: self._mems = output[1] return (output[0],) def max_positions(self): return self.config.attn_span def init_hid_cache(self, batch_sz): hid = [] for layer in self.model.layers: param = next(self.model.parameters()) h = torch.zeros( batch_sz, layer.get_cache_size(), self.config.d_model, dtype=param.dtype, device=param.device, ) hid.append(h) return hid def get_aux_loss(self): return self.model.get_aux_loss() def get_current_max_span(self): return self.model.get_current_max_span() def get_current_avg_span(self): return self.model.get_current_avg_span() def reorder_incremental_state( self, incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]], new_order: torch.Tensor, ): """Reorder incremental state. This will be called when the order of the input has changed from the previous time step. A typical use case is beam search, where the input order changes between time steps based on the selection of beams. 
""" raise NotImplementedError("This is required for generation/beam search") # mems = self.get_incremental_state(incremental_state, "mems") # if mems is not None: # new_mems = [mems_i.index_select(1, new_order) for mems_i in mems] # self.set_incremental_state(incremental_state, "mems", new_mems)
KosmosX-API-main
kosmosX/fairseq/examples/adaptive_span/adaptive_span_model_wrapper.py
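A standalone sketch of the per-layer hidden cache that init_hid_cache() builds above: one zero tensor of shape (batch, cache_size, d_model) per layer. The sizes are illustrative, not read from a config.

import torch

batch_sz, d_model, n_layer, cache_size = 2, 256, 8, 1024
mems = [torch.zeros(batch_sz, cache_size, d_model) for _ in range(n_layer)]
print(len(mems), tuple(mems[0].shape))   # 8 layers, each cache (2, 1024, 256)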
# Copyright (c) Facebook, Inc. and its affiliates. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq.modules.layer_norm import LayerNorm from .adaptive_span_attention import AdaptiveSpan # Size notations: # B = batch_size, H = d_model, M = block_size, L = attn_span def _skew(X, pad_value): """shift every row 1 step to right""" # X = B x M x L B, M, L = X.size() X = F.pad(X, (0, M + 1), value=pad_value) # B x M x (L+M+1) X = X.view(B, -1) # B x ML+MM+M X = X[:, :-M] # B x ML+MM X = X.view(B, M, M + L) # B x M x L+M return X def _unskew(X): """reverse _skew operation""" # X = B x M x L+M B, M, L = X.size() L -= M X = X.view(B, -1) # B x ML+MM X = F.pad(X, (0, M)) # B x ML+MM+M X = X.view(B, M, M + L + 1) # B x M x L+M+1 X = X[:, :, :L] # B x M x L return X class SeqAttention(nn.Module): """Sequential self-attention layer. Each token will attend to its previous fixed number of steps. Note that attention doesn't include the current step itself. """ def __init__(self, d_model, n_head, attn_span, dropout, adapt_span_layer, **kargs): nn.Module.__init__(self) self.dropout = nn.Dropout(dropout) self.d_model = d_model # size of a single head self.attn_span = attn_span self.adaptive_span = AdaptiveSpan( attn_span=attn_span, n_head=n_head, adapt_span_layer=adapt_span_layer, **kargs ) def forward(self, query, key, value, key_pe): # query size = B x M x H # key, value sizes = B x (M+L) x H key, value, key_pe = self.adaptive_span.trim_memory(query, key, value, key_pe) # compute attention from context # B x M (dest) x (M+L) (src) attn_cont = torch.matmul(query, key.transpose(-1, -2)) attn_cont = _unskew(attn_cont) # B x M x L # compute the effect of position embedding attn_pos = torch.matmul(query, key_pe) # B x M x L_pos attn = attn_cont + attn_pos attn = attn / math.sqrt(self.d_model) # B x M X L_pos attn = F.softmax(attn.float(), dim=-1).type_as(attn) # trim attention lengths according to the learned span attn = self.adaptive_span(attn) attn = self.dropout(attn) # B x M X L_pos attn_cont = _skew(attn, 0) # B x M X (L+M) out = torch.matmul(attn_cont, value) # B x M x H return out def get_cache_size(self): return self.adaptive_span.get_cache_size() class MultiHeadSeqAttention(nn.Module): def __init__(self, d_model, n_head, **kargs): nn.Module.__init__(self) assert d_model % n_head == 0 self.n_head = n_head self.head_dim = d_model // n_head self.attn = SeqAttention(d_model=self.head_dim, n_head=n_head, **kargs) self.proj_query = nn.Linear(d_model, d_model, bias=False) nn.init.xavier_normal_(self.proj_query.weight) self.proj_out = nn.Linear(d_model, d_model, bias=False) nn.init.xavier_normal_(self.proj_out.weight) self.proj_val = nn.Linear(d_model, d_model, bias=False) nn.init.xavier_normal_(self.proj_val.weight) self.proj_key = nn.Linear(d_model, d_model, bias=False) nn.init.xavier_normal_(self.proj_key.weight) def head_reshape(self, x): K = self.n_head D = self.head_dim x = x.view(x.size()[:-1] + (K, D)) # B x (M+L) x K x D x = x.transpose(1, 2).contiguous() # B x K x (M+L) x D x = x.view(-1, x.size(-2), x.size(-1)) # B_K x (M+L) x D return x def forward(self, query, key, value, key_pe): B = query.size(0) K = self.n_head D = self.head_dim M = query.size(1) query = self.proj_query(query) query = self.head_reshape(query) value = self.proj_val(value) value = self.head_reshape(value) key = self.proj_key(key) key = 
self.head_reshape(key) out = self.attn(query, key, value, key_pe) # B_K x M x D out = out.view(B, K, M, D) # B x K x M x D out = out.transpose(1, 2).contiguous() # B x M x K x D out = out.view(B, M, -1) # B x M x K_D out = self.proj_out(out) return out class FeedForwardLayer(nn.Module): def __init__(self, d_model, d_inner, dropout, **kargs): nn.Module.__init__(self) self.fc1 = nn.Linear(d_model, d_inner) self.fc2 = nn.Linear(d_inner, d_model) nn.init.xavier_uniform_(self.fc1.weight) nn.init.xavier_uniform_(self.fc2.weight) self.dropout = nn.Dropout(dropout) def forward(self, h): h1 = F.relu(self.fc1(h)) h1 = self.dropout(h1) h2 = self.fc2(h1) return h2 class TransformerSeqLayer(nn.Module): def __init__(self, d_model, **kargs): nn.Module.__init__(self) self.attn = MultiHeadSeqAttention(d_model=d_model, **kargs) self.norm1 = LayerNorm(d_model) self.ff = FeedForwardLayer(d_model=d_model, **kargs) self.norm2 = LayerNorm(d_model) def forward(self, h, h_cache, key_pe): # h = B x M x H # h_cache = B x L x H h_all = torch.cat([h_cache, h], dim=1) # B x (M+L) x H attn_out = self.attn(h, h_all, h_all, key_pe) h = self.norm1(h + attn_out) # B x M x H if self.ff is not None: ff_out = self.ff(h) out = self.norm2(h + ff_out) # B x M x H else: out = h return out def get_cache_size(self): return self.attn.attn.get_cache_size() class TransformerSeq(nn.Module): def __init__( self, vocab_size, d_model, n_head, n_layer, attn_span, emb_dropout, aux_loss_scaler, adapt_span_layer, **kargs ): nn.Module.__init__(self) # token embeddings self.in_emb = nn.Embedding(vocab_size, d_model) nn.init.normal_(self.in_emb.weight, mean=0, std=d_model ** -0.5) self.out_emb = nn.Linear(d_model, vocab_size) self.aux_loss_scaler = aux_loss_scaler if emb_dropout > 0: self.emb_dropout = nn.Dropout(emb_dropout) else: self.emb_dropout = None # position embeddings self.key_pe = nn.Parameter(torch.randn(1, d_model // n_head, attn_span)) self.layers = nn.ModuleList() self.layers.extend( TransformerSeqLayer( d_model=d_model, n_head=n_head, attn_span=attn_span, adapt_span_layer=adapt_span_layer, **kargs ) for _ in range(n_layer) ) def forward(self, x, h_cache, target=None): # x size = B x M block_size = x.size(1) h = self.in_emb(x) # B x M x H if self.emb_dropout is not None: h = self.emb_dropout(h) h_cache_next = [] for l, layer in enumerate(self.layers): cache_size = layer.attn.attn.get_cache_size() if cache_size > block_size: h_cache_next_l = torch.cat( [h_cache[l][:, -cache_size + block_size :, :], h], dim=1 ).detach() else: h_cache_next_l = h[:, -cache_size:, :].detach() h_cache_next.append(h_cache_next_l) h = layer(h, h_cache[l], self.key_pe) # B x M x H if self.emb_dropout is not None: h = self.emb_dropout(h) out = F.log_softmax(self.out_emb(h).float(), dim=-1).type_as(h) dummy_loss = None return out, h_cache_next, dummy_loss def get_aux_loss(self): loss = 0.0 for layer in self.layers: loss += layer.attn.attn.adaptive_span.get_loss() return self.aux_loss_scaler * loss def get_current_max_span(self): max_span = 0.0 for layer in self.layers: max_span = max( max_span, layer.attn.attn.adaptive_span.get_current_max_span() ) return max_span def get_current_avg_span(self): avg_span = 0.0 for layer in self.layers: avg_span += layer.attn.attn.adaptive_span.get_current_avg_span() return avg_span / len(self.layers)
KosmosX-API-main
kosmosX/fairseq/examples/adaptive_span/adaptive_span_model.py
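A quick numeric check of the _skew/_unskew relative-position trick defined in the file above: _skew shifts row i of a (B, M, L) tensor right by i positions (padding with pad_value) and _unskew reverses it, so composing the two recovers the input. The helper bodies are copied from the file; the tensor sizes are toy values.

import torch
import torch.nn.functional as F

def _skew(X, pad_value):
    # copied from the file above: shift every row one extra step to the right
    B, M, L = X.size()
    X = F.pad(X, (0, M + 1), value=pad_value)
    X = X.view(B, -1)[:, :-M]
    return X.view(B, M, M + L)

def _unskew(X):
    # copied from the file above: reverse of _skew
    B, M, L = X.size()
    L -= M
    X = F.pad(X.view(B, -1), (0, M))
    return X.view(B, M, M + L + 1)[:, :, :L]

X = torch.arange(2 * 3 * 4, dtype=torch.float).view(2, 3, 4)   # B=2, M=3, L=4
assert torch.equal(_unskew(_skew(X, 0)), X)                    # round-trips exactly
print(_skew(X, 0)[0])                                          # row i shifted right by i, zeros elsewhere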
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import importlib
import os

# automatically import any Python files in the current directory
cur_dir = os.path.dirname(__file__)
for file in os.listdir(cur_dir):
    path = os.path.join(cur_dir, file)
    if (
        not file.startswith("_")
        and not file.startswith(".")
        and (file.endswith(".py") or os.path.isdir(path))
    ):
        mod_name = file[: file.find(".py")] if file.endswith(".py") else file
        module = importlib.import_module(__name__ + "." + mod_name)
KosmosX-API-main
kosmosX/fairseq/examples/adaptive_span/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math from dataclasses import dataclass from fairseq import metrics, utils from fairseq.criterions import register_criterion from fairseq.criterions.cross_entropy import CrossEntropyCriterion from fairseq.dataclass import FairseqDataclass from omegaconf import II @dataclass class AdaptiveSpanCriterionConfig(FairseqDataclass): sentence_avg: bool = II("optimization.sentence_avg") @register_criterion("adaptive_span_loss", dataclass=AdaptiveSpanCriterionConfig) class AdaptiveSpanCriterion(CrossEntropyCriterion): def __init__(self, task, sentence_avg): super().__init__(task, sentence_avg) def forward(self, model, sample, reduce=True): """Compute the loss for the given sample. Returns a tuple with three elements: 1) the loss here is summed, different from the adaptive span code 2) the sample size, which is used as the denominator for the gradient 3) logging outputs to display while training """ net_output = model(**sample["net_input"]) loss, aux_loss, avg_span, max_span = self.compute_loss( model, net_output, sample, reduce=reduce ) sample_size = ( sample["target"].size(0) if self.sentence_avg else sample["ntokens"] ) loss /= sample_size total_loss = loss + aux_loss sample_size = 1 logging_output = { "loss": loss.data, "ntokens": sample["ntokens"], "nsentences": sample["target"].size(0), "sample_size": sample_size, "total_loss": total_loss.data, "avg_span": avg_span * sample_size, "max_span": max_span * sample_size, } return total_loss, sample_size, logging_output def compute_loss(self, model, net_output, sample, reduce=True): loss, _ = super().compute_loss(model, net_output, sample, reduce) aux_loss = model.get_aux_loss() avg_span = model.get_current_avg_span() max_span = model.get_current_max_span() return loss, aux_loss, avg_span, max_span @staticmethod def reduce_metrics(logging_outputs) -> None: """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) total_loss_sum = sum(log.get("total_loss", 0) for log in logging_outputs) avg_span_sum = sum(log.get("avg_span", 0) for log in logging_outputs) max_span_sum = sum(log.get("max_span", 0) for log in logging_outputs) # we divide by log(2) to convert the loss from base e to base 2 metrics.log_scalar( "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 ) metrics.log_scalar("avg_span", avg_span_sum / sample_size, sample_size, round=3) metrics.log_scalar("max_span", max_span_sum / sample_size, sample_size, round=3) # total loss contains the L1 norm on adaptive-span metrics.log_scalar( "total_loss", total_loss_sum / sample_size / math.log(2), sample_size, round=3, ) if sample_size != ntokens: metrics.log_scalar( "nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3 ) metrics.log_derived( "ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg) ) else: metrics.log_derived( "ppl", lambda meters: utils.get_perplexity(meters["loss"].avg) ) @staticmethod def logging_outputs_can_be_summed() -> bool: """ Whether the logging outputs returned by `forward` can be summed across workers prior to calling `reduce_metrics`. Setting this to True will improves distributed training speed. """ return True
KosmosX-API-main
kosmosX/fairseq/examples/adaptive_span/adaptive_span_loss.py
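A tiny numeric illustration of the nat-to-bit conversion in reduce_metrics() above: the summed NLL (in nats) is divided by the sample size and by log(2) before logging, and the derived perplexity is then 2 raised to that value (fairseq's default base). The numbers are made up.

import math

loss_sum, sample_size = 345.6, 128                 # summed nats, number of targets
loss_bits = loss_sum / sample_size / math.log(2)
print(f"loss = {loss_bits:.3f} bits/target, ppl ~ {2 ** loss_bits:.2f}")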
../truncated_bptt/truncated_bptt_lm_task.py
KosmosX-API-main
kosmosX/fairseq/examples/adaptive_span/truncated_bptt_lm_task.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn as nn import torch.nn.functional as F class AdaptiveMask(nn.Module): """Soft masking function for adaptive size. It masks out the last K values of an input. The masking value goes from 1 to 0 gradually, so K can be learned with back-propagation. Args: max_size: maximum size (i.e. input dimension) ramp_size: size of the ramp going from 0 to 1 init_val: initial size proportion not to be masked out shape: learn multiple sizes independent of each other """ def __init__(self, max_size, ramp_size, init_val=0, shape=(1,)): nn.Module.__init__(self) self._max_size = max_size self._ramp_size = ramp_size self.current_val = nn.Parameter(torch.zeros(*shape) + init_val) mask_template = torch.linspace(1 - max_size, 0, steps=max_size) self.register_buffer("mask_template", mask_template) def forward(self, x): mask = self.mask_template.float() + self.current_val.float() * self._max_size mask = mask / self._ramp_size + 1 mask = mask.clamp(0, 1) if x.size(-1) < self._max_size: # the input could have been trimmed beforehand to save computation mask = mask.narrow(-1, self._max_size - x.size(-1), x.size(-1)) x = (x * mask).type_as(x) return x def get_current_max_size(self, include_ramp=True): current_size = math.ceil(self.current_val.max().item() * self._max_size) if include_ramp: current_size += self._ramp_size current_size = max(0, min(self._max_size, current_size)) return current_size def get_current_avg_size(self, include_ramp=True): current_size = math.ceil( self.current_val.float().mean().item() * self._max_size ) if include_ramp: current_size += self._ramp_size current_size = max(0, min(self._max_size, current_size)) return current_size def clamp_param(self): """this need to be called after each update""" self.current_val.data.clamp_(0, 1) class AdaptiveSpan(nn.Module): """Adaptive attention span for Transformerself. This module learns an attention span length from data for each self-attention head. 
Args: attn_span: maximum attention span adapt_span_loss: loss coefficient for the span length adapt_span_ramp: length of the masking ramp adapt_span_init: initial size ratio adapt_span_cache: adapt cache size to reduce memory usage """ def __init__( self, attn_span, adapt_span_ramp, adapt_span_init, n_head, adapt_span_layer, **kargs ): nn.Module.__init__(self) self._max_span = attn_span self._n_head = n_head self._adapt_span_layer = adapt_span_layer if self._adapt_span_layer: self._mask = AdaptiveMask( max_size=self._max_span, ramp_size=adapt_span_ramp, init_val=adapt_span_init, ) else: self._mask = AdaptiveMask( max_size=self._max_span, ramp_size=adapt_span_ramp, init_val=adapt_span_init, shape=(n_head, 1, 1), ) def forward(self, attn, normalize=True): """mask attention with the right span""" # batch and head dimensions are merged together, so separate them first self.clamp_param() if self._adapt_span_layer: attn = self._mask(attn) else: B = attn.size(0) # batch size M = attn.size(1) # block size attn = attn.reshape(B // self._n_head, self._n_head, M, -1) attn = self._mask(attn) attn = attn.view(B, M, -1) return attn def get_trim_len(self): """how much of memory can be trimmed to reduce computation""" L = self._max_span trim_len = min(L - 1, L - self._mask.get_current_max_size()) # too fine granularity might be bad for the memory management trim_len = math.floor(trim_len / 64) * 64 return trim_len def trim_memory(self, query, key, value, key_pe): """trim out unnecessary memory beforehand to reduce computation""" trim_len = self.get_trim_len() cache_size = key.size(1) - query.size(1) trim_len_cache = trim_len - (self._max_span - cache_size) if trim_len_cache > 0: key = key[:, trim_len_cache:, :] value = value[:, trim_len_cache:, :] elif trim_len_cache < 0: # cache is too short! this happens when validation resumes # after a lot of updates. key = F.pad(key, [0, 0, -trim_len_cache, 0]) value = F.pad(value, [0, 0, -trim_len_cache, 0]) if trim_len > 0: if key_pe is not None: key_pe = key_pe[:, :, trim_len:] return key, value, key_pe def get_cache_size(self): """determine how long the cache should be""" trim_len = self.get_trim_len() # give a buffer of 64 steps since a span might increase # in future updates return min(self._max_span, self._max_span - trim_len + 64) def get_loss(self): """a loss term for regularizing the span length""" return self._max_span * self._mask.current_val.float().mean() def get_current_max_span(self): return self._mask.get_current_max_size() def get_current_avg_span(self): return self._mask.get_current_avg_size() def clamp_param(self): self._mask.clamp_param()
KosmosX-API-main
kosmosX/fairseq/examples/adaptive_span/adaptive_span_attention.py
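A standalone sketch of the soft mask that AdaptiveMask builds above: positions beyond the learned span are ramped from 1 down to 0 over ramp_size steps, which keeps the span parameter differentiable. The sizes and current_val below are illustrative.

import torch

max_size, ramp_size = 8, 4
current_val = torch.tensor(0.25)                              # learned span fraction
template = torch.linspace(1 - max_size, 0, steps=max_size)    # [-7, -6, ..., 0]
mask = ((template + current_val * max_size) / ramp_size + 1).clamp(0, 1)
print(mask)   # ~[0.00, 0.00, 0.25, 0.50, 0.75, 1.00, 1.00, 1.00]: oldest masked, newest kept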
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from torch.optim import Adagrad from fairseq.optim import LegacyFairseqOptimizer, register_optimizer @register_optimizer("adagrad_with_grad_clip") class FairseqAdagradWithGradClip(LegacyFairseqOptimizer): def __init__(self, args, params): super().__init__(args) self._optimizer = AdagradWithGradClip(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" # fmt: off parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD', help='weight decay') parser.add_argument('--adagrad-clip', default=0.0, type=float, metavar='D', help='internal grad clip') # fmt: on @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { "lr": self.args.lr[0], "weight_decay": self.args.weight_decay, "grad_clip": self.args.adagrad_clip, } @property def supports_flat_params(self): return False def _clip_grad(clr, grad, group_grad_clip): if group_grad_clip > 0: norm = grad.norm(2).item() if norm > group_grad_clip: clr *= group_grad_clip / (norm + 1e-10) return clr class AdagradWithGradClip(Adagrad): """Adagrad algorithm with custom gradient clipping""" def __init__( self, params, lr=1e-2, lr_decay=0, weight_decay=0, initial_accumulator_value=0, grad_clip=0, ): Adagrad.__init__( self, params, lr=lr, lr_decay=lr_decay, weight_decay=weight_decay, initial_accumulator_value=initial_accumulator_value, ) self.defaults["grad_clip"] = grad_clip self.param_groups[0].setdefault("grad_clip", grad_clip) def step(self, closure=None): loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data state = self.state[p] state["step"] += 1 if group["weight_decay"] != 0: if p.grad.data.is_sparse: raise RuntimeError( "weight_decay option is " "not compatible with sparse " "gradients" ) grad = grad.add(group["weight_decay"], p.data) clr = group["lr"] / (1 + (state["step"] - 1) * group["lr_decay"]) # clip clr = _clip_grad(clr=clr, grad=grad, group_grad_clip=group["grad_clip"]) if grad.is_sparse: # the update is non-linear so indices must be unique grad = grad.coalesce() grad_indices = grad._indices() grad_values = grad._values() size = grad.size() def make_sparse(values): constructor = grad.new if grad_indices.dim() == 0 or values.dim() == 0: return constructor().resize_as_(grad) return constructor(grad_indices, values, size) state["sum"].add_(make_sparse(grad_values.pow(2))) std = state["sum"]._sparse_mask(grad) std_values = std._values().sqrt_().add_(1e-10) p.data.add_(-clr, make_sparse(grad_values / std_values)) else: state["sum"].addcmul_(1, grad, grad) std = state["sum"].sqrt().add_(1e-10) p.data.addcdiv_(-clr, grad, std) return loss
KosmosX-API-main
kosmosX/fairseq/examples/adaptive_span/adagrad_with_grad_clip.py
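A tiny numeric example of the clipping rule in _clip_grad above: rather than rescaling the gradient itself, the effective learning rate is shrunk when the gradient norm exceeds the threshold. Numbers are made up.

import torch

clr, grad_clip = 0.1, 1.0
grad = torch.tensor([3.0, 4.0])         # L2 norm = 5.0 > grad_clip
norm = grad.norm(2).item()
if grad_clip > 0 and norm > grad_clip:
    clr *= grad_clip / (norm + 1e-10)
print(clr)                              # ~0.02, i.e. lr scaled down by 1/5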
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from fairseq.models.bart import BARTModel import argparse XSUM_KWARGS = dict(beam=6, lenpen=1.0, max_len_b=60, min_len=10, no_repeat_ngram_size=3) CNN_KWARGS = dict(beam=4, lenpen=2.0, max_len_b=140, min_len=55, no_repeat_ngram_size=3) @torch.no_grad() def generate(bart, infile, outfile="bart_hypo.txt", bsz=32, n_obs=None, **eval_kwargs): count = 1 # if n_obs is not None: bsz = min(bsz, n_obs) with open(infile) as source, open(outfile, "w") as fout: sline = source.readline().strip() slines = [sline] for sline in source: if n_obs is not None and count > n_obs: break if count % bsz == 0: hypotheses_batch = bart.sample(slines, **eval_kwargs) for hypothesis in hypotheses_batch: fout.write(hypothesis + "\n") fout.flush() slines = [] slines.append(sline.strip()) count += 1 if slines != []: hypotheses_batch = bart.sample(slines, **eval_kwargs) for hypothesis in hypotheses_batch: fout.write(hypothesis + "\n") fout.flush() def main(): """ Usage:: python examples/bart/summarize.py \ --model-dir $HOME/bart.large.cnn \ --model-file model.pt \ --src $HOME/data-bin/cnn_dm/test.source """ parser = argparse.ArgumentParser() parser.add_argument( "--model-dir", required=True, type=str, default="bart.large.cnn/", help="path containing model file and src_dict.txt", ) parser.add_argument( "--model-file", default="checkpoint_best.pt", help="where in model_dir are weights saved", ) parser.add_argument( "--src", default="test.source", help="text to summarize", type=str ) parser.add_argument( "--out", default="test.hypo", help="where to save summaries", type=str ) parser.add_argument("--bsz", default=32, help="where to save summaries", type=int) parser.add_argument( "--n", default=None, help="how many examples to summarize", type=int ) parser.add_argument( "--xsum-kwargs", action="store_true", default=False, help="if true use XSUM_KWARGS else CNN_KWARGS", ) args = parser.parse_args() eval_kwargs = XSUM_KWARGS if args.xsum_kwargs else CNN_KWARGS if args.model_dir == "pytorch/fairseq": bart = torch.hub.load("pytorch/fairseq", args.model_file) else: bart = BARTModel.from_pretrained( args.model_dir, checkpoint_file=args.model_file, data_name_or_path=args.model_dir, ) bart = bart.eval() if torch.cuda.is_available(): bart = bart.cuda().half() generate( bart, args.src, bsz=args.bsz, n_obs=args.n, outfile=args.out, **eval_kwargs ) if __name__ == "__main__": main()
KosmosX-API-main
kosmosX/fairseq/examples/bart/summarize.py
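A minimal programmatic sketch of the batched sampling wrapped by generate() above, assuming a downloaded bart.large.cnn checkpoint in the working directory; the path and the input text are illustrative.

import torch
from fairseq.models.bart import BARTModel

bart = BARTModel.from_pretrained(
    "bart.large.cnn", checkpoint_file="model.pt", data_name_or_path="bart.large.cnn"
).eval()
if torch.cuda.is_available():
    bart = bart.cuda().half()

hypos = bart.sample(
    ["Some long source document to summarize ..."],
    beam=4, lenpen=2.0, max_len_b=140, min_len=55, no_repeat_ngram_size=3,
)
print(hypos[0])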
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import glob import argparse from utils.dedup import deup import sys WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): print('please specify your working directory root in OS environment variable WORKDIR_ROOT. Exitting..."') sys.exit(-1) def main(): parser = argparse.ArgumentParser() parser.add_argument("--from-folder", type=str, required=True, help="the data folder to be dedup") parser.add_argument("--to-folder", type=str, required=True, help="the data folder to save deduped data") parser.add_argument('--directions', type=str, default=None, required=False) args = parser.parse_args() if args.directions is None: raw_files = glob.glob(f'{args.from_folder}/train*') directions = [os.path.split(file_path)[-1].split('.')[1] for file_path in raw_files] else: directions = args.directions.split(',') directions = sorted(set(directions)) for direction in directions: src, tgt = direction.split('-') src_file = f'{args.from_folder}/train.{src}-{tgt}.{src}' tgt_file = f'{args.from_folder}/train.{src}-{tgt}.{tgt}' src_file_out = f'{args.to_folder}/train.{src}-{tgt}.{src}' tgt_file_out = f'{args.to_folder}/train.{src}-{tgt}.{tgt}' assert src_file != src_file_out assert tgt_file != tgt_file_out print(f'deduping {src_file}, {tgt_file}') deup(src_file, tgt_file, src_file_out, tgt_file_out) if __name__ == "__main__": main()
KosmosX-API-main
kosmosX/fairseq/examples/multilingual/data_scripts/dedup_all.py
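A quick sketch of how the training file names are parsed into language directions when --directions is not given; the file paths are made up.

import os

raw_files = ["/data/train.de_DE-en_XX.de_DE", "/data/train.de_DE-en_XX.en_XX"]
directions = sorted({os.path.split(p)[-1].split(".")[1] for p in raw_files})
print(directions)    # ['de_DE-en_XX']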
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import argparse import sys WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): print('please specify your working directory root in OS environment variable WORKDIR_ROOT. Exitting..."') sys.exit(-1) def load_langs(path): with open(path) as fr: langs = [l.strip() for l in fr] return langs def load_sentences(raw_data, split, direction): src, tgt = direction.split('-') src_path = f"{raw_data}/{split}.{direction}.{src}" tgt_path = f"{raw_data}/{split}.{direction}.{tgt}" if os.path.exists(src_path) and os.path.exists(tgt_path): return [(src, open(src_path).read().splitlines()), (tgt, open(tgt_path).read().splitlines())] else: return [] def swap_direction(d): src, tgt = d.split('-') return f'{tgt}-{src}' def get_all_test_data(raw_data, directions, split='test'): test_data = [ x for dd in directions for d in [dd, swap_direction(dd)] for x in load_sentences(raw_data, split, d) ] # all_test_data = {s for _, d in test_data for s in d} all_test_data = {} for lang, d in test_data: for s in d: s = s.strip() lgs = all_test_data.get(s, set()) lgs.add(lang) all_test_data[s] = lgs return all_test_data, test_data def check_train_sentences(src_path, tgt_path, direction, all_test_data, mess_up_train={}): # src, tgt = direction.split('-') print(f'check training data for {direction} in {src_path} and {tgt_path}') size = 0 overlapped_size_counted_dup = 0 if not os.path.exists(tgt_path) or not os.path.exists(src_path): return mess_up_train, size, overlapped_size_counted_dup with open(src_path) as f, open(tgt_path) as g: for src_line, tgt_line in zip(f, g): s = src_line.strip() t = tgt_line.strip() size += 1 if s in all_test_data: langs = mess_up_train.get(s, set()) langs.add(direction) mess_up_train[s] = langs overlapped_size_counted_dup += 1 if t in all_test_data: langs = mess_up_train.get(t, set()) langs.add(direction) mess_up_train[t] = langs overlapped_size_counted_dup += 1 print(f'{direction}: size={size}, overlapped={overlapped_size_counted_dup}') return mess_up_train, size, overlapped_size_counted_dup def check_train_all(raw_data, directions, all_test_data): mess_up_train = {} data_sizes = {} # raw_data = '~chau/data-bin/MineBART/multilingual_mined_100M/en_XX/et_EE-en_XX/all.{en_XX, et_EE}' print(f'checking training data againsts # {len(all_test_data)} sentences') print('example test data: ', [s for i, s in enumerate(all_test_data.keys()) if i < 10]) for direction in directions: src, tgt = direction.split('-') path = f'{raw_data}/en_XX/{direction}/all' src_path = f'{path}.{src}' tgt_path = f'{path}.{tgt}' print(f'checking {src_path} {tgt_path}') _, size, overlapped_size_counted_dup = check_train_sentences(src_path, tgt_path, direction, all_test_data, mess_up_train) data_sizes[direction] = (size, overlapped_size_counted_dup) return mess_up_train, data_sizes def main(): parser = argparse.ArgumentParser() parser.add_argument("--folder", type=str, required=True, help="the data folder ") parser.add_argument("--test-data", type=str, required=True, help="the test data folder ") parser.add_argument('--directions', type=str, default=None, required=False) args = parser.parse_args() directions = args.directions.split(',') directions = sorted(set(directions)) # print(f'checking where {args.split} split data are in training') # print(f'direction\tcommon_count\tsrc common\ttgt 
common\tfrom_size\tto_size') raw_data = args.folder all_test_data, test_data = get_all_test_data(args.test_data, directions, split='test') mess_up_train, data_sizes = check_train_all(raw_data, directions, all_test_data) print(data_sizes) if __name__ == "__main__": main()
KosmosX-API-main
kosmosX/fairseq/examples/multilingual/data_scripts/check_valid_test_overlaps.py
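A toy illustration of the overlap bookkeeping in check_train_sentences() above: a training pair is flagged when either side also appears among the pooled test sentences. All data below is made up.

all_test_data = {"the cat sat on the mat": {"en_XX"}}
train_pairs = [
    ("the cat sat on the mat", "die Katze sass auf der Matte"),
    ("a brand new sentence", "ein ganz neuer Satz"),
]

mess_up_train, overlapped = {}, 0
for direction in ["de_DE-en_XX"]:
    for s, t in train_pairs:
        for side in (s, t):
            if side in all_test_data:
                mess_up_train.setdefault(side, set()).add(direction)
                overlapped += 1
print(overlapped, mess_up_train)   # 1 {'the cat sat on the mat': {'de_DE-en_XX'}}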
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os, sys import subprocess import re from subprocess import check_output WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): print('please specify your working directory root in OS environment variable WORKDIR_ROOT. Exitting..."') sys.exit(-1) BLEU_REGEX = re.compile("^BLEU\\S* = (\\S+) ") def run_eval_bleu(cmd): output = check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode("utf-8").strip() print(output) bleu = -1.0 for line in output.strip().split('\n'): m = BLEU_REGEX.search(line) if m is not None: bleu = m.groups()[0] bleu = float(bleu) break return bleu def check_data_test_bleu(raw_folder, data_lang_pairs): not_matchings = [] for sacrebleu_set, src_tgts in data_lang_pairs: for src_tgt in src_tgts: print(f'checking test bleus for: {src_tgt} at {sacrebleu_set}') src, tgt = src_tgt.split('-') ssrc, stgt = src[:2], tgt[:2] if os.path.exists(f'{raw_folder}/test.{tgt}-{src}.{src}'): # reversed direction may have different test set test_src = f'{raw_folder}/test.{tgt}-{src}.{src}' else: test_src = f'{raw_folder}/test.{src}-{tgt}.{src}' cmd1 = f'cat {test_src} | sacrebleu -t "{sacrebleu_set}" -l {stgt}-{ssrc}; [ $? -eq 0 ] || echo ""' test_tgt = f'{raw_folder}/test.{src}-{tgt}.{tgt}' cmd2 = f'cat {test_tgt} | sacrebleu -t "{sacrebleu_set}" -l {ssrc}-{stgt}; [ $? -eq 0 ] || echo ""' bleu1 = run_eval_bleu(cmd1) if bleu1 != 100.0: not_matchings.append(f'{sacrebleu_set}:{src_tgt} source side not matching: {test_src}') bleu2 = run_eval_bleu(cmd2) if bleu2 != 100.0: not_matchings.append(f'{sacrebleu_set}:{src_tgt} target side not matching: {test_tgt}') return not_matchings if __name__ == "__main__": to_data_path = f'{WORKDIR_ROOT}/iwsltv2' not_matching = check_data_test_bleu( f'{to_data_path}/raw', [ ('iwslt17', ['en_XX-ar_AR', 'en_XX-ko_KR', 'ar_AR-en_XX', 'ko_KR-en_XX']), ('iwslt17', ['en_XX-it_IT', 'en_XX-nl_XX', 'it_IT-en_XX', 'nl_XX-en_XX']), ('iwslt17/tst2015', ['en_XX-vi_VN', "vi_VN-en_XX"]), ] ) if len(not_matching) > 0: print('the following datasets do not have matching test datasets:\n\t', '\n\t'.join(not_matching))
KosmosX-API-main
kosmosX/fairseq/examples/multilingual/data_scripts/check_iswlt_test_data.py
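A small check of the BLEU_REGEX parsing used above, applied to one line of sacrebleu-style output (the line itself is illustrative); a score of 100 means the local test file matches the official set.

import re

BLEU_REGEX = re.compile("^BLEU\\S* = (\\S+) ")
line = "BLEU+case.mixed+tok.13a+version.1.4.14 = 100.0 99.9/99.9/99.8/99.8 (BP = 1.000)"
m = BLEU_REGEX.search(line)
print(float(m.groups()[0]) if m is not None else -1.0)   # 100.0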
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import itertools import os import csv from collections import defaultdict from six.moves import zip import io import wget import sys from subprocess import check_call # scripts and data locations CWD = os.getcwd() UTILS = f"{CWD}/utils" MOSES = f"{UTILS}/mosesdecoder" WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): print('please specify your working directory root in OS environment variable WORKDIR_ROOT. Exitting..."') sys.exit(-1) # please donwload mosesdecoder here: detok_cmd = f'{MOSES}/scripts/tokenizer/detokenizer.perl' def call(cmd): print(f"Executing: {cmd}") check_call(cmd, shell=True) class MultiLingualAlignedCorpusReader(object): """A class to read TED talk dataset """ def __init__(self, corpus_path, delimiter='\t', target_token=True, bilingual=True, corpus_type='file', lang_dict={'source': ['fr'], 'target': ['en']}, eval_lang_dict=None, zero_shot=False, detok=True, ): self.empty_line_flag = 'NULL' self.corpus_path = corpus_path self.delimiter = delimiter self.bilingual = bilingual self.lang_dict = lang_dict self.lang_set = set() self.target_token = target_token self.zero_shot = zero_shot self.eval_lang_dict = eval_lang_dict self.corpus_type = corpus_type self.detok = detok for list_ in self.lang_dict.values(): for lang in list_: self.lang_set.add(lang) self.data = dict() self.data['train'] = self.read_aligned_corpus(split_type='train') self.data['test'] = self.read_aligned_corpus(split_type='test') self.data['dev'] = self.read_aligned_corpus(split_type='dev') def read_data(self, file_loc_): data_list = list() with io.open(file_loc_, 'r', encoding='utf8') as fp: for line in fp: try: text = line.strip() except IndexError: text = self.empty_line_flag data_list.append(text) return data_list def filter_text(self, dict_): if self.target_token: field_index = 1 else: field_index = 0 data_dict = defaultdict(list) list1 = dict_['source'] list2 = dict_['target'] for sent1, sent2 in zip(list1, list2): try: src_sent = ' '.join(sent1.split()[field_index: ]) except IndexError: src_sent = 'NULL' if src_sent.find(self.empty_line_flag) != -1 or len(src_sent) == 0: continue elif sent2.find(self.empty_line_flag) != -1 or len(sent2) == 0: continue else: data_dict['source'].append(sent1) data_dict['target'].append(sent2) return data_dict def read_file(self, split_type, data_type): return self.data[split_type][data_type] def save_file(self, path_, split_type, data_type, lang): tok_file = tok_file_name(path_, lang) with io.open(tok_file, 'w', encoding='utf8') as fp: for line in self.data[split_type][data_type]: fp.write(line + '\n') if self.detok: de_tok(tok_file, lang) def add_target_token(self, list_, lang_id): new_list = list() token = '__' + lang_id + '__' for sent in list_: new_list.append(token + ' ' + sent) return new_list def read_from_single_file(self, path_, s_lang, t_lang): data_dict = defaultdict(list) with io.open(path_, 'r', encoding='utf8') as fp: reader = csv.DictReader(fp, delimiter='\t', quoting=csv.QUOTE_NONE) for row in reader: data_dict['source'].append(row[s_lang]) data_dict['target'].append(row[t_lang]) if self.target_token: text = self.add_target_token(data_dict['source'], t_lang) data_dict['source'] = text return data_dict['source'], data_dict['target'] def read_aligned_corpus(self, split_type='train'): data_dict = defaultdict(list) iterable = [] 
s_list = [] t_list = [] if self.zero_shot: if split_type == "train": iterable = zip(self.lang_dict['source'], self.lang_dict['target']) else: iterable = zip(self.eval_lang_dict['source'], self.eval_lang_dict['target']) elif self.bilingual: iterable = itertools.product(self.lang_dict['source'], self.lang_dict['target']) for s_lang, t_lang in iterable: if s_lang == t_lang: continue if self.corpus_type == 'file': split_type_file_path = os.path.join(self.corpus_path, "all_talks_{}.tsv".format(split_type)) s_list, t_list = self.read_from_single_file(split_type_file_path, s_lang=s_lang, t_lang=t_lang) data_dict['source'] += s_list data_dict['target'] += t_list new_data_dict = self.filter_text(data_dict) return new_data_dict def read_langs(corpus_path): split_type_file_path = os.path.join(corpus_path, 'extracted', "all_talks_dev.tsv") with io.open(split_type_file_path, 'r', encoding='utf8') as fp: reader = csv.DictReader(fp, delimiter='\t', quoting=csv.QUOTE_NONE) header = next(reader) return [k for k in header.keys() if k != 'talk_name'] def extra_english(corpus_path, split): split_type_file_path = os.path.join(corpus_path, f"all_talks_{split}.tsv") output_split_type_file_path = os.path.join(corpus_path, f"all_talks_{split}.en") with io.open(split_type_file_path, 'r', encoding='utf8') as fp, io.open(output_split_type_file_path, 'w', encoding='utf8') as fw: reader = csv.DictReader(fp, delimiter='\t', quoting=csv.QUOTE_NONE) for row in reader: line = row['en'] fw.write(line + '\n') de_tok(output_split_type_file_path, 'en') def tok_file_name(filename, lang): seps = filename.split('.') seps.insert(-1, 'tok') tok_file = '.'.join(seps) return tok_file def de_tok(tok_file, lang): # seps = tok_file.split('.') # seps.insert(-1, 'detok') # de_tok_file = '.'.join(seps) de_tok_file = tok_file.replace('.tok.', '.') cmd = 'perl {detok_cmd} -l {lang} < {tok_file} > {de_tok_file}'.format( detok_cmd=detok_cmd, tok_file=tok_file, de_tok_file=de_tok_file, lang=lang[:2]) call(cmd) def extra_bitex( ted_data_path, lsrc_lang, ltrg_lang, target_token, output_data_path, ): def get_ted_lang(lang): long_langs = ['pt-br', 'zh-cn', 'zh-tw', 'fr-ca'] if lang[:5] in long_langs: return lang[:5] elif lang[:4] =='calv': return lang[:5] elif lang in ['pt_BR', 'zh_CN', 'zh_TW', 'fr_CA']: return lang.lower().replace('_', '-') return lang[:2] src_lang = get_ted_lang(lsrc_lang) trg_lang = get_ted_lang(ltrg_lang) train_lang_dict={'source': [src_lang], 'target': [trg_lang]} eval_lang_dict = {'source': [src_lang], 'target': [trg_lang]} obj = MultiLingualAlignedCorpusReader(corpus_path=ted_data_path, lang_dict=train_lang_dict, target_token=target_token, corpus_type='file', eval_lang_dict=eval_lang_dict, zero_shot=False, bilingual=True) os.makedirs(output_data_path, exist_ok=True) lsrc_lang = lsrc_lang.replace('-', '_') ltrg_lang = ltrg_lang.replace('-', '_') obj.save_file(output_data_path + f"/train.{lsrc_lang}-{ltrg_lang}.{lsrc_lang}", split_type='train', data_type='source', lang=src_lang) obj.save_file(output_data_path + f"/train.{lsrc_lang}-{ltrg_lang}.{ltrg_lang}", split_type='train', data_type='target', lang=trg_lang) obj.save_file(output_data_path + f"/test.{lsrc_lang}-{ltrg_lang}.{lsrc_lang}", split_type='test', data_type='source', lang=src_lang) obj.save_file(output_data_path + f"/test.{lsrc_lang}-{ltrg_lang}.{ltrg_lang}", split_type='test', data_type='target', lang=trg_lang) obj.save_file(output_data_path + f"/valid.{lsrc_lang}-{ltrg_lang}.{lsrc_lang}", split_type='dev', data_type='source', lang=src_lang) 
obj.save_file(output_data_path + f"/valid.{lsrc_lang}-{ltrg_lang}.{ltrg_lang}", split_type='dev', data_type='target', lang=trg_lang) def bar_custom(current, total, width=80): print("Downloading: %d%% [%d / %d] Ks" % (current / total * 100, current / 1000, total / 1000), end='\r') def download_and_extract(download_to, extract_to): url = 'http://phontron.com/data/ted_talks.tar.gz' filename = f"{download_to}/ted_talks.tar.gz" if os.path.exists(filename): print(f'{filename} has already been downloaded so skip') else: filename = wget.download(url, filename, bar=bar_custom) if os.path.exists(f'{extract_to}/all_talks_train.tsv'): print('Already extracted so skip') else: extract_cmd = f'tar xzfv "{filename}" -C "{extract_to}"' call(extract_cmd) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser() parser.add_argument('--ted_data_path', type=str, default=WORKDIR_ROOT, required=False) parser.add_argument( '--direction-list', type=str, # default=None, #for ML50 default=( "bn_IN-en_XX,he_IL-en_XX,fa_IR-en_XX,id_ID-en_XX,sv_SE-en_XX,pt_XX-en_XX,ka_GE-en_XX,ka_GE-en_XX,th_TH-en_XX," "mr_IN-en_XX,hr_HR-en_XX,uk_UA-en_XX,az_AZ-en_XX,mk_MK-en_XX,gl_ES-en_XX,sl_SI-en_XX,mn_MN-en_XX," #non-english directions # "fr_XX-de_DE," # replaced with wmt20 # "ja_XX-ko_KR,es_XX-pt_XX,ru_RU-sv_SE,hi_IN-bn_IN,id_ID-ar_AR,cs_CZ-pl_PL,ar_AR-tr_TR" ), required=False) parser.add_argument('--target-token', action='store_true', default=False) parser.add_argument('--extract-all-english', action='store_true', default=False) args = parser.parse_args() import sys # TED Talks data directory ted_data_path = args.ted_data_path download_to = f'{ted_data_path}/downloads' extract_to = f'{ted_data_path}/extracted' #DESTDIR=${WORKDIR_ROOT}/ML50/raw/ output_path = f'{ted_data_path}/ML50/raw' os.makedirs(download_to, exist_ok=True) os.makedirs(extract_to, exist_ok=True) os.makedirs(output_path, exist_ok=True) download_and_extract(download_to, extract_to) if args.extract_all_english: for split in ['train', 'dev', 'test']: extra_english(ted_data_path, split) exit(0) if args.direction_list is not None: directions = args.direction_list.strip().split(',') directions = [tuple(d.strip().split('-', 1)) for d in directions if d] else: langs = read_langs(ted_data_path) # directions = [ # '{}.{}'.format(src, tgt) # for src in langs # for tgt in langs # if src < tgt # ] directions = [('en', tgt) for tgt in langs if tgt != 'en'] print(f'num directions={len(directions)}: {directions}') for src_lang, trg_lang in directions: print('--working on {}-{}'.format(src_lang, trg_lang)) extra_bitex( extract_to, src_lang, trg_lang, target_token=args.target_token, output_data_path=output_path )
KosmosX-API-main
kosmosX/fairseq/examples/multilingual/data_scripts/download_ted_and_extract.py
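A quick look at the language-code mapping performed by get_ted_lang() inside extra_bitex() above, which converts ML50-style codes into the codes used in the TED talks TSV; the function body is copied from the file, the inputs are examples.

def get_ted_lang(lang):
    long_langs = ['pt-br', 'zh-cn', 'zh-tw', 'fr-ca']
    if lang[:5] in long_langs:
        return lang[:5]
    elif lang[:4] == 'calv':
        return lang[:5]
    elif lang in ['pt_BR', 'zh_CN', 'zh_TW', 'fr_CA']:
        return lang.lower().replace('_', '-')
    return lang[:2]

print(get_ted_lang("pt_BR"), get_ted_lang("en_XX"), get_ted_lang("zh_CN"))   # pt-br en zh-cn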
from typing import NamedTuple, List from urllib.parse import urlparse import os, sys import subprocess from subprocess import check_call, check_output import glob import wget import re import multiprocessing as mp from functools import partial import pathlib from collections import OrderedDict WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): print('please specify your working directory root in OS environment variable WORKDIR_ROOT. Exitting..."') sys.exit(-1) # scripts and data locations CWD = os.getcwd() UTILS = f"{CWD}/utils" MOSES = f"{UTILS}/mosesdecoder" SGM_TOOL = f'{MOSES}/scripts/ems/support/input-from-sgm.perl' TMX2CORPUS = f"{UTILS}/tmx2corpus" TMX_TOOL = f'python {TMX2CORPUS}/tmx2corpus.py' to_data_path = f'{WORKDIR_ROOT}/wmt' download_to = f'{to_data_path}/downloads' manually_downloads = f'{to_data_path}/downloads' extract_to = f'{to_data_path}/extracted' #DESTDIR=${WORKDIR_ROOT}/ML50/raw/ raw_data = f'{WORKDIR_ROOT}/ML50/raw' #### class DLDataset(NamedTuple): name: str train_urls: List[str] valid_urls: List[str] test_urls: List[str] train_files_patterns: List[str] = [] valid_files_patterns: List[str] = [] test_files_patterns: List[str] = [] def bar_custom(current, total, width=80): print("Downloading: %d%% [%d / %d] Ks" % (current / total * 100, current / 1000, total / 1000), end='\r') def get_downloaded_file(dl_folder, url): if isinstance(url, tuple): url, f = url else: url_f = urlparse(url) # f = os.path.split(url_f.path)[-1] f = '_'.join(url_f.path.split('/')[1:]) return url, f"{dl_folder}/{f}" def download_parts_and_combine(dl_folder, urls, filename): parts = [] for url_record in urls: url, part_file = get_downloaded_file(dl_folder, url_record) if os.path.exists(part_file): print(f'{part_file} has already been downloaded so skip') else: part_file = wget.download(url, part_file, bar=bar_custom) parts.append(part_file) def get_combine_cmd(parts): #default as tar.gz.?? 
return f'cat {" ".join(parts)} > {filename}' combine_cmd = get_combine_cmd(parts) call(combine_cmd, debug=True) return filename def download_a_url(dl_folder, url): url, filename = get_downloaded_file(dl_folder, url) if os.path.exists(filename): print(f'{filename} has already been downloaded so skip') return filename print(f'downloading {url} to {filename}') if isinstance(url, list) or isinstance(url, tuple): download_parts_and_combine(dl_folder, url, filename) else: wget.download(url, filename, bar=bar_custom) print(f'dowloaded: {filename}') return filename def download_files(dl_folder, urls, completed_urls={}): for url_record in urls: url, _ = get_downloaded_file(dl_folder, url_record) filename = download_a_url(dl_folder, url_record) completed_urls[str(url)] = filename return completed_urls def check_need_manual_downalod(dl_folder, to_manually_download_urls): to_be_manually_dowloaded = [] manually_completed_urls = {} for url_record, instruction in to_manually_download_urls: url, filename = get_downloaded_file(dl_folder, url_record) if not os.path.exists(filename): print(f'{url} need to be download manually, please download it manually following {instruction}; and copy it to {filename}') to_be_manually_dowloaded.append((url, filename)) else: manually_completed_urls[url] = filename # if len(to_be_manually_dowloaded) > 0: # raise ValueError('Missing files that need to be downloaded manually; stop the process now.') return to_be_manually_dowloaded def download_dataset(to_folder, dl_dataset, completed_urls={}): download_files(to_folder, dl_dataset.train_urls, completed_urls) download_files(to_folder, dl_dataset.valid_urls, completed_urls) download_files(to_folder, dl_dataset.test_urls, completed_urls) print('completed downloading') return completed_urls def call(cmd, debug=False): if debug: print(cmd) check_call(cmd, shell=True) def get_extract_name(file_path): path = os.path.split(file_path) return path[-1] + '_extract' #.split('.')[0] def extract_file(downloaded_file, extract_folder, get_extract_name=get_extract_name, debug=False): extract_name = get_extract_name(downloaded_file) extract_to = f'{extract_folder}/{extract_name}' os.makedirs(extract_to, exist_ok=True) if os.path.exists(f'{extract_to}/DONE'): print(f'{downloaded_file} has already been extracted to {extract_to} so skip') return extract_to def get_extract_cmd(filename): if filename.endswith('.tgz') or filename.endswith('tar.gz'): return f'tar xzfv {filename} -C {extract_to}' elif filename.endswith('.gz.tar'): return f'tar xfv {filename} -C {extract_to}; (cd {extract_to}; gzip -d *.gz; [ $? 
-eq 0 ] || gzip -d */*.gz)' elif filename.endswith('.tar'): return f'tar xfv {filename} -C {extract_to}' elif filename.endswith('.gz'): return f'cp {filename} {extract_to}; (cd {extract_to}; gzip -d *.gz)' elif filename.endswith('.zip'): return f'unzip {filename} -d {extract_to}' extract_cmd = get_extract_cmd(downloaded_file) print(f'extracting {downloaded_file}') if isinstance(extract_cmd, list): for c in extract_cmd: call(c, debug=debug) else: call(extract_cmd, debug=debug) call(f'echo DONE > {extract_to}/DONE') return extract_to def extract_all_files( completed_urls, extract_folder, get_extract_name=get_extract_name, completed_extraction={}, debug=False): extracted_folders = OrderedDict() for url, downloaded_file in set(completed_urls.items()): if downloaded_file in completed_extraction: print(f'{downloaded_file} is already extracted; so skip') continue folder = extract_file(downloaded_file, extract_folder, get_extract_name, debug) extracted_folders[url] = folder return extracted_folders def my_glob(folder): for p in [f'{folder}/*', f'{folder}/*/*', f'{folder}/*/*/*']: for f in glob.glob(p): yield f def sgm2raw(sgm, debug): to_file = sgm[0:len(sgm) - len('.sgm')] if os.path.exists(to_file): debug and print(f'{sgm} already converted to {to_file}; so skip') return to_file cmd = f'{SGM_TOOL} < {sgm} > {to_file}' call(cmd, debug) return to_file def tmx2raw(tmx, debug): to_file = tmx[0:len(tmx) - len('.tmx')] to_folder = os.path.join(*os.path.split(tmx)[:-1]) if os.path.exists(f'{to_folder}/bitext.en'): debug and print(f'{tmx} already extracted to {to_file}; so skip') return to_file cmd = f'(cd {to_folder}; {TMX_TOOL} {tmx})' call(cmd, debug) return to_file CZENG16_REGEX = re.compile(r'.*?data.plaintext-format/0[0-9]train$') WMT19_WIKITITLES_REGEX = re.compile(r'.*?wikititles-v1.(\w\w)-en.tsv.gz') TSV_REGEX = re.compile(r'.*?(\w\w)-(\w\w).tsv$') def cut_wikitles(wiki_file, debug): # different languages have different file names: if wiki_file.endswith('wiki/fi-en/titles.fi-en'): to_file1 = f'{wiki_file}.fi' to_file2 = f'{wiki_file}.en' BACKSLASH = '\\' cmd1 = f"cat {wiki_file} | sed 's/|||/{BACKSLASH}t/g' |cut -f1 |awk '{{$1=$1}};1' > {to_file1}" cmd2 = f"cat {wiki_file} | sed 's/|||/{BACKSLASH}t/g' |cut -f2 |awk '{{$1=$1}};1' > {to_file2}" # elif WMT19_WIKITITLES_REGEX.match(wiki_file): # src = WMT19_WIKITITLES_REGEX.match(wiki_file).groups()[0] # to_file1 = f'{wiki_file}.{src}' # to_file2 = f'{wiki_file}.en' # cmd1 = f"cat {wiki_file} | cut -f1 |awk '{{$1=$1}};1' > {to_file1}" # cmd2 = f"cat {wiki_file} | cut -f2 |awk '{{$1=$1}};1' > {to_file2}" else: return None if os.path.exists(to_file1) and os.path.exists(to_file2): debug and print(f'{wiki_file} already processed to {to_file1} and {to_file2}; so skip') return wiki_file call(cmd1, debug=debug) call(cmd2, debug=debug) return wiki_file def cut_tsv(file, debug): m = TSV_REGEX.match(file) if m is None: raise ValueError(f'{file} is not matching tsv pattern') src = m.groups()[0] tgt = m.groups()[1] to_file1 = f'{file}.{src}' to_file2 = f'{file}.{tgt}' cmd1 = f"cat {file} | cut -f1 |awk '{{$1=$1}};1' > {to_file1}" cmd2 = f"cat {file} | cut -f2 |awk '{{$1=$1}};1' > {to_file2}" if os.path.exists(to_file1) and os.path.exists(to_file2): debug and print(f'{file} already processed to {to_file1} and {to_file2}; so skip') return file call(cmd1, debug=debug) call(cmd2, debug=debug) return file def convert_file_if_needed(file, debug): if file.endswith('.sgm'): return sgm2raw(file, debug) elif file.endswith('.tmx'): return tmx2raw(file, debug) elif 
file.endswith('wiki/fi-en/titles.fi-en'): return cut_wikitles(file, debug) # elif WMT19_WIKITITLES_REGEX.match(file): # return cut_wikitles(file, debug) elif file.endswith('.tsv'): return cut_tsv(file, debug) elif CZENG16_REGEX.match(file): return convert2czeng17(file, debug) else: return file def convert_files_if_needed(extracted_foldrs, my_glob=my_glob, debug=False): return { url: list(sorted(set(convert_file_if_needed(f, debug)) for f in sorted(set(my_glob(folder))))) for url, folder in extracted_foldrs.items() } def match_patt(file_path, file_pattern, src, tgt, lang): return file_pattern.format(src=src, tgt=tgt, lang=lang) in file_path def match_patts(file_path, file_patterns, src, tgt, lang): for file_pattern in file_patterns: params = { k: v for k, v in [('src', src), ('tgt', tgt), ('lang', lang)] if k in file_pattern} matching = file_pattern.format(**params) if isinstance(file_pattern, tuple): pattern, directions = file_pattern if f'{src}-{tgt}' in directions and matching in file_path: return True else: if matching in file_path: return True return False def extracted_glob(extracted_folder, file_patterns, src, tgt, lang): def get_matching_pattern(file_pattern): params = { k: v for k, v in [('src', src), ('tgt', tgt), ('lang', lang)] if '{' + k + '}' in file_pattern } file_pattern = re.sub(r'{src:(.*?)}', r'\1' if lang == src else '', file_pattern) file_pattern = re.sub(r'{tgt:(.*?)}', r'\1' if lang == tgt else '', file_pattern) file_pattern = file_pattern.format(**params) return file_pattern for file_pattern in file_patterns: if isinstance(file_pattern, tuple): file_pattern, lang_pairs = file_pattern if f'{src}-{tgt}' not in lang_pairs: continue # print('working on pattern: ', file_pattern, lang_pairs ) matching_pattern = get_matching_pattern(file_pattern) if matching_pattern is None: continue glob_patterns = f'{extracted_folder}/{matching_pattern}' # print('glob_patterns: ', glob_patterns) for f in glob.glob(glob_patterns): yield f # for debug usage def all_extracted_files(split, src, tgt, extracted_folders, split_urls): def get_url(url): if isinstance(url, tuple): url, downloaded_file = url return url return [ f for url in split_urls for f in my_glob(extracted_folders[str(get_url(url))]) ] def concat_files(split, src, tgt, extracted_folders, split_urls, path_patterns, to_folder, debug=False): # if debug: # print('extracted files to be filtered by patterns: ', # '\n\t'.join(sorted(all_extracted_files(split, src, tgt, extracted_folders, split_urls)))) for lang in [src, tgt]: to_file = f'{to_folder}/{split}.{src}-{tgt}.{lang}' s_src, s_tgt, s_lang = src.split('_')[0], tgt.split('_')[0], lang.split('_')[0] files = [] for url in split_urls: if isinstance(url, tuple): url, downloaded_file = url if str(url) not in extracted_folders: print(f'warning: {url} not in extracted files') for extracted_file in set( extracted_glob( extracted_folders[str(url)], path_patterns, s_src, s_tgt, s_lang)): files.append(extracted_file) if len(files) == 0: print('warning: ', f'No files found for split {to_file}') continue files = sorted(set(files)) print(f'concating {len(files)} files into {to_file}') cmd = ['cat'] + [f'"{f}"' for f in files] + [f'>{to_file}'] cmd = " ".join(cmd) call(cmd, debug=debug) UTILS = os.path.join(pathlib.Path(__file__).parent, 'utils') LID_MODEL = f'{download_to}/lid.176.bin' LID_MULTI = f'{UTILS}/fasttext_multi_filter.py' def lid_filter(split, src, tgt, from_folder, to_folder, debug=False): if not os.path.exists(LID_MODEL): call(f'wget -nc 
https://dl.fbaipublicfiles.com/fasttext/supervised-models/lid.176.bin -O {LID_MODEL}') from_prefix = f'{from_folder}/{split}.{src}-{tgt}' to_prefix = f'{to_folder}/{split}.{src}-{tgt}' if os.path.exists(f'{from_prefix}.{src}') and os.path.exists(f'{from_prefix}.{tgt}'): s_src, s_tgt = src.split('_')[0], tgt.split('_')[0] cmd = ( f'python {LID_MULTI} --model {LID_MODEL} --inputs {from_prefix}.{src} {from_prefix}.{tgt} ' f'--langs {s_src} {s_tgt} --outputs {to_prefix}.{src} {to_prefix}.{tgt}' ) print(f'filtering {from_prefix}') call(cmd, debug=debug) def concat_into_splits(dl_dataset, src, tgt, extracted_folders, to_folder, debug): to_folder_tmp = f"{to_folder}_tmp" os.makedirs(to_folder_tmp, exist_ok=True) concat_files('train', src, tgt, extracted_folders, split_urls=dl_dataset.train_urls, path_patterns=dl_dataset.train_files_patterns, to_folder=to_folder_tmp, debug=debug) lid_filter('train', src, tgt, to_folder_tmp, to_folder, debug) concat_files('valid', src, tgt, extracted_folders, split_urls=dl_dataset.valid_urls, path_patterns=dl_dataset.valid_files_patterns, to_folder=to_folder, debug=debug) concat_files('test', src, tgt, extracted_folders, split_urls=dl_dataset.test_urls, path_patterns=dl_dataset.test_files_patterns, to_folder=to_folder, debug=debug) def download_multi(dl_folder, extract_folder, urls, num_processes=8, debug=False): pool = mp.Pool(processes=num_processes) download_f = partial(download_a_url, dl_folder) pool.imap_unordered(download_f, urls) pool.close() pool.join() BLEU_REGEX = re.compile("^BLEU\\S* = (\\S+) ") def run_eval_bleu(cmd): output = check_output(cmd, shell=True, stderr=subprocess.STDOUT).decode("utf-8").strip() print(output) bleu = -1.0 for line in output.strip().split('\n'): m = BLEU_REGEX.search(line) if m is not None: bleu = m.groups()[0] bleu = float(bleu) break return bleu def check_wmt_test_bleu(raw_folder, wmt_lang_pairs): not_matchings = [] for wmt, src_tgts in wmt_lang_pairs: for src_tgt in src_tgts: print(f'checking test bleus for: {src_tgt} at {wmt}') src, tgt = src_tgt.split('-') ssrc, stgt = src[:2], tgt[:2] if os.path.exists(f'{raw_folder}/test.{tgt}-{src}.{src}'): # reversed direction may have different test set test_src = f'{raw_folder}/test.{tgt}-{src}.{src}' else: test_src = f'{raw_folder}/test.{src}-{tgt}.{src}' cmd1 = f'cat {test_src} | sacrebleu -t "{wmt}" -l {stgt}-{ssrc}; [ $? -eq 0 ] || echo ""' test_tgt = f'{raw_folder}/test.{src}-{tgt}.{tgt}' cmd2 = f'cat {test_tgt} | sacrebleu -t "{wmt}" -l {ssrc}-{stgt}; [ $? 
-eq 0 ] || echo ""' bleu1 = run_eval_bleu(cmd1) if bleu1 != 100.0: not_matchings.append(f'{wmt}:{src_tgt} source side not matching: {test_src}') bleu2 = run_eval_bleu(cmd2) if bleu2 != 100.0: not_matchings.append(f'{wmt}:{src_tgt} target side not matching: {test_tgt}') return not_matchings def download_and_extract( to_folder, lang_pairs, dl_dataset, to_manually_download_urls, completed_urls={}, completed_extraction={}, debug=False): dl_folder = f'{to_folder}/downloads' extract_folder = f'{to_folder}/extracted' raw_folder = f'{to_folder}/raw' lid_filtered = f'{to_folder}/lid_filtered' os.makedirs(extract_folder, exist_ok=True) os.makedirs(raw_folder, exist_ok=True) os.makedirs(lid_filtered, exist_ok=True) check_need_manual_downalod(dl_folder, to_manually_download_urls) completed_urls = download_dataset( dl_folder, dl_dataset, completed_urls) if debug: print('completed urls: ', completed_urls) extracted_folders = extract_all_files( completed_urls, extract_folder=extract_folder, completed_extraction=completed_extraction, debug=debug) if debug: print('download files have been extracted to folders: ', extracted_folders) convert_files_if_needed(extracted_folders, debug=False) for src_tgt in lang_pairs: print(f'working on {dl_dataset.name}: {src_tgt}') src, tgt = src_tgt.split('-') concat_into_splits(dl_dataset, src=src, tgt=tgt, extracted_folders=extracted_folders, to_folder=raw_folder, debug=debug) print('completed data into: ', raw_folder) def download_czang16(download_to, username=None): wgets = [ f'wget --user={username} --password=czeng -P {download_to} http://ufallab.ms.mff.cuni.cz/~bojar/czeng16-data/data-plaintext-format.{i}.tar' for i in range(10)] cmds = [] for i, cmd in enumerate(wgets): filename = f'{download_to}/data-plaintext-format.{i}.tar' if os.path.exists(filename): print(f'{filename} has already been downloaded; so skip') continue cmds.append(cmd) if cmds and username is None: raise ValueError('No czeng username is given; please register at http://ufal.mff.cuni.cz/czeng/czeng16 to obtain username to download') for cmd in cmds: call(cmd) print('done with downloading czeng1.6') def download_czeng17_script(download_to, extract_folder, debug=False): url = 'http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip' filename = f'{download_to}/convert_czeng16_to_17.pl.zip' extract_to = f'{extract_folder}/{get_extract_name(filename)}' script_path = f'{extract_to}/convert_czeng16_to_17.pl' if not os.path.exists(script_path): wget.download(url, filename, bar=bar_custom) extract_to = extract_file(f'{download_to}/convert_czeng16_to_17.pl.zip', extract_folder, get_extract_name=get_extract_name, debug=debug) return script_path czeng17_script_path = "" def convert2czeng17(file, debug): en_file = f'{file}.en' cs_file = f'{file}.cs' if not os.path.exists(en_file) or not os.path.exists(cs_file): cs_cmd = f'cat {file} | perl {czeng17_script_path} | cut -f3 > {cs_file}' en_cmd = f'cat {file} | perl {czeng17_script_path} | cut -f4 > {en_file}' call(cs_cmd, debug) call(en_cmd, debug) else: print(f'already extracted: {en_file} and {cs_file}') return file def extract_czeng17(extract_folder, debug=False): url = 'http://ufal.mff.cuni.cz/czeng/download.php?f=convert_czeng16_to_17.pl.zip' filename = f'{download_to}/convert_czeng16_to_17.pl.zip' extract_to = f'{extract_folder}/{get_extract_name(filename)}' script_path = f'{extract_to}/convert_czeng16_to_17.pl' if not os.path.exists(script_path): wget.download(url, filename, bar=bar_custom) extract_to = 
extract_file(f'{download_to}/convert_czeng16_to_17.pl.zip', extract_folder, get_extract_name=get_extract_name, debug=debug) return script_path ######### # definitions of wmt data sources # for es-en # Punctuation in the official test sets will be encoded with ASCII characters (not complex Unicode characters) as much as possible. You may want to normalize your system's output before submission. You are able able to use a rawer version of the test sets that does not have this normalization. # script to normalize punctuation: http://www.statmt.org/wmt11/normalize-punctuation.perl wmt13_es_en = DLDataset( name='wmt13_es-en', train_urls=[ 'http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz', 'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz', 'http://www.statmt.org/wmt13/training-parallel-un.tgz', 'http://www.statmt.org/wmt13/training-parallel-nc-v8.tgz', ], valid_urls=[ ('http://www.statmt.org/wmt13/dev.tgz', 'wmt13_dev.tgz') ], test_urls=[ ('http://www.statmt.org/wmt13/test.tgz', 'wmt13_test.tgz') ], train_files_patterns=[ ('*/europarl-v7.{src}-{tgt}.{lang}', ['es-en']), ('*commoncrawl.{src}-{tgt}.{lang}', ['es-en']), ('*/news-commentary-v8.{src}-{tgt}.{lang}', ['es-en']), ('un/*undoc.2000.{src}-{tgt}.{lang}', ['es-en']), ] , valid_files_patterns=[ ('dev/newstest2012.{lang}', ['es-en']) ], test_files_patterns=[ ('test/newstest*.{lang}', ['es-en']) ], ) wmt14_de_fr_en = DLDataset( name='wmt14_de_fr_en', train_urls=[ 'http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz', 'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz', 'http://www.statmt.org/wmt13/training-parallel-un.tgz', 'http://www.statmt.org/wmt14/training-parallel-nc-v9.tgz', ('http://www.statmt.org/wmt10/training-giga-fren.tar', 'training-giga-fren.gz.tar'), #it is actuall a gz.tar ], valid_urls=[ ('http://www.statmt.org/wmt14/dev.tgz', 'wmt14_dev.tgz'), ], test_urls=[ ('http://www.statmt.org/wmt14/test-full.tgz', 'wmt14_test_full.tgz'), # cleaned test sets ], train_files_patterns=[ ('*/europarl-v7.{src}-{tgt}.{lang}', ['fr-en', 'de-en']), ('*commoncrawl.{src}-{tgt}.{lang}', ['fr-en', 'de-en']), ('*/*news-commentary-v9.{src}-{tgt}.{lang}', ['fr-en', 'de-en']), ('un/undoc.2000.{src}-{tgt}.{lang}', ['fr-en']), ('*giga-{src}{tgt}*{lang}', ['fr-en']) ], valid_files_patterns=[ ('dev/newstest2013.{lang}', ['fr-en', 'de-en']) ], test_files_patterns=[ ('test-full/newstest*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['en-de', 'de-en', 'fr-en', 'en-fr']), ], ) # pip install git+https://github.com/amake/tmx2corpus.git wmt16_ro_en = DLDataset( name='wmt16_ro-en', train_urls=[ ('http://data.statmt.org/wmt16/translation-task/training-parallel-ep-v8.tgz', 'wmt16_training-parallel-ep-v8.tgz'), ('http://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-ro.tmx.gz', 'en-ro.tmx.gz'), ], valid_urls=[ ('http://data.statmt.org/wmt16/translation-task/dev-romanian-updated.tgz', 'wmt16_dev.tgz') ], test_urls=[ ('http://data.statmt.org/wmt16/translation-task/test.tgz', 'wmt16_test.tgz') ], train_files_patterns=[ ('*/*europarl-v8.{src}-{tgt}.{lang}', ['ro-en']), ('bitext.{lang}', ['ro-en']) #setimes from tmux ] , valid_files_patterns=[ ('dev/newsdev2016*{src}{tgt}*.{lang}', ['ro-en', 'ro-en']) ], test_files_patterns=[ ('test/newstest*{src}{tgt}*.{lang}', ['ro-en', 'en-ro']) ], ) cwmt_wmt_instruction = 'cwmt download instruction at: http://nlp.nju.edu.cn/cwmt-wmt' wmt17_fi_lv_tr_zh_en_manual_downloads = [ # fake urls to have unique keys for the data ( ('http://nlp.nju.edu.cn/cwmt-wmt/CASIA2015.zip', 'CASIA2015.zip'), 
cwmt_wmt_instruction), ( ('http://nlp.nju.edu.cn/cwmt-wmt/CASICT2011.zip', 'CASICT2011.zip'), cwmt_wmt_instruction), ( ('http://nlp.nju.edu.cn/cwmt-wmt/CASICT2015.zip', 'CASICT2015.zip'), cwmt_wmt_instruction), ( ('http://nlp.nju.edu.cn/cwmt-wmt/Datum2015.zip', 'Datum2015.zip'), cwmt_wmt_instruction), ( ('http://nlp.nju.edu.cn/cwmt-wmt/Datum2017.zip', 'Datum2017.zip'), cwmt_wmt_instruction), ( ('http://nlp.nju.edu.cn/cwmt-wmt/NEU2017.zip', 'NEU2017.zip'), cwmt_wmt_instruction), ] wmt17_fi_lv_tr_zh_en = DLDataset( name='wmt17_fi_lv_tr_zh_en', train_urls=[ ('http://data.statmt.org/wmt17/translation-task/training-parallel-ep-v8.tgz', 'wmt17_training-parallel-ep-v8.tgz'), 'http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz', 'http://www.statmt.org/wmt15/wiki-titles.tgz', ('http://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-tr.tmx.gz', 'en-tr.tmx.gz'), ('http://data.statmt.org/wmt17/translation-task/rapid2016.tgz', 'wmt17_rapid2016.tgz'), 'http://data.statmt.org/wmt17/translation-task/leta.v1.tgz', 'http://data.statmt.org/wmt17/translation-task/dcep.lv-en.v1.tgz', 'http://data.statmt.org/wmt17/translation-task/books.lv-en.v1.tgz', (('https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-zh.tar.gz.00', 'https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-zh.tar.gz.01',), 'UNv1.0.en-zh.tar.gz'), #manually download files: ('http://nlp.nju.edu.cn/cwmt-wmt/CASIA2015.zip', 'CASIA2015.zip'), ('http://nlp.nju.edu.cn/cwmt-wmt/CASICT2011.zip', 'CASICT2011.zip'), ('http://nlp.nju.edu.cn/cwmt-wmt/CASICT2015.zip', 'CASICT2015.zip'), ('http://nlp.nju.edu.cn/cwmt-wmt/Datum2015.zip', 'Datum2015.zip'), ('http://nlp.nju.edu.cn/cwmt-wmt/Datum2017.zip', 'Datum2017.zip'), ('http://nlp.nju.edu.cn/cwmt-wmt/NEU2017.zip', 'NEU2017.zip'), ], valid_urls=[ ('http://data.statmt.org/wmt17/translation-task/dev.tgz', 'wmt17_dev.tgz'), ], test_urls=[ #NEW: Improved translations for zh test sets ('http://data.statmt.org/wmt17/translation-task/test-update-1.tgz', 'wmt17_test_zh_en.tgz'), ('http://data.statmt.org/wmt17/translation-task/test.tgz', 'wmt17_test_others.tgz') ], train_files_patterns=[ ('casict*/cas*{src:ch}{tgt:en}.txt', ['zh-en', 'zh-en'] ), ('casia*/cas*{src:ch}{tgt:en}.txt', ['zh-en', 'zh-en'] ), ('dataum*/Book*{src:cn}{tgt:en}.txt', ['zh-en', 'zh-en']), ('neu*/NEU*{src:cn}{tgt:en}.txt', ['zh-en', 'zh-en'] ), ('*/*UNv1.0.en-zh.{src:zh}{tgt:en}', ['zh-en']), ('training/*news-commentary-v12.{src}-{tgt}.{lang}', ['zh-en', ]), ('*/*europarl-v8.{src}-{tgt}.{lang}', ['fi-en', 'lv-en']), ('wiki/fi-en/titles.{src}-{tgt}.{lang}', ['fi-en', ]), ('rapid2016.{tgt}-{src}.{lang}', ['fi-en', 'lv-en']), ('*/leta.{lang}', ['lv-en']), ('*/dcep.{lang}', ['lv-en']), ('*/farewell.{lang}', ['lv-en']), ('bitext.{lang}', ['tr-en']), ] , valid_files_patterns=[ ('dev/newsdev2017*{src}{tgt}-{src:src}{tgt:ref}.{lang}', [ 'fi-en', 'lv-en', 'tr-en', 'zh-en', 'en-fi', 'en-lv', 'en-tr', 'en-zh' ]), ('dev/newstest2016*{src}{tgt}-{src:src}{tgt:ref}.{lang}', [ 'fi-en', 'tr-en', 'en-fi', 'en-tr', ]), ], test_files_patterns=[ ('test/newstest2017-{src}{tgt}-{src:src}{tgt:ref}.{lang}', [ 'fi-en', 'lv-en', 'tr-en', 'en-fi', 'en-lv', 'en-tr', ]), ('newstest2017-{src}{tgt}-{src:src}{tgt:ref}.{lang}', [ 'zh-en', 'en-zh' ]), ], ) czeng_instruction = 'download instruction at: http://ufal.mff.cuni.cz/czeng/czeng16' #alternative: use the prepared data but detokenize it? wmt18_cs_et_en_manual_downloads = [ #for cs, need to register and download; Register and download CzEng 1.6. 
#Better results can be obtained by using a subset of sentences, released under a new version name CzEng 1.7. # ((f'http://ufallab.ms.mff.cuni.cz/~bojar/czeng16-data/data-plaintext-format.{i}.tar', # f'data-plaintext-format.{i}.tar'), czeng_instruction) # for i in range(10) ] wmt18_cs_et_en = DLDataset( name='wmt18_cs_et_en', train_urls=[ 'http://www.statmt.org/wmt13/training-parallel-europarl-v7.tgz', 'http://data.statmt.org/wmt18/translation-task/training-parallel-ep-v8.tgz', 'https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-cs.zipporah0-dedup-clean.tgz', 'https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-et.zipporah0-dedup-clean.tgz', 'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz', 'http://data.statmt.org/wmt18/translation-task/training-parallel-nc-v13.tgz', ('http://data.statmt.org/wmt18/translation-task/rapid2016.tgz', 'wmt18_rapid2016.tgz'), # (tuple( # (f'http://ufallab.ms.mff.cuni.cz/~bojar/czeng16-data/data-plaintext-format.{i}.tar', # f'data-plaintext-format.{i}.tar') # for i in range(10) # ), # 'czeng16_data_plaintext.gz.tar'), ], valid_urls=[ ('http://data.statmt.org/wmt18/translation-task/dev.tgz', 'wmt18_dev.tgz'), ], test_urls=[ ('http://data.statmt.org/wmt18/translation-task/test.tgz', 'wmt18_test.tgz'), ], train_files_patterns=[ # ('*/*europarl-v7.{src}-{tgt}.{lang}', ['cs-en']), ('*/*europarl-v8.{src}-{tgt}.{lang}', ['et-en']), # ('*paracrawl-release1.{tgt}-{src}.zipporah0-dedup-clean.{lang}', ['cs-en', 'et-en']), ('*paracrawl-release1.{tgt}-{src}.zipporah0-dedup-clean.{lang}', ['et-en']), # ('*commoncrawl.{src}-{tgt}.{lang}', ['cs-en']), # ('*/news-commentary-v13.{src}-{tgt}.{lang}', ['cs-en']), # ('data.plaintext-format/*train.{lang}', ['cs-en']), ('rapid2016.{tgt}-{src}.{lang}', ['et-en']), ] , valid_files_patterns=[ ('dev/newsdev2018*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['et-en']), # ('dev/newstest2017*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['cs-en']) ], test_files_patterns=[ ('test/newstest2018-{src}{tgt}-{src:src}{tgt:ref}.{lang}', # ['cs-en', 'et-en']), ['et-en']), ] ) ru_en_yandex_instruction = 'Yandex Corpus download instruction at: https://translate.yandex.ru/corpus?lang=en' wmt19_ru_gu_kk_lt_manual_downloads = [ (('https://translate.yandex.ru/corpus?lang=en', 'wmt19_1mcorpus.zip'), ru_en_yandex_instruction) ] wmt19_ru_gu_kk_lt = DLDataset( name='wmt19_ru_gu_kk_lt', train_urls=[ 'http://www.statmt.org/europarl/v9/training/europarl-v9.lt-en.tsv.gz', 'https://s3.amazonaws.com/web-language-models/paracrawl/release3/en-lt.bicleaner07.tmx.gz', 'https://s3.amazonaws.com/web-language-models/paracrawl/release1/paracrawl-release1.en-ru.zipporah0-dedup-clean.tgz', 'http://www.statmt.org/wmt13/training-parallel-commoncrawl.tgz', 'http://data.statmt.org/news-commentary/v14/training/news-commentary-v14-wmt19.en-kk.tsv.gz', 'http://data.statmt.org/news-commentary/v14/training/news-commentary-v14.en-ru.tsv.gz', 'http://data.statmt.org/wikititles/v1/wikititles-v1.kk-en.tsv.gz', 'http://data.statmt.org/wikititles/v1/wikititles-v1.ru-en.tsv.gz', 'http://data.statmt.org/wikititles/v1/wikititles-v1.kk-en.tsv.gz', 'http://data.statmt.org/wikititles/v1/wikititles-v1.lt-en.tsv.gz', 'http://data.statmt.org/wikititles/v1/wikititles-v1.gu-en.tsv.gz', (('https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.00', 'https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.01', 'https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-ru.tar.gz.02',), 
'wmt19_UNv1.0.en-ru.tar.gz'), 'https://tilde-model.s3-eu-west-1.amazonaws.com/rapid2016.en-lt.tmx.zip', ('https://translate.yandex.ru/corpus?lang=en', 'wmt19_1mcorpus.zip'), ], valid_urls=[ ('http://data.statmt.org/wmt19/translation-task/dev.tgz', 'wmt19_dev.tgz'), ], test_urls=[ ('http://data.statmt.org/wmt19/translation-task/test.tgz', 'wmt19_test.tgz'), ], train_files_patterns=[ ('*europarl-v9.{src}-{tgt}.tsv.{lang}', ['lt-en']), #paracrawl ('*paracrawl-release1.{tgt}-{src}.zipporah0-dedup-clean.{lang}', ['ru-en']), ('bitext.{lang}', ['lt-en',]), ('*commoncrawl.{src}-{tgt}.{lang}', ['ru-en',]), ('*news-commentary-v14-wmt19.{tgt}-{src}.tsv.{lang}', ['kk-en', ]), ('*news-commentary-v14.{tgt}-{src}.tsv.{lang}', ['ru-en']), #yandex ('corpus.{tgt}_{src}.1m.{lang}', ['ru-en']), ('wikititles_v1_wikititles-v1.{src}-{tgt}.tsv.{lang}', ['ru-en', 'kk-en', 'lt-en', 'gu-en']), ('*/UNv1.0.{tgt}-{src}.{lang}', ['ru-en']), #rapid ('bitext.{lang}', ['lt-en']) ], valid_files_patterns=[ ('dev/newsdev2019*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['gu-en', 'kk-en', 'lt-en']), ('dev/newstest2018*{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['ru-en']), ], test_files_patterns=[ ('sgm/newstest2019-{src}{tgt}-{src:src}{tgt:ref}.{lang}', ['ru-en', 'gu-en', 'kk-en', 'lt-en', 'en-ru', 'en-gu', 'en-kk', 'en-lt']), ] ) ######### if __name__ == "__main__": # speed up the downloads with multiple processing dl_folder = f'{to_data_path}/downloads' extract_folder = f'{to_data_path}/extracted' urls = [ url for dataset in [wmt13_es_en, wmt14_de_fr_en, wmt16_ro_en, wmt18_cs_et_en, wmt19_ru_gu_kk_lt] for urls in [dataset.train_urls, dataset.valid_urls, dataset.test_urls] for url in urls ] urls = set(urls) download_multi(dl_folder, extract_folder, urls, num_processes=8, debug=True) # check manually downlaods to_manually_download_urls = ( wmt17_fi_lv_tr_zh_en_manual_downloads + wmt18_cs_et_en_manual_downloads + wmt19_ru_gu_kk_lt_manual_downloads ) to_be_manually_dowloaded = check_need_manual_downalod(dl_folder, to_manually_download_urls) if len(to_be_manually_dowloaded) > 0: print('Missing files that need to be downloaded manually; stop the process now.') exit(-1) completed_urls = {} completed_extraction = {} def work_on_wmt(directions, wmt_data): download_and_extract( to_data_path, directions, wmt_data, to_manually_download_urls=to_manually_download_urls, completed_urls=completed_urls, completed_extraction=completed_extraction, debug=True) work_on_wmt( ['es_XX-en_XX'], wmt13_es_en,) work_on_wmt( [ 'fr_XX-en_XX', 'en_XX-fr_XX', # 'en_XX-de_DE', 'de_DE-en_XX', ], wmt14_de_fr_en,) work_on_wmt( ['ro_RO-en_XX', 'en_XX-ro_XX'], wmt16_ro_en,) work_on_wmt( [ # 'zh_CN-en_XX', 'lv_LV-en_XX', 'fi_FI-en_XX', 'tr_TR-en_XX', #in case the reversed directions have different train/valid/test data # 'en_XX-zh_CN', 'en_XX-lv_LV', 'en_XX-fi_FI', 'en_XX-tr_TR', ], wmt17_fi_lv_tr_zh_en, ) # czeng17_script_path = download_czeng17_script(download_to, extract_to, debug=False) # cz_username = None work_on_wmt( [ # 'cs_CZ-en_XX', 'et_EE-en_XX'], wmt18_cs_et_en,) work_on_wmt( [ # 'ru_RU-en_XX', 'en_XX-ru_RU', 'gu_IN-en_XX', 'kk_KZ-en_XX', 'lt_LT-en_XX', #in case the reversed directions have different train/valid/test data 'en_XX-gu_IN', 'en_XX-kk_KZ', 'en_XX-lt_LT' ], wmt19_ru_gu_kk_lt,) not_matching = check_wmt_test_bleu( f'{to_data_path}/raw', [ ('wmt13', ['es_XX-en_XX']), ('wmt14/full', ['fr_XX-en_XX',]), ('wmt16', ['ro_RO-en_XX',]), # ('wmt17/improved', ['zh_CN-en_XX']), ('wmt17', [ 'lv_LV-en_XX', 'fi_FI-en_XX', 'tr_TR-en_XX']), ('wmt18', ['cs_CZ-en_XX', 
'et_EE-en_XX']), ('wmt19', ['gu_IN-en_XX', 'kk_KZ-en_XX', 'lt_LT-en_XX']), #'ru_RU-en_XX', ] ) if len(not_matching) > 0: print('the following datasets do not have matching test datasets:\n\t', '\n\t'.join(not_matching))
KosmosX-API-main
kosmosX/fairseq/examples/multilingual/data_scripts/download_wmt19_and_before.py
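The script above is driven entirely by its __main__ block. A minimal launch sketch, assuming it is run from the data_scripts directory and follows the same WORKDIR_ROOT environment-variable convention as the sibling scripts in this folder; the working-directory path is hypothetical:

import os
import subprocess

os.environ["WORKDIR_ROOT"] = "/data/wmt_workdir"   # hypothetical working directory root
subprocess.run(
    ["python", "download_wmt19_and_before.py"],    # downloads, extracts, concatenates and LID-filters the WMT data
    check=True,
)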
import os, sys import glob, itertools import pandas as pd WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): print('please specify your working directory root in OS environment variable WORKDIR_ROOT. Exitting..."') sys.exit(-1) def load_langs(path): with open(path) as fr: langs = [l.strip() for l in fr] return langs def load_sentences(raw_data, split, direction): src, tgt = direction.split('-') src_path = f"{raw_data}/{split}.{direction}.{src}" tgt_path = f"{raw_data}/{split}.{direction}.{tgt}" if os.path.exists(src_path) and os.path.exists(tgt_path): return [(src, open(src_path).read().splitlines()), (tgt, open(tgt_path).read().splitlines())] else: return [] def swap_direction(d): src, tgt = d.split('-') return f'{tgt}-{src}' def get_all_test_data(raw_data, directions, split='test'): test_data = [ x for dd in directions for d in [dd, swap_direction(dd)] for x in load_sentences(raw_data, split, d) ] # all_test_data = {s for _, d in test_data for s in d} all_test_data = {} for lang, d in test_data: for s in d: s = s.strip() lgs = all_test_data.get(s, set()) lgs.add(lang) all_test_data[s] = lgs return all_test_data, test_data def check_train_sentences(raw_data, direction, all_test_data, mess_up_train={}): src, tgt = direction.split('-') tgt_path = f"{raw_data}/train.{direction}.{tgt}" src_path = f"{raw_data}/train.{direction}.{src}" print(f'check training data in {raw_data}/train.{direction}') size = 0 if not os.path.exists(tgt_path) or not os.path.exists(src_path): return mess_up_train, size with open(src_path) as f, open(tgt_path) as g: for src_line, tgt_line in zip(f, g): s = src_line.strip() t = tgt_line.strip() size += 1 if s in all_test_data: langs = mess_up_train.get(s, set()) langs.add(direction) mess_up_train[s] = langs if t in all_test_data: langs = mess_up_train.get(t, set()) langs.add(direction) mess_up_train[t] = langs return mess_up_train, size def check_train_all(raw_data, directions, all_test_data): mess_up_train = {} data_sizes = {} for direction in directions: _, size = check_train_sentences(raw_data, direction, all_test_data, mess_up_train) data_sizes[direction] = size return mess_up_train, data_sizes def count_train_in_other_set(mess_up_train): train_in_others = [(direction, s) for s, directions in mess_up_train.items() for direction in directions] counts = {} for direction, s in train_in_others: counts[direction] = counts.get(direction, 0) + 1 return counts def train_size_if_remove_in_otherset(data_sizes, mess_up_train): counts_in_other = count_train_in_other_set(mess_up_train) remain_sizes = [] for direction, count in counts_in_other.items(): remain_sizes.append((direction, data_sizes[direction] - count, data_sizes[direction], count, 100 * count / data_sizes[direction] )) return remain_sizes def remove_messed_up_sentences(raw_data, direction, mess_up_train, mess_up_train_pairs, corrected_langs): split = 'train' src_lang, tgt_lang = direction.split('-') tgt = f"{raw_data}/{split}.{direction}.{tgt_lang}" src = f"{raw_data}/{split}.{direction}.{src_lang}" print(f'working on {direction}: ', src, tgt) if not os.path.exists(tgt) or not os.path.exists(src) : return corrected_tgt = f"{to_folder}/{split}.{direction}.{tgt_lang}" corrected_src = f"{to_folder}/{split}.{direction}.{src_lang}" line_num = 0 keep_num = 0 with open(src, encoding='utf8',) as fsrc, \ open(tgt, encoding='utf8',) as ftgt, \ open(corrected_src, 'w', encoding='utf8') as fsrc_corrected, \ open(corrected_tgt, 'w', encoding='utf8') as ftgt_corrected: for s, t 
in zip(fsrc, ftgt): s = s.strip() t = t.strip() if t not in mess_up_train \ and s not in mess_up_train \ and (s, t) not in mess_up_train_pairs \ and (t, s) not in mess_up_train_pairs: corrected_langs.add(direction) print(s, file=fsrc_corrected) print(t, file=ftgt_corrected) keep_num += 1 line_num += 1 if line_num % 1000 == 0: print(f'completed {line_num} lines', end='\r') return line_num, keep_num ########## def merge_valid_test_messup(mess_up_train_valid, mess_up_train_test): merged_mess = [] for s in set(list(mess_up_train_valid.keys()) + list(mess_up_train_test.keys())): if not s: continue valid = mess_up_train_valid.get(s, set()) test = mess_up_train_test.get(s, set()) merged_mess.append((s, valid | test)) return dict(merged_mess) ######### def check_train_pairs(raw_data, direction, all_test_data, mess_up_train={}): src, tgt = direction.split('-') #a hack; TODO: check the reversed directions path1 = f"{raw_data}/train.{src}-{tgt}.{src}" path2 = f"{raw_data}/train.{src}-{tgt}.{tgt}" if not os.path.exists(path1) or not os.path.exists(path2) : return with open(path1) as f1, open(path2) as f2: for src_line, tgt_line in zip(f1, f2): s = src_line.strip() t = tgt_line.strip() if (s, t) in all_test_data or (t, s) in all_test_data: langs = mess_up_train.get( (s, t), set()) langs.add(src) langs.add(tgt) mess_up_train[(s, t)] = langs def load_pairs(raw_data, split, direction): src, tgt = direction.split('-') src_f = f"{raw_data}/{split}.{direction}.{src}" tgt_f = f"{raw_data}/{split}.{direction}.{tgt}" if tgt != 'en_XX': src_f, tgt_f = tgt_f, src_f if os.path.exists(src_f) and os.path.exists(tgt_f): return list(zip(open(src_f).read().splitlines(), open(tgt_f).read().splitlines(), )) else: return [] # skip_langs = ['cs_CZ', 'en_XX', 'tl_XX', 'tr_TR'] def get_messed_up_test_pairs(split, directions): test_pairs = [ (d, load_pairs(raw_data, split, d)) for d in directions ] # all_test_data = {s for _, d in test_data for s in d} all_test_pairs = {} for direction, d in test_pairs: src, tgt = direction.split('-') for s in d: langs = all_test_pairs.get(s, set()) langs.add(src) langs.add(tgt) all_test_pairs[s] = langs mess_up_train_pairs = {} for direction in directions: check_train_pairs(raw_data, direction, all_test_pairs, mess_up_train_pairs) return all_test_pairs, mess_up_train_pairs if __name__ == "__main__": ####### import argparse parser = argparse.ArgumentParser() parser.add_argument( '--from-folder', required=True, type=str) parser.add_argument( '--to-folder', required=True, type=str) parser.add_argument( '--directions', default=None, type=str) args = parser.parse_args() raw_data = args.from_folder to_folder = args.to_folder os.makedirs(to_folder, exist_ok=True) if args.directions: directions = args.directions.split(',') else: raw_files = itertools.chain( glob.glob(f'{raw_data}/train*'), glob.glob(f'{raw_data}/valid*'), glob.glob(f'{raw_data}/test*'), ) directions = [os.path.split(file_path)[-1].split('.')[1] for file_path in raw_files] print('working on directions: ', directions) ########## all_test_data, test_data = get_all_test_data(raw_data, directions, 'test') print('==loaded test data==') all_valid_data, valid_data = get_all_test_data(raw_data, directions, 'valid') print('==loaded valid data==') all_valid_test_data = merge_valid_test_messup(all_test_data, all_valid_data) mess_up_train, data_sizes = check_train_all(raw_data, directions, all_valid_test_data) print('training messing up with valid, test data:', len(mess_up_train)) data_situation = train_size_if_remove_in_otherset(data_sizes, 
mess_up_train) df = pd.DataFrame(data_situation, columns=['direction', 'train_size_after_remove', 'orig_size', 'num_to_remove', 'remove_percent']) df.sort_values('remove_percent', ascending=False) df.to_csv(f'{raw_data}/clean_summary.tsv', sep='\t') print(f'projected data clean summary in: {raw_data}/clean_summary.tsv') # correct the dataset: all_test_pairs, mess_up_test_train_pairs = get_messed_up_test_pairs('test', directions) all_valid_pairs, mess_up_valid_train_pairs = get_messed_up_test_pairs('valid', directions) all_messed_pairs = set(mess_up_test_train_pairs.keys()).union(set(mess_up_valid_train_pairs.keys())) corrected_directions = set() real_data_situation = [] for direction in directions: org_size, new_size = remove_messed_up_sentences(raw_data, direction, mess_up_train, all_messed_pairs, corrected_directions) if org_size == 0: print(f"{direction} has size 0") continue real_data_situation.append( (direction, new_size, org_size, org_size - new_size, (org_size - new_size) / org_size * 100) ) print('corrected directions: ', corrected_directions) df = pd.DataFrame(real_data_situation, columns=['direction', 'train_size_after_remove', 'orig_size', 'num_to_remove', 'remove_percent']) df.sort_values('remove_percent', ascending=False) df.to_csv(f'{raw_data}/actual_clean_summary.tsv', sep='\t') print(f'actual data clean summary (which can be different from the projected one because of duplications) in: {raw_data}/actual_clean_summary.tsv') import shutil for direction in directions: src_lang, tgt_lang = direction.split('-') for split in ['train', 'valid', 'test']: # copying valid, test and uncorrected train if direction in corrected_directions and split == 'train': continue tgt = f"{raw_data}/{split}.{direction}.{tgt_lang}" src = f"{raw_data}/{split}.{direction}.{src_lang}" if not (os.path.exists(src) and os.path.exists(tgt)): continue corrected_tgt = f"{to_folder}/{split}.{direction}.{tgt_lang}" corrected_src = f"{to_folder}/{split}.{direction}.{src_lang}" print(f'copying {src} to {corrected_src}') shutil.copyfile(src, corrected_src) print(f'copying {tgt} to {corrected_tgt}') shutil.copyfile(tgt, corrected_tgt) print('completed')
KosmosX-API-main
kosmosX/fairseq/examples/multilingual/data_scripts/remove_valid_test_in_train.py
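A minimal usage sketch for the cleaning script above, using the --from-folder, --to-folder and --directions flags it defines; the folder names and direction list are hypothetical, and --directions may be omitted to infer directions from the files present:

import subprocess

subprocess.run(
    [
        "python", "remove_valid_test_in_train.py",
        "--from-folder", "/data/wmt_workdir/raw",       # hypothetical folder with train/valid/test files
        "--to-folder", "/data/wmt_workdir/raw_clean",   # hypothetical output folder for corrected data
        "--directions", "ro_RO-en_XX,et_EE-en_XX",      # optional explicit direction list
    ],
    check=True,
)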
import shutil import os, sys from subprocess import check_call, check_output import glob import argparse import shutil import itertools def call_output(cmd): print(f"Executing: {cmd}") ret = check_output(cmd, shell=True) print(ret) return ret def call(cmd): print(cmd) check_call(cmd, shell=True) WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): print('please specify your working directory root in OS environment variable WORKDIR_ROOT. Exitting..."') sys.exit(-1) SPM_PATH = os.environ.get('SPM_PATH', None) if SPM_PATH is None or not SPM_PATH.strip(): print("Please install sentence piecence from https://github.com/google/sentencepiece and set SPM_PATH pointing to the installed spm_encode.py. Exitting...") sys.exit(-1) SPM_MODEL = f'{WORKDIR_ROOT}/sentence.bpe.model' SPM_VOCAB = f'{WORKDIR_ROOT}/dict_250k.txt' SPM_ENCODE = f'{SPM_PATH}' if not os.path.exists(SPM_MODEL): call(f"wget https://dl.fbaipublicfiles.com/fairseq/models/mbart50/sentence.bpe.model -O {SPM_MODEL}") if not os.path.exists(SPM_VOCAB): call(f"wget https://dl.fbaipublicfiles.com/fairseq/models/mbart50/dict_250k.txt -O {SPM_VOCAB}") def get_data_size(raw): cmd = f'wc -l {raw}' ret = call_output(cmd) return int(ret.split()[0]) def encode_spm(model, direction, prefix='', splits=['train', 'test', 'valid'], pairs_per_shard=None): src, tgt = direction.split('-') for split in splits: src_raw, tgt_raw = f'{RAW_DIR}/{split}{prefix}.{direction}.{src}', f'{RAW_DIR}/{split}{prefix}.{direction}.{tgt}' if os.path.exists(src_raw) and os.path.exists(tgt_raw): cmd = f"""python {SPM_ENCODE} \ --model {model}\ --output_format=piece \ --inputs {src_raw} {tgt_raw} \ --outputs {BPE_DIR}/{direction}{prefix}/{split}.bpe.{src} {BPE_DIR}/{direction}{prefix}/{split}.bpe.{tgt} """ print(cmd) call(cmd) def binarize_( bpe_dir, databin_dir, direction, spm_vocab=SPM_VOCAB, splits=['train', 'test', 'valid'], ): src, tgt = direction.split('-') try: shutil.rmtree(f'{databin_dir}', ignore_errors=True) os.mkdir(f'{databin_dir}') except OSError as error: print(error) cmds = [ "fairseq-preprocess", f"--source-lang {src} --target-lang {tgt}", f"--destdir {databin_dir}/", "--workers 8", ] if isinstance(spm_vocab, tuple): src_vocab, tgt_vocab = spm_vocab cmds.extend( [ f"--srcdict {src_vocab}", f"--tgtdict {tgt_vocab}", ] ) else: cmds.extend( [ "--joined-dictionary", f"--srcdict {spm_vocab}", ] ) input_options = [] if 'train' in splits and glob.glob(f"{bpe_dir}/train.bpe*"): input_options.append( f"--trainpref {bpe_dir}/train.bpe", ) if 'valid' in splits and glob.glob(f"{bpe_dir}/valid.bpe*"): input_options.append(f"--validpref {bpe_dir}/valid.bpe") if 'test' in splits and glob.glob(f"{bpe_dir}/test.bpe*"): input_options.append(f"--testpref {bpe_dir}/test.bpe") if len(input_options) > 0: cmd = " ".join(cmds + input_options) print(cmd) call(cmd) def binarize( databin_dir, direction, spm_vocab=SPM_VOCAB, prefix='', splits=['train', 'test', 'valid'], pairs_per_shard=None, ): def move_databin_files(from_folder, to_folder): for bin_file in glob.glob(f"{from_folder}/*.bin") \ + glob.glob(f"{from_folder}/*.idx") \ + glob.glob(f"{from_folder}/dict*"): try: shutil.move(bin_file, to_folder) except OSError as error: print(error) bpe_databin_dir = f"{BPE_DIR}/{direction}{prefix}_databin" bpe_dir = f"{BPE_DIR}/{direction}{prefix}" if pairs_per_shard is None: binarize_(bpe_dir, bpe_databin_dir, direction, spm_vocab=spm_vocab, splits=splits) move_databin_files(bpe_databin_dir, databin_dir) else: # binarize valid and test which 
will not be sharded binarize_( bpe_dir, bpe_databin_dir, direction, spm_vocab=spm_vocab, splits=[s for s in splits if s != "train"]) for shard_bpe_dir in glob.glob(f"{bpe_dir}/shard*"): path_strs = os.path.split(shard_bpe_dir) shard_str = path_strs[-1] shard_folder = f"{bpe_databin_dir}/{shard_str}" databin_shard_folder = f"{databin_dir}/{shard_str}" print(f'working from {shard_folder} to {databin_shard_folder}') os.makedirs(databin_shard_folder, exist_ok=True) binarize_( shard_bpe_dir, shard_folder, direction, spm_vocab=spm_vocab, splits=["train"]) for test_data in glob.glob(f"{bpe_databin_dir}/valid.*") + glob.glob(f"{bpe_databin_dir}/test.*"): filename = os.path.split(test_data)[-1] try: os.symlink(test_data, f"{databin_shard_folder}/{filename}") except OSError as error: print(error) move_databin_files(shard_folder, databin_shard_folder) def load_langs(path): with open(path) as fr: langs = [l.strip() for l in fr] return langs if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("--data_root", default=f"{WORKDIR_ROOT}/ML50") parser.add_argument("--raw-folder", default='raw') parser.add_argument("--bpe-folder", default='bpe') parser.add_argument("--databin-folder", default='databin') args = parser.parse_args() DATA_PATH = args.data_root #'/private/home/yuqtang/public_data/ML50' RAW_DIR = f'{DATA_PATH}/{args.raw_folder}' BPE_DIR = f'{DATA_PATH}/{args.bpe_folder}' DATABIN_DIR = f'{DATA_PATH}/{args.databin_folder}' os.makedirs(BPE_DIR, exist_ok=True) raw_files = itertools.chain( glob.glob(f'{RAW_DIR}/train*'), glob.glob(f'{RAW_DIR}/valid*'), glob.glob(f'{RAW_DIR}/test*'), ) directions = [os.path.split(file_path)[-1].split('.')[1] for file_path in raw_files] for direction in directions: prefix = "" splits = ['train', 'valid', 'test'] try: shutil.rmtree(f'{BPE_DIR}/{direction}{prefix}', ignore_errors=True) os.mkdir(f'{BPE_DIR}/{direction}{prefix}') os.makedirs(DATABIN_DIR, exist_ok=True) except OSError as error: print(error) spm_model, spm_vocab = SPM_MODEL, SPM_VOCAB encode_spm(spm_model, direction=direction, splits=splits) binarize(DATABIN_DIR, direction, spm_vocab=spm_vocab, splits=splits)
KosmosX-API-main
kosmosX/fairseq/examples/multilingual/data_scripts/binarize.py
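A minimal sketch of running the binarization script above. It requires WORKDIR_ROOT and SPM_PATH in the environment and accepts the folder flags shown in its argument parser; every path below is hypothetical:

import os
import subprocess

os.environ["WORKDIR_ROOT"] = "/data/ml50_workdir"                  # hypothetical working directory root
os.environ["SPM_PATH"] = "/opt/sentencepiece/spm_encode.py"        # hypothetical spm_encode location
subprocess.run(
    [
        "python", "binarize.py",
        "--data_root", "/data/ml50_workdir/ML50",
        "--raw-folder", "raw",
        "--bpe-folder", "bpe",
        "--databin-folder", "databin",
    ],
    check=True,
)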
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import glob import argparse import sys WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None) if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip(): print('please specify your working directory root in OS environment variable WORKDIR_ROOT. Exitting..."') sys.exit(-1) def get_directions(folder): raw_files = glob.glob(f'{folder}/train*') directions = [os.path.split(file_path)[-1].split('.')[1] for file_path in raw_files] return directions def diff_list(lhs, rhs): return set(lhs).difference(set(rhs)) def check_diff( from_src_file, from_tgt_file, to_src_file, to_tgt_file, ): seen_in_from = set() seen_src_in_from = set() seen_tgt_in_from = set() from_count = 0 with open(from_src_file, encoding='utf-8') as fsrc, \ open(from_tgt_file, encoding='utf-8') as ftgt: for s, t in zip(fsrc, ftgt): seen_in_from.add((s, t)) seen_src_in_from.add(s) seen_tgt_in_from.add(t) from_count += 1 common = 0 common_src = 0 common_tgt = 0 to_count = 0 seen = set() with open(to_src_file, encoding='utf-8') as fsrc, \ open(to_tgt_file, encoding='utf-8') as ftgt: for s, t in zip(fsrc, ftgt): to_count += 1 if (s, t) not in seen: if (s, t) in seen_in_from: common += 1 if s in seen_src_in_from: common_src += 1 seen_src_in_from.remove(s) if t in seen_tgt_in_from: common_tgt += 1 seen_tgt_in_from.remove(t) seen.add((s, t)) return common, common_src, common_tgt, from_count, to_count def main(): parser = argparse.ArgumentParser() parser.add_argument("--folder", type=str, required=True, help="the data folder ") parser.add_argument("--split", type=str, default='test', help="split (valid, test) to check against training data") parser.add_argument('--directions', type=str, default=None, required=False) args = parser.parse_args() if args.directions is None: directions = set(get_directions(args.folder)) directions = sorted(directions) else: directions = args.directions.split(',') directions = sorted(set(directions)) results = [] print(f'checking where {args.split} split data are in training') print('direction\tcommon_count\tsrc common\ttgt common\tfrom_size\tto_size') for direction in directions: src, tgt = direction.split('-') from_src_file = f'{args.folder}/{args.split}.{src}-{tgt}.{src}' from_tgt_file = f'{args.folder}/{args.split}.{src}-{tgt}.{tgt}' if not os.path.exists(from_src_file): # some test/valid data might in reverse directinos: from_src_file = f'{args.folder}/{args.split}.{tgt}-{src}.{src}' from_tgt_file = f'{args.folder}/{args.split}.{tgt}-{src}.{tgt}' to_src_file = f'{args.folder}/train.{src}-{tgt}.{src}' to_tgt_file = f'{args.folder}/train.{src}-{tgt}.{tgt}' if not os.path.exists(to_src_file) or not os.path.exists(from_src_file): continue r = check_diff(from_src_file, from_tgt_file, to_src_file, to_tgt_file) results.append(r) print(f'{direction}\t', '\t'.join(map(str, r))) if __name__ == "__main__": main()
KosmosX-API-main
kosmosX/fairseq/examples/multilingual/data_scripts/check_self_overlaps.py
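A minimal sketch of invoking the overlap checker above; it prints one tab-separated row per direction with the counts returned by check_diff. The folder path is hypothetical:

import subprocess

subprocess.run(
    [
        "python", "check_self_overlaps.py",
        "--folder", "/data/ml50_workdir/ML50/raw",   # hypothetical data folder with train/valid/test files
        "--split", "test",                           # check the test split against the training data
    ],
    check=True,
)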
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

#!/bin/python

import fasttext
from multiprocessing import Pool
import contextlib
import sys
import argparse
from functools import partial
import io


model = None


def init(model_path):
    global model
    model = fasttext.load_model(model_path)


def pred(lines):
    return lines, [model.predict(line.strip())[0][0][9:] for line in lines]


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", type=str, required=True, help="model to load")
    parser.add_argument("--inputs", nargs="+", default=['-'], help="input files to filter")
    parser.add_argument("--langs", nargs="+", required=True, help="lang ids of each input file")
    parser.add_argument("--outputs", nargs="+", default=['-'], help="path to save lid filtered outputs")
    parser.add_argument("--num-workers", type=int, metavar="N", default=10, help="number of processes in parallel")
    args = parser.parse_args()
    assert len(args.inputs) == len(args.langs) and len(args.inputs) == len(args.outputs)

    with contextlib.ExitStack() as stack:
        inputs = [
            stack.enter_context(open(input, "r", encoding="utf-8", newline="\n", errors="replace"))
            if input != "-" else io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8', errors="replace")
            for input in args.inputs
        ]
        outputs = [
            stack.enter_context(open(output, "w", encoding="utf-8", newline="\n"))
            if output != "-" else sys.stdout
            for output in args.outputs
        ]
        with Pool(args.num_workers, initializer=partial(init, args.model)) as p:
            skip_cnt = 0
            for lines, preds in p.imap(pred, list(zip(*inputs)), chunksize=500):
                if not all(a == b for a, b in zip(preds, args.langs)):
                    skip_cnt += 1
                    continue
                for line, output_h in zip(lines, outputs):
                    print(line.strip(), file=output_h)
            print(f"Skipped {skip_cnt} lines.")


if __name__ == "__main__":
    main()
KosmosX-API-main
kosmosX/fairseq/examples/multilingual/data_scripts/utils/fasttext_multi_filter.py
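A minimal sketch of running the fastText LID filter above on one bitext pair, mirroring how lid_filter() in the WMT download script calls it; the model and file paths are hypothetical, and the --langs values are the short fastText language ids expected for each input:

import subprocess

subprocess.run(
    [
        "python", "fasttext_multi_filter.py",
        "--model", "/data/lid.176.bin",                                    # hypothetical path to the fastText LID model
        "--inputs", "train.ro_RO-en_XX.ro_RO", "train.ro_RO-en_XX.en_XX",  # hypothetical parallel inputs
        "--langs", "ro", "en",
        "--outputs", "filtered.ro_RO", "filtered.en_XX",
    ],
    check=True,
)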
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse


def deup(src_file, tgt_file, src_file_out, tgt_file_out):
    seen = set()
    dup_count = 0
    with open(src_file, encoding='utf-8') as fsrc, \
            open(tgt_file, encoding='utf-8') as ftgt, \
            open(src_file_out, 'w', encoding='utf-8') as fsrc_out, \
            open(tgt_file_out, 'w', encoding='utf-8') as ftgt_out:
        for s, t in zip(fsrc, ftgt):
            if (s, t) not in seen:
                fsrc_out.write(s)
                ftgt_out.write(t)
                seen.add((s, t))
            else:
                dup_count += 1
    print(f'number of duplicates: {dup_count}')


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--src-file", type=str, required=True, help="src file")
    parser.add_argument("--tgt-file", type=str, required=True, help="tgt file")
    parser.add_argument("--src-file-out", type=str, required=True, help="src output file")
    parser.add_argument("--tgt-file-out", type=str, required=True, help="tgt output file")
    args = parser.parse_args()
    deup(args.src_file, args.tgt_file, args.src_file_out, args.tgt_file_out)


if __name__ == "__main__":
    main()
KosmosX-API-main
kosmosX/fairseq/examples/multilingual/data_scripts/utils/dedup.py
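A minimal sketch of running the de-duplication script above on a parallel corpus; it keeps only the first occurrence of each (source, target) pair. File names are hypothetical:

import subprocess

subprocess.run(
    [
        "python", "dedup.py",
        "--src-file", "train.ro_RO-en_XX.ro_RO",
        "--tgt-file", "train.ro_RO-en_XX.en_XX",
        "--src-file-out", "train.dedup.ro_RO",
        "--tgt-file-out", "train.dedup.en_XX",
    ],
    check=True,
)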
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch import torch.nn as nn import torch.nn.functional as F from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) @register_model("laser_lstm") class LSTMModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) def forward( self, src_tokens, src_lengths, prev_output_tokens=None, tgt_tokens=None, tgt_lengths=None, target_language_id=None, dataset_name="", ): assert target_language_id is not None src_encoder_out = self.encoder(src_tokens, src_lengths, dataset_name) return self.decoder( prev_output_tokens, src_encoder_out, lang_id=target_language_id ) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" parser.add_argument( "--dropout", default=0.1, type=float, metavar="D", help="dropout probability", ) parser.add_argument( "--encoder-embed-dim", type=int, metavar="N", help="encoder embedding dimension", ) parser.add_argument( "--encoder-embed-path", default=None, type=str, metavar="STR", help="path to pre-trained encoder embedding", ) parser.add_argument( "--encoder-hidden-size", type=int, metavar="N", help="encoder hidden size" ) parser.add_argument( "--encoder-layers", type=int, metavar="N", help="number of encoder layers" ) parser.add_argument( "--encoder-bidirectional", action="store_true", help="make all layers of encoder bidirectional", ) parser.add_argument( "--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension", ) parser.add_argument( "--decoder-embed-path", default=None, type=str, metavar="STR", help="path to pre-trained decoder embedding", ) parser.add_argument( "--decoder-hidden-size", type=int, metavar="N", help="decoder hidden size" ) parser.add_argument( "--decoder-layers", type=int, metavar="N", help="number of decoder layers" ) parser.add_argument( "--decoder-out-embed-dim", type=int, metavar="N", help="decoder output embedding dimension", ) parser.add_argument( "--decoder-zero-init", type=str, metavar="BOOL", help="initialize the decoder hidden/cell state to zero", ) parser.add_argument( "--decoder-lang-embed-dim", type=int, metavar="N", help="decoder language embedding dimension", ) parser.add_argument( "--fixed-embeddings", action="store_true", help="keep embeddings fixed (ENCODER ONLY)", ) # TODO Also apply to decoder embeddings? 
# Granular dropout settings (if not specified these default to --dropout) parser.add_argument( "--encoder-dropout-in", type=float, metavar="D", help="dropout probability for encoder input embedding", ) parser.add_argument( "--encoder-dropout-out", type=float, metavar="D", help="dropout probability for encoder output", ) parser.add_argument( "--decoder-dropout-in", type=float, metavar="D", help="dropout probability for decoder input embedding", ) parser.add_argument( "--decoder-dropout-out", type=float, metavar="D", help="dropout probability for decoder output", ) @classmethod def build_model(cls, args, task): """Build a new model instance.""" # make sure that all args are properly defaulted (in case there are any new ones) base_architecture(args) def load_pretrained_embedding_from_file(embed_path, dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) embed_dict = utils.parse_embedding(embed_path) utils.print_embed_overlap(embed_dict, dictionary) return utils.load_embedding(embed_dict, dictionary, embed_tokens) pretrained_encoder_embed = None if args.encoder_embed_path: pretrained_encoder_embed = load_pretrained_embedding_from_file( args.encoder_embed_path, task.source_dictionary, args.encoder_embed_dim ) pretrained_decoder_embed = None if args.decoder_embed_path: pretrained_decoder_embed = load_pretrained_embedding_from_file( args.decoder_embed_path, task.target_dictionary, args.decoder_embed_dim ) num_langs = task.num_tasks if hasattr(task, "num_tasks") else 0 encoder = LSTMEncoder( dictionary=task.source_dictionary, embed_dim=args.encoder_embed_dim, hidden_size=args.encoder_hidden_size, num_layers=args.encoder_layers, dropout_in=args.encoder_dropout_in, dropout_out=args.encoder_dropout_out, bidirectional=args.encoder_bidirectional, pretrained_embed=pretrained_encoder_embed, fixed_embeddings=args.fixed_embeddings, ) decoder = LSTMDecoder( dictionary=task.target_dictionary, embed_dim=args.decoder_embed_dim, hidden_size=args.decoder_hidden_size, out_embed_dim=args.decoder_out_embed_dim, num_layers=args.decoder_layers, dropout_in=args.decoder_dropout_in, dropout_out=args.decoder_dropout_out, zero_init=options.eval_bool(args.decoder_zero_init), encoder_embed_dim=args.encoder_embed_dim, encoder_output_units=encoder.output_units, pretrained_embed=pretrained_decoder_embed, num_langs=num_langs, lang_embed_dim=args.decoder_lang_embed_dim, ) return cls(encoder, decoder) class LSTMEncoder(FairseqEncoder): """LSTM encoder.""" def __init__( self, dictionary, embed_dim=512, hidden_size=512, num_layers=1, dropout_in=0.1, dropout_out=0.1, bidirectional=False, left_pad=True, pretrained_embed=None, padding_value=0.0, fixed_embeddings=False, ): super().__init__(dictionary) self.num_layers = num_layers self.dropout_in = dropout_in self.dropout_out = dropout_out self.bidirectional = bidirectional self.hidden_size = hidden_size num_embeddings = len(dictionary) self.padding_idx = dictionary.pad() if pretrained_embed is None: self.embed_tokens = Embedding(num_embeddings, embed_dim, self.padding_idx) else: self.embed_tokens = pretrained_embed if fixed_embeddings: self.embed_tokens.weight.requires_grad = False self.lstm = LSTM( input_size=embed_dim, hidden_size=hidden_size, num_layers=num_layers, dropout=self.dropout_out if num_layers > 1 else 0.0, bidirectional=bidirectional, ) self.left_pad = left_pad self.padding_value = padding_value self.output_units = hidden_size if bidirectional: self.output_units *= 2 def 
forward(self, src_tokens, src_lengths, dataset_name): if self.left_pad: # convert left-padding to right-padding src_tokens = utils.convert_padding_direction( src_tokens, self.padding_idx, left_to_right=True, ) bsz, seqlen = src_tokens.size() # embed tokens x = self.embed_tokens(src_tokens) x = F.dropout(x, p=self.dropout_in, training=self.training) # B x T x C -> T x B x C x = x.transpose(0, 1) # pack embedded source tokens into a PackedSequence try: packed_x = nn.utils.rnn.pack_padded_sequence(x, src_lengths.data.tolist()) except BaseException: raise Exception(f"Packing failed in dataset {dataset_name}") # apply LSTM if self.bidirectional: state_size = 2 * self.num_layers, bsz, self.hidden_size else: state_size = self.num_layers, bsz, self.hidden_size h0 = x.data.new(*state_size).zero_() c0 = x.data.new(*state_size).zero_() packed_outs, (final_hiddens, final_cells) = self.lstm(packed_x, (h0, c0)) # unpack outputs and apply dropout x, _ = nn.utils.rnn.pad_packed_sequence( packed_outs, padding_value=self.padding_value ) x = F.dropout(x, p=self.dropout_out, training=self.training) assert list(x.size()) == [seqlen, bsz, self.output_units] if self.bidirectional: def combine_bidir(outs): return torch.cat( [ torch.cat([outs[2 * i], outs[2 * i + 1]], dim=0).view( 1, bsz, self.output_units ) for i in range(self.num_layers) ], dim=0, ) final_hiddens = combine_bidir(final_hiddens) final_cells = combine_bidir(final_cells) encoder_padding_mask = src_tokens.eq(self.padding_idx).t() # Set padded outputs to -inf so they are not selected by max-pooling padding_mask = src_tokens.eq(self.padding_idx).t().unsqueeze(-1) if padding_mask.any(): x = x.float().masked_fill_(padding_mask, float("-inf")).type_as(x) # Build the sentence embedding by max-pooling over the encoder outputs sentemb = x.max(dim=0)[0] return { "sentemb": sentemb, "encoder_out": (x, final_hiddens, final_cells), "encoder_padding_mask": encoder_padding_mask if encoder_padding_mask.any() else None, } def reorder_encoder_out(self, encoder_out_dict, new_order): encoder_out_dict["sentemb"] = encoder_out_dict["sentemb"].index_select( 0, new_order ) encoder_out_dict["encoder_out"] = tuple( eo.index_select(1, new_order) for eo in encoder_out_dict["encoder_out"] ) if encoder_out_dict["encoder_padding_mask"] is not None: encoder_out_dict["encoder_padding_mask"] = encoder_out_dict[ "encoder_padding_mask" ].index_select(1, new_order) return encoder_out_dict def max_positions(self): """Maximum input length supported by the encoder.""" return int(1e5) # an arbitrary large number class LSTMDecoder(FairseqIncrementalDecoder): """LSTM decoder.""" def __init__( self, dictionary, embed_dim=512, hidden_size=512, out_embed_dim=512, num_layers=1, dropout_in=0.1, dropout_out=0.1, zero_init=False, encoder_embed_dim=512, encoder_output_units=512, pretrained_embed=None, num_langs=1, lang_embed_dim=0, ): super().__init__(dictionary) self.dropout_in = dropout_in self.dropout_out = dropout_out self.hidden_size = hidden_size num_embeddings = len(dictionary) padding_idx = dictionary.pad() if pretrained_embed is None: self.embed_tokens = Embedding(num_embeddings, embed_dim, padding_idx) else: self.embed_tokens = pretrained_embed self.layers = nn.ModuleList( [ LSTMCell( input_size=encoder_output_units + embed_dim + lang_embed_dim if layer == 0 else hidden_size, hidden_size=hidden_size, ) for layer in range(num_layers) ] ) if hidden_size != out_embed_dim: self.additional_fc = Linear(hidden_size, out_embed_dim) self.fc_out = Linear(out_embed_dim, num_embeddings, 
dropout=dropout_out) if zero_init: self.sentemb2init = None else: self.sentemb2init = Linear( encoder_output_units, 2 * num_layers * hidden_size ) if lang_embed_dim == 0: self.embed_lang = None else: self.embed_lang = nn.Embedding(num_langs, lang_embed_dim) nn.init.uniform_(self.embed_lang.weight, -0.1, 0.1) def forward( self, prev_output_tokens, encoder_out_dict, incremental_state=None, lang_id=0 ): sentemb = encoder_out_dict["sentemb"] encoder_out = encoder_out_dict["encoder_out"] if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] bsz, seqlen = prev_output_tokens.size() # get outputs from encoder encoder_outs, _, _ = encoder_out[:3] srclen = encoder_outs.size(0) # embed tokens x = self.embed_tokens(prev_output_tokens) x = F.dropout(x, p=self.dropout_in, training=self.training) # embed language identifier if self.embed_lang is not None: lang_ids = prev_output_tokens.data.new_full((bsz,), lang_id) langemb = self.embed_lang(lang_ids) # TODO Should we dropout here??? # B x T x C -> T x B x C x = x.transpose(0, 1) # initialize previous states (or get from cache during incremental generation) cached_state = utils.get_incremental_state( self, incremental_state, "cached_state" ) if cached_state is not None: prev_hiddens, prev_cells, input_feed = cached_state else: num_layers = len(self.layers) if self.sentemb2init is None: prev_hiddens = [ x.data.new(bsz, self.hidden_size).zero_() for i in range(num_layers) ] prev_cells = [ x.data.new(bsz, self.hidden_size).zero_() for i in range(num_layers) ] else: init = self.sentemb2init(sentemb) prev_hiddens = [ init[:, (2 * i) * self.hidden_size : (2 * i + 1) * self.hidden_size] for i in range(num_layers) ] prev_cells = [ init[ :, (2 * i + 1) * self.hidden_size : (2 * i + 2) * self.hidden_size, ] for i in range(num_layers) ] input_feed = x.data.new(bsz, self.hidden_size).zero_() attn_scores = x.data.new(srclen, seqlen, bsz).zero_() outs = [] for j in range(seqlen): if self.embed_lang is None: input = torch.cat((x[j, :, :], sentemb), dim=1) else: input = torch.cat((x[j, :, :], sentemb, langemb), dim=1) for i, rnn in enumerate(self.layers): # recurrent cell hidden, cell = rnn(input, (prev_hiddens[i], prev_cells[i])) # hidden state becomes the input to the next layer input = F.dropout(hidden, p=self.dropout_out, training=self.training) # save state for next time step prev_hiddens[i] = hidden prev_cells[i] = cell out = hidden out = F.dropout(out, p=self.dropout_out, training=self.training) # input feeding input_feed = out # save final output outs.append(out) # cache previous states (no-op except during incremental generation) utils.set_incremental_state( self, incremental_state, "cached_state", (prev_hiddens, prev_cells, input_feed), ) # collect outputs across time steps x = torch.cat(outs, dim=0).view(seqlen, bsz, self.hidden_size) # T x B x C -> B x T x C x = x.transpose(1, 0) # srclen x tgtlen x bsz -> bsz x tgtlen x srclen attn_scores = attn_scores.transpose(0, 2) # project back to size of vocabulary if hasattr(self, "additional_fc"): x = self.additional_fc(x) x = F.dropout(x, p=self.dropout_out, training=self.training) x = self.fc_out(x) return x, attn_scores def reorder_incremental_state(self, incremental_state, new_order): super().reorder_incremental_state(incremental_state, new_order) cached_state = utils.get_incremental_state( self, incremental_state, "cached_state" ) if cached_state is None: return def reorder_state(state): if isinstance(state, list): return [reorder_state(state_i) for state_i in state] return 
state.index_select(0, new_order) new_state = tuple(map(reorder_state, cached_state)) utils.set_incremental_state(self, incremental_state, "cached_state", new_state) def max_positions(self): """Maximum output length supported by the decoder.""" return int(1e5) # an arbitrary large number def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.uniform_(m.weight, -0.1, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m def LSTM(input_size, hidden_size, **kwargs): m = nn.LSTM(input_size, hidden_size, **kwargs) for name, param in m.named_parameters(): if "weight" in name or "bias" in name: param.data.uniform_(-0.1, 0.1) return m def LSTMCell(input_size, hidden_size, **kwargs): m = nn.LSTMCell(input_size, hidden_size, **kwargs) for name, param in m.named_parameters(): if "weight" in name or "bias" in name: param.data.uniform_(-0.1, 0.1) return m def Linear(in_features, out_features, bias=True, dropout=0): """Weight-normalized Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features, bias=bias) m.weight.data.uniform_(-0.1, 0.1) if bias: m.bias.data.uniform_(-0.1, 0.1) return m @register_model_architecture("laser_lstm", "laser_lstm") def base_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_hidden_size = getattr( args, "encoder_hidden_size", args.encoder_embed_dim ) args.encoder_layers = getattr(args, "encoder_layers", 1) args.encoder_bidirectional = getattr(args, "encoder_bidirectional", False) args.encoder_dropout_in = getattr(args, "encoder_dropout_in", args.dropout) args.encoder_dropout_out = getattr(args, "encoder_dropout_out", args.dropout) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_hidden_size = getattr( args, "decoder_hidden_size", args.decoder_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 1) args.decoder_out_embed_dim = getattr(args, "decoder_out_embed_dim", 512) args.decoder_dropout_in = getattr(args, "decoder_dropout_in", args.dropout) args.decoder_dropout_out = getattr(args, "decoder_dropout_out", args.dropout) args.decoder_zero_init = getattr(args, "decoder_zero_init", "0") args.decoder_lang_embed_dim = getattr(args, "decoder_lang_embed_dim", 0) args.fixed_embeddings = getattr(args, "fixed_embeddings", False)
KosmosX-API-main
kosmosX/fairseq/examples/laser/laser_src/laser_lstm.py
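The LSTM encoder above derives its sentence embedding by masking padded time steps with -inf and max-pooling over time. A minimal standalone sketch of just that step, with toy shapes and values that are not part of the repository:

import torch

T, B, C, padding_idx = 5, 2, 4, 1
src_tokens = torch.tensor([[3, 4, 5, 1, 1],   # first sequence padded with 1s
                           [6, 7, 8, 9, 2]])  # B x T
x = torch.randn(T, B, C)                      # encoder outputs, T x B x C

# set padded positions to -inf so they can never win the max
padding_mask = src_tokens.eq(padding_idx).t().unsqueeze(-1)   # T x B x 1
if padding_mask.any():
    x = x.float().masked_fill_(padding_mask, float("-inf")).type_as(x)

sentemb = x.max(dim=0)[0]                     # B x C sentence embedding
assert torch.isfinite(sentemb).all()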
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging from typing import Any, Dict, List, Optional from torch import Tensor import torch import torch.nn as nn from fairseq.models import ( FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import ( base_architecture, Embedding, TransformerModel, TransformerEncoder, TransformerDecoder, ) from fairseq.modules import ( TransformerDecoderLayer, ) logger = logging.getLogger(__name__) @register_model("laser_transformer") class LaserTransformerModel(FairseqEncoderDecoderModel): """Train Transformer for LASER task Requires --task laser """ def __init__(self, encoder, decoder): super().__init__(encoder, decoder) def forward( self, src_tokens, src_lengths, prev_output_tokens=None, tgt_tokens=None, tgt_lengths=None, target_language_id=-1, dataset_name="", ): laser_encoder_out = self.encoder(src_tokens, src_lengths) return self.decoder( prev_output_tokens, laser_encoder_out, lang_id=target_language_id ) @staticmethod def add_args(parser): """Add model-specific arguments to the parser.""" TransformerModel.add_args(parser) parser.add_argument( "--decoder-lang-embed-dim", type=int, metavar="N", help="decoder language embedding dimension", ) @classmethod def build_model(cls, args, task): base_laser_transformer_architecture(args) num_langs = task.num_tasks if hasattr(task, "num_tasks") else 0 def load_embed_tokens(dictionary, embed_dim): num_embeddings = len(dictionary) padding_idx = dictionary.pad() return Embedding(num_embeddings, embed_dim, padding_idx) encoder_embed_tokens = load_embed_tokens( task.source_dictionary, args.encoder_embed_dim ) decoder_embed_tokens = load_embed_tokens( task.target_dictionary, args.decoder_embed_dim ) num_langs = task.num_tasks if hasattr(task, "num_tasks") else 0 encoder = LaserTransformerEncoder( args, task.source_dictionary, encoder_embed_tokens ) decoder = LaserTransformerDecoder( args, task.target_dictionary, decoder_embed_tokens, num_langs=num_langs, lang_embed_dim=args.decoder_lang_embed_dim, ) return cls(encoder, decoder) class LaserTransformerEncoder(TransformerEncoder): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def forward(self, src_tokens, *args, **kwargs): encoder_out = super().forward(src_tokens, *args, **kwargs) x = encoder_out["encoder_out"][0] # T x B x C padding_mask = src_tokens.eq(self.padding_idx).t().unsqueeze(-1) if padding_mask.any(): x = x.float().masked_fill_(padding_mask, float("-inf")).type_as(x) # Build the sentence embedding by max-pooling over the encoder outputs sentemb = x.max(dim=0)[0] # The Pytorch Mobile lite interpreter does not supports returning NamedTuple in # `foward` so we use a dictionary instead. # TorchScript does not support mixed values so the values are all lists. # The empty list is equivalent to None. 
return {"sentemb": [sentemb]} # B x C @torch.jit.export def reorder_encoder_out(self, encoder_out: Dict[str, List[Tensor]], new_order): """ Same as the one in transformer.py, with new_sentemb """ if len(encoder_out["sentemb"]) == 0: new_sentemb = [] else: new_sentemb = [encoder_out["sentemb"][0].index_select(0, new_order)] return { "sentemb": new_sentemb, # B x C } class LaserTransformerDecoder(TransformerDecoder): def __init__(self, args, dictionary, *kargs, **kwargs): self.num_langs = kwargs.get("num_langs", 1) self.lang_embed_dim = kwargs.get("lang_embed_dim", 0) kwargs.pop("num_langs", None) kwargs.pop("lang_embed_dim", None) super().__init__(args, dictionary, *kargs, **kwargs, no_encoder_attn=True) if self.lang_embed_dim == 0: self.embed_lang = None else: self.embed_lang = nn.Embedding(self.num_langs, self.lang_embed_dim) nn.init.uniform_(self.embed_lang.weight, -0.1, 0.1) if self.output_projection is not None: laser_output_embed_dim = ( self.output_embed_dim + self.lang_embed_dim + args.encoder_embed_dim ) self.output_projection = nn.Linear( laser_output_embed_dim, len(dictionary), bias=False ) nn.init.normal_( self.output_projection.weight, mean=0, std=laser_output_embed_dim ** -0.5, ) def build_decoder_layer(self, args, no_encoder_attn=False): decoder_embed_dim = args.decoder_embed_dim args.decoder_embed_dim = ( decoder_embed_dim + self.lang_embed_dim + args.encoder_embed_dim ) res = TransformerDecoderLayer(args, no_encoder_attn=True) args.decoder_embed_dim = decoder_embed_dim return res def extract_features( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]], incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, full_context_alignment: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, lang_id: Optional[int] = None, ): """ Similar to *forward* but only return features. Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al., EMNLP 2019). Args: full_context_alignment (bool, optional): don't apply auto-regressive mask to self-attention (default: False). alignment_layer (int, optional): return mean alignment over heads at this layer (default: last layer). alignment_heads (int, optional): only average alignment over this many heads (default: all heads). 
Returns: tuple: - the decoder's features of shape `(batch, tgt_len, embed_dim)` - a dictionary with any model-specific outputs """ if alignment_layer is None: alignment_layer = self.num_layers - 1 # embed positions positions = ( self.embed_positions( prev_output_tokens, incremental_state=incremental_state ) if self.embed_positions is not None else None ) if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] if positions is not None: positions = positions[:, -1:] bsz, seqlen = prev_output_tokens.size() # embed tokens and positions x = self.embed_scale * self.embed_tokens(prev_output_tokens) if self.quant_noise is not None: x = self.quant_noise(x) if self.project_in_dim is not None: x = self.project_in_dim(x) if positions is not None: x += positions if self.layernorm_embedding is not None: x = self.layernorm_embedding(x) x = self.dropout_module(x) # B x T x C -> T x B x C x = x.transpose(0, 1) if self.embed_lang is not None: lang_ids = prev_output_tokens.data.new_full((bsz,), lang_id) langemb = self.embed_lang(lang_ids) langemb = langemb.unsqueeze(0) repeat_vals = [x.shape[0] // langemb.shape[0]] + [-1] * ( len(langemb.shape) - 1 ) x = torch.cat((x, langemb.expand(*repeat_vals)), dim=-1) sentemb = encoder_out["sentemb"][0] sentemb = sentemb.unsqueeze(0) repeat_vals = [x.shape[0] // sentemb.shape[0]] + [-1] * (len(sentemb.shape) - 1) x = torch.cat((x, sentemb.expand(*repeat_vals)), dim=-1) self_attn_padding_mask: Optional[Tensor] = None if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any(): self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx) # decoder layers attn: Optional[Tensor] = None inner_states: List[Optional[Tensor]] = [x] for idx, layer in enumerate(self.layers): if incremental_state is None and not full_context_alignment: self_attn_mask = self.buffered_future_mask(x) else: self_attn_mask = None x, layer_attn, _ = layer( x, None, None, incremental_state, self_attn_mask=self_attn_mask, self_attn_padding_mask=self_attn_padding_mask, need_attn=bool((idx == alignment_layer)), need_head_weights=bool((idx == alignment_layer)), ) inner_states.append(x) if layer_attn is not None and idx == alignment_layer: attn = layer_attn.float().to(x) if attn is not None: if alignment_heads is not None: attn = attn[:alignment_heads] # average probabilities over heads attn = attn.mean(dim=0) if self.layer_norm is not None: x = self.layer_norm(x) # T x B x C -> B x T x C x = x.transpose(0, 1) if self.project_out_dim is not None: x = self.project_out_dim(x) return x, {"attn": [attn], "inner_states": inner_states} def forward( self, prev_output_tokens, encoder_out: Optional[Dict[str, List[Tensor]]] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, features_only: bool = False, alignment_layer: Optional[int] = None, alignment_heads: Optional[int] = None, src_lengths: Optional[Any] = None, return_all_hiddens: bool = False, lang_id: Optional[int] = None, ): """ Args: prev_output_tokens (LongTensor): previous decoder outputs of shape `(batch, tgt_len)`, for teacher forcing encoder_out (optional): output from the encoder, used for encoder-side attention incremental_state (dict): dictionary used for storing state during :ref:`Incremental decoding` features_only (bool, optional): only return features without applying output layer (default: False). 
Returns: tuple: - the decoder's output of shape `(batch, tgt_len, vocab)` - a dictionary with any model-specific outputs """ assert lang_id is not None x, extra = self.extract_features( prev_output_tokens, encoder_out=encoder_out, incremental_state=incremental_state, alignment_layer=alignment_layer, alignment_heads=alignment_heads, lang_id=lang_id, ) if not features_only: x = self.output_layer(x) return x, extra @register_model_architecture("laser_transformer", "laser_transformer") def base_laser_transformer_architecture(args): base_architecture(args) args.decoder_lang_embed_dim = getattr(args, "decoder_lang_embed_dim", 0)
KosmosX-API-main
kosmosX/fairseq/examples/laser/laser_src/laser_transformer.py
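LaserTransformerDecoder conditions generation on the encoder by broadcasting the B x C sentence embedding along the time axis and concatenating it (plus an optional language embedding) to every decoder input. A small standalone sketch of that broadcast-and-concat, using toy sizes rather than repository code:

import torch

T, B, C_tok, C_sent = 3, 2, 8, 4
x = torch.randn(T, B, C_tok)          # decoder token states, T x B x C_tok
sentemb = torch.randn(B, C_sent)      # one pooled vector per source sentence

sentemb = sentemb.unsqueeze(0)        # 1 x B x C_sent
repeat_vals = [x.shape[0] // sentemb.shape[0]] + [-1] * (len(sentemb.shape) - 1)
x = torch.cat((x, sentemb.expand(*repeat_vals)), dim=-1)

print(x.shape)                        # torch.Size([3, 2, 12])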
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import OrderedDict import numpy as np from fairseq.data import BaseWrapperDataset, FairseqDataset, iterators class MultiItr(object): def __init__(self, itr): self.itr = itr self._counts = [0 for x in itr] def __len__(self): return sum(len(itr) for itr in self.itr) def __iter__(self): return self def __next__(self): ratios = [count / len(itr) for count, itr in zip(self._counts, self.itr)] idx = ratios.index(min(ratios)) self._counts[idx] += 1 return next(self.itr[idx]) class MultidatasetEpochBatchIterator(iterators.EpochBatchIterating): """A wrapper around multiple epoch batch iterators.""" def __init__( self, dataset, batch_sampler, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1, ): assert isinstance(dataset, OrderedDict) assert len(dataset) assert isinstance(dataset[next(iter(dataset))], FairseqDataset) self.iterators = [] self.epoch = epoch for key, dt in dataset.items(): epoch_iter = iterators.EpochBatchIterator( dataset=dt, collate_fn=dt.collater, batch_sampler=batch_sampler[key], seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=0, epoch=epoch, ) self.iterators.append(epoch_iter) def __len__(self): return sum(len(itr) for itr in self.iterators) def next_epoch_itr(self, shuffle=True, fix_batches_to_gpus=False): # `self.epoch += 1` should be handled by underlying `EpochBatchIterator`s. return MultiItr( [ itr.next_epoch_itr( shuffle=shuffle, fix_batches_to_gpus=fix_batches_to_gpus ) for itr in self.iterators ] ) def end_of_epoch(self): return all(itr.end_of_epoch() for itr in self.iterators) @property def next_epoch_idx(self): """Return the epoch index after *next_epoch_itr* is called.""" epochs = [itr.next_epoch_idx for itr in self.iterators] self.epoch = epochs[0] assert all(epoch == self.epoch for epoch in epochs) return self.epoch @property def iterations_in_epoch(self): return sum(itr.iterations_in_epoch for itr in self.iterators) def state_dict(self): return { "iterators": [it.state_dict() for it in self.iterators], "epoch": self.epoch, } def load_state_dict(self, state_dict): self.epoch = state_dict["epoch"] for it, d in zip(self.iterators, state_dict["iterators"]): it.load_state_dict(d) class MultitaskDatasetWrapper(BaseWrapperDataset): """A wrapper for a multitask dataset.""" def __init__(self, dataset, target_language_id, sample=1.0, name=""): super().__init__(dataset) self.target_language_id = target_language_id self.sample = sample self.name = name def collater(self, *args, **kwargs): ans = self.dataset.collater(*args, **kwargs) if "net_input" in ans: ans["net_input"]["target_language_id"] = self.target_language_id ans["net_input"]["dataset_name"] = self.name return ans def num_tokens(self, *args, **kwargs): return self.dataset.num_tokens(*args, **kwargs) def ordered_indices(self, *args, **kwargs): indices = self.dataset.ordered_indices(*args, **kwargs) # Hacky solution for sampling size = int(self.sample * indices.shape[0]) return indices.take(np.sort(np.random.permutation(indices.shape[0])[:size])) def size(self, index: int): return self.dataset.size(index) @property def supports_prefetch(self): """Whether this dataset supports prefetching.""" return getattr(self.dataset, "supports_prefetch", False) def prefetch(self, indices): return self.dataset.prefetch(indices)
KosmosX-API-main
kosmosX/fairseq/examples/laser/laser_src/multitask_data_utils.py
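MultiItr interleaves several epoch iterators by always drawing from the one whose consumed fraction count/len is currently smallest, so datasets of different sizes are visited roughly in proportion to their lengths. A toy illustration of that scheduling rule on plain Python lists (not repository code):

iterables = [list(range(4)), list(range(8))]   # a small and a large "dataset"
iters = [iter(x) for x in iterables]
counts = [0, 0]

order = []
for _ in range(sum(len(x) for x in iterables)):
    # pick the iterator with the smallest consumed fraction
    ratios = [c / len(x) for c, x in zip(counts, iterables)]
    idx = ratios.index(min(ratios))
    order.append((idx, next(iters[idx])))
    counts[idx] += 1

print(order)   # the larger dataset is drawn from roughly twice as often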
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .laser_task import *  # noqa
from .laser_lstm import *  # noqa
from .laser_transformer import *  # noqa
KosmosX-API-main
kosmosX/fairseq/examples/laser/laser_src/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from collections import OrderedDict, defaultdict import json import os import logging from argparse import ArgumentError from fairseq import options, models from fairseq.data import ( data_utils, Dictionary, LanguagePairDataset, IndexedDataset, FairseqDataset, ) from .multitask_data_utils import ( MultitaskDatasetWrapper, MultidatasetEpochBatchIterator, ) from fairseq.tasks import LegacyFairseqTask, register_task logger = logging.getLogger(__name__) @register_task("laser") class LaserTask(LegacyFairseqTask): @staticmethod def add_args(parser): """Add task-specific arguments to the parser.""" parser.add_argument( "configfile", metavar="PATH", help="dataset configuration file in json" ) parser.add_argument( "--weighting-alpha", type=float, default=None, help="alpha for automatic weighting", ) parser.add_argument( "--raw-text", action="store_true", help="load raw text dataset" ) parser.add_argument( "--left-pad-source", default="True", type=str, metavar="BOOL", help="pad the source on the left (default: True)", ) parser.add_argument( "--left-pad-target", default="False", type=str, metavar="BOOL", help="pad the target on the left (default: False)", ) try: parser.add_argument( "--max-source-positions", default=1024, type=int, metavar="N", help="max number of tokens in the source sequence", ) parser.add_argument( "--max-target-positions", default=1024, type=int, metavar="N", help="max number of tokens in the target sequence", ) except ArgumentError: # this might have already been defined. Once we transition this to hydra it should be fine to add it here. pass def __init__(self, args, config, src_dictionary, tgt_dictionary, num_tasks): super().__init__(args) self.config = config self.src_dictionary = src_dictionary self.tgt_dictionary = tgt_dictionary self.num_tasks = num_tasks @classmethod def setup_task(cls, args, **kwargs): with open(args.configfile, "r") as f: config = json.load(f) num_tasks = max(dataset["id"] for dataset in config["train"]) + 1 args.left_pad_source = options.eval_bool(args.left_pad_source) args.left_pad_target = options.eval_bool(args.left_pad_target) src_dictionary = Dictionary.load(config["src_vocab"]) tgt_dictionary = Dictionary.load(config["tgt_vocab"]) logger.info( "| src Dictionary {} : {} types".format( config["src_vocab"], len(src_dictionary) ) ) logger.info( "| tgt Dictionary {} : {} types".format( config["tgt_vocab"], len(tgt_dictionary) ) ) return cls(args, config, src_dictionary, tgt_dictionary, num_tasks) # Experimental overriding for backtranslation def build_model(self, args, from_checkpoint=False): model = models.build_model(args, self) return model def dataset(self, split): if split not in self.datasets: raise KeyError("Dataset not loaded: " + split) return self.datasets[split] def load_dataset(self, split, epoch=1, **kwargs): """Load a dataset split.""" def indexed_dataset(path, dictionary): if self.args.raw_text: raise Exception("Unable to handle raw text.") dataset = IndexedDataset(path, fix_lua_indexing=True) return dataset pair_datasets = OrderedDict() if split == "valid": self.datasets[split] = pair_datasets return if split not in self.config: raise FileNotFoundError( "Dataset not found in config file: {}".format(split) ) size_by_corpus = defaultdict(int) size_sum = 0 size_sum_with_subsampling = 0 init_pair_datasets = {} for dataset_config in self.config[split]: src_path = 
os.path.dirname(dataset_config["src"]) corpus_name = src_path.split("/")[-2] language_pair_name = src_path.split("/")[-1] pair_datasets_key = corpus_name + "-" + language_pair_name logger.info(f"loading... {pair_datasets_key}") if "src" in dataset_config: src_dataset = indexed_dataset( dataset_config["src"], self.src_dictionary ) else: src_dataset = None if "tgt" in dataset_config: tgt_dataset = indexed_dataset( dataset_config["tgt"], self.tgt_dictionary ) else: tgt_dataset = None dataset = LanguagePairDataset( src_dataset, src_dataset.sizes, self.src_dictionary, tgt_dataset, tgt_dataset.sizes, self.tgt_dictionary, left_pad_source=self.args.left_pad_source, left_pad_target=self.args.left_pad_target, ) if pair_datasets_key in init_pair_datasets: logger.warning( f"Ignoring already added {pair_datasets_key}. " f"Consider using `sample` key in order to upsample." ) else: init_pair_datasets[pair_datasets_key] = { "dataset": dataset, "sample": dataset_config.get("sample", None), "id": dataset_config.get("id", None), "len": len(dataset), } length_sum = 0 weighted_freqs_sum = 0 freq_per_dataset = {} vmax = 0 vmin = 1 weighted_freq_per_dataset = {} if self.args.weighting_alpha: for key in init_pair_datasets: if init_pair_datasets[key]["sample"] is None: length_sum += len(init_pair_datasets[key]["dataset"]) for key in init_pair_datasets: if init_pair_datasets[key]["sample"] is None: val = float(init_pair_datasets[key]["len"]) / length_sum freq_per_dataset[key] = val weighted_freqs_sum += val ** self.args.weighting_alpha for key in freq_per_dataset: val = ( freq_per_dataset[key] ** self.args.weighting_alpha / weighted_freqs_sum ) vmin = min(vmin, val) vmax = max(vmax, val) weighted_freq_per_dataset[key] = val for pair_datasets_key in init_pair_datasets: dataset_config = init_pair_datasets[pair_datasets_key] dataset = dataset_config["dataset"] sample = dataset_config["sample"] if sample is None: sample = 1.0 if pair_datasets_key in weighted_freq_per_dataset: w = vmax / weighted_freq_per_dataset[pair_datasets_key] sample = w sample = round(sample) initial_sample = sample initial_pair_datasets_key = pair_datasets_key while sample >= 1.0: assert ( pair_datasets_key not in pair_datasets ), f"{pair_datasets_key} already in" size_sum_with_subsampling += len(dataset) pair_datasets[pair_datasets_key] = MultitaskDatasetWrapper( dataset, dataset_config.get("id", 0), 1.0, name=pair_datasets_key ) size_sum += len(dataset) sample -= 1.0 pair_datasets_key += "-up" assert sample < 1e-6, f"sample remains > 0 {pair_datasets_key}" logger.info( f"added pair {initial_pair_datasets_key} length {len(dataset)} new_length = {len(dataset)*initial_sample}" ) size_by_corpus[corpus_name] += len(dataset) self.datasets[split] = pair_datasets logger.info( f"Datasets number = {len(self.datasets[split])} size = {size_sum} size_sum_with_subsampling = {size_sum_with_subsampling}" ) @property def source_dictionary(self): return self.src_dictionary @property def target_dictionary(self): return self.tgt_dictionary def get_batch_iterator( self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1, data_buffer_size=0, disable_iterator_cache=False, grouped_shuffling=False, update_epoch_batch_itr=False, **kwargs, ): assert isinstance(dataset, OrderedDict) assert len(dataset) assert isinstance(dataset[next(iter(dataset))], FairseqDataset) # initialize the dataset with the correct starting epoch for _, dt in 
dataset.items(): dt.set_epoch(epoch) indices = OrderedDict() batch_sampler = OrderedDict() with data_utils.numpy_seed(seed + epoch): for key, dt in dataset.items(): logger.info(f"\t ordered_indices {key}") indices[key] = dt.ordered_indices() # filter examples that are too large if max_positions is not None: for key, dt in dataset.items(): logger.info(f"\t filter_by_size {key}") indices[key], ignored = dt.filter_indices_by_size( indices[key], max_positions ) for key, dt in dataset.items(): logger.info(f"\t batch_by_size {key}") batch_sampler[key] = data_utils.batch_by_size( indices[key], dt.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences, required_batch_size_multiple=required_batch_size_multiple, ) epoch_iter = MultidatasetEpochBatchIterator( dataset=dataset, batch_sampler=batch_sampler, seed=seed, num_shards=num_shards, shard_id=shard_id, num_workers=num_workers, epoch=epoch, ) return epoch_iter
KosmosX-API-main
kosmosX/fairseq/examples/laser/laser_src/laser_task.py
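When --weighting-alpha is set, LaserTask.load_dataset temperature-scales the corpus frequencies with exponent alpha and repeats each corpus roughly vmax / weight times, which upsamples the small ones. A rough plain-Python sketch of that computation with made-up corpus sizes:

lengths = {"en-fr": 1_000_000, "en-sw": 10_000, "en-ne": 1_000}   # made-up sizes
alpha = 0.7

total = sum(lengths.values())
freqs = {k: v / total for k, v in lengths.items()}
z = sum(f ** alpha for f in freqs.values())
weighted = {k: (f ** alpha) / z for k, f in freqs.items()}

vmax = max(weighted.values())
samples = {k: round(vmax / w) for k, w in weighted.items()}
print(samples)   # smaller corpora get larger repeat factors, the largest gets 1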
import setuptools

with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="mmpt",
    version="0.0.1",
    author="Hu Xu, Po-yao Huang",
    author_email="[email protected]",
    description="A package for multimodal pretraining.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/pytorch/fairseq/examples/MMPT",
    packages=setuptools.find_packages(),
    install_requires=[
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: CC-BY-NC",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/setup.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import os from omegaconf import OmegaConf from mmpt.utils import recursive_config, overwrite_dir from mmpt_cli.localjob import LocalJob class JobLauncher(object): JOB_CONFIG = { "local": LocalJob, } def __init__(self, yaml_file): self.yaml_file = yaml_file job_key = "local" if yaml_file.endswith(".yaml"): config = recursive_config(yaml_file) if config.task_type is not None: job_key = config.task_type.split("_")[0] else: raise ValueError("unknown extension of job file:", yaml_file) self.job_key = job_key def __call__(self, job_type=None, dryrun=False): if job_type is not None: self.job_key = job_type.split("_")[0] print("[JobLauncher] job_key", self.job_key) job = JobLauncher.JOB_CONFIG[self.job_key]( self.yaml_file, job_type=job_type, dryrun=dryrun) return job.submit() class Pipeline(object): """a job that loads yaml config.""" def __init__(self, fn): """ load a yaml config of a job and save generated configs as yaml for each task. return: a list of files to run as specified by `run_task`. """ if fn.endswith(".py"): # a python command. self.backend = "python" self.run_yamls = [fn] return job_config = recursive_config(fn) if job_config.base_dir is None: # single file job config. self.run_yamls = [fn] return self.project_dir = os.path.join("projects", job_config.project_dir) self.run_dir = os.path.join("runs", job_config.project_dir) if job_config.run_task is not None: run_yamls = [] for stage in job_config.run_task: # each stage can have multiple tasks running in parallel. if OmegaConf.is_list(stage): stage_yamls = [] for task_file in stage: stage_yamls.append( os.path.join(self.project_dir, task_file)) run_yamls.append(stage_yamls) else: run_yamls.append(os.path.join(self.project_dir, stage)) self.run_yamls = run_yamls configs_to_save = self._overwrite_task(job_config) self._save_configs(configs_to_save) def __getitem__(self, idx): yaml_files = self.run_yamls[idx] if isinstance(yaml_files, list): return [JobLauncher(yaml_file) for yaml_file in yaml_files] return [JobLauncher(yaml_files)] def __len__(self): return len(self.run_yamls) def _save_configs(self, configs_to_save: dict): # save os.makedirs(self.project_dir, exist_ok=True) for config_file in configs_to_save: config = configs_to_save[config_file] print("saving", config_file) OmegaConf.save(config=config, f=config_file) def _overwrite_task(self, job_config): configs_to_save = {} self.base_project_dir = os.path.join("projects", job_config.base_dir) self.base_run_dir = os.path.join("runs", job_config.base_dir) for config_sets in job_config.task_group: overwrite_config = job_config.task_group[config_sets] if ( overwrite_config.task_list is None or len(overwrite_config.task_list) == 0 ): print( "[warning]", job_config.task_group, "has no task_list specified.") # we don't want this added to a final config. task_list = overwrite_config.pop("task_list", None) for config_file in task_list: config_file_path = os.path.join( self.base_project_dir, config_file) config = recursive_config(config_file_path) # overwrite it. 
if overwrite_config: config = OmegaConf.merge(config, overwrite_config) overwrite_dir(config, self.run_dir, basedir=self.base_run_dir) save_file_path = os.path.join(self.project_dir, config_file) configs_to_save[save_file_path] = config return configs_to_save def main(args): job_type = args.jobtype if args.jobtype else None # parse multiple pipelines. pipelines = [Pipeline(fn) for fn in args.yamls.split(",")] for pipe_id, pipeline in enumerate(pipelines): if not hasattr(pipeline, "project_dir"): for job in pipeline[0]: job(job_type=job_type, dryrun=args.dryrun) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("yamls", type=str) parser.add_argument( "--dryrun", action="store_true", help="run config and prepare to submit without launch the job.", ) parser.add_argument( "--jobtype", type=str, default="", help="force to run jobs as specified.") args = parser.parse_args() main(args)
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/locallaunch.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

try:
    # fairseq user dir
    from .datasets import FairseqMMDataset
    from .losses import FairseqCriterion
    from .models import FairseqMMModel
    from .tasks import FairseqMMTask
except ImportError:
    pass
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .loss import *
from .nce import *

try:
    from .fairseqmmloss import *
except ImportError:
    pass

try:
    from .expnce import *
except ImportError:
    pass
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/losses/__init__.py
# Copyright (c) Facebook, Inc. All Rights Reserved import torch from torch import nn class Loss(object): def __call__(self, *args, **kwargs): raise NotImplementedError # Dummy Loss for testing. class DummyLoss(Loss): def __init__(self): self.loss = nn.CrossEntropyLoss() def __call__(self, logits, targets, **kwargs): return self.loss(logits, targets) class DummyK400Loss(Loss): """dummy k400 loss for MViT.""" def __init__(self): self.loss = nn.CrossEntropyLoss() def __call__(self, logits, targets, **kwargs): return self.loss( logits, torch.randint(0, 400, (logits.size(0),), device=logits.device)) class CrossEntropy(Loss): def __init__(self): self.loss = nn.CrossEntropyLoss() def __call__(self, logits, targets, **kwargs): return self.loss(logits.reshape(-1, logits.size(-1)), targets.reshape(-1)) class ArgmaxCrossEntropy(Loss): def __init__(self): self.loss = nn.CrossEntropyLoss() def __call__(self, logits, targets, **kwargs): return self.loss(logits, targets.argmax(dim=1)) class BCE(Loss): def __init__(self): self.loss = nn.BCEWithLogitsLoss() def __call__(self, logits, targets, **kwargs): targets = targets.squeeze(0) return self.loss(logits, targets) class NLGLoss(Loss): def __init__(self): self.loss = nn.CrossEntropyLoss() def __call__(self, logits, text_label, **kwargs): targets = text_label[text_label != -100] return self.loss(logits, targets) class MSE(Loss): def __init__(self): self.loss = nn.MSELoss() def __call__(self, logits, targets, **kwargs): return self.loss(logits, targets) class L1(Loss): def __init__(self): self.loss = nn.L1Loss() def __call__(self, logits, targets, **kwargs): return self.loss(logits, targets) class SmoothL1(Loss): def __init__(self): self.loss = nn.SmoothL1Loss() def __call__(self, logits, targets, **kwargs): return self.loss(logits, targets)
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/losses/loss.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

"""
TODO (huxu): a general fairseq criterion for all your pre-defined losses.
"""

from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq import metrics


@register_criterion("mmloss")
class MMCriterion(FairseqCriterion):
    def __init__(self, task):
        super().__init__(task)
        # TODO (huxu): wrap forward call of loss_fn and eval_fn into task.
        self.mmtask = task.mmtask

    def forward(self, model, sample):
        """Compute the loss for the given sample.
        Returns a tuple with three elements:
        1) the loss
        2) the sample size, which is used as the denominator for the gradient
        3) logging outputs to display while training
        """
        outputs = self.mmtask(model, sample)

        loss, loss_scalar, max_len, batch_size, sample_size = (
            outputs["loss"],
            outputs["loss_scalar"],
            outputs["max_len"],
            outputs["batch_size"],
            outputs["sample_size"],
        )

        logging_output = {
            "loss": loss_scalar,
            "ntokens": max_len * batch_size,  # dummy report.
            "nsentences": batch_size,  # dummy report.
            "sample_size": sample_size,
        }

        return loss, 1, logging_output

    @staticmethod
    def reduce_metrics(logging_outputs) -> None:
        """Aggregate logging outputs from data parallel training."""
        # since we use NCE, the actual batch_size is 1 per GPU,
        # so we take the mean over workers here.
        loss_sum = sum(log.get("loss", 0.0) for log in logging_outputs)
        sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
        metrics.log_scalar("loss", loss_sum / sample_size, round=3)

    @staticmethod
    def logging_outputs_can_be_summed() -> bool:
        """
        Whether the logging outputs returned by `forward` can be summed
        across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
        """
        return True
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/losses/fairseqmmloss.py
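Because the wrapped task reports sample_size = 1 per worker, MMCriterion.reduce_metrics effectively logs the mean loss across workers. A tiny sketch of that aggregation with hand-written logging outputs (not repository code):

logging_outputs = [
    {"loss": 2.0, "sample_size": 1},   # worker 0
    {"loss": 4.0, "sample_size": 1},   # worker 1
]
loss_sum = sum(log.get("loss", 0.0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
print(round(loss_sum / sample_size, 3))   # 3.0, the mean over workers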
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ softmax-based NCE loss, used by this project. """ import torch from torch import nn from .loss import Loss class NCE(Loss): def __init__(self): # TODO (huxu): define temperature. self.loss = nn.CrossEntropyLoss() def __call__(self, align_scores, **kargs): # note: we reuse the same shape as cls head in BERT (batch_size, 2) # but NCE only needs one logits. # (so we drop all weights in the second neg logits.) align_scores = align_scores[:, :1] # duplicate negative examples batch_size = align_scores.size(0) // 2 pos_scores = align_scores[:batch_size] neg_scores = align_scores[batch_size:].view(1, batch_size).repeat( batch_size, 1) scores = torch.cat([pos_scores, neg_scores], dim=1) return self.loss( scores, torch.zeros( (batch_size,), dtype=torch.long, device=align_scores.device), ) class T2VContraLoss(Loss): """NCE for MM joint space, on softmax text2video matrix. """ def __init__(self): # TODO (huxu): define temperature. self.loss = nn.CrossEntropyLoss() def __call__(self, pooled_video, pooled_text, **kargs): batch_size = pooled_video.size(0) logits = torch.mm(pooled_text, pooled_video.transpose(1, 0)) targets = torch.arange( batch_size, dtype=torch.long, device=pooled_video.device) return self.loss(logits, targets) class V2TContraLoss(Loss): """NCE for MM joint space, with softmax on video2text matrix.""" def __init__(self): # TODO (huxu): define temperature. self.loss = nn.CrossEntropyLoss() def __call__(self, pooled_video, pooled_text, **kargs): batch_size = pooled_video.size(0) logits = torch.mm(pooled_video, pooled_text.transpose(1, 0)) targets = torch.arange( batch_size, dtype=torch.long, device=pooled_video.device) return self.loss(logits, targets) class MMContraLoss(Loss): def __init__(self): self.loss = nn.CrossEntropyLoss() def __call__(self, pooled_video, pooled_text, **kwargs): logits_per_video = pooled_video @ pooled_text.t() logits_per_text = pooled_text @ pooled_video.t() targets = torch.arange( pooled_video.size(0), dtype=torch.long, device=pooled_video.device) loss_video = self.loss(logits_per_video, targets) loss_text = self.loss(logits_per_text, targets) return loss_video + loss_text class MTM(Loss): """Combination of MFM and MLM.""" def __init__(self): self.loss = nn.CrossEntropyLoss() def __call__( self, video_logits, text_logits, video_label, text_label, **kwargs ): text_logits = torch.cat([ text_logits, torch.zeros( (text_logits.size(0), 1), device=text_logits.device) ], dim=1) vt_logits = torch.cat([video_logits, text_logits], dim=0) # loss for video. video_label = torch.zeros( (video_logits.size(0),), dtype=torch.long, device=video_logits.device ) # loss for text. text_label = text_label.reshape(-1) labels_mask = text_label != -100 selected_text_label = text_label[labels_mask] vt_label = torch.cat([video_label, selected_text_label], dim=0) return self.loss(vt_logits, vt_label) class MFMMLM(Loss): """Combination of MFM and MLM.""" def __init__(self): self.loss = nn.CrossEntropyLoss() def __call__( self, video_logits, text_logits, video_label, text_label, **kwargs ): # loss for video. video_label = torch.zeros( (video_logits.size(0),), dtype=torch.long, device=video_logits.device ) masked_frame_loss = self.loss(video_logits, video_label) # loss for text. 
text_label = text_label.reshape(-1) labels_mask = text_label != -100 selected_text_label = text_label[labels_mask] masked_lm_loss = self.loss(text_logits, selected_text_label) return masked_frame_loss + masked_lm_loss
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/losses/nce.py
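T2VContraLoss and V2TContraLoss implement in-batch contrastive learning: the text-video similarity matrix is used as logits and the diagonal entries (the matching pairs) are the cross-entropy targets. A minimal standalone sketch with random toy tensors:

import torch
from torch import nn

batch_size, dim = 4, 16
pooled_video = torch.randn(batch_size, dim)
pooled_text = torch.randn(batch_size, dim)

logits = torch.mm(pooled_text, pooled_video.transpose(1, 0))   # B x B similarities
targets = torch.arange(batch_size, dtype=torch.long)           # i-th text matches i-th video
loss = nn.CrossEntropyLoss()(logits, targets)
print(loss.item())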
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from .. import tasks from .. import models from .. import losses from ..datasets import MMDataset from .. import processors class Task(object): """ A task refers to one generic training task (e.g., training one model). """ @classmethod def config_task(cls, config): """ determine whether to load a hard-coded task or config from a generic one. via if a task string is available in config. """ if config.task is not None: # TODO (huxu): expand the search scope. task_cls = getattr(tasks, config.task) return task_cls(config) else: return Task(config) def __init__(self, config): self.config = config self.train_data = None self.val_data = None self.test_data = None self.model = None self.loss_fn = None self.eval_fn = None def build_dataset(self): """TODO (huxu): move processor breakdown to MMDataset.""" """fill-in `self.train_data`, `self.val_data` and `self.test_data`.""" meta_processor_cls = getattr( processors, self.config.dataset.meta_processor) video_processor_cls = getattr( processors, self.config.dataset.video_processor) text_processor_cls = getattr( processors, self.config.dataset.text_processor) aligner_cls = getattr( processors, self.config.dataset.aligner) if self.config.dataset.train_path is not None: self.config.dataset.split = "train" # may be used by meta processor. # meta_processor controls different dataset. meta_processor = meta_processor_cls(self.config.dataset) video_processor = video_processor_cls(self.config.dataset) text_processor = text_processor_cls(self.config.dataset) aligner = aligner_cls(self.config.dataset) self.train_data = MMDataset( meta_processor, video_processor, text_processor, aligner ) print("train_len", len(self.train_data)) output = self.train_data[0] self.train_data.print_example(output) if self.config.dataset.val_path is not None: self.config.dataset.split = "valid" # may be used by meta processor. meta_processor = meta_processor_cls(self.config.dataset) video_processor = video_processor_cls(self.config.dataset) text_processor = text_processor_cls(self.config.dataset) aligner = aligner_cls(self.config.dataset) self.val_data = MMDataset( meta_processor, video_processor, text_processor, aligner ) print("val_len", len(self.val_data)) output = self.val_data[0] self.val_data.print_example(output) if self.config.dataset.split == "test": # the following is run via lauching fairseq-validate. meta_processor = meta_processor_cls(self.config.dataset) video_processor = video_processor_cls(self.config.dataset) text_processor = text_processor_cls(self.config.dataset) self.test_data = MMDataset( meta_processor, video_processor, text_processor, aligner ) print("test_len", len(self.test_data)) output = self.test_data[0] self.test_data.print_example(output) def build_model(self, checkpoint=None): if self.model is None: model_cls = getattr(models, self.config.model.model_cls) self.model = model_cls(self.config) if checkpoint is not None: self.load_checkpoint(checkpoint) return self.model def load_checkpoint(self, checkpoint): if self.model is None: raise ValueError("model is not initialized.") state_dict = torch.load(checkpoint) state_dict = self._trim_state_dict(state_dict) self.model.load_state_dict(state_dict, strict=False) # if it's a fp16 model, turn it back. 
if next(self.model.parameters()).dtype == torch.float16: self.model = self.model.float() return self.model def _trim_state_dict(self, state_dict): from collections import OrderedDict if "state_dict" in state_dict: state_dict = state_dict["state_dict"] if "model" in state_dict: # fairseq checkpoint format. state_dict = state_dict["model"] ret_state_dict = OrderedDict() for ( key, value, ) in state_dict.items(): # remove fairseq wrapper since this is a task. if key.startswith("mmmodel"): key = key[len("mmmodel."):] ret_state_dict[key] = value return ret_state_dict def build_loss(self): if self.loss_fn is None and self.config.loss is not None: loss_cls = getattr(losses, self.config.loss.loss_cls) self.loss_fn = loss_cls() return self.loss_fn def flat_subsample(self, tensor): size = tensor.size() if len(size) >= 2: batch_size = size[0] * size[1] expanded_size = ( (batch_size,) + size[2:] if len(size) > 2 else (batch_size,) ) tensor = tensor.view(expanded_size) return tensor def reshape_subsample(self, sample): if ( hasattr(self.config.dataset, "subsampling") and self.config.dataset.subsampling is not None and self.config.dataset.subsampling > 1 ): for key in sample: if torch.is_tensor(sample[key]): sample[key] = self.flat_subsample(sample[key]) return sample def __call__(self, model, sample): loss = None loss_scalar = float("inf") sample = self.reshape_subsample(sample) outputs = self.model(**sample) sample.update(outputs) if self.loss_fn is not None: loss = self.loss_fn(**sample) loss_scalar = loss.item() batch_size = sample["caps"].size(0) sample_size = 1 return { "loss": loss, "loss_scalar": loss_scalar, "max_len": self.config.dataset.max_len, "batch_size": batch_size, "sample_size": sample_size, } def build_dataloader(self): """only used for trainer that lacks building loaders.""" raise NotImplementedError
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/tasks/task.py
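Task._trim_state_dict unwraps fairseq checkpoints before loading them into the bare task model: it descends into the nested "model" (or "state_dict") entry and strips the "mmmodel." wrapper prefix from every key. A standalone sketch of that trimming on a fake checkpoint dict:

from collections import OrderedDict

# fake fairseq-style checkpoint (weights replaced by floats for brevity)
checkpoint = {"model": {"mmmodel.encoder.weight": 1.0, "mmmodel.head.bias": 2.0}}

state_dict = checkpoint
if "state_dict" in state_dict:
    state_dict = state_dict["state_dict"]
if "model" in state_dict:              # fairseq checkpoint format
    state_dict = state_dict["model"]

trimmed = OrderedDict()
for key, value in state_dict.items():
    if key.startswith("mmmodel"):      # drop the fairseq wrapper prefix
        key = key[len("mmmodel."):]
    trimmed[key] = value

print(list(trimmed))   # ['encoder.weight', 'head.bias']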
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ make a general fairseq task for MM pretraining. """ import random from fairseq.tasks import LegacyFairseqTask, register_task from .task import Task from .retritask import RetriTask from ..datasets import FairseqMMDataset from .. import utils @register_task("mmtask") class FairseqMMTask(LegacyFairseqTask): @staticmethod def add_args(parser): # Add some command-line arguments for specifying where the data is # located and the maximum supported input length. parser.add_argument( "taskconfig", metavar="FILE", help=("taskconfig to load all configurations" "outside fairseq parser."), ) @classmethod def setup_task(cls, args, **kwargs): return FairseqMMTask(args) def __init__(self, args): super().__init__(args) config = utils.load_config(args) self.mmtask = Task.config_task(config) self.mmtask.build_dataset() self.mmtask.build_model() self.mmtask.build_loss() def load_dataset(self, split, **kwargs): split_map = { "train": self.mmtask.train_data, "valid": self.mmtask.val_data, "test": self.mmtask.test_data, } if split not in split_map: raise ValueError("unknown split type.") if split_map[split] is not None: self.datasets[split] = FairseqMMDataset(split_map[split]) def get_batch_iterator( self, dataset, max_tokens=None, max_sentences=None, max_positions=None, ignore_invalid_inputs=False, required_batch_size_multiple=1, seed=1, num_shards=1, shard_id=0, num_workers=0, epoch=1, data_buffer_size=0, disable_iterator_cache=False, skip_remainder_batch=False, grouped_shuffling=False, update_epoch_batch_itr=False, ): random.seed(epoch) if dataset.mmdataset.split == "train" and isinstance(self.mmtask, RetriTask): if epoch >= self.mmtask.config.retri_epoch: if not hasattr(self.mmtask, "retri_dataloader"): self.mmtask.build_dataloader() self.mmtask.retrive_candidates(epoch) return super().get_batch_iterator( dataset, max_tokens, max_sentences, max_positions, ignore_invalid_inputs, required_batch_size_multiple, seed, num_shards, shard_id, num_workers, epoch, data_buffer_size, disable_iterator_cache, grouped_shuffling, update_epoch_batch_itr, ) @property def source_dictionary(self): return None @property def target_dictionary(self): return None
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/tasks/fairseqmmtask.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .task import *
from .vlmtask import *
from .retritask import *

try:
    from .fairseqmmtask import *
except ImportError:
    pass

try:
    from .milncetask import *
except ImportError:
    pass

try:
    from .expretritask import *
except ImportError:
    pass
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/tasks/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import torch import pickle import random from tqdm import tqdm from torch.utils.data import DataLoader from torch.utils.data.distributed import DistributedSampler from ..processors import ( ShardedHow2MetaProcessor, ShardedVideoProcessor, ShardedTextProcessor, VariedLenAligner, ) from ..datasets import MMDataset from .task import Task from ..modules import vectorpool from ..evaluators.predictor import Predictor from ..utils import set_seed, get_local_rank, get_world_size class RetriTask(Task): """abstract class for task with retrival.""" def reshape_subsample(self, sample): for key in sample: if torch.is_tensor(sample[key]): sample[key] = self.flat_subsample(sample[key]) return sample def flat_subsample(self, tensor): if tensor.size(0) == 1: tensor = tensor.squeeze(0) return tensor def build_dataloader(self): """called by `get_batch_iterator` in fairseqmmtask. """ # TODO: hard-code dataloader for retri for now and configurable in .yaml. # reuse the `train.lst`. self.config.dataset.split = "train" meta_processor = ShardedHow2MetaProcessor(self.config.dataset) video_processor = ShardedVideoProcessor(self.config.dataset) text_processor = ShardedTextProcessor(self.config.dataset) aligner = VariedLenAligner(self.config.dataset) aligner.subsampling = self.config.dataset.clip_per_video self.retri_data = MMDataset( meta_processor, video_processor, text_processor, aligner ) retri_sampler = DistributedSampler(self.retri_data) infer_scale = 16 batch_size = self.config.dataset.num_video_per_batch \ * infer_scale self.retri_dataloader = DataLoader( self.retri_data, collate_fn=self.retri_data.collater, batch_size=batch_size, shuffle=False, sampler=retri_sampler, num_workers=self.config.fairseq.dataset.num_workers ) return self.retri_dataloader def retrive_candidates(self, epoch, dataloader=None): if get_local_rank() == 0: print("running retrieval model.") out_dir = os.path.join( self.config.fairseq.checkpoint.save_dir, "retri") os.makedirs(out_dir, exist_ok=True) if not os.path.isfile( os.path.join( out_dir, "batched_e" + str(epoch) + "_videos0.pkl") ): if dataloader is None: dataloader = self.retri_dataloader self.model.eval() self.model.is_train = False assert self.retri_data.meta_processor.data == \ self.train_data.meta_processor.data # video_ids not mutated. self._retri_predict(epoch, dataloader) self.model.train() self.model.is_train = True torch.distributed.barrier() output = self._retri_sync(epoch, out_dir) torch.distributed.barrier() self.train_data.meta_processor.set_candidates(output) return output class VideoRetriTask(RetriTask): """RetriTask on video level.""" def reshape_subsample(self, sample): if ( hasattr(self.config.dataset, "clip_per_video") and self.config.dataset.clip_per_video is not None and self.config.dataset.clip_per_video > 1 ): for key in sample: if torch.is_tensor(sample[key]): sample[key] = self.flat_subsample(sample[key]) return sample def flat_subsample(self, tensor): if tensor.size(0) == 1: tensor = tensor.squeeze(0) return Task.flat_subsample(self, tensor) def _retri_predict(self, epoch, dataloader): set_seed(epoch) # save for retrival. predictor = VideoPredictor(self.config) predictor.predict_loop( self.model, dataloader) set_seed(epoch) # get the same text clips. # retrival. 
retri_predictor = VideoRetriPredictor( self.config) retri_predictor.predict_loop( self.model, predictor.vecpool.retriver, epoch) del predictor del retri_predictor def _retri_sync(self, epoch, out_dir): # gpu do the same merge. batched_videos = [] for local_rank in range(get_world_size()): fn = os.path.join( out_dir, "batched_e" + str(epoch) + "_videos" + str(local_rank) + ".pkl") with open(fn, "rb") as fr: batched_videos.extend(pickle.load(fr)) print( "[INFO] batched_videos", len(batched_videos), len(batched_videos[0])) return batched_videos class VideoPredictor(Predictor): def __init__(self, config): vectorpool_cls = getattr(vectorpool, config.vectorpool_cls) self.vecpool = vectorpool_cls(config) def predict_loop( self, model, dataloader, early_stop=-1, ): with torch.no_grad(): if get_local_rank() == 0: dataloader = tqdm(dataloader) for batch_idx, batch in enumerate(dataloader): if batch_idx == early_stop: break self(batch, model) return self.finalize() def __call__(self, sample, model, **kwargs): param = next(model.parameters()) dtype = param.dtype device = param.device subsample = sample["vfeats"].size(1) sample = self.to_ctx(sample, device, dtype) for key in sample: if torch.is_tensor(sample[key]): size = sample[key].size() if len(size) >= 2: batch_size = size[0] * size[1] expanded_size = ( (batch_size,) + size[2:] if len(size) > 2 else (batch_size,) ) sample[key] = sample[key].view(expanded_size) outputs = model(**sample) sample.update(outputs) self.vecpool(sample, subsample) def finalize(self): print("[INFO]", self.vecpool) if not self.vecpool.retriver.db.is_trained: self.vecpool.retriver.finalize_training() return self.vecpool.retriver class VideoRetriPredictor(Predictor): """ Online Retrieval Predictor for Clips (used by RetriTask). TODO: merge this with VisPredictor? """ def __init__(self, config): self.pred_dir = os.path.join( config.fairseq.checkpoint.save_dir, "retri") self.num_cands = config.num_cands self.num_video_per_batch = config.dataset.num_video_per_batch def predict_loop( self, model, retriver, epoch, early_stop=-1 ): # a fake loop that only try to recover video vector # from video_id. batched_videos = [] # obtain available video_ids. video_ids = list(retriver.videoid_to_vectoridx.keys()) dataloader = random.sample( video_ids, len(video_ids) // self.num_video_per_batch ) if get_local_rank() == 0: dataloader = tqdm(dataloader) for batch_idx, batch in enumerate(dataloader): # batch is one video id. if batch_idx == early_stop: break video_ids = retriver.search_by_video_ids( [batch], self.num_cands)[0] if len(video_ids) > self.num_video_per_batch: # we moved the center to make cluster robust. video_ids = random.sample(video_ids, self.num_video_per_batch) batched_videos.append(video_ids) return self.finalize(batched_videos, epoch) def finalize(self, batched_videos, epoch): fn = os.path.join( self.pred_dir, "batched_e" + str(epoch) + "_videos" + str(get_local_rank()) + ".pkl") with open(fn, "wb") as fw: pickle.dump(batched_videos, fw, pickle.HIGHEST_PROTOCOL) return batched_videos
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/tasks/retritask.py
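RetriTask exchanges retrieval results between ranks through per-rank pickle files named batched_e{epoch}_videos{rank}.pkl: each rank writes its own file, and after a barrier every rank reads all of them back and concatenates the batches. A self-contained sketch of that round-trip using a temporary directory and toy video ids (not repository code):

import os
import pickle
import tempfile

epoch, world_size = 1, 2
out_dir = tempfile.mkdtemp()

# what each rank would write in VideoRetriPredictor.finalize()
for rank in range(world_size):
    fn = os.path.join(out_dir, "batched_e" + str(epoch) + "_videos" + str(rank) + ".pkl")
    with open(fn, "wb") as fw:
        pickle.dump([["vid%d_%d" % (rank, i) for i in range(3)]], fw, pickle.HIGHEST_PROTOCOL)

# what _retri_sync does on every rank after the barrier
batched_videos = []
for rank in range(world_size):
    fn = os.path.join(out_dir, "batched_e" + str(epoch) + "_videos" + str(rank) + ".pkl")
    with open(fn, "rb") as fr:
        batched_videos.extend(pickle.load(fr))

print(len(batched_videos), len(batched_videos[0]))   # 2 3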
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from .task import Task


class MILNCETask(Task):
    def reshape_subsample(self, sample):
        if (
            hasattr(self.config.dataset, "subsampling")
            and self.config.dataset.subsampling is not None
            and self.config.dataset.subsampling > 1
        ):
            for key in sample:
                if torch.is_tensor(sample[key]):
                    tensor = self.flat_subsample(sample[key])
                    if key in ["caps", "cmasks"]:
                        size = tensor.size()
                        batch_size = size[0] * size[1]
                        expanded_size = (batch_size,) + size[2:]
                        tensor = tensor.view(expanded_size)
                    sample[key] = tensor
        return sample
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/tasks/milncetask.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import torch

from .task import Task


class VLMTask(Task):
    """A VLM task for reproducibility.

    The collator splits subsamples into two sub-batches.
    This should introduce no logic changes, but it changes
    the randomness in frame masking.
    """

    def flat_subsample(self, tensor):
        size = tensor.size()
        if len(size) >= 2:
            batch_size = size[0] * (size[1] // 2)
            expanded_size = (
                (batch_size, 2) + size[2:] if len(size) > 2
                else (batch_size, 2)
            )
            tensor = tensor.view(expanded_size)
            tensor = torch.cat([tensor[:, 0], tensor[:, 1]], dim=0)
        return tensor
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/tasks/vlmtask.py
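VLMTask.flat_subsample regroups a (batch, subsample, ...) tensor into pairs and stacks the two halves along the batch dimension, which is how the collated subsamples end up split into two sub-batches. A toy check of that reshaping:

import torch

tensor = torch.arange(2 * 4).view(2, 4)       # batch=2, subsample=4

size = tensor.size()
batch_size = size[0] * (size[1] // 2)
expanded_size = (batch_size, 2) + size[2:] if len(size) > 2 else (batch_size, 2)
tensor = tensor.view(expanded_size)           # pairs: (4, 2)
tensor = torch.cat([tensor[:, 0], tensor[:, 1]], dim=0)

print(tensor)   # tensor([0, 2, 4, 6, 1, 3, 5, 7])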
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ TODO (huxu): fairseq wrapper class for all dataset you defined: mostly MMDataset. """ from collections import OrderedDict from torch.utils.data import Dataset from torch.utils.data.dataloader import default_collate from fairseq.data import FairseqDataset, data_utils class FairseqMMDataset(FairseqDataset): """ A wrapper class for MMDataset for fairseq. """ def __init__(self, mmdataset): if not isinstance(mmdataset, Dataset): raise TypeError("mmdataset must be of type `torch.utils.data.dataset`.") self.mmdataset = mmdataset def set_epoch(self, epoch, **unused): super().set_epoch(epoch) self.epoch = epoch def __getitem__(self, idx): with data_utils.numpy_seed(43211, self.epoch, idx): return self.mmdataset[idx] def __len__(self): return len(self.mmdataset) def collater(self, samples): if hasattr(self.mmdataset, "collator"): return self.mmdataset.collator(samples) if len(samples) == 0: return {} if isinstance(samples[0], dict): batch = OrderedDict() for key in samples[0]: if samples[0][key] is not None: batch[key] = default_collate([sample[key] for sample in samples]) return batch else: return default_collate(samples) def size(self, index): """dummy implementation: we don't use --max-tokens""" return 1 def num_tokens(self, index): """dummy implementation: we don't use --max-tokens""" return 1
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/datasets/fairseqmmdataset.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .mmdataset import *

try:
    from .fairseqmmdataset import *
except ImportError:
    pass
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/datasets/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import torch from collections import OrderedDict from torch.utils.data import Dataset from torch.utils.data.dataloader import default_collate from ..utils import set_seed class MMDataset(Dataset): """ A generic multi-modal dataset. Args: `meta_processor`: a meta processor, handling loading meta data and return video_id and text_id. `video_processor`: a video processor, handling e.g., decoding, loading .np files. `text_processor`: a text processor, handling e.g., tokenization. `aligner`: combine the video and text feature as one training example. """ def __init__( self, meta_processor, video_processor, text_processor, align_processor, ): self.split = meta_processor.split self.meta_processor = meta_processor self.video_processor = video_processor self.text_processor = text_processor self.align_processor = align_processor def __len__(self): return len(self.meta_processor) def __getitem__(self, idx): if self.split == "test": set_seed(idx) video_id, text_id = self.meta_processor[idx] video_feature = self.video_processor(video_id) text_feature = self.text_processor(text_id) output = self.align_processor(video_id, video_feature, text_feature) # TODO (huxu): the following is for debug purpose. output.update({"idx": idx}) return output def collater(self, samples): """This collator is deprecated. set self.collator = MMDataset.collater. see collator in FairseqMMDataset. """ if len(samples) == 0: return {} if isinstance(samples[0], dict): batch = OrderedDict() for key in samples[0]: if samples[0][key] is not None: batch[key] = default_collate( [sample[key] for sample in samples]) # if torch.is_tensor(batch[key]): # print(key, batch[key].size()) # else: # print(key, len(batch[key])) return batch else: return default_collate(samples) def print_example(self, output): print("[one example]", output["video_id"]) if ( hasattr(self.align_processor, "subsampling") and self.align_processor.subsampling is not None and self.align_processor.subsampling > 1 ): for key in output: if torch.is_tensor(output[key]): output[key] = output[key][0] # search tokenizer to translate ids back. tokenizer = None if hasattr(self.text_processor, "tokenizer"): tokenizer = self.text_processor.tokenizer elif hasattr(self.align_processor, "tokenizer"): tokenizer = self.align_processor.tokenizer if tokenizer is not None: caps = output["caps"].tolist() if isinstance(caps[0], list): caps = caps[0] print("caps", tokenizer.decode(caps)) print("caps", tokenizer.convert_ids_to_tokens(caps)) for key, value in output.items(): if torch.is_tensor(value): if len(value.size()) >= 3: # attention_mask. print(key, value.size()) print(key, "first", value[0, :, :]) print(key, "last", value[-1, :, :]) else: print(key, value) print("[end of one example]")
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/datasets/mmdataset.py
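A sketch of the processor pipeline MMDataset expects (the Toy* classes below are illustrative stand-ins, not MMPT processors, and the import path is assumed): the meta processor maps an index to (video_id, text_id), the video and text processors load the corresponding features, and the align processor fuses them into a single training example dict.

import torch
from mmpt.datasets import MMDataset  # assumed import path


class ToyMeta:
    split = "train"

    def __len__(self):
        return 2

    def __getitem__(self, idx):
        return "vid%d" % idx, "txt%d" % idx


class ToyVideo:
    def __call__(self, video_id):
        return torch.zeros(16, 512)   # (num_frames, feature_dim), illustrative


class ToyText:
    def __call__(self, text_id):
        return [101, 2023, 102]       # toy token ids


class ToyAligner:
    def __call__(self, video_id, vfeat, tfeat):
        return {"video_id": video_id, "vfeats": vfeat, "caps": torch.tensor(tfeat)}


ds = MMDataset(ToyMeta(), ToyVideo(), ToyText(), ToyAligner())
print(len(ds), ds[0]["vfeats"].shape)   # 2 torch.Size([16, 512])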
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import random
import numpy as np
import torch

from .shardedtensor import *
from .load_config import *


def set_seed(seed=43211):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    if torch.backends.cudnn.enabled:
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True


def get_world_size():
    if torch.distributed.is_initialized():
        world_size = torch.distributed.get_world_size()
    else:
        world_size = 1
    return world_size


def get_local_rank():
    return torch.distributed.get_rank() \
        if torch.distributed.is_initialized() else 0


def print_on_rank0(func):
    local_rank = get_local_rank()
    if local_rank == 0:
        print("[INFO]", func)


class RetriMeter(object):
    """
    Statistics on whether retrieval yields a better pair.
    """
    def __init__(self, freq=1024):
        self.freq = freq
        self.total = 0
        self.replace = 0
        self.updates = 0

    def __call__(self, data):
        if isinstance(data, np.ndarray):
            self.replace += data.shape[0] - int((data[:, 0] == -1).sum())
            self.total += data.shape[0]
        elif torch.is_tensor(data):
            self.replace += int(data.sum())
            self.total += data.size(0)
        else:
            raise ValueError("unsupported RetriMeter data type.", type(data))

        self.updates += 1
        if get_local_rank() == 0 and self.updates % self.freq == 0:
            print("[INFO]", self)

    def __repr__(self):
        return "RetriMeter (" + str(self.replace / self.total) \
            + "/" + str(self.replace) + "/" + str(self.total) + ")"
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/utils/__init__.py
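A small sketch of the utilities above, assuming they are importable as `from mmpt.utils import set_seed, RetriMeter`. The numpy array fed to RetriMeter follows the convention implied by its numpy branch, where a -1 in column 0 marks a sample whose retrieved pair was not used; the values are illustrative.

import numpy as np
from mmpt.utils import set_seed, RetriMeter  # assumed import path

set_seed(43211)                     # seeds python/numpy/torch and sets cudnn flags

meter = RetriMeter(freq=1)          # print stats on every update
retrieved = np.array([[-1, 0],      # -1 in column 0: no replacement for this sample
                      [3, 7],
                      [5, 2]])
meter(retrieved)                    # replace=2, total=3
print(meter)                        # RetriMeter (0.666.../2/3)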
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import os
import omegaconf
from omegaconf import OmegaConf


def load_config(args=None, config_file=None, overwrite_fairseq=False):
    """TODO (huxu): move fairseq overwrite to another function."""
    if args is not None:
        config_file = args.taskconfig
    config = recursive_config(config_file)

    if config.dataset.subsampling is not None:
        batch_size = config.fairseq.dataset.batch_size // config.dataset.subsampling
        print(
            "adjusting batch_size to {} due to subsampling {}.".format(
                batch_size, config.dataset.subsampling
            )
        )
        config.fairseq.dataset.batch_size = batch_size

    is_test = config.dataset.split is not None and config.dataset.split == "test"
    if not is_test:
        if (
            config.fairseq.checkpoint is None
            or config.fairseq.checkpoint.save_dir is None
        ):
            raise ValueError("fairseq save_dir or save_path must be specified.")

        save_dir = config.fairseq.checkpoint.save_dir
        os.makedirs(save_dir, exist_ok=True)
        if config.fairseq.common.tensorboard_logdir is not None:
            tb_run_dir = suffix_rundir(
                save_dir, config.fairseq.common.tensorboard_logdir
            )
            config.fairseq.common.tensorboard_logdir = tb_run_dir
            print(
                "update tensorboard_logdir as",
                config.fairseq.common.tensorboard_logdir
            )
        os.makedirs(save_dir, exist_ok=True)
        OmegaConf.save(config=config, f=os.path.join(save_dir, "config.yaml"))

    if overwrite_fairseq and config.fairseq is not None and args is not None:
        # flatten fields.
        for group in config.fairseq:
            for field in config.fairseq[group]:
                print("overwrite args." + field, "as", config.fairseq[group][field])
                setattr(args, field, config.fairseq[group][field])
    return config


def recursive_config(config_path):
    """allows for stacking of configs in any depth."""
    config = OmegaConf.load(config_path)
    if config.includes is not None:
        includes = config.includes
        config.pop("includes")
        base_config = recursive_config(includes)
        config = OmegaConf.merge(base_config, config)
    return config


def suffix_rundir(save_dir, run_dir):
    max_id = -1
    for search_dir in os.listdir(save_dir):
        if search_dir.startswith(run_dir):
            splits = search_dir.split("_")
            cur_id = int(splits[1]) if len(splits) > 1 else 0
            max_id = max(max_id, cur_id)
    return os.path.join(save_dir, run_dir + "_" + str(max_id + 1))


def overwrite_dir(config, replace, basedir):
    for key in config:
        if isinstance(config[key], str) and config[key].startswith(basedir):
            config[key] = config[key].replace(basedir, replace)
        if isinstance(config[key], omegaconf.dictconfig.DictConfig):
            overwrite_dir(config[key], replace, basedir)
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/utils/load_config.py
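A sketch of the `includes` stacking that recursive_config implements, using in-memory configs instead of yaml files (the path string is illustrative): the child config names a base config via `includes`, and after OmegaConf.merge the child's fields override the base while untouched base fields survive.

from omegaconf import OmegaConf

# what a base yaml and a child yaml would contain
base = OmegaConf.create({"dataset": {"split": "train", "subsampling": 1}})
child = OmegaConf.create({
    "includes": "projects/base.yaml",   # resolved recursively by recursive_config
    "dataset": {"split": "test"},
})

child.pop("includes")
merged = OmegaConf.merge(base, child)   # child overrides base
print(merged.dataset.split, merged.dataset.subsampling)   # test 1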
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np


class ShardedTensor(object):
    def __init__(self, data, starts):
        self.data = data
        self.starts = starts
        assert self.starts[0] == 0
        assert self.starts[-1] == len(self.data)
        assert (self.starts[1:] >= self.starts[:-1]).all()
        assert (self.starts > -1).all()

    @staticmethod
    def from_list(xs):
        # `np.long` was deprecated and later removed from NumPy;
        # use an explicit 64-bit integer dtype for the offsets.
        starts = np.full((len(xs) + 1,), -1, dtype=np.int64)
        data = np.concatenate(xs, axis=0)
        starts[0] = 0
        for i, x in enumerate(xs):
            starts[i + 1] = starts[i] + x.shape[0]
        assert (starts > -1).all()
        return ShardedTensor(data, starts)

    def __getitem__(self, i):
        return self.data[self.starts[i] : self.starts[i + 1]]

    def __len__(self):
        return len(self.starts) - 1

    def lengths(self):
        return self.starts[1:] - self.starts[:-1]

    def save(self, path):
        np.save(path + "_starts", self.starts)
        np.save(path + "_data", self.data)

    @staticmethod
    def load(path, mmap_mode=None):
        starts = np.load(path + "_starts.npy", mmap_mode=mmap_mode)
        data = np.load(path + "_data.npy", mmap_mode=mmap_mode)
        return ShardedTensor(data, starts)
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/utils/shardedtensor.py
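A quick sketch of ShardedTensor (assuming it is importable as `from mmpt.utils import ShardedTensor`): variable-length clips are packed into one flat array plus a `starts` offset vector, so shard i is data[starts[i]:starts[i+1]]. The shapes and the /tmp path below are illustrative.

import numpy as np
from mmpt.utils import ShardedTensor  # assumed import path

clips = [np.random.rand(3, 4), np.random.rand(5, 4), np.random.rand(1, 4)]
st = ShardedTensor.from_list(clips)

print(len(st), st.lengths())   # 3 [3 5 1]
print(st[1].shape)             # (5, 4)

st.save("/tmp/clips")          # writes /tmp/clips_starts.npy and /tmp/clips_data.npy
st2 = ShardedTensor.load("/tmp/clips", mmap_mode="r")
print(st2[2].shape)            # (1, 4), read back through a memory map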
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright (c) Facebook, Inc. All Rights Reserved import torch from torch import nn try: from transformers.modeling_bert import ( BertPreTrainedModel, BertModel, BertEncoder, BertPredictionHeadTransform, ) except ImportError: pass from ..modules import VideoTokenMLP, MMBertEmbeddings # --------------- fine-tuning models --------------- class MMBertForJoint(BertPreTrainedModel): """A BertModel with isolated attention mask to separate modality.""" def __init__(self, config): super().__init__(config) self.videomlp = VideoTokenMLP(config) self.bert = MMBertModel(config) self.init_weights() def forward( self, input_ids=None, input_video_embeds=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, next_sentence_label=None, output_attentions=None, output_hidden_states=None, return_dict=None, separate_forward_split=None, ): return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) video_tokens = self.videomlp(input_video_embeds) outputs = self.bert( input_ids, video_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, separate_forward_split=separate_forward_split, ) return outputs class MMBertForTokenClassification(BertPreTrainedModel): """A BertModel similar to MMJointUni, with extra wrapper layer to be fine-tuned from other pretrained MMFusion model.""" def __init__(self, config): super().__init__(config) self.videomlp = VideoTokenMLP(config) self.bert = MMBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) # TODO(huxu): 779 is the number of classes for COIN: move to config? 
self.classifier = nn.Linear(config.hidden_size, 779) self.init_weights() def forward( self, input_ids=None, input_video_embeds=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, next_sentence_label=None, output_attentions=None, output_hidden_states=None, return_dict=None, separate_forward_split=None, ): return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) video_tokens = self.videomlp(input_video_embeds) outputs = self.bert( input_ids, video_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, separate_forward_split=separate_forward_split, ) return (self.classifier(outputs[0]),) # ------------ pre-training models ---------------- class MMBertForEncoder(BertPreTrainedModel): """A BertModel for Contrastive Learning.""" def __init__(self, config): super().__init__(config) self.videomlp = VideoTokenMLP(config) self.bert = MMBertModel(config) self.init_weights() def forward( self, input_ids=None, input_video_embeds=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) if input_video_embeds is not None: video_tokens = self.videomlp(input_video_embeds) else: video_tokens = None outputs = self.bert( input_ids, video_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return outputs class MMBertForMFMMLM(BertPreTrainedModel): """A BertModel with shared prediction head on MFM-MLM.""" def __init__(self, config): super().__init__(config) self.videomlp = VideoTokenMLP(config) self.bert = MMBertModel(config) self.cls = MFMMLMHead(config) self.hidden_size = config.hidden_size self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def forward( self, input_ids=None, input_video_embeds=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, masked_frame_labels=None, target_video_hidden_states=None, non_masked_frame_mask=None, masked_lm_labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) if input_video_embeds is not None: video_tokens = self.videomlp(input_video_embeds) else: video_tokens = None if target_video_hidden_states is not None: target_video_hidden_states = self.videomlp( target_video_hidden_states) non_masked_frame_hidden_states = video_tokens.masked_select( non_masked_frame_mask.unsqueeze(-1) ).view(-1, self.hidden_size) outputs = self.bert( input_ids, video_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] mfm_scores, prediction_scores = None, None if masked_frame_labels is not None and masked_lm_labels is not None: # split the sequence. 
text_offset = masked_frame_labels.size(1) + 1 # [CLS] video_sequence_output = sequence_output[ :, 1:text_offset ] # remove [SEP] as not in video_label. text_sequence_output = torch.cat( [sequence_output[:, :1], sequence_output[:, text_offset:]], dim=1 ) hidden_size = video_sequence_output.size(-1) selected_video_output = video_sequence_output.masked_select( masked_frame_labels.unsqueeze(-1) ).view(-1, hidden_size) # only compute select tokens to training to speed up. hidden_size = text_sequence_output.size(-1) # masked_lm_labels = masked_lm_labels.reshape(-1) labels_mask = masked_lm_labels != -100 selected_text_output = text_sequence_output.masked_select( labels_mask.unsqueeze(-1) ).view(-1, hidden_size) mfm_scores, prediction_scores = self.cls( selected_video_output, target_video_hidden_states, non_masked_frame_hidden_states, selected_text_output, ) output = ( mfm_scores, prediction_scores, ) + outputs return output class BertMFMMLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear( config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly # resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward( self, video_hidden_states=None, target_video_hidden_states=None, non_masked_frame_hidden_states=None, text_hidden_states=None, ): video_logits, text_logits = None, None if video_hidden_states is not None: video_hidden_states = self.transform(video_hidden_states) non_masked_frame_logits = torch.mm( video_hidden_states, non_masked_frame_hidden_states.transpose(1, 0) ) masked_frame_logits = torch.bmm( video_hidden_states.unsqueeze(1), target_video_hidden_states.unsqueeze(-1), ).squeeze(-1) video_logits = torch.cat( [masked_frame_logits, non_masked_frame_logits], dim=1 ) if text_hidden_states is not None: text_hidden_states = self.transform(text_hidden_states) text_logits = self.decoder(text_hidden_states) return video_logits, text_logits class MFMMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertMFMMLMPredictionHead(config) def forward( self, video_hidden_states=None, target_video_hidden_states=None, non_masked_frame_hidden_states=None, text_hidden_states=None, ): video_logits, text_logits = self.predictions( video_hidden_states, target_video_hidden_states, non_masked_frame_hidden_states, text_hidden_states, ) return video_logits, text_logits class MMBertForMTM(MMBertForMFMMLM): def __init__(self, config): BertPreTrainedModel.__init__(self, config) self.videomlp = VideoTokenMLP(config) self.bert = MMBertModel(config) self.cls = MTMHead(config) self.hidden_size = config.hidden_size self.init_weights() class BertMTMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) self.decoder = nn.Linear( config.hidden_size, config.vocab_size, bias=False) def forward( self, video_hidden_states=None, target_video_hidden_states=None, non_masked_frame_hidden_states=None, text_hidden_states=None, ): non_masked_frame_hidden_states = non_masked_frame_hidden_states.transpose(1, 0) video_logits, text_logits = None, None if video_hidden_states is not None: video_hidden_states = self.transform(video_hidden_states) masked_frame_logits = torch.bmm( 
video_hidden_states.unsqueeze(1), target_video_hidden_states.unsqueeze(-1), ).squeeze(-1) non_masked_frame_logits = torch.mm( video_hidden_states, non_masked_frame_hidden_states ) video_on_vocab_logits = self.decoder(video_hidden_states) video_logits = torch.cat([ masked_frame_logits, non_masked_frame_logits, video_on_vocab_logits], dim=1) if text_hidden_states is not None: text_hidden_states = self.transform(text_hidden_states) # text first so label does not need to be shifted. text_on_vocab_logits = self.decoder(text_hidden_states) text_on_video_logits = torch.mm( text_hidden_states, non_masked_frame_hidden_states ) text_logits = torch.cat([ text_on_vocab_logits, text_on_video_logits ], dim=1) return video_logits, text_logits class MTMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertMTMPredictionHead(config) def forward( self, video_hidden_states=None, target_video_hidden_states=None, non_masked_frame_hidden_states=None, text_hidden_states=None, ): video_logits, text_logits = self.predictions( video_hidden_states, target_video_hidden_states, non_masked_frame_hidden_states, text_hidden_states, ) return video_logits, text_logits class MMBertModel(BertModel): """MMBertModel has MMBertEmbedding to support video tokens.""" def __init__(self, config, add_pooling_layer=True): super().__init__(config) # overwrite embedding self.embeddings = MMBertEmbeddings(config) self.encoder = MultiLayerAttentionMaskBertEncoder(config) self.init_weights() def forward( self, input_ids=None, input_video_embeds=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, separate_forward_split=None, ): output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) if input_ids is not None and inputs_embeds is not None: raise ValueError( "You cannot specify both input_ids " "and inputs_embeds at the same time" ) elif input_ids is not None: if input_video_embeds is not None: input_shape = ( input_ids.size(0), input_ids.size(1) + input_video_embeds.size(1), ) else: input_shape = ( input_ids.size(0), input_ids.size(1), ) elif inputs_embeds is not None: if input_video_embeds is not None: input_shape = ( inputs_embeds.size(0), inputs_embeds.size(1) + input_video_embeds.size(1), ) else: input_shape = ( input_ids.size(0), input_ids.size(1), ) else: raise ValueError( "You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None \ else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros( input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions # [batch_size, from_seq_length, to_seq_length] # ourselves in which case # we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = \ self.get_extended_attention_mask( attention_mask, input_shape, device) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to # [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: ( encoder_batch_size, encoder_sequence_length, _, ) = encoder_hidden_states.size() encoder_hidden_shape = ( encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones( encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask( encoder_attention_mask ) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or # [num_hidden_layers x num_heads] # and head_mask is converted to shape # [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask( head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids, input_video_embeds, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, ) if separate_forward_split is not None: split_embedding_output = \ embedding_output[:, :separate_forward_split] split_extended_attention_mask = extended_attention_mask[ :, :, :, :separate_forward_split, :separate_forward_split ] split_encoder_outputs = self.encoder( split_embedding_output, attention_mask=split_extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) assert ( len(split_encoder_outputs) <= 2 ), "we do not support merge on attention for now." encoder_outputs = [] encoder_outputs.append([split_encoder_outputs[0]]) if len(split_encoder_outputs) == 2: encoder_outputs.append([]) for _all_hidden_states in split_encoder_outputs[1]: encoder_outputs[-1].append([_all_hidden_states]) split_embedding_output = \ embedding_output[:, separate_forward_split:] split_extended_attention_mask = extended_attention_mask[ :, :, :, separate_forward_split:, separate_forward_split: ] split_encoder_outputs = self.encoder( split_embedding_output, attention_mask=split_extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) assert ( len(split_encoder_outputs) <= 2 ), "we do not support merge on attention for now." 
encoder_outputs[0].append(split_encoder_outputs[0]) encoder_outputs[0] = torch.cat(encoder_outputs[0], dim=1) if len(split_encoder_outputs) == 2: for layer_idx, _all_hidden_states in enumerate( split_encoder_outputs[1] ): encoder_outputs[1][layer_idx].append(_all_hidden_states) encoder_outputs[1][layer_idx] = torch.cat( encoder_outputs[1][layer_idx], dim=1 ) encoder_outputs = tuple(encoder_outputs) else: encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = ( self.pooler(sequence_output) if self.pooler is not None else None ) return (sequence_output, pooled_output) + encoder_outputs[1:] def get_extended_attention_mask(self, attention_mask, input_shape, device): """This is borrowed from `modeling_utils.py` with the support of multi-layer attention masks. The second dim is expected to be number of layers. See `MMAttentionMaskProcessor`. Makes broadcastable attention and causal masks so that future and masked tokens are ignored. Arguments: attention_mask (:obj:`torch.Tensor`): Mask with ones indicating tokens to attend to, zeros for tokens to ignore. input_shape (:obj:`Tuple[int]`): The shape of the input to the model. device: (:obj:`torch.device`): The device of the input to the model. Returns: :obj:`torch.Tensor` The extended attention mask, \ with a the same dtype as :obj:`attention_mask.dtype`. """ # We can provide a self-attention mask of dimensions # [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable # to all heads. 
if attention_mask.dim() == 4: extended_attention_mask = attention_mask[:, :, None, :, :] extended_attention_mask = extended_attention_mask.to( dtype=self.dtype ) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) \ * -10000.0 return extended_attention_mask else: return super().get_extended_attention_mask( attention_mask, input_shape, device ) class MultiLayerAttentionMaskBertEncoder(BertEncoder): """extend BertEncoder with the capability of multiple layers of attention mask.""" def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=False, ): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None layer_attention_mask = ( attention_mask[:, i, :, :, :] if attention_mask.dim() == 5 else attention_mask ) if getattr(self.config, "gradient_checkpointing", False): def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, layer_attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, layer_attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) return tuple( v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None )
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/models/transformermodel.py
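A shape-only sketch (illustrative sizes, not MMPT code) of the per-layer attention-mask convention that MMBertModel and MultiLayerAttentionMaskBertEncoder implement above: a 4-D mask of shape (batch, num_layers, seq, seq) is expanded with a singleton head dimension and converted to an additive mask, and encoder layer i then consumes the slice [:, i], while a plain 2-D mask falls back to the stock BERT path.

import torch

batch, num_layers, seq = 2, 12, 36
per_layer_mask = torch.ones(batch, num_layers, seq, seq)

# mirrors MMBertModel.get_extended_attention_mask for attention_mask.dim() == 4
extended = per_layer_mask[:, :, None, :, :]      # (batch, num_layers, 1, seq, seq)
extended = (1.0 - extended) * -10000.0           # additive mask, fp16-friendly

layer0_mask = extended[:, 0]                     # what encoder layer 0 receives
print(extended.shape, layer0_mask.shape)         # (2, 12, 1, 36, 36) (2, 1, 36, 36)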
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .mmfusion import *
from .transformermodel import *
from .mmfusionnlg import *

try:
    from .fairseqmmmodel import *
except ImportError:
    pass

try:
    from .expmmfusion import *
except ImportError:
    pass
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/models/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from fairseq.models import (
    BaseFairseqModel,
    register_model,
    register_model_architecture
)


@register_model("mmmodel")
class FairseqMMModel(BaseFairseqModel):
    """a fairseq wrapper of model built by `task`."""

    @classmethod
    def build_model(cls, args, task):
        return FairseqMMModel(task.mmtask.model)

    def __init__(self, mmmodel):
        super().__init__()
        self.mmmodel = mmmodel

    def forward(self, *args, **kwargs):
        return self.mmmodel(*args, **kwargs)

    def upgrade_state_dict_named(self, state_dict, name):
        super().upgrade_state_dict_named(state_dict, name)

        keys_to_delete = []
        for key in state_dict:
            if key not in self.state_dict():
                keys_to_delete.append(key)
        for key in keys_to_delete:
            print("[INFO]", key, "not used anymore.")
            del state_dict[key]

        # copy any newly defined parameters.
        for key in self.state_dict():
            if key not in state_dict:
                print("[INFO] adding", key)
                state_dict[key] = self.state_dict()[key]


# a dummy arch, we config the model.
@register_model_architecture("mmmodel", "mmarch")
def mmarch(args):
    pass
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/models/fairseqmmmodel.py
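A sketch of the contract FairseqMMModel.build_model relies on (the `dummy_task` namespace and the inner Linear module below are stand-ins, not MMPT code, and the import path is assumed): the task object must expose `mmtask.model`, an nn.Module, and the wrapper simply forwards every call to it.

import torch
import torch.nn as nn
from types import SimpleNamespace

from mmpt.models import FairseqMMModel  # assumed import path

inner = nn.Linear(8, 2)
dummy_task = SimpleNamespace(mmtask=SimpleNamespace(model=inner))

wrapper = FairseqMMModel.build_model(args=None, task=dummy_task)
out = wrapper(torch.zeros(1, 8))   # forwards straight to the wrapped module
print(out.shape)                   # torch.Size([1, 2])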
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright (c) Facebook, Inc. All Rights Reserved import torch from torch.nn import functional as F from typing import Optional, Iterable try: from transformers import BertPreTrainedModel from transformers.modeling_bert import BertOnlyMLMHead from transformers.file_utils import ModelOutput from transformers.modeling_outputs import CausalLMOutput from transformers.generation_utils import ( BeamHypotheses, top_k_top_p_filtering ) except ImportError: pass from .mmfusion import MMFusion from .transformermodel import MMBertModel from ..modules import VideoTokenMLP class MMFusionNLG(MMFusion): def __init__(self, config, **kwargs): super().__init__(config) if config.model.max_decode_length is not None: self.max_length = min( config.model.max_decode_length, config.dataset.max_len - config.dataset.max_video_len - 3 ) else: self.max_length = \ config.dataset.max_len - config.dataset.max_video_len - 3 self.gen_param = config.gen_param if config.gen_param is not None \ else {} def forward( self, caps, cmasks, vfeats, vmasks, attention_mask, video_label=None, text_label=None, **kwargs ): """use pre-trained LM header for generation.""" attention_mask, token_type_ids = self._mm_on_the_fly( cmasks, vmasks, attention_mask) outputs = self.mm_encoder( input_ids=caps, input_video_embeds=vfeats, attention_mask=attention_mask, token_type_ids=token_type_ids, masked_lm_labels=text_label, ) return {"logits": outputs[0]} @torch.no_grad() def generate( self, caps, cmasks, vfeats, vmasks, attention_mask=None, bos_token_id=None, eos_token_id=None, **kwargs ): # a simplified interface from # https://huggingface.co/transformers/v3.4.0/_modules/transformers/generation_utils.html#GenerationMixin.generate # caps now only have # [CLS], [SEP] (for video) and [CLS] (as bos_token) assert caps.size(1) == 3 attention_mask, token_type_ids = self._mm_on_the_fly( cmasks, vmasks, attention_mask) output = self.mm_encoder.generate( input_ids=caps, input_video_embeds=vfeats, attention_mask=attention_mask, token_type_ids=token_type_ids, bos_token_id=bos_token_id, eos_token_id=eos_token_id, max_length=self.max_length, **self.gen_param ) return output class MMBertForNLG(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = MMBertModel(config) self.videomlp = VideoTokenMLP(config) # we do not use `BertGenerationOnlyLMHead` # because we can reuse pretraining. 
self.cls = BertOnlyMLMHead(config) self.hidden_size = config.hidden_size self.init_weights() def get_output_embeddings(self): return self.cls.predictions.decoder def forward( self, input_ids=None, input_video_embeds=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, masked_lm_labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): # similar to MMBertForMFMMLM without MFM. video_tokens = self.videomlp(input_video_embeds) outputs = self.bert( input_ids, video_tokens, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = None if masked_lm_labels is not None: text_offset = input_video_embeds.size(1) + 1 # [CLS] # recover caps format: [CLS] [SEP] text [SEP] text_sequence_output = torch.cat( [sequence_output[:, :1], sequence_output[:, text_offset:]], dim=1 ) # only compute select tokens to training to speed up. hidden_size = text_sequence_output.size(-1) # masked_lm_labels = masked_lm_labels.reshape(-1) labels_mask = masked_lm_labels != -100 selected_text_output = text_sequence_output.masked_select( labels_mask.unsqueeze(-1) ).view(-1, hidden_size) prediction_scores = self.cls(selected_text_output) if not return_dict: output = ( prediction_scores, ) + outputs[2:] return output # for generation. text_offset = input_video_embeds.size(1) + 2 # [CLS] text_sequence_output = sequence_output[:, text_offset:] prediction_scores = self.cls(text_sequence_output) return CausalLMOutput( loss=None, logits=prediction_scores, ) def prepare_inputs_for_generation( self, input_ids, input_video_embeds, attention_mask=None, token_type_ids=None, **model_kwargs ): # must return a dictionary. seq_len = input_ids.size(1) + input_video_embeds.size(1) if attention_mask is not None: if len(attention_mask.size()) == 4: attention_mask = attention_mask[:, :, :seq_len, :seq_len] elif len(attention_mask.size()) == 3: attention_mask = attention_mask[:, :seq_len, :seq_len] else: attention_mask = attention_mask[:, :seq_len] if token_type_ids is not None: token_type_ids = token_type_ids[:, :seq_len] return { "input_ids": input_ids, "input_video_embeds": input_video_embeds, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } @torch.no_grad() def generate( self, input_ids: Optional[torch.LongTensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, max_length: Optional[int] = None, min_length: Optional[int] = None, do_sample: Optional[bool] = None, early_stopping: Optional[bool] = None, num_beams: Optional[int] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, repetition_penalty: Optional[float] = None, bad_words_ids: Optional[Iterable[int]] = None, bos_token_id: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, length_penalty: Optional[float] = None, no_repeat_ngram_size: Optional[int] = None, num_return_sequences: Optional[int] = None, attention_mask: Optional[torch.LongTensor] = None, decoder_start_token_id: Optional[int] = None, use_cache: Optional[bool] = None, **model_kwargs ) -> torch.LongTensor: r""" Generates sequences for models with a language modeling head. 
The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling. Adapted in part from `Facebook's XLM beam search code <https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529>`__. Apart from :obj:`input_ids` and :obj:`attention_mask`, all the arguments below will default to the value of the attribute of the same name inside the :class:`~transformers.PretrainedConfig` of the model. The default values indicated are the default values of those config. Most of these parameters are explained in more detail in `this blog post <https://huggingface.co/blog/how-to-generate>`__. Parameters: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty :obj:`torch.LongTensor` of shape :obj:`(1,)`. decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): initial input_ids for the decoder of encoder-decoder type models. If :obj:`None` then only decoder_start_token_id is passed as the first token to the decoder. max_length (:obj:`int`, `optional`, defaults to 20): The maximum length of the sequence to be generated. min_length (:obj:`int`, `optional`, defaults to 10): The minimum length of the sequence to be generated. do_sample (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use sampling ; use greedy decoding otherwise. early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not. num_beams (:obj:`int`, `optional`, defaults to 1): Number of beams for beam search. 1 means no beam search. temperature (:obj:`float`, `optional`, defaults tp 1.0): The value used to module the next token probabilities. top_k (:obj:`int`, `optional`, defaults to 50): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (:obj:`float`, `optional`, defaults to 1.0): If set to float < 1, only the most probable tokens with probabilities that add up to ``top_p`` or higher are kept for generation. repetition_penalty (:obj:`float`, `optional`, defaults to 1.0): The parameter for repetition penalty. 1.0 means no penalty. See `this paper <https://arxiv.org/pdf/1909.05858.pdf>`__ for more details. pad_token_id (:obj:`int`, `optional`): The id of the `padding` token. bos_token_id (:obj:`int`, `optional`): The id of the `beginning-of-sequence` token. eos_token_id (:obj:`int`, `optional`): The id of the `end-of-sequence` token. length_penalty (:obj:`float`, `optional`, defaults to 1.0): Exponential penalty to the length. 1.0 means no penalty. Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in order to encourage the model to produce longer sequences. no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0): If set to int > 0, all ngrams of that size can only occur once. bad_words_ids(:obj:`List[int]`, `optional`): List of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use :obj:`tokenizer.encode(bad_word, add_prefix_space=True)`. num_return_sequences(:obj:`int`, `optional`, defaults to 1): The number of independently computed returned sequences for each element in the batch. 
attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values are in ``[0, 1]``, 1 for tokens that are not masked, and 0 for masked tokens. If not provided, will default to a tensor the same shape as :obj:`input_ids` that masks the pad token. `What are attention masks? <../glossary.html#attention-mask>`__ decoder_start_token_id (:obj:`int`, `optional`): If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token. use_cache: (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding. model_kwargs: Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model. Return: :obj:`torch.LongTensor` of shape :obj:`(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. Examples:: tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache. outputs = model.generate(max_length=40) # do greedy decoding print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True))) tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache. input_context = 'The dog' input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog' for i in range(3): # 3 output sequences were generated print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True))) tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache. input_context = 'The dog' input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # generate 3 candidates using sampling for i in range(3): # 3 output sequences were generated print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True))) tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache. input_context = 'Legal My neighbor is' # "Legal" is one of the control codes for ctrl input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True))) tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer model = AutoModelWithLMHead.from_pretrained('gpt2') # Download model and configuration from S3 and cache. 
input_context = 'My cute dog' # "Legal" is one of the control codes for ctrl bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']] input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated """ # We cannot generate if the model does not have a LM head if self.get_output_embeddings() is None: raise AttributeError( "You tried to generate sequences with a model that does not have a LM Head." "Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )" ) max_length = max_length if max_length is not None else self.config.max_length min_length = min_length if min_length is not None else self.config.min_length do_sample = do_sample if do_sample is not None else self.config.do_sample early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping use_cache = use_cache if use_cache is not None else self.config.use_cache num_beams = num_beams if num_beams is not None else self.config.num_beams temperature = temperature if temperature is not None else self.config.temperature top_k = top_k if top_k is not None else self.config.top_k top_p = top_p if top_p is not None else self.config.top_p repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty no_repeat_ngram_size = ( no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size ) bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids num_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id ) if input_ids is not None: batch_size = input_ids.shape[0] # overriden by the input batch_size else: batch_size = 1 assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer." assert isinstance(min_length, int) and min_length >= 0, "`min_length` should be a positive integer." assert isinstance(do_sample, bool), "`do_sample` should be a boolean." assert isinstance(early_stopping, bool), "`early_stopping` should be a boolean." assert isinstance(use_cache, bool), "`use_cache` should be a boolean." assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer." assert temperature > 0, "`temperature` should be strictly positive." assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer." assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1." assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1." 
assert input_ids is not None or ( isinstance(bos_token_id, int) and bos_token_id >= 0 ), "If input_ids is not defined, `bos_token_id` should be a positive integer." assert pad_token_id is None or ( isinstance(pad_token_id, int) and (pad_token_id >= 0) ), "`pad_token_id` should be a positive integer." assert (eos_token_id is None) or ( isinstance(eos_token_id, int) and (eos_token_id >= 0) ), "`eos_token_id` should be a positive integer." assert length_penalty > 0, "`length_penalty` should be strictly positive." assert ( isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0 ), "`no_repeat_ngram_size` should be a positive integer." assert ( isinstance(num_return_sequences, int) and num_return_sequences > 0 ), "`num_return_sequences` should be a strictly positive integer." assert ( bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list) ), "`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated" if input_ids is None: assert isinstance(bos_token_id, int) and bos_token_id >= 0, ( "you should either supply a context to complete as `input_ids` input " "or a `bos_token_id` (integer >= 0) as a first token to start the generation." ) input_ids = torch.full( (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device, ) else: assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)." # not allow to duplicate outputs when greedy decoding if do_sample is False: if num_beams == 1: # no_beam_search greedy generation conditions assert ( num_return_sequences == 1 ), "Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1" else: # beam_search greedy generation conditions assert ( num_beams >= num_return_sequences ), "Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences" # create attention mask if necessary # TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140 if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids): attention_mask = input_ids.ne(pad_token_id).long() elif attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) # set pad_token_id to eos_token_id if not set. 
Important that this is done after # attention_mask is created if pad_token_id is None and eos_token_id is not None: print( "Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence".format(eos_token_id) ) pad_token_id = eos_token_id # vocab size if hasattr(self.config, "vocab_size"): vocab_size = self.config.vocab_size elif ( self.config.is_encoder_decoder and hasattr(self.config, "decoder") and hasattr(self.config.decoder, "vocab_size") ): vocab_size = self.config.decoder.vocab_size else: raise ValueError("either self.config.vocab_size or self.config.decoder.vocab_size needs to be defined") # set effective batch size and effective batch multiplier according to do_sample if do_sample: effective_batch_size = batch_size * num_return_sequences effective_batch_mult = num_return_sequences else: effective_batch_size = batch_size effective_batch_mult = 1 if self.config.is_encoder_decoder: if decoder_start_token_id is None: # see if BOS token can be used for decoder_start_token_id if bos_token_id is not None: decoder_start_token_id = bos_token_id elif ( hasattr(self.config, "decoder") and hasattr(self.config.decoder, "bos_token_id") and self.config.decoder.bos_token_id is not None ): decoder_start_token_id = self.config.decoder.bos_token_id else: raise ValueError( "decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation" ) assert hasattr(self, "get_encoder"), "{} should have a 'get_encoder' function defined".format(self) assert callable(self.get_encoder), "{} should be a method".format(self.get_encoder) # get encoder and store encoder outputs encoder = self.get_encoder() encoder_outputs: ModelOutput = encoder(input_ids, attention_mask=attention_mask, return_dict=True) # Expand input ids if num_beams > 1 or num_return_sequences > 1 if num_return_sequences > 1 or num_beams > 1: # TODO: make this a call-back function. # input_ids=caps, # input_video_embeds=vfeats, # attention_mask=attention_mask, # token_type_ids=token_type_ids, input_video_embeds = model_kwargs.pop("input_video_embeds", None) token_type_ids = model_kwargs.pop("token_type_ids", None) input_ids_len = input_ids.shape[-1] input_ids = input_ids.unsqueeze(1).expand( batch_size, effective_batch_mult * num_beams, input_ids_len) input_video_embeds_len, input_video_embeds_hidden = input_video_embeds.size(1), input_video_embeds.size(2) input_video_embeds = input_video_embeds.unsqueeze(1).expand( batch_size, effective_batch_mult * num_beams, input_video_embeds_len, input_video_embeds_hidden) attention_mask_from_len, attention_mask_to_len = attention_mask.size(1), attention_mask.size(2) attention_mask = attention_mask.unsqueeze(1).expand( batch_size, effective_batch_mult * num_beams, attention_mask_from_len, attention_mask_to_len ) token_type_ids_len = token_type_ids.size(1) token_type_ids = token_type_ids.unsqueeze(1).expand( batch_size, effective_batch_mult * num_beams, token_type_ids_len ) # contiguous ... 
input_ids = input_ids.contiguous().view( effective_batch_size * num_beams, input_ids_len ) # shape: (batch_size * num_return_sequences * num_beams, cur_len) input_video_embeds = input_video_embeds.contiguous().view( effective_batch_size * num_beams, input_video_embeds_len, input_video_embeds_hidden) attention_mask = attention_mask.contiguous().view( effective_batch_size * num_beams, attention_mask_from_len, attention_mask_to_len ) # shape: (batch_size * num_return_sequences * num_beams, cur_len) token_type_ids = token_type_ids.contiguous().view( effective_batch_size * num_beams, token_type_ids_len ) model_kwargs["input_video_embeds"] = input_video_embeds model_kwargs["token_type_ids"] = token_type_ids if self.config.is_encoder_decoder: device = next(self.parameters()).device if decoder_input_ids is not None: # give initial decoder input ids input_ids = decoder_input_ids.repeat(effective_batch_size * num_beams, 1).to(device) else: # create empty decoder input_ids input_ids = torch.full( (effective_batch_size * num_beams, 1), decoder_start_token_id, dtype=torch.long, device=device, ) cur_len = input_ids.shape[-1] assert ( batch_size == encoder_outputs.last_hidden_state.shape[0] ), f"expected encoder_outputs.last_hidden_state to have 1st dimension bs={batch_size}, got {encoder_outputs.last_hidden_state.shape[0]} " # expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1) expanded_batch_idxs = ( torch.arange(batch_size) .view(-1, 1) .repeat(1, num_beams * effective_batch_mult) .view(-1) .to(input_ids.device) ) # expand encoder_outputs encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.index_select( 0, expanded_batch_idxs ) # save encoder_outputs in `model_kwargs` model_kwargs["encoder_outputs"] = encoder_outputs else: cur_len = input_ids.shape[-1] assert ( cur_len < max_length ), f"The context has {cur_len} number of tokens, but `max_length` is only {max_length}. 
Please make sure that `max_length` is bigger than the number of tokens, by setting either `generate(max_length=...,...)` or `config.max_length = ...`" if num_beams > 1: output = self._generate_beam_search( input_ids, cur_len=cur_len, max_length=max_length, min_length=min_length, do_sample=do_sample, early_stopping=early_stopping, temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, bad_words_ids=bad_words_ids, pad_token_id=pad_token_id, eos_token_id=eos_token_id, batch_size=effective_batch_size, num_return_sequences=num_return_sequences, length_penalty=length_penalty, num_beams=num_beams, vocab_size=vocab_size, attention_mask=attention_mask, use_cache=use_cache, model_kwargs=model_kwargs, ) else: output = self._generate_no_beam_search( input_ids, cur_len=cur_len, max_length=max_length, min_length=min_length, do_sample=do_sample, temperature=temperature, top_k=top_k, top_p=top_p, repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, bad_words_ids=bad_words_ids, pad_token_id=pad_token_id, eos_token_id=eos_token_id, batch_size=effective_batch_size, attention_mask=attention_mask, use_cache=use_cache, model_kwargs=model_kwargs, ) return output def _generate_beam_search( self, input_ids, cur_len, max_length, min_length, do_sample, early_stopping, temperature, top_k, top_p, repetition_penalty, no_repeat_ngram_size, bad_words_ids, pad_token_id, eos_token_id, batch_size, num_return_sequences, length_penalty, num_beams, vocab_size, attention_mask, use_cache, model_kwargs, ): """Generate sequences for each example with beam search.""" # generated hypotheses generated_hyps = [ BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=early_stopping) for _ in range(batch_size) ] # scores for each sentence in the beam beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) # for greedy decoding it is made sure that only tokens of the first beam are considered to avoid sampling the exact same tokens three times if do_sample is False: beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,) # cache compute states past = None # done sentences done = [False for _ in range(batch_size)] while cur_len < max_length: model_inputs = self.prepare_inputs_for_generation( input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_kwargs ) outputs = self(**model_inputs, return_dict=True) # (batch_size * num_beams, cur_len, vocab_size) next_token_logits = outputs.logits[:, -1, :] # (batch_size * num_beams, vocab_size) # if model has past, then set the past variable to speed up decoding if "past_key_values" in outputs: past = outputs.past_key_values elif "mems" in outputs: past = outputs.mems if self.config.is_encoder_decoder and do_sample is False: # TODO (PVP) still a bit hacky here - there might be a better solution next_token_logits = self.adjust_logits_during_generation( next_token_logits, cur_len=cur_len, max_length=max_length ) scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size) scores = self.postprocess_next_token_scores( scores=scores, input_ids=input_ids, no_repeat_ngram_size=no_repeat_ngram_size, bad_words_ids=bad_words_ids, cur_len=cur_len, min_length=min_length, max_length=max_length, eos_token_id=eos_token_id, repetition_penalty=repetition_penalty, batch_size=batch_size, num_beams=num_beams, ) assert scores.shape == (batch_size * num_beams, vocab_size), 
"Shapes of scores: {} != {}".format( scores.shape, (batch_size * num_beams, vocab_size) ) if do_sample: _scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size) # Temperature if temperature != 1.0: _scores = _scores / temperature # Top-p/top-k filtering _scores = top_k_top_p_filtering( _scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2 ) # (batch_size * num_beams, vocab_size) # re-organize to group the beam together to sample from all beam_idxs _scores = _scores.contiguous().view( batch_size, num_beams * vocab_size ) # (batch_size, num_beams * vocab_size) # Sample 2 next tokens for each beam (so we have some spare tokens and match output of greedy beam search) probs = F.softmax(_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) # (batch_size, num_beams * 2) # Compute next scores next_scores = torch.gather(_scores, -1, next_tokens) # (batch_size, num_beams * 2) # sort the sampled vector to make sure that the first num_beams samples are the best next_scores, next_scores_indices = torch.sort(next_scores, descending=True, dim=1) next_tokens = torch.gather(next_tokens, -1, next_scores_indices) # (batch_size, num_beams * 2) else: next_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size) # re-organize to group the beam together (we are keeping top hypothesis accross beams) next_scores = next_scores.view( batch_size, num_beams * vocab_size ) # (batch_size, num_beams * vocab_size) next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1, largest=True, sorted=True) assert next_scores.size() == next_tokens.size() == (batch_size, 2 * num_beams) # next batch beam content next_batch_beam = [] # for each sentence for batch_idx in range(batch_size): # if we are done with this sentence, add a pad token if done[batch_idx]: assert ( len(generated_hyps[batch_idx]) >= num_beams ), "Batch can only be done if at least {} beams have been generated".format(num_beams) assert ( eos_token_id is not None and pad_token_id is not None ), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined" next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch continue # next sentence beam content, this will get added to next_batch_beam next_sent_beam = [] # next tokens for this sentence for beam_token_rank, (beam_token_id, beam_token_score) in enumerate( zip(next_tokens[batch_idx], next_scores[batch_idx]) ): # get beam and token IDs beam_id = beam_token_id // vocab_size token_id = beam_token_id % vocab_size effective_beam_id = batch_idx * num_beams + beam_id # add to generated hypotheses if end of sentence if (eos_token_id is not None) and (token_id.item() == eos_token_id): # if beam_token does not belong to top num_beams tokens, it should not be added is_beam_token_worse_than_top_num_beams = beam_token_rank >= num_beams if is_beam_token_worse_than_top_num_beams: continue generated_hyps[batch_idx].add( input_ids[effective_beam_id].clone(), beam_token_score.item(), ) else: # add next predicted token since it is not eos_token next_sent_beam.append((beam_token_score, token_id, effective_beam_id)) # once the beam for next step is full, don't add more tokens to it. 
if len(next_sent_beam) == num_beams: break # Check if we are done so that we can save a pad step if all(done) done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done( next_scores[batch_idx].max().item(), cur_len ) # update next beam content assert len(next_sent_beam) == num_beams, "Beam should always be full" next_batch_beam.extend(next_sent_beam) assert len(next_batch_beam) == num_beams * (batch_idx + 1), "We should have added num_beams each step" # stop when we are done with each sentence if all(done): break # sanity check / prepare next batch assert len(next_batch_beam) == batch_size * num_beams beam_scores = beam_scores.new([x[0] for x in next_batch_beam]) beam_tokens = input_ids.new([x[1] for x in next_batch_beam]) beam_idx = input_ids.new([x[2] for x in next_batch_beam]) # re-order batch and update current length input_ids = input_ids[beam_idx, :] input_ids = torch.cat([input_ids, beam_tokens.unsqueeze(1)], dim=-1) cur_len = cur_len + 1 # re-order internal states if past is not None: past = self._reorder_cache(past, beam_idx) # extend attention_mask for new generated input if only decoder # (huxu): move out since we trim attention_mask by ourselves. # if self.config.is_encoder_decoder is False: # attention_mask = torch.cat( # [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 # ) # finalize all open beam hypotheses and add to generated hypotheses for batch_idx in range(batch_size): if done[batch_idx]: continue # test that beam scores match previously calculated scores if not eos and batch_idx not done if eos_token_id is not None and all( (token_id % vocab_size).item() != eos_token_id for token_id in next_tokens[batch_idx] ): assert torch.all( next_scores[batch_idx, :num_beams] == beam_scores.view(batch_size, num_beams)[batch_idx] ), "If batch_idx is not done, final next scores: {} have to equal to accumulated beam_scores: {}".format( next_scores[:, :num_beams][batch_idx], beam_scores.view(batch_size, num_beams)[batch_idx], ) # need to add best num_beams hypotheses to generated hyps for beam_id in range(num_beams): effective_beam_id = batch_idx * num_beams + beam_id final_score = beam_scores[effective_beam_id].item() final_tokens = input_ids[effective_beam_id] generated_hyps[batch_idx].add(final_tokens, final_score) # depending on whether greedy generation is wanted or not define different output_batch_size and output_num_return_sequences_per_batch output_batch_size = batch_size if do_sample else batch_size * num_return_sequences output_num_return_sequences_per_batch = 1 if do_sample else num_return_sequences # select the best hypotheses sent_lengths = input_ids.new(output_batch_size) best = [] # retrieve best hypotheses for i, hypotheses in enumerate(generated_hyps): sorted_hyps = sorted(hypotheses.beams, key=lambda x: x[0]) for j in range(output_num_return_sequences_per_batch): effective_batch_idx = output_num_return_sequences_per_batch * i + j best_hyp = sorted_hyps.pop()[1] sent_lengths[effective_batch_idx] = len(best_hyp) best.append(best_hyp) # prepare for adding eos sent_max_len = min(sent_lengths.max().item() + 1, max_length) decoded = input_ids.new(output_batch_size, sent_max_len) # shorter batches are padded if needed if sent_lengths.min().item() != sent_lengths.max().item(): assert pad_token_id is not None, "`pad_token_id` has to be defined" decoded.fill_(pad_token_id) # fill with hypotheses and eos_token_id if the latter fits in for i, hypo in enumerate(best): decoded[i, : sent_lengths[i]] = hypo if sent_lengths[i] < max_length: 
decoded[i, sent_lengths[i]] = eos_token_id return decoded def _generate_no_beam_search( self, input_ids, cur_len, max_length, min_length, do_sample, temperature, top_k, top_p, repetition_penalty, no_repeat_ngram_size, bad_words_ids, pad_token_id, eos_token_id, batch_size, attention_mask, use_cache, model_kwargs, ): """Generate sequences for each example without beam search (num_beams == 1). All returned sequence are generated independantly. """ # length of generated sentences / unfinished sentences unfinished_sents = input_ids.new(batch_size).fill_(1) sent_lengths = input_ids.new(batch_size).fill_(max_length) past = None while cur_len < max_length: model_inputs = self.prepare_inputs_for_generation( input_ids, past=past, attention_mask=attention_mask, use_cache=use_cache, **model_kwargs ) outputs = self(**model_inputs, return_dict=True) next_token_logits = outputs.logits[:, -1, :] scores = self.postprocess_next_token_scores( scores=next_token_logits, input_ids=input_ids, no_repeat_ngram_size=no_repeat_ngram_size, bad_words_ids=bad_words_ids, cur_len=cur_len, min_length=min_length, max_length=max_length, eos_token_id=eos_token_id, repetition_penalty=repetition_penalty, batch_size=batch_size, num_beams=1, ) # if model has past, then set the past variable to speed up decoding if "past_key_values" in outputs: past = outputs.past_key_values elif "mems" in outputs: past = outputs.mems if do_sample: # Temperature (higher temperature => more likely to sample low probability tokens) if temperature != 1.0: scores = scores / temperature # Top-p/top-k filtering next_token_logscores = top_k_top_p_filtering(scores, top_k=top_k, top_p=top_p) # Sample probs = F.softmax(next_token_logscores, dim=-1) next_token = torch.multinomial(probs, num_samples=1).squeeze(1) else: # Greedy decoding next_token = torch.argmax(next_token_logits, dim=-1) # print(next_token_logits[0,next_token[0]], next_token_logits[0,eos_token_id]) # update generations and finished sentences if eos_token_id is not None: # pad finished sentences if eos_token_id exist tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents) else: tokens_to_add = next_token # add token and increase length by one input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1) cur_len = cur_len + 1 if eos_token_id is not None: eos_in_sents = tokens_to_add == eos_token_id # if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(eos_in_sents.long()).bool() sent_lengths.masked_fill_(is_sents_unfinished_and_token_to_add_is_eos, cur_len) # unfinished_sents is set to zero if eos in sentence unfinished_sents.mul_((~eos_in_sents).long()) # stop when there is a </s> in each sentence, or if we exceed the maximul length if unfinished_sents.max() == 0: break # extend attention_mask for new generated input if only decoder # if self.config.is_encoder_decoder is False: # attention_mask = torch.cat( # [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 # ) return input_ids
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/models/mmfusionnlg.py
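# Illustrative sketch (not part of mmfusionnlg.py): the beam bookkeeping used by
# `_generate_beam_search` above. Per-beam running scores are added to the next-token
# log-probs, the result is regrouped to (batch_size, num_beams * vocab_size), and
# 2 * num_beams candidates are kept so finished (EOS) hypotheses still leave
# num_beams live continuations. batch_size, num_beams and vocab_size below are
# toy values chosen for the example.
import torch
import torch.nn.functional as F

batch_size, num_beams, vocab_size = 2, 3, 11
beam_scores = torch.zeros(batch_size, num_beams)
beam_scores[:, 1:] = -1e9           # greedy case: only beam 0 is live at step 0
beam_scores = beam_scores.view(-1)  # (batch_size * num_beams,)

next_token_logits = torch.randn(batch_size * num_beams, vocab_size)
scores = F.log_softmax(next_token_logits, dim=-1)

next_scores = scores + beam_scores[:, None]                 # accumulate per beam
next_scores = next_scores.view(batch_size, num_beams * vocab_size)
next_scores, next_tokens = torch.topk(next_scores, 2 * num_beams, dim=1)

beam_id = next_tokens // vocab_size   # which beam each candidate extends
token_id = next_tokens % vocab_size   # which token it appends
assert beam_id.shape == token_id.shape == (batch_size, 2 * num_beams)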
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright (c) Facebook, Inc. All Rights Reserved import torch from torch import nn try: from transformers import AutoConfig, AutoTokenizer except ImportError: pass from . import transformermodel class MMPTModel(nn.Module): """An e2e wrapper of inference model. """ @classmethod def from_pretrained(cls, config, checkpoint="checkpoint_best.pt"): import os from ..utils import recursive_config from ..tasks import Task config = recursive_config(config) mmtask = Task.config_task(config) checkpoint_path = os.path.join(config.eval.save_path, checkpoint) mmtask.build_model(checkpoint=checkpoint_path) # TODO(huxu): make the video encoder configurable. from ..processors.models.s3dg import S3D video_encoder = S3D('pretrained_models/s3d_dict.npy', 512) video_encoder.load_state_dict( torch.load('pretrained_models/s3d_howto100m.pth')) from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained( config.dataset.bert_name, use_fast=config.dataset.use_fast ) from ..processors import Aligner aligner = Aligner(config.dataset) return ( MMPTModel(config, mmtask.model, video_encoder), tokenizer, aligner ) def __init__(self, config, model, video_encoder, **kwargs): super().__init__() self.max_video_len = config.dataset.max_video_len self.video_encoder = video_encoder self.model = model def forward(self, video_frames, caps, cmasks, return_score=False): bsz = video_frames.size(0) assert bsz == 1, "only bsz=1 is supported now." seq_len = video_frames.size(1) video_frames = video_frames.view(-1, *video_frames.size()[2:]) vfeats = self.video_encoder(video_frames.permute(0, 4, 1, 2, 3)) vfeats = vfeats['video_embedding'] vfeats = vfeats.view(bsz, seq_len, vfeats.size(-1)) padding = torch.zeros( bsz, self.max_video_len - seq_len, vfeats.size(-1)) vfeats = torch.cat([vfeats, padding], dim=1) vmasks = torch.cat([ torch.ones((bsz, seq_len), dtype=torch.bool), torch.zeros((bsz, self.max_video_len - seq_len), dtype=torch.bool) ], dim=1 ) output = self.model(caps, cmasks, vfeats, vmasks) if return_score: output = {"score": torch.bmm( output["pooled_video"][:, None, :], output["pooled_text"][:, :, None] ).squeeze(-1).squeeze(-1)} return output class MMFusion(nn.Module): """a MMPT wrapper class for MMBert style models. TODO: move isolated mask to a subclass. """ def __init__(self, config, **kwargs): super().__init__() transformer_config = AutoConfig.from_pretrained( config.dataset.bert_name) self.hidden_size = transformer_config.hidden_size self.is_train = False if config.dataset.train_path is not None: self.is_train = True # 0 means no iso; 1-12 means iso up to that layer. 
self.num_hidden_layers = transformer_config.num_hidden_layers self.last_iso_layer = 0 if config.dataset.num_iso_layer is not None: self.last_iso_layer = config.dataset.num_iso_layer - 1 + 1 if config.model.mm_encoder_cls is not None: mm_encoder_cls = getattr(transformermodel, config.model.mm_encoder_cls) model_config = AutoConfig.from_pretrained(config.dataset.bert_name) model_config.max_video_len = config.dataset.max_video_len # TODO: a general way to add parameter for a model. model_config.use_seg_emb = config.model.use_seg_emb self.mm_encoder = mm_encoder_cls.from_pretrained( config.dataset.bert_name, config=model_config) elif config.model.video_encoder_cls is not None\ and config.model.text_encoder_cls is not None: video_encoder_cls = getattr(transformermodel, config.model.video_encoder_cls) model_config = AutoConfig.from_pretrained(config.dataset.bert_name) model_config.max_video_len = config.dataset.max_video_len # TODO: make each model a set of config class. if hasattr(model_config, "num_layers"): model_config.num_layers = config.model.num_hidden_video_layers else: model_config.num_hidden_layers = config.model.num_hidden_video_layers self.video_encoder = video_encoder_cls.from_pretrained( config.dataset.bert_name, config=model_config) # exact same NLP model from Huggingface. text_encoder_cls = getattr(transformermodel, config.model.text_encoder_cls) self.text_encoder = text_encoder_cls.from_pretrained( config.dataset.bert_name) else: raise ValueError("the encoder must be either MM or two backbones.") def forward( self, caps, cmasks, vfeats, vmasks, **kwargs ): raise NotImplementedError( "Please derive MMFusion module." ) def _mm_on_the_fly( self, cmasks, vmasks, attention_mask ): """helper function for mask, seg_ids and token_type_ids.""" if attention_mask is None: attention_mask = self._mm_attention_mask(cmasks, vmasks) """ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | """ token_type_ids = torch.cat( [ torch.zeros( (vmasks.size(0), vmasks.size(1) + 2), dtype=torch.long, device=vmasks.device, ), torch.ones( (cmasks.size(0), cmasks.size(1) - 2), dtype=torch.long, device=cmasks.device, ), ], dim=1, ) return attention_mask, token_type_ids def _mm_attention_mask(self, cmasks, vmasks): assert cmasks.size(0) == vmasks.size(0), "{}, {}, {}, {}".format( str(cmasks.size()), str(vmasks.size()), str(cmasks.size(0)), str(vmasks.size(0)), ) mm_mask = torch.cat([cmasks[:, :1], vmasks, cmasks[:, 1:]], dim=1) if self.last_iso_layer == 0: # hard attention mask. return mm_mask else: # a gpu iso mask; 0 : num_iso_layer is isolated; # num_iso_layer: are MM-fused. # make an iso layer batch_size = cmasks.size(0) iso_mask = self._make_iso_mask(batch_size, cmasks, vmasks) mm_mask = mm_mask[:, None, :].repeat(1, mm_mask.size(-1), 1) iso_mm_masks = [] # hard attention mask. iso_mask = iso_mask[:, None, :, :].repeat( 1, self.last_iso_layer, 1, 1) iso_mm_masks.append(iso_mask) if self.last_iso_layer < self.num_hidden_layers: mm_mask = mm_mask[:, None, :, :].repeat( 1, self.num_hidden_layers - self.last_iso_layer, 1, 1 ) iso_mm_masks.append(mm_mask) iso_mm_masks = torch.cat(iso_mm_masks, dim=1) return iso_mm_masks def _make_iso_mask(self, batch_size, cmasks, vmasks): cls_self_mask = torch.cat( [ torch.ones( (batch_size, 1), dtype=torch.bool, device=cmasks.device), torch.zeros( (batch_size, cmasks.size(1) + vmasks.size(1) - 1), dtype=torch.bool, device=cmasks.device) ], dim=1) iso_video_mask = torch.cat( [ # [CLS] is not used. 
torch.zeros( (batch_size, 1), dtype=torch.bool, device=cmasks.device ), vmasks, # assume to be 1. cmasks[:, 1:2], # 2 means [CLS] + [SEP] torch.zeros( (batch_size, cmasks.size(1) - 2), dtype=torch.bool, device=cmasks.device, ), ], dim=1, ) iso_text_mask = torch.cat( [ torch.zeros( (batch_size, 2 + vmasks.size(1)), dtype=torch.bool, device=cmasks.device, ), # [CLS] is not used. cmasks[:, 2:], # assume to be 1. ], dim=1, ) cls_self_mask = cls_self_mask[:, None, :] iso_video_mask = iso_video_mask[:, None, :].repeat( 1, vmasks.size(1) + 1, 1) iso_text_mask = iso_text_mask[:, None, :].repeat( 1, cmasks.size(1) - 2, 1) return torch.cat([cls_self_mask, iso_video_mask, iso_text_mask], dim=1) def _pooling_vt_layer( self, layered_sequence_output, cmasks, vmasks ): layer_idx = self.last_iso_layer \ if self.last_iso_layer > 0 else self.num_hidden_layers hidden_state = layered_sequence_output[layer_idx] # also output pooled_video and pooled_text. batch_size = cmasks.size(0) # pool the modality. text_offset = vmasks.size(1) + 2 # [CLS] + [SEP] # video tokens + [SEP] video_outputs = hidden_state[:, 1:text_offset] video_attention_mask = torch.cat( [ vmasks, torch.ones( (batch_size, 1), dtype=torch.bool, device=vmasks.device), ], dim=1, ) assert video_outputs.size(1) == video_attention_mask.size(1) pooled_video = torch.sum( video_outputs * video_attention_mask.unsqueeze(-1), dim=1 ) / video_attention_mask.sum(1, keepdim=True) # pooled_video = torch.mean(video_outputs[0], dim=1) # text tokens + [SEP] text_attention_mask = cmasks[:, 2:] text_outputs = hidden_state[:, text_offset:] assert text_outputs.size(1) == text_attention_mask.size(1) pooled_text = torch.sum( text_outputs * text_attention_mask.unsqueeze(-1), dim=1 ) / text_attention_mask.sum(1, keepdim=True) return pooled_video, pooled_text class MMFusionMFMMLM(MMFusion): """forward function for MFM and MLM.""" def forward( self, caps, cmasks, vfeats, vmasks, attention_mask=None, video_label=None, text_label=None, **kwargs ): output_hidden_states = False if self.is_train else True target_vfeats, non_masked_frame_mask = None, None if video_label is not None: target_vfeats = vfeats.masked_select( video_label.unsqueeze(-1)).view( -1, vfeats.size(-1) ) # mask video token. vfeats[video_label] = 0.0 non_masked_frame_mask = vmasks.clone() non_masked_frame_mask[video_label] = False attention_mask, token_type_ids = self._mm_on_the_fly( cmasks, vmasks, attention_mask) outputs = self.mm_encoder( input_ids=caps, input_video_embeds=vfeats, attention_mask=attention_mask, token_type_ids=token_type_ids, masked_frame_labels=video_label, target_video_hidden_states=target_vfeats, non_masked_frame_mask=non_masked_frame_mask, masked_lm_labels=text_label, output_hidden_states=output_hidden_states, ) video_logits, text_logits = outputs[0], outputs[1] if self.is_train: # return earlier for training. return { "video_logits": video_logits, "text_logits": text_logits, } pooled_video, pooled_text = self._pooling_vt_layer( outputs[2], cmasks, vmasks) return {"pooled_video": pooled_video, "pooled_text": pooled_text} class MMFusionMTM(MMFusionMFMMLM): def __init__(self, config, **kwargs): super().__init__(config) """ For reproducibility: self.mm_encoder will be initialized then discarded. 
""" from .transformermodel import MMBertForMTM model_config = AutoConfig.from_pretrained(config.dataset.bert_name) model_config.max_video_len = config.dataset.max_video_len model_config.use_seg_emb = config.model.use_seg_emb self.mm_encoder = MMBertForMTM.from_pretrained( config.dataset.bert_name, config=model_config) class MMFusionShare(MMFusion): """A retrival wrapper using mm_encoder as both video/text backbone. TODO: move formally. """ def forward( self, caps, cmasks, vfeats, vmasks, attention_mask=None, video_label=None, text_label=None, output_hidden_states=False, **kwargs ): pooled_video = self.forward_video( vfeats, vmasks, caps, cmasks, output_hidden_states ) pooled_text = self.forward_text( caps, cmasks, output_hidden_states ) return {"pooled_video": pooled_video, "pooled_text": pooled_text} def forward_video( self, vfeats, vmasks, caps, cmasks, output_hidden_states=False, **kwargs ): input_ids = caps[:, :2] attention_mask = torch.cat([ cmasks[:, :1], vmasks, cmasks[:, 1:2] ], dim=1) token_type_ids = torch.zeros( (vmasks.size(0), vmasks.size(1) + 2), dtype=torch.long, device=vmasks.device) outputs = self.mm_encoder( input_ids=input_ids, input_video_embeds=vfeats, attention_mask=attention_mask, token_type_ids=token_type_ids, output_hidden_states=True ) video_outputs = outputs[0] if output_hidden_states: return video_outputs batch_size = cmasks.size(0) video_attention_mask = torch.cat( [ torch.zeros( (batch_size, 1), dtype=torch.bool, device=vmasks.device), vmasks, torch.ones( (batch_size, 1), dtype=torch.bool, device=vmasks.device), ], dim=1, ) assert video_outputs.size(1) == video_attention_mask.size(1) video_attention_mask = video_attention_mask.type(video_outputs.dtype) \ / video_attention_mask.sum(1, keepdim=True) pooled_video = torch.bmm( video_outputs.transpose(2, 1), video_attention_mask.unsqueeze(2) ).squeeze(-1) return pooled_video # video_outputs def forward_text( self, caps, cmasks, output_hidden_states=False, **kwargs ): input_ids = torch.cat([ caps[:, :1], caps[:, 2:], ], dim=1) attention_mask = torch.cat([ cmasks[:, :1], cmasks[:, 2:] ], dim=1) token_type_ids = torch.cat([ torch.zeros( (cmasks.size(0), 1), dtype=torch.long, device=cmasks.device), torch.ones( (cmasks.size(0), cmasks.size(1) - 2), dtype=torch.long, device=cmasks.device) ], dim=1) outputs = self.mm_encoder( input_ids=input_ids, input_video_embeds=None, attention_mask=attention_mask, token_type_ids=token_type_ids, output_hidden_states=True ) text_outputs = outputs[0] if output_hidden_states: return text_outputs batch_size = caps.size(0) # text tokens + [SEP] text_attention_mask = torch.cat([ torch.zeros( (batch_size, 1), dtype=torch.bool, device=cmasks.device), cmasks[:, 2:] ], dim=1) assert text_outputs.size(1) == text_attention_mask.size(1) text_attention_mask = text_attention_mask.type(text_outputs.dtype) \ / text_attention_mask.sum(1, keepdim=True) pooled_text = torch.bmm( text_outputs.transpose(2, 1), text_attention_mask.unsqueeze(2) ).squeeze(-1) return pooled_text # text_outputs class MMFusionSeparate(MMFusionShare): def forward_video( self, vfeats, vmasks, caps, cmasks, output_hidden_states=False, **kwargs ): input_ids = caps[:, :2] attention_mask = torch.cat([ cmasks[:, :1], vmasks, cmasks[:, 1:2] ], dim=1) token_type_ids = torch.zeros( (vmasks.size(0), vmasks.size(1) + 2), dtype=torch.long, device=vmasks.device) outputs = self.video_encoder( input_ids=input_ids, input_video_embeds=vfeats, attention_mask=attention_mask, token_type_ids=token_type_ids, output_hidden_states=True ) video_outputs = 
outputs[0] if output_hidden_states: return video_outputs batch_size = cmasks.size(0) video_attention_mask = torch.cat( [ torch.zeros( (batch_size, 1), dtype=torch.bool, device=vmasks.device), vmasks, torch.ones( (batch_size, 1), dtype=torch.bool, device=vmasks.device), ], dim=1, ) assert video_outputs.size(1) == video_attention_mask.size(1) video_attention_mask = video_attention_mask.type(video_outputs.dtype) \ / video_attention_mask.sum(1, keepdim=True) pooled_video = torch.bmm( video_outputs.transpose(2, 1), video_attention_mask.unsqueeze(2) ).squeeze(-1) return pooled_video # video_outputs def forward_text( self, caps, cmasks, output_hidden_states=False, **kwargs ): input_ids = torch.cat([ caps[:, :1], caps[:, 2:], ], dim=1) attention_mask = torch.cat([ cmasks[:, :1], cmasks[:, 2:] ], dim=1) # different from sharing, we use all-0 type. token_type_ids = torch.zeros( (cmasks.size(0), cmasks.size(1) - 1), dtype=torch.long, device=cmasks.device) outputs = self.text_encoder( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, output_hidden_states=True ) text_outputs = outputs[0] if output_hidden_states: return text_outputs batch_size = caps.size(0) # text tokens + [SEP] text_attention_mask = torch.cat([ torch.zeros( (batch_size, 1), dtype=torch.bool, device=cmasks.device), cmasks[:, 2:] ], dim=1) assert text_outputs.size(1) == text_attention_mask.size(1) text_attention_mask = text_attention_mask.type(text_outputs.dtype) \ / text_attention_mask.sum(1, keepdim=True) pooled_text = torch.bmm( text_outputs.transpose(2, 1), text_attention_mask.unsqueeze(2) ).squeeze(-1) return pooled_text # text_outputs class MMFusionJoint(MMFusion): """fine-tuning wrapper for retrival task.""" def forward( self, caps, cmasks, vfeats, vmasks, attention_mask=None, video_label=None, text_label=None, **kwargs ): # TODO (huxu): other ways to do negative examples; move the following # into your criterion forward. output_hidden_states = True attention_mask, token_type_ids = self._mm_on_the_fly( cmasks, vmasks, attention_mask) separate_forward_split = ( None if self.is_train else vmasks.size(1) + 2 ) # [CLS] + [SEP] outputs = self.mm_encoder( input_ids=caps, input_video_embeds=vfeats, attention_mask=attention_mask, token_type_ids=token_type_ids, output_hidden_states=output_hidden_states, separate_forward_split=separate_forward_split, ) pooled_video, pooled_text = self._pooling_vt_layer( outputs[2], cmasks, vmasks) return {"pooled_video": pooled_video, "pooled_text": pooled_text} class MMFusionActionSegmentation(MMFusion): """Fine-tuning wrapper for action segmentation. TODO: rename this for VLM. """ def forward( self, caps, cmasks, vfeats, vmasks, attention_mask=None, **kwargs ): # ActionLocalization assume of batch_size=1, squeeze it. caps = caps.view(-1, caps.size(-1)) cmasks = cmasks.view(-1, cmasks.size(-1)) vfeats = vfeats.view(-1, vfeats.size(2), vfeats.size(3)) vmasks = vmasks.view(-1, vmasks.size(-1)) # this may not cover all shapes of attention_mask. attention_mask = attention_mask.view( -1, attention_mask.size(2), attention_mask.size(3)) \ if attention_mask is not None else None # TODO (huxu): other ways to do negative examples; move the following # into your criterion forward. output_hidden_states = True # video forwarding, text is dummy; never use attention_mask. 
attention_mask, token_type_ids = self._mm_on_the_fly( cmasks, vmasks, attention_mask) logits = self.mm_encoder( input_ids=caps, input_video_embeds=vfeats, attention_mask=attention_mask, token_type_ids=token_type_ids, output_hidden_states=output_hidden_states, ) return {"logits": logits[0][:, 1:vmasks.size(1)+1]} class MMFusionActionLocalization(MMFusion): """fine-tuning model for retrival task.""" def __init__(self, config, **kwargs): super().__init__(config) tokenizer = AutoTokenizer.from_pretrained( config.dataset.bert_name) self.cls_token_id = tokenizer.cls_token_id self.sep_token_id = tokenizer.sep_token_id self.pad_token_id = tokenizer.pad_token_id def forward( self, caps, cmasks, vfeats, vmasks, attention_mask=None, **kwargs ): # ActionLocalization assume of batch_size=1, squeeze it. caps = caps.squeeze(0) cmasks = cmasks.squeeze(0) vfeats = vfeats.squeeze(0) vmasks = vmasks.squeeze(0) attention_mask = attention_mask.squeeze(0) if attention_mask is not None else None # TODO (huxu): other ways to do negative examples; move the following # into your criterion forward. output_hidden_states = True # a len1 dummy video token. dummy_vfeats = torch.zeros( (caps.size(0), 1, vfeats.size(-1)), device=vfeats.device, dtype=vfeats.dtype) dummy_vmasks = torch.ones( (caps.size(0), 1), dtype=torch.bool, device=vfeats.device) dummy_caps = torch.LongTensor( [[self.cls_token_id, self.sep_token_id, self.pad_token_id, self.sep_token_id]], ).to(caps.device).repeat(vfeats.size(0), 1) dummy_cmasks = torch.BoolTensor( [[0, 1, 0, 1]] # pad are valid for attention. ).to(caps.device).repeat(vfeats.size(0), 1) # video forwarding, text is dummy; never use attention_mask. attention_mask, token_type_ids = self._mm_on_the_fly( dummy_cmasks, vmasks, None) outputs = self.mm_encoder( input_ids=dummy_caps, input_video_embeds=vfeats, attention_mask=attention_mask, token_type_ids=token_type_ids, output_hidden_states=output_hidden_states, ) layer_idx = self.last_iso_layer \ if self.last_iso_layer > 0 else self.num_hidden_layers video_seq = outputs[2][layer_idx][:, 1:vmasks.size(1)+1].masked_select( vmasks.unsqueeze(-1) ).view(-1, self.hidden_size) # text forwarding, video is dummy attention_mask, token_type_ids = self._mm_on_the_fly( cmasks, dummy_vmasks, None) outputs = self.mm_encoder( input_ids=caps, input_video_embeds=dummy_vfeats, attention_mask=attention_mask, token_type_ids=token_type_ids, output_hidden_states=output_hidden_states, ) _, pooled_text = self._pooling_vt_layer( outputs[2], cmasks, dummy_vmasks) # this line is not right. logits = torch.mm(video_seq, pooled_text.transpose(1, 0)) return {"logits": logits} # --------------- MMFusionSeparate for end tasks --------------- class MMFusionSeparateActionSegmentation(MMFusionSeparate): """Fine-tuning wrapper for action segmentation.""" def forward( self, caps, cmasks, vfeats, vmasks, attention_mask=None, **kwargs ): # ActionLocalization assume of batch_size=1, squeeze it. 
caps = caps.view(-1, caps.size(-1)) cmasks = cmasks.view(-1, cmasks.size(-1)) vfeats = vfeats.view(-1, vfeats.size(2), vfeats.size(3)) vmasks = vmasks.view(-1, vmasks.size(-1)) logits = self.forward_video( vfeats, vmasks, caps, cmasks, output_hidden_states=True ) return {"logits": logits[:, 1:vmasks.size(1)+1]} class MMFusionSeparateActionLocalization(MMFusionSeparate): def __init__(self, config, **kwargs): super().__init__(config) tokenizer = AutoTokenizer.from_pretrained( config.dataset.bert_name) self.cls_token_id = tokenizer.cls_token_id self.sep_token_id = tokenizer.sep_token_id self.pad_token_id = tokenizer.pad_token_id def forward( self, caps, cmasks, vfeats, vmasks, **kwargs ): # ActionLocalization assume of batch_size=1, squeeze it. caps = caps.squeeze(0) cmasks = cmasks.squeeze(0) vfeats = vfeats.squeeze(0) vmasks = vmasks.squeeze(0) # TODO (huxu): other ways to do negative examples; move the following # into your criterion forward. dummy_caps = torch.LongTensor( [[self.cls_token_id, self.sep_token_id, self.pad_token_id, self.sep_token_id]], ).to(caps.device).repeat(vfeats.size(0), 1) dummy_cmasks = torch.BoolTensor( [[0, 1, 0, 1]] # pad are valid for attention. ).to(caps.device).repeat(vfeats.size(0), 1) outputs = self.forward_video( vfeats, vmasks, dummy_caps, dummy_cmasks, output_hidden_states=True ) video_seq = outputs[:, 1:vmasks.size(1)+1].masked_select( vmasks.unsqueeze(-1) ).view(-1, self.hidden_size) pooled_text = self.forward_text( caps, cmasks, output_hidden_states=False ) # this line is not right. logits = torch.mm(video_seq, pooled_text.transpose(1, 0)) return {"logits": logits} class MMFusionShareActionLocalization(MMFusionShare): def __init__(self, config, **kwargs): super().__init__(config) tokenizer = AutoTokenizer.from_pretrained( config.dataset.bert_name) self.cls_token_id = tokenizer.cls_token_id self.sep_token_id = tokenizer.sep_token_id self.pad_token_id = tokenizer.pad_token_id def forward( self, caps, cmasks, vfeats, vmasks, **kwargs ): # ActionLocalization assume of batch_size=1, squeeze it. caps = caps.squeeze(0) cmasks = cmasks.squeeze(0) vfeats = vfeats.squeeze(0) vmasks = vmasks.squeeze(0) # TODO (huxu): other ways to do negative examples; move the following # into your criterion forward. dummy_caps = torch.LongTensor( [[self.cls_token_id, self.sep_token_id, self.pad_token_id, self.sep_token_id]], ).to(caps.device).repeat(vfeats.size(0), 1) dummy_cmasks = torch.BoolTensor( [[0, 1, 0, 1]] # pad are valid for attention. ).to(caps.device).repeat(vfeats.size(0), 1) outputs = self.forward_video( vfeats, vmasks, dummy_caps, dummy_cmasks, output_hidden_states=True ) video_seq = outputs[:, 1:vmasks.size(1)+1].masked_select( vmasks.unsqueeze(-1) ).view(-1, self.hidden_size) pooled_text = self.forward_text( caps, cmasks, output_hidden_states=False ) # this line is not right. logits = torch.mm(video_seq, pooled_text.transpose(1, 0)) return {"logits": logits}
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/models/mmfusion.py
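# Illustrative sketch (toy sizes, not the MMFusion API) of the joint token layout
# assumed by `_mm_on_the_fly` / `_mm_attention_mask` above: the multimodal sequence
# is [CLS] + video tokens + [SEP] + text tokens, so the caption mask contributes its
# first position before the video and the rest after it, while token_type_ids are 0
# for the [CLS]+video+[SEP] half and 1 for the remaining text tokens.
import torch

batch_size, max_video_len, max_text_len = 2, 4, 6
vmasks = torch.tensor([[1, 1, 1, 0],
                       [1, 1, 0, 0]], dtype=torch.bool)           # valid video frames
cmasks = torch.ones(batch_size, max_text_len, dtype=torch.bool)   # [CLS] ... [SEP]

mm_mask = torch.cat([cmasks[:, :1], vmasks, cmasks[:, 1:]], dim=1)
token_type_ids = torch.cat(
    [
        torch.zeros(batch_size, vmasks.size(1) + 2, dtype=torch.long),  # [CLS] + video + [SEP]
        torch.ones(batch_size, cmasks.size(1) - 2, dtype=torch.long),   # remaining text tokens
    ],
    dim=1,
)
assert mm_mask.size(1) == token_type_ids.size(1) == max_video_len + max_text_len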
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import random import json import numpy as np import torch import pickle import math from tqdm import tqdm class Predictor(object): """this base class is used to save predictions to disk (and being called by a evaluator later). Predictor has minimum support of single gpu prediction. """ def __init__(self, config): self.pred_dir = None # on-the-fly eval does not save the results. if hasattr(config, "eval") and config.eval is not None: self.pred_dir = config.eval.save_path os.makedirs(self.pred_dir, exist_ok=True) def __call__(self, outputs): """extract the prediction and save it.""" raise NotImplementedError def predict_loop(self, model, eval_dataloader, output_file=None): """on-the-fly prediction on a single gpu.""" self.full_scores = [] model.eval() model = model.to(0) with torch.no_grad(): for data in eval_dataloader: data = self.to_ctx(data) outputs = model(**data) outputs.update(data) self(outputs) return self.finalize(output_file) def finalize(self, output_file): pass def to_ctx(self, data, ctx=0, dtype=None): if isinstance(data, dict): for key in data: if torch.is_tensor(data[key]): if dtype is not None and data[key].dtype == torch.float32: data[key] = data[key].to(dtype) data[key] = data[key].to(ctx) return data else: raise ValueError("non-dict type of batch is not supported yet.") class NLGPredictor(Predictor): """Predicting Text from MMFusion models.""" """TODO: make a context.""" def __init__(self, config): super().__init__(config) from transformers import AutoTokenizer self.tokenizer = AutoTokenizer.from_pretrained( config.dataset.bert_name, bos_token="[CLS]", eos_token="[SEP]") self.bos_token_id = self.tokenizer.bos_token_id self.eos_token_id = self.tokenizer.eos_token_id def predict_loop(self, model, eval_dataloader, output_file=None): """TODO: refactor base classes.""" ctx = 0 outputs = {"outputs": [], "targets": [[]]} model.eval() model = model.to(ctx) with torch.no_grad(): for data in tqdm(eval_dataloader): data = self.to_ctx(data, ctx) self(data, model, outputs) return self.finalize(outputs, output_file) def __call__(self, data, model, outputs): data.update({ "bos_token_id": self.bos_token_id, "eos_token_id": self.eos_token_id }) output = model.generate(**data) assert len(output) == len(data["ref"]) for idx, _output in enumerate(output): generated_text = self.tokenizer.decode( _output, skip_special_tokens=True) if generated_text == "": generated_text = "none" outputs["outputs"].append(generated_text) outputs["targets"][0].append(data["ref"][idx]) if random.random() < 0.001: print("_output", _output) print("generated_text", generated_text) print("ref", data["ref"][idx]) def finalize(self, outputs, output_file=None): if output_file is not None: with open(os.path.join( self.pred_dir, output_file + ".json"), "w") as fw: json.dump(outputs, fw, indent=4) return outputs class RetrievalPredictor(Predictor): """generated `pooled_video` and `pooled_text`.""" def __init__(self, config): super().__init__(config) from transformers import AutoTokenizer self.tokenizer = AutoTokenizer.from_pretrained( config.dataset.bert_name) def predict_loop( self, model, eval_dataloader, output_file="retrieval.npy" ): """on-the-fly prediction on a single gpu.""" full_scores = [] texts = [] model.eval() model = model.cuda() with torch.no_grad(): for data in eval_dataloader: # convert to dict. 
if not isinstance(data, dict): data = { "caps": data[0], "cmasks": data[1], "vfeats": data[2], "vmasks": data[3], "video_id": data[4] } data = self.to_ctx(data) outputs = model(**data) outputs.update(data) self(outputs, full_scores) for _cap in data["caps"]: texts.append( self.tokenizer.decode(_cap, skip_special_tokens=True) ) return self.finalize(full_scores, texts, output_file) def __call__(self, sample, full_scores): scores = self._get_pooled_outputs(sample) self._append_scores(scores, full_scores) def finalize(self, full_scores, texts, output_file=None): outputs = self._aggregate_scores(full_scores) if output_file is not None: np.save(os.path.join(self.pred_dir, output_file + ".npy"), outputs) return {"outputs": outputs, "texts": texts} def _get_pooled_outputs(self, outputs): if "pooled_video" in outputs: return outputs["pooled_video"], outputs["pooled_text"] else: raise ValueError("unknown format of outputs.") def _append_scores(self, scores, full_scores): assert len(scores) == 2 if len(full_scores) == 0: full_scores.append([]) full_scores.append([]) full_scores[0].append(scores[0].cpu().detach().numpy()) full_scores[1].append(scores[1].cpu().detach().numpy()) def _aggregate_scores(self, scores): assert len(scores) == 2 video_hidden = np.concatenate(scores[0], axis=0) text_hidden = np.concatenate(scores[1], axis=0) # clear up. self.full_scores = [] return np.matmul(text_hidden, video_hidden.T) class QAPredictor(Predictor): """generated `pooled_video` and `pooled_text`.""" def __init__(self, config): super().__init__(config) """predictor maintains scores and aggregate them.""" def predict_loop(self, model, eval_dataloader, output_file="qa.npy"): """on-the-fly prediction on a single gpu.""" self.full_scores = [] model.eval() model = model.cuda() with torch.no_grad(): for data in eval_dataloader: # reshape ans and dup video 5 times. v_len = data["vfeats"].size(1) hidden_size = data["vfeats"].size(2) data["vfeats"] = data["vfeats"].unsqueeze(1).repeat(1, 5, 1, 1).view(-1, v_len, hidden_size) data["vmasks"] = data["vmasks"].unsqueeze(1).repeat(1, 5, 1).view(-1, v_len) t_len = data["caps"].size(-1) data["caps"] = data["caps"].view(-1, t_len) data["cmasks"] = data["cmasks"].view(-1, t_len) data = self.to_ctx(data) outputs = model(**data) outputs.update(data) self(outputs) return self.finalize(output_file) def __call__(self, sample): hidden_size = sample["pooled_video"].size(-1) pooled_video = sample["pooled_video"].view(-1, 5, hidden_size) pooled_text = sample["pooled_text"].view(-1, 5, hidden_size) scores = torch.bmm(pooled_video, pooled_text.transpose(2, 1)) scores = scores.argmax(-1) self._append_scores(scores[:, 0], sample["answers"], self.full_scores) def finalize(self, output_file=None): outputs, targets = self._aggregate_scores(self.full_scores) if output_file is not None: np.save(os.path.join(self.pred_dir, output_file + ".npy"), outputs) return {"outputs": outputs, "targets": targets} def _append_scores(self, scores, answers, full_scores): if len(full_scores) == 0: full_scores.append([]) full_scores.append([]) full_scores[0].append(scores.cpu().detach().numpy()) full_scores[1].append(answers.cpu().detach().numpy()) def _aggregate_scores(self, scores): assert len(scores) == 2 outputs = np.concatenate(scores[0], axis=0) targets = np.concatenate(scores[1], axis=0) # clear up. self.full_scores = [] return outputs, targets class CrossTaskPredictor(Predictor): """ CrossTaskPredictor needs to compute the average of logits for overlapped sliding-window. 
""" def __init__(self, config): super().__init__(config) self.lsm = torch.nn.LogSoftmax(dim=1) self.max_video_len = config.dataset.max_video_len self.sliding_window = config.dataset.sliding_window self.sliding_window_size = config.dataset.sliding_window_size self.annotation_path = config.dataset.annotation_path def predict_loop(self, model, eval_dataloader, output_file="result.pkl"): """refactored from line 144: https://github.com/DmZhukov/CrossTask/blob/master/train.py """ ctx = 0 model.eval() model = model.to(ctx) # this is not a loss but just compute neg_log_prob. Y_pred = {} Y_true = {} with torch.no_grad(): for batch in eval_dataloader: self(batch, model, Y_pred, Y_true) return self.finalize(Y_pred, Y_true, output_file) def __call__(self, sample, model, Y_pred, Y_true): # please install dp from `https://github.com/DmZhukov/CrossTask` from dp import dp vid, task = sample['video_id'][0], sample['task'][0] sample = self.to_ctx(sample) # compute the average logits over sliding windows. output = model(**sample) batch_logits = output["logits"].cpu() video_len = sample["video_len"][0] # the following version is slow. logits = torch.zeros((video_len, batch_logits.size(1))) logits_counts = torch.zeros((video_len, 1), dtype=torch.long) # use the same loop as aligner to recover. batch_logit_idx = 0 for window_start in range(0, video_len, self.sliding_window): video_end = min(video_len - window_start, self.sliding_window_size) logits[window_start: window_start + video_end] += batch_logits[ batch_logit_idx: batch_logit_idx + video_end] batch_logit_idx += video_end logits_counts[window_start: window_start + video_end] += torch.ones((video_end, 1), dtype=torch.long) if (video_len - window_start) <= self.sliding_window_size: break logits /= logits_counts assert logits.size() == (video_len, batch_logits.size(1)), "{}, {}".format(logits.size(), video_len) O = self.lsm(logits) y = np.zeros(O.size(), dtype=np.float32) dp(y, -O.detach().cpu().numpy()) if task not in Y_pred: Y_pred[task] = {} Y_pred[task][vid] = y annot_path = os.path.join( self.annotation_path, task+'_'+vid+'.csv') if os.path.exists(annot_path): if task not in Y_true: Y_true[task] = {} Y_true[task][vid] = self._read_assignment( *y.shape, annot_path) def finalize(self, Y_pred, Y_true, output_file=None): if output_file is not None: with open( os.path.join(self.pred_dir, output_file + ".pkl"), "wb") as fw: pickle.dump( {"Y_pred": Y_pred, "Y_true": Y_true}, fw, protocol=pickle.HIGHEST_PROTOCOL) return {"outputs": Y_pred, "targets": Y_true} def _read_assignment(self, T, K, path): """ refactored from https://github.com/DmZhukov/CrossTask/blob/master/data.py Howto interpret contraints on loss that is going to be minimized: lambd is a big number; self.lambd * C is a big number for all valid position (csv stores invalids) def forward(self, O, Y, C): return (Y*(self.lambd * C - self.lsm(O))).mean(dim=0).sum() This will load the csv file and fill-in the step col from start to end rows. """ Y = np.zeros([T, K], dtype=np.uint8) with open(path, 'r') as f: for line in f: step, start, end = line.strip().split(',') start = int(math.floor(float(start))) end = int(math.ceil(float(end))) step = int(step) - 1 Y[start:end, step] = 1 return Y class COINPredictor(Predictor): """ COINPredictor is similar to CrossTask on sliding windows. 
""" def __init__(self, config): super().__init__(config) self.max_video_len = config.dataset.max_video_len self.sliding_window = config.dataset.sliding_window self.sliding_window_size = config.dataset.sliding_window_size def predict_loop(self, model, eval_dataloader, output_file="result.pkl"): """refactored from line 144: https://github.com/DmZhukov/CrossTask/blob/master/train.py """ ctx = 0 model.eval() model = model.to(ctx) # this is not a loss but just compute neg_log_prob. Y_pred = [] Y_true = [] with torch.no_grad(): for batch in eval_dataloader: self(batch, model, Y_pred, Y_true) return self.finalize(Y_pred, Y_true, output_file) def __call__(self, sample, model, Y_pred, Y_true): sample = self.to_ctx(sample) # compute the average logits over sliding windows. output = model(**sample) logits = self._merge_windows(sample, output) Y_pred.append(logits.argmax(dim=1)) Y_true.append(sample["video_targets"].squeeze(0).cpu()) def _merge_windows(self, sample, output): targets = sample["targets"].reshape(-1).cpu() valid_mask = targets != -100 targets = targets[valid_mask] batch_logits = output["logits"].cpu() batch_logits = batch_logits.reshape(-1, batch_logits.size(-1)) batch_logits = batch_logits[valid_mask] video_len = sample["video_len"][0] # the following version is slow. logits = torch.zeros((video_len, batch_logits.size(1))) logits_counts = torch.zeros((video_len, 1), dtype=torch.long) # use the same loop as aligner to recover. batch_logit_idx = 0 for window_start in range(0, video_len, self.sliding_window): video_end = min(video_len - window_start, self.sliding_window_size) logits[window_start: window_start + video_end] += batch_logits[ batch_logit_idx: batch_logit_idx + video_end] batch_logit_idx += video_end logits_counts[window_start: window_start + video_end] += torch.ones((video_end, 1), dtype=torch.long) if (video_len - window_start) <= self.sliding_window_size: break logits /= logits_counts assert logits.size() == (video_len, batch_logits.size(1)), "{}, {}".format(logits.size(), video_len) return logits def finalize(self, Y_pred, Y_true, output_file=None): Y_pred = torch.cat(Y_pred, dim=0).numpy() Y_true = torch.cat(Y_true, dim=0).numpy() assert len(Y_pred) == len(Y_true) error_mask = Y_pred != Y_true print("sample error", Y_pred[error_mask][:10], Y_true[error_mask][:10]) print("sample error", Y_pred[error_mask][10:20], Y_true[error_mask][10:20]) if output_file is not None: with open( os.path.join(self.pred_dir, output_file + ".pkl"), "wb") as fw: pickle.dump( {"Y_pred": Y_pred, "Y_true": Y_true}, fw, protocol=pickle.HIGHEST_PROTOCOL) return {"outputs": Y_pred, "targets": Y_true} class COINZSPredictor(COINPredictor): """ COINZSPredictor for COIN zero-shot prediction. """ def __init__(self, config): super().__init__(config) self.dataset_config = config.dataset def predict_loop(self, model, eval_dataloader, output_file="result.pkl"): """refactored from line 144: https://github.com/DmZhukov/CrossTask/blob/master/train.py """ ctx = 0 model.eval() model = model.to(ctx) with torch.no_grad(): outputs = eval_dataloader.dataset.meta_processor.meta_text_labels( self.dataset_config) outputs = self.to_ctx(outputs, ctx) label_hidden_states = model.forward_text(**outputs).cpu() label_sim = label_hidden_states @ label_hidden_states.t() num_labels = label_sim.size(0) eye_mask = ~torch.eye(num_labels, dtype=torch.bool) label_sim = label_sim.masked_select(eye_mask).view(num_labels, num_labels - 1) lbd = label_sim.max() # this is not a loss but just compute neg_log_prob. 
Y_pred = [] Y_true = [] with torch.no_grad(): for batch in eval_dataloader: self(batch, label_hidden_states, model, lbd, Y_pred, Y_true) return self.finalize(Y_pred, Y_true, output_file) def reshape_subsample(self, sample): for key in sample: if torch.is_tensor(sample[key]): sample[key] = self.flat_subsample(sample[key]) return sample def flat_subsample(self, tensor): if len(tensor.size()) > 1 and tensor.size(0) == 1: tensor = tensor.squeeze(0) return tensor def __call__(self, sample, label_hidden_states, model, lbd, Y_pred, Y_true): sample = self.reshape_subsample(sample) sample = self.to_ctx(sample) # compute the average logits over sliding windows. sample["output_hidden_states"] = True video_outputs = model.forward_video(**sample).cpu() output = {"logits": video_outputs[:, 1:sample["vmasks"].size(1)+1] @ label_hidden_states.t()} logits = self._merge_windows(sample, output) # logic of zero-shot for sequence labeling. logits_argmax = logits.argmax(dim=1) + 1 # 0 is "O" label. logits_max = logits.max(dim=1)[0] pred = torch.zeros_like(logits_argmax) label_select = logits_max > lbd # 73 or 74 pred[label_select] = logits_argmax[label_select] Y_pred.append(pred) Y_true.append(sample["video_targets"].squeeze(0).cpu()) def finalize(self, Y_pred, Y_true, output_file=None): Y_pred = torch.cat(Y_pred, dim=0).numpy() Y_true = torch.cat(Y_true, dim=0).numpy() assert len(Y_pred) == len(Y_true) error_mask = Y_pred != Y_true print("sample error", Y_pred[error_mask][:10], Y_true[error_mask][:10]) print("sample error", Y_pred[error_mask][10:20], Y_true[error_mask][10:20]) if output_file is not None: with open( os.path.join(self.pred_dir, output_file + ".pkl"), "wb") as fw: pickle.dump( {"Y_pred": Y_pred, "Y_true": Y_true}, fw, protocol=pickle.HIGHEST_PROTOCOL) return {"outputs": Y_pred, "targets": Y_true} class DiDeMoPredictor(Predictor): """reference: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/data_processing.py """ def __init__(self, config): super().__init__(config) # load targets. with open(config.dataset.test_path) as data_file: self.test_data = json.load(data_file) def predict_loop(self, model, eval_dataloader, output_file="didemo.npy"): """ TODO: two solutions here. """ import itertools # 21 chunks. self.possible_segments = [(0,0), (1,1), (2,2), (3,3), (4,4), (5,5)] for i in itertools.combinations(range(6), 2): self.possible_segments.append(i) # pick segments from a video. """on-the-fly prediction on a single gpu.""" self.full_scores = [] model.eval() model = model.cuda() with torch.no_grad(): for data in eval_dataloader: # TODO special forwarding logic here. data = self.to_ctx(data) data["output_hidden_states"] = True hidden_video = model.forward_video(**data) data["output_hidden_states"] = False pooled_text = model.forward_text(**data) outputs = { "hidden_video": hidden_video, "pooled_text": pooled_text } outputs.update(data) self(outputs) return self.finalize(output_file) def __call__(self, sample): # TODO: make an index select from self.possible_segments. hidden_video = sample["hidden_video"] pooled_text = sample["pooled_text"] vmasks = sample["vmasks"] # probably maintain valid results here. hidden_video = hidden_video[:, 1:-1, :] # probably maintain valid results here. 
pooled_video = [] for s, e in self.possible_segments: pooled_video.append( torch.mean( hidden_video[:, int(s*5):int((e+1)*5), :], dim=1, keepdim=True) ) pooled_video = torch.cat(pooled_video, dim=1) scores = torch.bmm( pooled_video, pooled_text.unsqueeze(-1)).squeeze(-1).cpu() ranks = scores.argsort(dim=-1, descending=True) for batch_idx, rank in enumerate(ranks): rank_of_moment = [] for m_idx, moment in enumerate(rank): s, e = self.possible_segments[moment.item()] if torch.any( vmasks[batch_idx, int(s*5):int((e+1)*5)] ): rank_of_moment.append((s, e)) self.full_scores.append(rank_of_moment) def finalize(self, output_file=None): outputs = self._aggregate_scores(self.full_scores) if output_file is not None: np.save(os.path.join(self.pred_dir, output_file + ".npy"), outputs) return {"outputs": outputs, "targets": self.test_data} def _aggregate_scores(self, scores): self.full_scores = [] return scores
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/evaluators/predictor.py
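# Standalone sketch of the sliding-window averaging in CrossTaskPredictor.__call__
# and COINPredictor._merge_windows above: per-window logits are scattered back onto
# the full video timeline and divided by how many windows covered each frame.
# video_len, sliding_window (stride) and sliding_window_size are assumed toy values.
import torch

video_len, num_classes = 10, 3
sliding_window, sliding_window_size = 4, 6

# fake per-window logits, laid out the same way the dataloader would emit them
windows = []
for start in range(0, video_len, sliding_window):
    end = min(video_len - start, sliding_window_size)
    windows.append(torch.randn(end, num_classes))
    if (video_len - start) <= sliding_window_size:
        break
batch_logits = torch.cat(windows, dim=0)

logits = torch.zeros(video_len, num_classes)
counts = torch.zeros(video_len, 1)
idx = 0
for start in range(0, video_len, sliding_window):
    end = min(video_len - start, sliding_window_size)
    logits[start:start + end] += batch_logits[idx:idx + end]
    counts[start:start + end] += 1
    idx += end
    if (video_len - start) <= sliding_window_size:
        break
logits /= counts  # average over overlapping windows
assert logits.shape == (video_len, num_classes)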
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .metric import * from .evaluator import * # experimental. try: from .expmetric import * except ImportError: pass
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/evaluators/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import json class Metric(object): def __init__(self, config, metric_names): self.metric_names = metric_names def best_metric(self, metric): return metric[self.metric_names[0]] def save_metrics(self, fn, metrics): with open(fn, "w") as fw: json.dump(fw, metrics) def print_computed_metrics(self, metrics): raise NotImplementedError class RetrievalMetric(Metric): """ this is modified from `howto100m/metrics.py`. History of changes: refactor as a class. add metric_key in __init__ """ def __init__(self, config, metric_names=["R1", "R5", "R10", "MR"]): super().__init__(config, metric_names) self.error = False # TODO(huxu): add to config to print error. def compute_metrics(self, outputs, texts, **kwargs): x = outputs sx = np.sort(-x, axis=1) d = np.diag(-x) d = d[:, np.newaxis] ind = sx - d ind = np.where(ind == 0) ind = ind[1] metrics = {} metrics["R1"] = float(np.sum(ind == 0)) / len(ind) metrics["R5"] = float(np.sum(ind < 5)) / len(ind) metrics["R10"] = float(np.sum(ind < 10)) / len(ind) metrics["MR"] = np.median(ind) + 1 max_idx = np.argmax(outputs, axis=1) if self.error: # print top-20 errors. error = [] for ex_idx in range(20): error.append((texts[ex_idx], texts[max_idx[ex_idx]])) metrics["error"] = error return metrics def print_computed_metrics(self, metrics): r1 = metrics["R1"] r5 = metrics["R5"] r10 = metrics["R10"] mr = metrics["MR"] print( "R@1: {:.4f} - R@5: {:.4f} - R@10: {:.4f} - Median R: {}".format( r1, r5, r10, mr ) ) if "error" in metrics: print(metrics["error"]) class DiDeMoMetric(Metric): """ History of changes: python 2.x to python 3.x. merge utils.py into eval to save one file. reference: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py Code to evaluate your results on the DiDeMo dataset. """ def __init__(self, config, metric_names=["rank1", "rank5", "miou"]): super().__init__(config, metric_names) def compute_metrics(self, outputs, targets, **kwargs): assert len(outputs) == len(targets) rank1, rank5, miou = self._eval_predictions(outputs, targets) metrics = { "rank1": rank1, "rank5": rank5, "miou": miou } return metrics def print_computed_metrics(self, metrics): rank1 = metrics["rank1"] rank5 = metrics["rank5"] miou = metrics["miou"] # print("Average rank@1: %f" % rank1) # print("Average rank@5: %f" % rank5) # print("Average iou: %f" % miou) print( "Average rank@1: {:.4f} Average rank@5: {:.4f} Average iou: {:.4f}".format( rank1, rank5, miou ) ) def _iou(self, pred, gt): intersection = max(0, min(pred[1], gt[1]) + 1 - max(pred[0], gt[0])) union = max(pred[1], gt[1]) + 1 - min(pred[0], gt[0]) return float(intersection)/union def _rank(self, pred, gt): return pred.index(tuple(gt)) + 1 def _eval_predictions(self, segments, data): ''' Inputs: segments: For each item in the ground truth data, rank possible video segments given the description and video. In DiDeMo, there are 21 posible moments extracted for each video so the list of video segments will be of length 21. The first video segment should be the video segment that best corresponds to the text query. There are 4180 sentence in the validation data, so when evaluating a model on the val dataset, segments should be a list of lenght 4180, and each item in segments should be a list of length 21. 
data: ground truth data ''' average_ranks = [] average_iou = [] for s, d in zip(segments, data): pred = s[0] ious = [self._iou(pred, t) for t in d['times']] average_iou.append(np.mean(np.sort(ious)[-3:])) ranks = [self._rank(s, t) for t in d['times'] if tuple(t) in s] # if t in s] is added for s, e not in prediction. average_ranks.append(np.mean(np.sort(ranks)[:3])) rank1 = np.sum(np.array(average_ranks) <= 1)/float(len(average_ranks)) rank5 = np.sum(np.array(average_ranks) <= 5)/float(len(average_ranks)) miou = np.mean(average_iou) # print("Average rank@1: %f" % rank1) # print("Average rank@5: %f" % rank5) # print("Average iou: %f" % miou) return rank1, rank5, miou class NLGMetric(Metric): def __init__( self, config, metric_names=[ "Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4", "METEOR", "ROUGE_L", "CIDEr" ] ): super().__init__(config, metric_names) # please install NLGEval from `https://github.com/Maluuba/nlg-eval` from nlgeval import NLGEval self.nlg = NLGEval() def compute_metrics(self, outputs, targets, **kwargs): return self.nlg.compute_metrics( hyp_list=outputs, ref_list=targets) def print_computed_metrics(self, metrics): Bleu_1 = metrics["Bleu_1"] Bleu_2 = metrics["Bleu_2"] Bleu_3 = metrics["Bleu_3"] Bleu_4 = metrics["Bleu_4"] METEOR = metrics["METEOR"] ROUGE_L = metrics["ROUGE_L"] CIDEr = metrics["CIDEr"] print( "Bleu_1: {:.4f} - Bleu_2: {:.4f} - Bleu_3: {:.4f} - Bleu_4: {:.4f} - METEOR: {:.4f} - ROUGE_L: {:.4f} - CIDEr: {:.4f}".format( Bleu_1, Bleu_2, Bleu_3, Bleu_4, METEOR, ROUGE_L, CIDEr ) ) class QAMetric(Metric): def __init__( self, config, metric_names=["acc"] ): super().__init__(config, metric_names) def compute_metrics(self, outputs, targets, **kwargs): from sklearn.metrics import accuracy_score return {"acc": accuracy_score(targets, outputs)} def print_computed_metrics(self, metrics): print("acc: {:.4f}".format(metrics["acc"])) class COINActionSegmentationMetric(Metric): """ COIN dataset listed 3 repos for Action Segmentation. Action Sets, NeuralNetwork-Viterbi, TCFPN-ISBA. The first and second are the same. https://github.com/alexanderrichard/action-sets/blob/master/eval.py Future reference for the third: `https://github.com/Zephyr-D/TCFPN-ISBA/blob/master/utils/metrics.py` """ def __init__(self, config, metric_name=["frame_acc"]): super().__init__(config, metric_name) def compute_metrics(self, outputs, targets): n_frames = 0 n_errors = 0 n_errors = sum(outputs != targets) n_frames = len(targets) return {"frame_acc": 1.0 - float(n_errors) / n_frames} def print_computed_metrics(self, metrics): fa = metrics["frame_acc"] print("frame accuracy:", fa) class CrossTaskMetric(Metric): def __init__(self, config, metric_names=["recall"]): super().__init__(config, metric_names) def compute_metrics(self, outputs, targets, **kwargs): """refactored from line 166: https://github.com/DmZhukov/CrossTask/blob/master/train.py""" recalls = self._get_recalls(Y_true=targets, Y_pred=outputs) results = {} for task, rec in recalls.items(): results[str(task)] = rec avg_recall = np.mean(list(recalls.values())) results["recall"] = avg_recall return results def print_computed_metrics(self, metrics): print('Recall: {0:0.3f}'.format(metrics["recall"])) for task in metrics: if task != "recall": print('Task {0}. 
Recall = {1:0.3f}'.format( task, metrics[task])) def _get_recalls(self, Y_true, Y_pred): """refactored from https://github.com/DmZhukov/CrossTask/blob/master/train.py""" step_match = {task: 0 for task in Y_true.keys()} step_total = {task: 0 for task in Y_true.keys()} for task, ys_true in Y_true.items(): ys_pred = Y_pred[task] for vid in set(ys_pred.keys()).intersection(set(ys_true.keys())): y_true = ys_true[vid] y_pred = ys_pred[vid] step_total[task] += (y_true.sum(axis=0) > 0).sum() step_match[task] += (y_true*y_pred).sum() recalls = { task: step_match[task] / n for task, n in step_total.items()} return recalls class ActionRecognitionMetric(Metric): def __init__( self, config, metric_names=["acc", "acc_splits", "r1_splits", "r5_splits", "r10_splits"] ): super().__init__(config, metric_names) def compute_metrics(self, outputs, targets, splits, **kwargs): all_video_embd = outputs labels = targets split1, split2, split3 = splits accs = [] r1s = [] r5s = [] r10s = [] for split in range(3): if split == 0: s = split1 elif split == 1: s = split2 else: s = split3 X_pred = all_video_embd[np.where(s == 2)[0]] label_test = labels[np.where(s == 2)[0]] logits = X_pred X_pred = np.argmax(X_pred, axis=1) acc = np.sum(X_pred == label_test) / float(len(X_pred)) accs.append(acc) # compute recall. sorted_pred = (-logits).argsort(axis=-1) label_test_sp = label_test.reshape(-1, 1) r1 = np.mean((sorted_pred[:, :1] == label_test_sp).sum(axis=1), axis=0) r5 = np.mean((sorted_pred[:, :5] == label_test_sp).sum(axis=1), axis=0) r10 = np.mean((sorted_pred[:, :10] == label_test_sp).sum(axis=1), axis=0) r1s.append(r1) r5s.append(r5) r10s.append(r10) return {"acc": accs[0], "acc_splits": accs, "r1_splits": r1s, "r5_splits": r5s, "r10_splits": r10s} def print_computed_metrics(self, metrics): for split, acc in enumerate(metrics["acc_splits"]): print("Top 1 accuracy on split {}: {}; r1 {}; r5 {}; r10 {}".format( split + 1, acc, metrics["r1_splits"][split], metrics["r5_splits"][split], metrics["r10_splits"][split], ) )
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/evaluators/metric.py
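The split-wise recall in `ActionRecognitionMetric` above comes down to a small argsort trick. A minimal, self-contained sketch of that computation on toy logits (the numbers below are made up for illustration, not repo data):

import numpy as np

# Toy logits for 4 clips over 5 classes and their ground-truth labels.
logits = np.array([
    [0.1, 0.7, 0.2, 0.0, 0.0],
    [0.9, 0.0, 0.1, 0.0, 0.0],
    [0.2, 0.2, 0.2, 0.3, 0.1],
    [0.0, 0.1, 0.1, 0.1, 0.7],
])
labels = np.array([1, 2, 3, 4])

# Same steps as ActionRecognitionMetric.compute_metrics: sort class indices
# by descending score, then check whether the true label appears in the
# first k columns.
sorted_pred = (-logits).argsort(axis=-1)
labels_col = labels.reshape(-1, 1)
r1 = np.mean((sorted_pred[:, :1] == labels_col).sum(axis=1))
r5 = np.mean((sorted_pred[:, :5] == labels_col).sum(axis=1))
acc = np.mean(sorted_pred[:, 0] == labels)
print(acc, r1, r5)  # acc equals r1 here; r5 is 1.0 since k covers all 5 classes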
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import os
import glob
import numpy as np

from . import metric as metric_path
from . import predictor as predictor_path


class Evaluator(object):
    """
    perform evaluation on a single (downstream) task.
    make this both offline and online.
    TODO(huxu) saving evaluation results.
    """

    def __init__(self, config, eval_dataloader=None):
        if config.metric is None:
            raise ValueError("config.metric is", config.metric)
        metric_cls = getattr(metric_path, config.metric)
        self.metric = metric_cls(config)
        if config.predictor is None:
            raise ValueError("config.predictor is", config.predictor)
        predictor_cls = getattr(predictor_path, config.predictor)
        self.predictor = predictor_cls(config)
        self.eval_dataloader = eval_dataloader

    def __call__(self):
        try:
            print(self.predictor.pred_dir)
            for pred_file in glob.glob(
                    self.predictor.pred_dir + "/*_merged.npy"):
                outputs = np.load(pred_file)
                results = self.metric.compute_metrics(outputs)
                self.metric.print_computed_metrics(results)

            outputs = np.load(os.path.join(
                self.predictor.pred_dir, "merged.npy"))
            results = self.metric.compute_metrics(outputs)
            return {"results": results, "metric": self.metric}
        except FileNotFoundError:
            print("\n[missing]", self.predictor.pred_dir)
            return {}

    def evaluate(self, model, eval_dataloader=None, output_file="merged"):
        if eval_dataloader is None:
            eval_dataloader = self.eval_dataloader
        outputs = self.predictor.predict_loop(
            model, eval_dataloader, output_file)
        results = self.metric.compute_metrics(**outputs)
        return results
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/evaluators/evaluator.py
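As a rough illustration of the offline path in `Evaluator.__call__` above, the sketch below scans a prediction directory for `*_merged.npy` files and scores each one. The directory name, the assumption that a `(outputs, targets)` pair was saved together, and the choice of frame accuracy are all placeholders, not values taken from the repo.

import glob
import os
import numpy as np

# Hypothetical prediction directory; in the repo this comes from the
# predictor's config (self.predictor.pred_dir).
pred_dir = "runs/example_task/eval"

def frame_accuracy(outputs, targets):
    # essentially the same formula as COINActionSegmentationMetric.compute_metrics
    return 1.0 - float(np.sum(outputs != targets)) / len(targets)

for pred_file in glob.glob(os.path.join(pred_dir, "*_merged.npy")):
    merged = np.load(pred_file, allow_pickle=True)
    # Assumed layout: the predictor pickled (outputs, targets) together;
    # the real layout depends on the (unshown) predictor implementation.
    outputs, targets = merged
    print(pred_file, frame_accuracy(outputs, targets))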
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import random import json import pickle from tqdm import tqdm import os import numpy as np class CaptionDedupProcessor(object): """remove overlapping of caption sentences(clip). Some statistics: caption: {'t_clip_len': 246.6448431320854, 'video_len': 281.09174795676245, 'clip_tps': 0.8841283727427481, 'video_tps': 0.7821156477732097, 'min_clip_len': 0.0, 'max_clip_len': 398.3, 'mean_clip_len': 3.196580003006861, 'num_clip': 77.15897706301081} raw_caption: {'t_clip_len': 238.95908778424115, 'video_len': 267.5914859862507, 'clip_tps': 2.4941363624267963, 'video_tps': 2.258989769647173, 'min_clip_len': 0.0, 'max_clip_len': 398.3, 'mean_clip_len': 3.0537954186814265, 'num_clip': 78.24986779481756} """ def __init__(self, pkl_file): with open(pkl_file, "rb") as fd: self.data = pickle.load(fd) self.stat = { "t_clip_len": [], "video_len": [], "clip_tps": [], "video_tps": [], "clip_len": [], } def __call__(self): for idx, video_id in enumerate(tqdm(self.data)): caption = json.loads(self.data[video_id]) caption = self._dedup(caption) if idx < 4096: # for the first 4096 examples, compute the statistics. self.save_stat(video_id, caption) self.data[video_id] = json.dumps(caption) self.print_stat() def single(self, video_id): caption = json.loads(self.data[video_id]) for clip_idx, (start, end, text) in enumerate( zip(caption["start"], caption["end"], caption["text"]) ): print(start, end, text) print("@" * 100) caption = self._dedup(caption) for clip_idx, (start, end, text) in enumerate( zip(caption["start"], caption["end"], caption["text"]) ): print(start, end, text) print("#" * 100) self.save_stat(video_id, caption) self.print_stat() def finalize(self, tgt_fn): with open(tgt_fn, "wb") as fw: pickle.dump(self.data, fw, pickle.HIGHEST_PROTOCOL) def save_stat(self, video_id, caption): video_fn = os.path.join( "data/feat/feat_how2_s3d", video_id + ".npy" ) if os.path.isfile(video_fn): with open(video_fn, "rb", 1) as fr: # 24 is the buffer size. buffered version = np.lib.format.read_magic(fr) shape, fortran, dtype = np.lib.format._read_array_header(fr, version) video_len = shape[0] t_clip_len = 0.0 t_tokens = 0 for idx, (start, end, text) in enumerate( zip(caption["start"], caption["end"], caption["text"]) ): clip_len = ( (end - max(caption["end"][idx - 1], start)) if idx > 0 else end - start ) t_clip_len += clip_len t_tokens += len(text.split(" ")) self.stat["clip_len"].append(clip_len) self.stat["t_clip_len"].append(t_clip_len) self.stat["video_len"].append(video_len) self.stat["clip_tps"].append(t_tokens / t_clip_len) self.stat["video_tps"].append(t_tokens / video_len) def print_stat(self): result = { "t_clip_len": np.mean(self.stat["t_clip_len"]), "video_len": np.mean(self.stat["video_len"]), "clip_tps": np.mean(self.stat["clip_tps"]), "video_tps": np.mean(self.stat["video_tps"]), "min_clip_len": min(self.stat["clip_len"]), "max_clip_len": max(self.stat["clip_len"]), "mean_clip_len": np.mean(self.stat["clip_len"]), "num_clip": len(self.stat["clip_len"]) / len(self.stat["video_tps"]), } print(result) def _dedup(self, caption): def random_merge(end_idx, start, end, text, starts, ends, texts): if random.random() > 0.5: # print(clip_idx, "[PARTIAL INTO PREV]", end_idx) # overlapped part goes to the end of previous. ends[-1] = max(ends[-1], start) # ? 
rest_text = text[end_idx:].strip() if rest_text: starts.append(max(ends[-1], start)) ends.append(max(end, starts[-1])) texts.append(rest_text) else: # goes to the beginning of the current. # strip the previous. left_text = texts[-1][:-end_idx].strip() if left_text: # print(clip_idx, "[PREV PARTIAL INTO CUR]", end_idx) ends[-1] = min(ends[-1], start) texts[-1] = left_text else: # print(clip_idx, "[PREV LEFT NOTHING ALL INTO CUR]", end_idx) starts.pop(-1) ends.pop(-1) texts.pop(-1) starts.append(start) ends.append(end) texts.append(text) starts, ends, texts = [], [], [] for clip_idx, (start, end, text) in enumerate( zip(caption["start"], caption["end"], caption["text"]) ): if not isinstance(text, str): continue text = text.replace("\n", " ").strip() if len(text) == 0: continue starts.append(start) ends.append(end) texts.append(text) break for clip_idx, (start, end, text) in enumerate( zip( caption["start"][clip_idx + 1:], caption["end"][clip_idx + 1:], caption["text"][clip_idx + 1:], ) ): if not isinstance(text, str): continue text = text.replace("\n", " ").strip() if len(text) == 0: continue # print(clip_idx, texts[-5:]) # print(clip_idx, start, end, text) if texts[-1].endswith(text): # subset of prev caption -> merge # print(clip_idx, "[MERGE INTO PREV]") ends[-1] = max(ends[-1], end) elif text.startswith(texts[-1]): # superset of prev caption -> merge # print(clip_idx, "[PREV MERGE INTO CUR]") texts[-1] = text starts[-1] = min(starts[-1], start) ends[-1] = max(ends[-1], end) else: # overlapping or non-overlapping. for end_idx in range(1, len(text) + 1): if texts[-1].endswith(text[:end_idx]): random_merge(end_idx, start, end, text, starts, ends, texts) break else: starts.append(start) ends.append(end) texts.append(text) assert (ends[-1] + 0.001) >= starts[-1] and len( texts[-1] ) > 0, "{} {} {} <- {} {} {}, {} {} {}".format( str(starts[-1]), str(ends[-1]), texts[-1], caption["start"][clip_idx - 1], caption["end"][clip_idx - 1], caption["text"][clip_idx - 1], str(start), str(end), text, ) return {"start": starts, "end": ends, "text": texts} if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description="dedup how2 caption") parser.add_argument('--how2dir', default="data/how2") args = parser.parse_args() raw_caption_json = os.path.join(args.how2dir, "raw_caption.json") raw_caption_pickle = os.path.join(args.how2dir, "raw_caption.pkl") raw_caption_dedup_pickle = os.path.join(args.how2dir, "raw_caption_dedup.pkl") def convert_to_pickle(src_fn, tgt_fn): with open(src_fn) as fd: captions = json.load(fd) for video_id in captions: captions[video_id] = json.dumps(captions[video_id]) with open(tgt_fn, "wb") as fw: pickle.dump(captions, fw, pickle.HIGHEST_PROTOCOL) if not os.path.isfile(raw_caption_pickle): convert_to_pickle(raw_caption_json, raw_caption_pickle) deduper = CaptionDedupProcessor(raw_caption_pickle) deduper() deduper.finalize(raw_caption_dedup_pickle) """ # demo deduper = CaptionDedupProcessor("data/how2/raw_caption.pkl") deduper.single("HfIeQ9pzL5U") """
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/processors/dedupprocessor.py
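For reference, `CaptionDedupProcessor` expects a pickle mapping each video id to a JSON-encoded caption dict with parallel `start`/`end`/`text` lists, which is exactly what `convert_to_pickle` in the `__main__` block produces. A minimal sketch of building such a file with toy ids and timestamps (not HowTo100M data):

import json
import pickle

# Two overlapping clips whose texts share a suffix/prefix, the case
# that _dedup() merges or splits.
caption = {
    "start": [0.0, 2.5],
    "end": [3.0, 6.0],
    "text": ["hello world", "world how are you"],
}
data = {"toy_video_id": json.dumps(caption)}

with open("toy_caption.pkl", "wb") as fw:
    pickle.dump(data, fw, pickle.HIGHEST_PROTOCOL)

# The script itself converts data/how2/raw_caption.json to this format
# and is run as: python dedupprocessor.py --how2dir data/how2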
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .how2processor import (
    ShardedHow2MetaProcessor,
    ShardedVideoProcessor,
    ShardedTextProcessor,
    VariedLenAligner,
    OverlappedAligner
)


class ShardedHow2VideoRetriMetaProcessor(ShardedHow2MetaProcessor):
    def __init__(self, config):
        super().__init__(config)
        self.num_video_per_batch = config.num_video_per_batch
        self.cands = [
            self.data[batch_offset:batch_offset + self.num_video_per_batch]
            for batch_offset in
            range(0, (len(self.data) // (8 * self.num_video_per_batch))
                  * 8 * self.num_video_per_batch, self.num_video_per_batch)]

    def __len__(self):
        return len(self.cands)

    def set_candidates(self, cands):
        # no change to the number of batches.
        print(len(self.cands), "->", len(cands))
        # assert len(self.cands) == len(cands)
        self.cands = cands

    def __getitem__(self, idx):
        video_ids = self.cands[idx]
        assert isinstance(video_ids, list)
        sharded_video_idxs = []
        for video_id in video_ids:
            shard_id, video_idx = self.video_id_to_shard[video_id]
            sharded_video_idxs.append((video_id, -1, shard_id, video_idx))
        return sharded_video_idxs, sharded_video_idxs


class ShardedVideoRetriVideoProcessor(ShardedVideoProcessor):
    """In the retrieval case the input is a list of tuples
    `(video_id, -1, shard_id, video_idx)`."""

    def __call__(self, sharded_video_idxs):
        assert isinstance(sharded_video_idxs, list)
        cand_feats = []
        for sharded_video_idx in sharded_video_idxs:
            feat = super().__call__(sharded_video_idx)
            cand_feats.append(feat)
        return cand_feats


class ShardedVideoRetriTextProcessor(ShardedTextProcessor):
    """In the retrieval case the input is a list of tuples
    `(video_id, -1, shard_id, video_idx)`."""

    def __call__(self, sharded_video_idxs):
        assert isinstance(sharded_video_idxs, list)
        cand_caps = []
        for sharded_video_idx in sharded_video_idxs:
            caps = super().__call__(sharded_video_idx)
            cand_caps.append(caps)
        return cand_caps


class VideoRetriAligner(VariedLenAligner):
    # RetriTask will trim dim-0.
    def __call__(self, sharded_video_idxs, video_features, text_features):
        from transformers import default_data_collator
        batch, video_ids = [], []
        for video_id, video_feature, text_feature in \
                zip(sharded_video_idxs, video_features, text_features):
            sub_batch = super().__call__(video_id, video_feature, text_feature)
            batch.append(sub_batch)
            if isinstance(video_id, tuple):
                video_id = video_id[0]
            video_ids.append(video_id)
        batch = default_data_collator(batch)
        batch["video_id"] = video_ids
        return batch


class VideoRetriOverlappedAligner(OverlappedAligner):
    # RetriTask will trim dim-0.
    def __call__(self, sharded_video_idxs, video_features, text_features):
        from transformers import default_data_collator
        batch, video_ids = [], []
        for video_id, video_feature, text_feature in \
                zip(sharded_video_idxs, video_features, text_features):
            sub_batch = super().__call__(video_id, video_feature, text_feature)
            batch.append(sub_batch)
            if isinstance(video_id, tuple):
                video_id = video_id[0]
            video_ids.append(video_id)
        batch = default_data_collator(batch)
        batch["video_id"] = video_ids
        return batch
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/processors/how2retriprocessor.py
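The candidate construction in `ShardedHow2VideoRetriMetaProcessor.__init__` above drops the tail of the dataset so that the retained videos are a multiple of 8 times the group size (the 8 presumably matches the number of GPUs). A small standalone sketch of just that arithmetic, with toy sizes:

# Toy numbers: 100 videos, 4 videos per retrieval group.
data = list(range(100))
num_video_per_batch = 4

# Same offsets as in ShardedHow2VideoRetriMetaProcessor: only the first
# (len(data) // (8 * n)) * 8 * n videos are kept, i.e. 96 of 100 here.
limit = (len(data) // (8 * num_video_per_batch)) * 8 * num_video_per_batch
cands = [
    data[offset:offset + num_video_per_batch]
    for offset in range(0, limit, num_video_per_batch)
]
print(len(cands), cands[0], cands[-1])  # 24 groups: [0, 1, 2, 3] ... [92, 93, 94, 95]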
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from .processor import *
from .how2processor import *
from .how2retriprocessor import *
from .dsprocessor import *

try:
    from .rawvideoprocessor import *
    from .codecprocessor import *
    from .webvidprocessor import *
    from .expprocessor import *
    from .exphow2processor import *
    from .exphow2retriprocessor import *
    from .expcodecprocessor import *
    from .expfeatureencoder import *
    from .expdsprocessor import *
except ImportError:
    pass
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/processors/__init__.py
# Copyright (c) Facebook, Inc. All Rights Reserved import numpy as np import os import torch class Processor(object): """ A generic processor for video (codec, feature etc.) and text. """ def __call__(self, **kwargs): raise NotImplementedError class MetaProcessor(Processor): """ A meta processor is expected to load the metadata of a dataset: (e.g., video_ids, or captions). You must implement the `__getitem__` (meta datasets are rather diverse.). """ def __init__(self, config): self.split = config.split def __len__(self): return len(self.data) def __getitem__(self, idx): raise NotImplementedError def _get_split_path(self, config): splits = { "train": config.train_path, "valid": config.val_path, "test": config.test_path, } if config.split is not None: return splits[config.split] return config.train_path class TextProcessor(Processor): """ A generic Text processor: rename this as `withTokenizer`. tokenize a string of text on-the-fly. Warning: mostly used for end tasks. (on-the-fly tokenization is slow for how2.) TODO(huxu): move this class as a subclass. """ def __init__(self, config): self.bert_name = str(config.bert_name) self.use_fast = config.use_fast from transformers import AutoTokenizer self.tokenizer = AutoTokenizer.from_pretrained( self.bert_name, use_fast=self.use_fast ) def __call__(self, text_id): caption = self.tokenizer(text_id, add_special_tokens=False) return caption["input_ids"] class VideoProcessor(Processor): """ A generic video processor: load a numpy video tokens by default. """ def __init__(self, config): self.vfeat_dir = config.vfeat_dir def __call__(self, video_fn): if isinstance(video_fn, tuple): video_fn = video_fn[0] assert isinstance(video_fn, str) video_fn = os.path.join(self.vfeat_dir, video_fn + ".npy") feat = np.load(video_fn) return feat class Aligner(object): """ An alignprocessor align video and text and output a dict of tensors (for a model). """ def __init__(self, config): """__init__ needs to be light weight for more workers/threads.""" self.split = config.split self.max_video_len = config.max_video_len self.max_len = config.max_len from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained( str(config.bert_name), use_fast=config.use_fast ) self.cls_token_id = tokenizer.cls_token_id self.sep_token_id = tokenizer.sep_token_id self.pad_token_id = tokenizer.pad_token_id self.mask_token_id = tokenizer.mask_token_id def __call__(self, video_id, video_feature, text_feature): raise NotImplementedError def _build_video_seq(self, video_feature, video_clips=None): """ `video_feature`: available video tokens. `video_clips`: video clip sequence to build. """ if not isinstance(video_feature, np.ndarray): raise ValueError( "unsupported type of video_feature", type(video_feature) ) if video_clips is None: # this is borrowed from DSAligner video_start = 0 video_end = min(len(video_feature), self.max_video_len) # the whole sequence is a single clip. 
video_clips = {"start": [video_start], "end": [video_end]} vfeats = np.zeros( (self.max_video_len, video_feature.shape[1]), dtype=np.float32 ) vmasks = torch.zeros((self.max_video_len,), dtype=torch.bool) video_len = 0 for start, end in zip(video_clips["start"], video_clips["end"]): clip_len = min(self.max_video_len - video_len, (end - start)) if clip_len > 0: vfeats[video_len: video_len + clip_len] = video_feature[ start: start + clip_len ] vmasks[video_len: video_len + clip_len] = 1 video_len += clip_len vfeats = torch.from_numpy(vfeats) return vfeats, vmasks def _build_text_seq(self, text_feature, text_clip_indexs=None): """ `text_feature`: all available clips. `text_clip_indexes`: clip sequence to build. """ if text_clip_indexs is None: text_clip_indexs = [0] full_caps = [] if isinstance(text_feature, dict): for clip_idx in text_clip_indexs: full_caps.extend(text_feature["cap"][clip_idx]) else: full_caps = text_feature max_text_len = self.max_len - self.max_video_len - 3 full_caps = full_caps[:max_text_len] full_caps = ( [self.cls_token_id, self.sep_token_id] + full_caps + [self.sep_token_id] ) text_pad_len = self.max_len - len(full_caps) - self.max_video_len padded_full_caps = full_caps + [self.pad_token_id] * text_pad_len caps = torch.LongTensor(padded_full_caps) cmasks = torch.zeros((len(padded_full_caps),), dtype=torch.bool) cmasks[: len(full_caps)] = 1 return caps, cmasks def batch_post_processing(self, batch, video_feature): return batch class MMAttentionMask2DProcessor(Processor): """text generation requires 2d mask that is harder to generate by GPU at this stage.""" def __call__(self, vmask, cmask, mtype): if mtype == "textgen": return self._build_textgeneration_mask(vmask, cmask) elif mtype == "videogen": return self._build_videogeneration_mask(vmask, cmask) else: return self._build_mm_mask(vmask, cmask) def _build_mm_mask(self, vmask, cmask): mask_1d = torch.cat([cmask[:1], vmask, cmask[1:]], dim=0) return mask_1d[None, :].repeat(mask_1d.size(0), 1) def _build_videogeneration_mask(self, vmask, cmask): # cls_mask is only about text otherwise it will leak generation. cls_text_mask = torch.cat([ # [CLS] torch.ones( (1,), dtype=torch.bool, device=cmask.device), # video tokens and [SEP] for video. torch.zeros( (vmask.size(0) + 1,), dtype=torch.bool, device=cmask.device), cmask[2:] ], dim=0) # concat horizontially. video_len = int(vmask.sum()) video_masks = torch.cat([ # [CLS] torch.ones( (video_len, 1), dtype=torch.bool, device=cmask.device ), torch.tril( torch.ones( (video_len, video_len), dtype=torch.bool, device=cmask.device)), # video_padding torch.zeros( (video_len, vmask.size(0) - video_len), dtype=torch.bool, device=cmask.device ), # [SEP] for video (unused). torch.zeros( (video_len, 1), dtype=torch.bool, device=cmask.device ), cmask[2:].unsqueeze(0).repeat(video_len, 1) ], dim=1) text_masks = cls_text_mask[None, :].repeat( cmask.size(0) - 2, 1) video_padding_masks = cls_text_mask[None, :].repeat( vmask.size(0) - video_len, 1) return torch.cat([ cls_text_mask[None, :], video_masks, video_padding_masks, torch.cat([cmask[:1], vmask, cmask[1:]], dim=0)[None,:], text_masks ], dim=0) def _build_textgeneration_mask(self, vmask, cmask): # cls_mask is only about video otherwise it will leak generation. cls_video_mask = torch.cat([ # [CLS] torch.ones( (1,), dtype=torch.bool, device=cmask.device), vmask, # [SEP] torch.ones((1,), dtype=torch.bool, device=cmask.device), torch.zeros( (cmask.size(0)-2,), dtype=torch.bool, device=cmask.device) ], dim=0) # concat horizontially. 
text_len = int(cmask[2:].sum()) text_masks = torch.cat([ # [CLS] torch.ones( (text_len, 1), dtype=torch.bool, device=cmask.device ), vmask.unsqueeze(0).repeat(text_len, 1), # [SEP] for video. torch.ones( (text_len, 1), dtype=torch.bool, device=cmask.device ), torch.tril( torch.ones( (text_len, text_len), dtype=torch.bool, device=cmask.device)), # padding. torch.zeros( (text_len, cmask.size(0) - text_len - 2), dtype=torch.bool, device=cmask.device ) ], dim=1) cls_video_masks = cls_video_mask[None, :].repeat( vmask.size(0) + 2, 1) text_padding_masks = cls_video_mask[None, :].repeat( cmask.size(0) - text_len - 2, 1) return torch.cat([ cls_video_masks, text_masks, text_padding_masks], dim=0)
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/processors/processor.py
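The text budget used by `Aligner._build_text_seq` above is `max_len - max_video_len - 3`, with three slots reserved for `[CLS]`, the `[SEP]` that closes the video span, and the final `[SEP]`. A minimal sketch of that layout with toy lengths; the token ids below are placeholders, not real tokenizer ids:

import torch

# Toy budget mirroring Aligner._build_text_seq.
max_len, max_video_len = 16, 8
cls_id, sep_id, pad_id = 101, 102, 0

tokens = [7, 7, 7]                          # 3 caption tokens
max_text_len = max_len - max_video_len - 3  # 5-token budget; longer captions are truncated
full_caps = [cls_id, sep_id] + tokens[:max_text_len] + [sep_id]
pad_len = max_len - len(full_caps) - max_video_len
padded = full_caps + [pad_id] * pad_len

caps = torch.LongTensor(padded)
cmasks = torch.zeros((len(padded),), dtype=torch.bool)
cmasks[: len(full_caps)] = 1
print(caps.tolist(), cmasks.tolist())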
# Copyright (c) Facebook, Inc. All Rights Reserved """ Processors for all downstream (ds) tasks. """ import json import os import pickle import random import math import numpy as np import torch from collections import defaultdict from .processor import ( MetaProcessor, VideoProcessor, TextProcessor, Aligner, MMAttentionMask2DProcessor, ) from .how2processor import TextGenerationProcessor # ------------- A General Aligner for all downstream tasks----------------- class DSAligner(Aligner): """ Downstream (DS) aligner shared by all datasets. """ def __call__(self, video_id, video_feature, text_feature, wps=0.7): # random sample a starting sec for video. video_start = 0 video_end = min(len(video_feature), self.max_video_len) # the whole sequence is a single clip. video_clips = {"start": [video_start], "end": [video_end]} text_feature = { "cap": [text_feature], "start": [video_start], "end": [len(text_feature) / wps], } text_clip_indexs = [0] vfeats, vmasks = self._build_video_seq( video_feature, video_clips ) caps, cmasks = self._build_text_seq( text_feature, text_clip_indexs ) return { "caps": caps, "cmasks": cmasks, "vfeats": vfeats, "vmasks": vmasks, "video_id": video_id, } class NLGTextProcessor(TextProcessor): """ Also return the original text as ref. """ def __call__(self, text_id): return super().__call__(text_id), text_id class DSNLGAligner(DSAligner): """extend with the capability of 2d mask for generation.""" def __init__(self, config): super().__init__(config) self.attnmasker = MMAttentionMask2DProcessor() from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained( self.bert_name, use_fast=self.use_fast, bos_token="[CLS]", eos_token="[SEP]" ) self.tokenizer = tokenizer self.bos_token_id = tokenizer.bos_token_id self.eos_token_id = tokenizer.eos_token_id self.textgen = TextGenerationProcessor(tokenizer) def __call__(self, video_id, video_feature, text_feature): output = super().__call__(video_id, video_feature, text_feature[0]) if self.split == "test": # output.update({"ref": text_feature[1]}) output.update({"ref": self.tokenizer.decode( output["caps"], skip_special_tokens=True)}) text_label = output["caps"] cmasks = torch.BoolTensor([1] * text_label.size(0)) caps = torch.LongTensor([ self.cls_token_id, self.sep_token_id, self.bos_token_id]) else: caps, text_label = self.textgen(output["caps"]) cmasks = output["cmasks"] attention_mask = self.attnmasker( output["vmasks"], cmasks, "textgen") output.update({ "caps": caps, "cmasks": cmasks, "text_label": text_label, "attention_mask": attention_mask, }) return output # -------------------- MSRVTT ------------------------ class MSRVTTMetaProcessor(MetaProcessor): """MSRVTT dataset. reference: `howto100m/msrvtt_dataloader.py` """ def __init__(self, config): super().__init__(config) import pandas as pd data = pd.read_csv(self._get_split_path(config)) # TODO: add a text1ka flag. if config.split == "train" \ and config.full_test_path is not None \ and config.jsfusion_path is not None: # add testing videos from full_test_path not used by jfusion. 
additional_data = pd.read_csv(config.full_test_path) jsfusion_data = pd.read_csv(config.jsfusion_path) for video_id in additional_data["video_id"]: if video_id not in jsfusion_data["video_id"].values: data = data.append( {"video_id": video_id}, ignore_index=True) if config.dup is not None and config.split == "train": data = data.append([data] * (config.dup - 1), ignore_index=True) self.data = data def __len__(self): return len(self.data) def __getitem__(self, idx): """slightly modify with if condition to combine train/test.""" vid, sentence = None, None vid = self.data["video_id"].values[idx] if "sentence" in self.data: # for testing. sentence = self.data["sentence"].values[idx] else: # for training. sentence = vid return vid, sentence class MSRVTTTextProcessor(TextProcessor): """MSRVTT dataset. reference: `msrvtt_dataloader.py` `MSRVTT_TrainDataLoader`. TODO (huxu): add max_words. """ def __init__(self, config): super().__init__(config) self.sentences = None if config.json_path is not None and config.split == "train": with open(config.json_path) as fd: self.data = json.load(fd) self.sentences = defaultdict(list) for s in self.data["sentences"]: self.sentences[s["video_id"]].append(s["caption"]) def __call__(self, text_id): if self.sentences is not None: rind = random.randint(0, len(self.sentences[text_id]) - 1) sentence = self.sentences[text_id][rind] else: sentence = text_id caption = self.tokenizer(sentence, add_special_tokens=False) return caption["input_ids"] class MSRVTTNLGTextProcessor(MSRVTTTextProcessor): """TODO: change dsaligner and merge to avoid any NLG text processor.""" def __call__(self, text_id): if self.sentences is not None: rind = random.randint(0, len(self.sentences[text_id]) - 1) sentence = self.sentences[text_id][rind] else: sentence = text_id caption = self.tokenizer(sentence, add_special_tokens=False) return caption["input_ids"], sentence class MSRVTTQAMetaProcessor(MetaProcessor): """MSRVTT-QA: retrieval-based multi-choice QA from JSFusion dataset. For simplicity, we use the train retrieval model. reference: `https://github.com/yj-yu/lsmdc` """ def __init__(self, config): super().__init__(config) import pandas as pd csv_data = pd.read_csv(self._get_split_path(config), sep="\t") data = [] for video_id, a1, a2, a3, a4, a5, answer in zip( csv_data["vid_key"].values, csv_data["a1"].values, csv_data["a2"].values, csv_data["a3"].values, csv_data["a4"].values, csv_data["a5"].values, csv_data["answer"].values): video_id = video_id.replace("msr", "video") data.append((video_id, (answer, [a1, a2, a3, a4, a5]))) self.data = data def __len__(self): return len(self.data) def __getitem__(self, idx): return self.data[idx] class MSRVTTQATextProcessor(TextProcessor): """MSRVTT-QA dataset. text_ans is of format `(answer, [a1, a2, a3, a4, a5])`. """ def __call__(self, text_ans): for ans_idx, ans in enumerate(text_ans[1]): if isinstance(ans, str): text_ans[1][ans_idx] = self.tokenizer(ans, add_special_tokens=False)["input_ids"] return text_ans class MSRVTTQAAligner(DSAligner): """MSRVTT dataset. similar to sample in how2. we call __call__ multiple times. 
""" def __call__(self, video_id, video_feature, text_feature, wps=0.7): caps = [] cmasks = [] answer = text_feature[0] for ans_idx, _text_feature in enumerate(text_feature[1]): output = super().__call__( video_id, video_feature, _text_feature, wps) caps.append(output["caps"]) cmasks.append(output["cmasks"]) output.update({ "caps": torch.stack(caps), "cmasks": torch.stack(cmasks), "answers": torch.LongTensor([answer]), }) return output # -------------------- Youcook ----------------------- class YoucookMetaProcessor(MetaProcessor): """Youcook dataset. reference: `howto100m/youcook_dataloader.py` note that the data can be different as the (1) some videos already in Howto100m are removed. (2) stop words are removed from caption TODO (huxu): make a flag to load the original caption. (see youcookii_annotations_trainval.json). The max_video_len can be 264 and text can be 64 tokens. In reality we may not need that long. see projects/task/youcook.yaml """ def __init__(self, config): super().__init__(config) vfeat_dir = config.vfeat_dir print(self._get_split_path(config)) with open(self._get_split_path(config), "rb") as fd: data = pickle.load(fd) all_valid_video_ids = set( [os.path.splitext(fn)[0] for fn in os.listdir(vfeat_dir)] ) recs = [] video_ids = set() valid_video_ids = set() for rec in data: # filter videos not available. udl_idx = rec["id"].rindex("_") video_id = rec["id"][:udl_idx] video_ids.add(video_id) if video_id in all_valid_video_ids: valid_video_ids.add(video_id) recs.append(rec) print("total video_ids in .pkl", len(video_ids)) print("valid video_ids in .pkl", len(valid_video_ids)) print("please verify {train,val}_list.txt") data = recs self.data = data with open(config.trainval_annotation) as fd: self.youcook_annotation = json.load(fd)["database"] if config.use_annotation_text is True: print("using text in annotation.") self.use_annotation_caption = True else: self.use_annotation_caption = False def __getitem__(self, idx): def _get_video_and_caption(rec): vid = rec["id"] udl_idx = vid.rindex("_") video_id, clip_id = vid[:udl_idx], int(vid[udl_idx + 1:]) clip = self.youcook_annotation[video_id]["annotations"][clip_id] start, end = clip["segment"] if self.use_annotation_caption: caption = clip["sentence"] else: caption = rec["caption"] return (video_id, start, end), caption rec = self.data[idx] video_info, text_info = _get_video_and_caption(rec) return video_info, text_info class YoucookVideoProcessor(VideoProcessor): """video_fn is a tuple of (video_id, start, end) now.""" def __call__(self, video_fn): video_id, start, end = video_fn feat = np.load(os.path.join(self.vfeat_dir, video_id + ".npy")) return feat[start:end] class YoucookNLGMetaProcessor(MetaProcessor): """NLG uses the original split: `train_list.txt` and `val_list.txt` """ def __init__(self, config): super().__init__(config) vfeat_dir = config.vfeat_dir print(self._get_split_path(config)) with open(self._get_split_path(config)) as fd: video_ids = [ line.strip().split("/")[1] for line in fd.readlines()] print("total video_ids in train/val_list.txt", len(video_ids)) all_valid_video_ids = set( [os.path.splitext(fn)[0] for fn in os.listdir(vfeat_dir)] ) video_ids = [ video_id for video_id in video_ids if video_id in all_valid_video_ids] print("valid video_ids in train/val_list.txt", len(video_ids)) with open(config.trainval_annotation) as fd: self.youcook_annotation = json.load(fd)["database"] data = [] for video_id in video_ids: for clip in self.youcook_annotation[video_id]["annotations"]: start, end = clip["segment"] 
caption = clip["sentence"] data.append(((video_id, start, end), caption)) self.data = data def __getitem__(self, idx): return self.data[idx] # --------------------- CrossTask ------------------------- class CrossTaskMetaProcessor(MetaProcessor): def __init__(self, config): super().__init__(config) np.random.seed(0) # deterministic random split. task_vids = self._get_vids( config.train_csv_path, config.vfeat_dir, config.annotation_path) val_vids = self._get_vids( config.val_csv_path, config.vfeat_dir, config.annotation_path) # filter out those task and vids appear in val_vids. task_vids = { task: [ vid for vid in vids if task not in val_vids or vid not in val_vids[task]] for task, vids in task_vids.items()} primary_info = self._read_task_info(config.primary_path) test_tasks = set(primary_info['steps'].keys()) # if args.use_related: related_info = self._read_task_info(config.related_path) task_steps = {**primary_info['steps'], **related_info['steps']} n_steps = {**primary_info['n_steps'], **related_info['n_steps']} # else: # task_steps = primary_info['steps'] # n_steps = primary_info['n_steps'] all_tasks = set(n_steps.keys()) # filter and keep task in primary or related. task_vids = { task: vids for task, vids in task_vids.items() if task in all_tasks} # vocab-by-step matrix (A) and vocab (M) # (huxu): we do not use BoW. # A, M = self._get_A(task_steps, share="words") train_vids, test_vids = self._random_split( task_vids, test_tasks, config.n_train) print("train_num_videos", sum(len(vids) for vids in train_vids.values())) print("test_num_videos", sum(len(vids) for vids in test_vids.values())) # added by huxu to automatically determine the split. split_map = { "train": train_vids, "valid": test_vids, "test": test_vids } task_vids = split_map[config.split] self.vids = [] for task, vids in task_vids.items(): self.vids.extend([(task, vid) for vid in vids]) self.task_steps = task_steps self.n_steps = n_steps def __getitem__(self, idx): task, vid = self.vids[idx] n_steps = self.n_steps[task] steps = self.task_steps[task] assert len(steps) == n_steps return (task, vid, steps, n_steps), (task, vid, steps, n_steps) def __len__(self): return len(self.vids) def _random_split(self, task_vids, test_tasks, n_train): train_vids = {} test_vids = {} for task, vids in task_vids.items(): if task in test_tasks and len(vids) > n_train: train_vids[task] = np.random.choice( vids, n_train, replace=False).tolist() test_vids[task] = [ vid for vid in vids if vid not in train_vids[task]] else: train_vids[task] = vids return train_vids, test_vids def _get_vids(self, path, vfeat_dir, annotation_path): """refactored from https://github.com/DmZhukov/CrossTask/blob/master/data.py changes: add `vfeat_dir` to check if the video is available. add `annotation_path` to check if the video is available. """ task_vids = {} with open(path, 'r') as f: for line in f: task, vid, url = line.strip().split(',') # double check the video is available. if not os.path.exists( os.path.join(vfeat_dir, vid + ".npy")): continue # double check the annotation is available. 
if not os.path.exists(os.path.join( annotation_path, task + "_" + vid + ".csv")): continue if task not in task_vids: task_vids[task] = [] task_vids[task].append(vid) return task_vids def _read_task_info(self, path): titles = {} urls = {} n_steps = {} steps = {} with open(path, 'r') as f: idx = f.readline() while idx != '': idx = idx.strip() titles[idx] = f.readline().strip() urls[idx] = f.readline().strip() n_steps[idx] = int(f.readline().strip()) steps[idx] = f.readline().strip().split(',') next(f) idx = f.readline() return { 'title': titles, 'url': urls, 'n_steps': n_steps, 'steps': steps } def _get_A(self, task_steps, share="words"): raise ValueError("running get_A is not allowed for BERT.") """Step-to-component matrices.""" if share == 'words': # share words task_step_comps = { task: [step.split(' ') for step in steps] for task, steps in task_steps.items()} elif share == 'task_words': # share words within same task task_step_comps = { task: [[task+'_'+tok for tok in step.split(' ')] for step in steps] for task, steps in task_steps.items()} elif share == 'steps': # share whole step descriptions task_step_comps = { task: [[step] for step in steps] for task, steps in task_steps.items()} else: # no sharing task_step_comps = { task: [[task+'_'+step] for step in steps] for task, steps in task_steps.items()} # BERT tokenizer here? vocab = [] for task, steps in task_step_comps.items(): for step in steps: vocab.extend(step) vocab = {comp: m for m, comp in enumerate(set(vocab))} M = len(vocab) A = {} for task, steps in task_step_comps.items(): K = len(steps) a = torch.zeros(M, K) for k, step in enumerate(steps): a[[vocab[comp] for comp in step], k] = 1 a /= a.sum(dim=0) A[task] = a return A, M class CrossTaskVideoProcessor(VideoProcessor): def __call__(self, video_fn): task, vid, steps, n_steps = video_fn video_fn = os.path.join(self.vfeat_dir, vid + ".npy") feat = np.load(video_fn) return feat class CrossTaskTextProcessor(TextProcessor): def __call__(self, text_id): task, vid, steps, n_steps = text_id step_ids = [] for step_str in steps: step_ids.append( self.tokenizer(step_str, add_special_tokens=False)["input_ids"] ) return step_ids class CrossTaskAligner(Aligner): """ TODO: it's not clear yet the formulation of the task; finish this later. """ def __init__(self, config): super().__init__(config) self.annotation_path = config.annotation_path self.sliding_window = config.sliding_window self.sliding_window_size = config.sliding_window_size def __call__(self, video_id, video_feature, text_feature): task, vid, steps, n_steps = video_id annot_path = os.path.join( self.annotation_path, task + '_' + vid + '.csv') video_len = len(video_feature) labels = torch.from_numpy(self._read_assignment( video_len, n_steps, annot_path)).float() vfeats, vmasks, targets = [], [], [] # sliding window on video features and targets. for window_start in range(0, video_len, self.sliding_window): video_start = 0 video_end = min(video_len - window_start, self.sliding_window_size) video_clip = {"start": [video_start], "end": [video_end]} vfeat, vmask = self._build_video_seq( video_feature[window_start: window_start + video_end], video_clip ) target = labels[window_start: window_start + video_end] assert len(vfeat) >= len(target), "{},{}".format(len(vfeat), len(target)) # TODO: randomly drop all zero targets for training ? 
# if self.split == "train" and target.sum() == 0: # continue vfeats.append(vfeat) vmasks.append(vmask) targets.append(target) if (video_len - window_start) <= self.sliding_window_size: break vfeats = torch.stack(vfeats) vmasks = torch.stack(vmasks) targets = torch.cat(targets, dim=0) caps, cmasks = [], [] for step in text_feature: step_text_feature = {"start": [0], "end": [1], "cap": [step]} step_text_clip_index = [0] cap, cmask = self._build_text_seq( step_text_feature, step_text_clip_index ) caps.append(cap) cmasks.append(cmask) caps = torch.stack(caps) cmasks = torch.stack(cmasks) return { "caps": caps, "cmasks": cmasks, "vfeats": vfeats, # X for original code. "vmasks": vmasks, "targets": targets, "video_id": vid, "task": task, "video_len": video_len # for later checking. } def _read_assignment(self, T, K, path): """ refactored from https://github.com/DmZhukov/CrossTask/blob/master/data.py Howto interpret contraints on loss that is going to be minimized: lambd is a big number; self.lambd * C is a big number for all valid position (csv stores invalids) def forward(self, O, Y, C): return (Y*(self.lambd * C - self.lsm(O))).mean(dim=0).sum() This will load the csv file and fill-in the step col from start to end rows. """ Y = np.zeros([T, K], dtype=np.uint8) with open(path, 'r') as f: for line in f: step, start, end = line.strip().split(',') start = int(math.floor(float(start))) end = int(math.ceil(float(end))) step = int(step) - 1 Y[start:end, step] = 1 return Y # --------------------- COIN ------------------------- class MetaTextBinarizer(Aligner): def __call__(self, text_feature): text_feature = { "cap": [text_feature], "start": [0.], "end": [100.], } text_clip_indexs = [0] caps, cmasks = self._build_text_seq( text_feature, text_clip_indexs ) return {"caps": caps, "cmasks": cmasks} class COINActionSegmentationMetaProcessor(MetaProcessor): split_map = { "train": "training", "valid": "testing", "test": "testing", } def __init__(self, config): super().__init__(config) with open(self._get_split_path(config)) as fr: database = json.load(fr)["database"] id2label = {} data = [] # filter the data by split. for video_id, rec in database.items(): # always use testing to determine label_set if rec["subset"] == "testing": for segment in rec["annotation"]: id2label[int(segment["id"])] = segment["label"] # text_labels is used for ZS setting self.text_labels = ["none"] * len(id2label) for label_id in id2label: self.text_labels[label_id-1] = id2label[label_id] id2label[0] = "O" print("num of labels", len(id2label)) for video_id, rec in database.items(): if not os.path.isfile(os.path.join(config.vfeat_dir, video_id + ".npy")): continue if rec["subset"] == COINActionSegmentationMetaProcessor.split_map[self.split]: starts, ends, labels = [], [], [] for segment in rec["annotation"]: start, end = segment["segment"] label = int(segment["id"]) starts.append(start) ends.append(end) labels.append(label) data.append( (video_id, {"start": starts, "end": ends, "label": labels})) self.data = data def meta_text_labels(self, config): from transformers import default_data_collator from ..utils import get_local_rank text_processor = TextProcessor(config) binarizer = MetaTextBinarizer(config) # TODO: add prompts to .yaml. 
text_labels = [label for label in self.text_labels] if get_local_rank() == 0: print(text_labels) outputs = [] for text_label in text_labels: text_feature = text_processor(text_label) outputs.append(binarizer(text_feature)) return default_data_collator(outputs) def __getitem__(self, idx): return self.data[idx] class COINActionSegmentationTextProcessor(TextProcessor): def __call__(self, text_label): return text_label class COINActionSegmentationAligner(Aligner): def __init__(self, config): super().__init__(config) self.sliding_window = config.sliding_window self.sliding_window_size = config.sliding_window_size def __call__(self, video_id, video_feature, text_feature): starts, ends, label_ids = text_feature["start"], text_feature["end"], text_feature["label"] # sliding window. video_len = len(video_feature) vfeats, vmasks, targets = [], [], [] # sliding window on video features and targets. for window_start in range(0, video_len, self.sliding_window): video_start = 0 video_end = min(video_len - window_start, self.sliding_window_size) video_clip = {"start": [video_start], "end": [video_end]} vfeat, vmask = self._build_video_seq( video_feature[window_start: window_start + video_end], video_clip ) # covers video length only. target = torch.full_like(vmask, -100, dtype=torch.long) target[vmask] = 0 for start, end, label_id in zip(starts, ends, label_ids): if (window_start < end) and (start < (window_start + video_end)): start_offset = max(0, math.floor(start) - window_start) end_offset = min(video_end, math.ceil(end) - window_start) target[start_offset:end_offset] = label_id vfeats.append(vfeat) vmasks.append(vmask) targets.append(target) if (video_len - window_start) <= self.sliding_window_size: break vfeats = torch.stack(vfeats) vmasks = torch.stack(vmasks) targets = torch.stack(targets) video_targets = torch.full((video_len,), 0) for start, end, label_id in zip(starts, ends, label_ids): start_offset = max(0, math.floor(start)) end_offset = min(video_len, math.ceil(end)) video_targets[start_offset:end_offset] = label_id caps = torch.LongTensor( [[self.cls_token_id, self.sep_token_id, self.pad_token_id, self.sep_token_id]], ).repeat(vfeats.size(0), 1) cmasks = torch.BoolTensor( [[0, 1, 0, 1]] # pad are valid for attention. ).repeat(vfeats.size(0), 1) return { "caps": caps, "cmasks": cmasks, "vfeats": vfeats, # X for original code. "vmasks": vmasks, "targets": targets, "video_id": video_id, "video_len": video_len, # for later checking. "video_targets": video_targets } class DiDeMoMetaProcessor(MetaProcessor): """reference: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/data_processing.py """ def __init__(self, config): super().__init__(config) assert "test" in self._get_split_path(config), "DiDeMo only supports zero-shot testing for now." with open(self._get_split_path(config)) as data_file: json_data = json.load(data_file) data = [] for record in json_data: data.append((record["video"], record["description"])) self.data = data def __len__(self): return len(self.data) def __getitem__(self, idx): return self.data[idx] class DiDeMoTextProcessor(TextProcessor): """reference: https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/eval.py https://github.com/LisaAnne/LocalizingMoments/blob/master/utils/data_processing.py """ def __call__(self, text): return self.tokenizer(text, add_special_tokens=False)["input_ids"] class DiDeMoAligner(DSAligner): """ check video length. 
""" def __call__(self, video_id, video_feature, text_feature): # print(video_feature.shape[0]) return super().__call__(video_id, video_feature, text_feature)
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/processors/dsprocessor.py
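Both `CrossTaskAligner` and `COINActionSegmentationAligner` above walk the video with the same sliding-window pattern before stacking the per-window features and targets. A standalone sketch of just the window arithmetic, with toy lengths rather than dataset values:

# Toy settings: a 25-step video, stride 8, window size 10.
video_len, sliding_window, sliding_window_size = 25, 8, 10

windows = []
for window_start in range(0, video_len, sliding_window):
    video_end = min(video_len - window_start, sliding_window_size)
    windows.append((window_start, window_start + video_end))
    # the aligners stop once the remaining frames fit in one window.
    if (video_len - window_start) <= sliding_window_size:
        break

print(windows)  # [(0, 10), (8, 18), (16, 25)]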
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright (c) Facebook, Inc. All Rights Reserved import torch import math import pickle import random import os import numpy as np from collections import deque from typing import Optional, Tuple, List from .processor import ( Processor, MetaProcessor, TextProcessor, Aligner, MMAttentionMask2DProcessor ) from ..utils import ShardedTensor class How2MetaProcessor(MetaProcessor): def __init__(self, config): super().__init__(config) path = self._get_split_path(config) with open(path) as fd: self.data = [line.strip() for line in fd] def __getitem__(self, idx): video_id = self.data[idx] return video_id, video_id class ShardedHow2MetaProcessor(How2MetaProcessor): def __init__(self, config): super().__init__(config) self.split = str(config.split) self.vfeat_dir = config.vfeat_dir self._init_shard() def _init_shard(self): if self.split == "train": meta_fn = os.path.join(self.vfeat_dir, "train" + "_meta.pkl") with open(meta_fn, "rb") as fr: meta = pickle.load(fr) elif self.split == "valid": meta_fn = os.path.join(self.vfeat_dir, "val" + "_meta.pkl") with open(meta_fn, "rb") as fr: meta = pickle.load(fr) elif self.split == "test": print("use how2 val as test.") meta_fn = os.path.join(self.vfeat_dir, "val" + "_meta.pkl") with open(meta_fn, "rb") as fr: meta = pickle.load(fr) else: raise ValueError("unsupported for MetaProcessor:", self.split) video_id_to_shard = {} for shard_id in meta: for video_idx, video_id in enumerate(meta[shard_id]): video_id_to_shard[video_id] = (shard_id, video_idx) self.video_id_to_shard = video_id_to_shard def __getitem__(self, idx): video_id, video_id = super().__getitem__(idx) shard_id, shard_idx = self.video_id_to_shard[video_id] meta = (video_id, idx, shard_id, shard_idx) return meta, meta class ShardedVideoProcessor(Processor): """ mmaped shards of numpy video features. 
""" def __init__(self, config): self.split = str(config.split) self.vfeat_dir = config.vfeat_dir def __call__(self, video_id): _, _, shard_id, video_idx = video_id if self.split == "train": shard = ShardedTensor.load( os.path.join(self.vfeat_dir, "train" + "_" + str(shard_id)), "r" ) elif self.split == "valid": shard = ShardedTensor.load( os.path.join(self.vfeat_dir, "val" + "_" + str(shard_id)), "r" ) elif self.split == "test": shard = ShardedTensor.load( os.path.join(self.vfeat_dir, "val" + "_" + str(shard_id)), "r" ) else: raise ValueError("unknown split", self.split) feat = shard[video_idx] return feat class ShardedTextProcessor(Processor): def __init__(self, config): self.tfeat_dir = str(config.tfeat_dir) self.split = str(config.split) def __call__(self, video_id): _, _, shard_id, shard_idx = video_id if self.split == "train": target_path = self.tfeat_dir + "train" + "_" + str(shard_id) elif self.split == "valid": target_path = self.tfeat_dir + "val" + "_" + str(shard_id) elif self.split == "test": target_path = self.tfeat_dir + "val" + "_" + str(shard_id) else: raise ValueError("unknown split", self.split) startend = ShardedTensor.load( target_path + ".startends", "r")[shard_idx] cap_ids = ShardedTensor.load( target_path + ".caps_ids", "r")[shard_idx] cap = [] for clip_idx in range(len(cap_ids)): clip = cap_ids[clip_idx] cap.append(clip[clip != -1].tolist()) start, end = startend[:, 0].tolist(), startend[:, 1].tolist() return {"start": start, "end": end, "cap": cap} class FixedLenAligner(Aligner): """ In the model we assume text is on the left (closer to BERT formulation) and video is on the right. We fix the total length of text + video. max_video_len is in number of secs. max_text_len is in number of tokens. special tokens formats: we use the format [CLS] [SEP] text tokens [SEP] [PAD] ... [CLS] will be splitted out into: [CLS] video tokens [SEP] text tokens [SEP] [PAD] ... token_type_ids will be generated by the model (for now). 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | so each sequence owns a [SEP] token for no-ops. """ def __init__(self, config): super().__init__(config) self.text_clip_sampler = TextClipSamplingProcessor( self.max_len - self.max_video_len - 3 ) """ decide subsampling: `config.subsampling` will change batch_size in trainer. `config.clip_per_video` (used by RetriTask) doesn't change batch_size in trainer. """ subsampling = config.subsampling \ if config.subsampling is not None else None if config.clip_per_video is not None: subsampling = config.clip_per_video self.subsampling = subsampling def _get_text_maxlen(self): # use max text len return self.text_clip_sampler.max_text_len def __call__(self, video_id, video_feature, text_feature): from transformers import default_data_collator video_idx = video_id[1] if self.subsampling is not None and self.subsampling >= 1: batch = [] for _ in range(self.subsampling): centerclip_idx = random.randint( 0, len(text_feature["start"]) - 1) batch.append( self.sampling( video_idx, video_feature, text_feature, centerclip_idx, self._get_text_maxlen() )) batch = self.batch_post_processing(batch, video_feature) batch = default_data_collator(batch) else: raise ValueError( "dataset.subsampling must be >= 1 for efficient video loading.") batch = self.sampling(video_idx, video_feature, text_feature) batch = self.batch_post_processing(batch, video_feature) batch["video_id"] = video_id if isinstance(video_id, str) \ else video_id[0] # e2e: make sure frame ids is into tensor. 
assert torch.is_tensor(batch["vfeats"]) return batch def sampling( self, video_idx, video_feature, text_feature, centerclip_idx=None, sampled_max_text_len=None, ): text_clip_indexs = self.text_clip_sampler( text_feature, centerclip_idx, sampled_max_text_len ) if isinstance(video_feature, np.ndarray): video_len = len(video_feature) else: video_len = math.ceil(text_feature["end"][-1]) video_end = min( math.ceil(text_feature["end"][text_clip_indexs[-1]]), video_len ) video_start = max( min( math.floor(text_feature["start"][text_clip_indexs[0]]), video_end), 0 ) video_clips = {"start": [video_start], "end": [video_end]} # tensorize. vfeats, vmasks = self._build_video_seq( video_feature, video_clips ) caps, cmasks = self._build_text_seq( text_feature, text_clip_indexs ) text_start = text_clip_indexs[0] text_end = text_clip_indexs[-1] + 1 return { "caps": caps, "cmasks": cmasks, "vfeats": vfeats, "vmasks": vmasks, "video_start": video_start, "video_end": video_end, "text_start": text_start, "text_end": text_end, } class VariedLenAligner(FixedLenAligner): def __init__(self, config): super().__init__(config) self.sampled_min_len = config.sampled_min_len self.sampled_max_len = config.sampled_max_len def _get_text_maxlen(self): return random.randint(self.sampled_min_len, self.sampled_max_len) class StartClipAligner(VariedLenAligner): def sampling( self, video_idx, video_feature, text_feature, centerclip_idx=None, sampled_max_text_len=None, ): return super().sampling( video_idx, video_feature, text_feature, 0) class OverlappedAligner(VariedLenAligner): """video clip and text clip has overlappings but may not be the same start/end.""" def __init__(self, config): super().__init__(config) self.sampled_video_min_len = config.sampled_video_min_len self.sampled_video_max_len = config.sampled_video_max_len self.video_clip_sampler = VideoClipSamplingProcessor() def _get_video_maxlen(self): return random.randint( self.sampled_video_min_len, self.sampled_video_max_len) def sampling( self, video_idx, video_feature, text_feature, centerclip_idx=None, sampled_max_text_len=None, ): text_clip_indexs = self.text_clip_sampler( text_feature, centerclip_idx, sampled_max_text_len ) if isinstance(video_feature, np.ndarray): video_len = len(video_feature) else: video_len = math.ceil(text_feature["end"][-1]) low = math.floor(text_feature["start"][text_clip_indexs[0]]) high = math.ceil(text_feature["end"][text_clip_indexs[-1]]) if low < high: center = random.randint(low, high) else: center = int((low + high) // 2) center = max(0, min(video_feature.shape[0] - 1, center)) assert 0 <= center < video_feature.shape[0] video_clips = self.video_clip_sampler( video_len, self._get_video_maxlen(), center ) video_start = video_clips["start"][0] video_end = video_clips["end"][0] # tensorize. vfeats, vmasks = self._build_video_seq( video_feature, video_clips ) caps, cmasks = self._build_text_seq( text_feature, text_clip_indexs ) text_start = text_clip_indexs[0] text_end = text_clip_indexs[-1] + 1 return { "caps": caps, "cmasks": cmasks, "vfeats": vfeats, "vmasks": vmasks, "video_start": video_start, "video_end": video_end, "text_start": text_start, "text_end": text_end, } class MFMMLMAligner(FixedLenAligner): """ `FixedLenAligner` with Masked Language Model and Masked Frame Model. 
""" def __init__(self, config): super().__init__(config) keep_prob = config.keep_prob if config.keep_prob is not None else 1.0 self.text_clip_sampler = TextClipSamplingProcessor( self.max_len - self.max_video_len - 3, keep_prob ) self.sampled_min_len = config.sampled_min_len self.sampled_max_len = config.sampled_max_len self.masked_token_sampler = TextMaskingProcessor(config) self.mm_type = config.mm_type \ if config.mm_type is not None else "full" self.attnmasker = MMAttentionMask2DProcessor() \ if self.mm_type == "textgen" else None self.masked_frame_sampler = FrameMaskingProcessor(config) self.lazy_vfeat_mask = ( False if config.lazy_vfeat_mask is None else config.lazy_vfeat_mask ) self.mm_prob = config.mm_prob if config.mm_prob is not None else 0. def __call__(self, video_id, video_feature, text_feature): from transformers import default_data_collator if self.subsampling is not None and self.subsampling > 1: batch = [] for _ in range(self.subsampling): centerclip_idx = random.randint( 0, len(text_feature["start"]) - 1) sampled_max_text_len = random.randint( self.sampled_min_len, self.sampled_max_len ) batch.append( self.sampling( video_id, video_feature, text_feature, centerclip_idx, sampled_max_text_len, ) ) batch = self.batch_post_processing(batch, video_feature) batch = default_data_collator(batch) else: batch = self.sampling(video_id, video_feature, text_feature) batch = self.batch_post_processing(batch, video_feature) batch["video_id"] = video_id if isinstance(video_id, str) \ else video_id[0] return batch def sampling( self, video_id, video_feature, text_feature, centerclip_idx=None, sampled_max_text_len=None, ): output = FixedLenAligner.sampling(self, video_id, video_feature, text_feature, centerclip_idx, sampled_max_text_len) masking_text, masking_video = None, None if random.random() < self.mm_prob: if random.random() > 0.5: masking_text, masking_video = self.mm_type, "no" else: masking_text, masking_video = "no", "full" video_feats = output["vfeats"] if not self.lazy_vfeat_mask else None video_label = self.masked_frame_sampler( output["vmasks"], masking_video, vfeats=video_feats) caps, text_label = self.masked_token_sampler( output["caps"], masking_text) output.update({ "caps": caps, "video_label": video_label, "text_label": text_label, }) if self.attnmasker is not None: attention_mask = self.attnmasker( output["vmasks"], output["cmasks"], masking_text) output.update({ "attention_mask": attention_mask }) return output class FrameMaskingProcessor(Processor): def __init__(self, config): self.mfm_probability = 0.15 if config.mfm_probability is not None: self.mfm_probability = config.mfm_probability def __call__(self, vmasks, modality_masking=None, vfeats=None): """ We perform lazy masking to save data transfer time. It only generates video_labels by default and MFM model will do actualy masking. Return: `video_label` is a binary mask. """ video_label = vmasks.clone() if modality_masking is not None: if modality_masking == "full": probability_matrix = torch.full(video_label.shape, 1.) elif modality_masking == "no": probability_matrix = torch.full(video_label.shape, 0.) elif modality_masking == "inverse": probability_matrix = torch.full( video_label.shape, 1. 
- self.mfm_probability) else: raise ValueError("unknown modality masking.", modality_masking) else: probability_matrix = torch.full( video_label.shape, self.mfm_probability) masked_indices = torch.bernoulli(probability_matrix).bool() # We only compute loss on masked tokens video_label[~masked_indices] = 0 if vfeats is not None: vfeats[video_label, :] = 0.0 return video_label class TextGenerationProcessor(Processor): def __init__(self, tokenizer): self.bos_token_id = tokenizer.bos_token_id self.pad_token_id = tokenizer.pad_token_id def __call__(self, inputs): labels = inputs.clone() # [CLS] [SEP] for video labels[:2] = -100 # keep [SEP] for text. pad_mask = labels == self.pad_token_id labels[pad_mask] = -100 inputs[2:] = torch.cat([ torch.LongTensor([self.bos_token_id]), inputs[2:-1]]) inputs[pad_mask] = self.pad_token_id assert len(inputs) == len(labels) return inputs, labels class TextMaskingProcessor(Processor): def __init__(self, config): """this function is borrowed from `transformers/data/data_collator.DataCollatorForLanguageModeling`""" self.mlm_probability = 0.15 if config.mlm_probability is not None: self.mlm_probability = config.mlm_probability self.bert_name = config.bert_name # [CLS] is used as bos_token and [SEP] is used as eos_token. # https://huggingface.co/transformers/master/model_doc/bertgeneration.html from transformers import AutoTokenizer self.tokenizer = AutoTokenizer.from_pretrained( self.bert_name, bos_token="[CLS]", eos_token="[SEP]") self.textgen = TextGenerationProcessor(self.tokenizer) def __call__( self, inputs: torch.Tensor, modality_masking=None, special_tokens_mask: Optional[torch.Tensor] = None ) -> Tuple[torch.Tensor, torch.Tensor]: """ expand modality_masking into None: traditional bert masking. "no": no masking. "full": all [MASK] token for generation. "gen": autoregressive generation. """ """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """ labels = inputs.clone() # We sample a few tokens in each sequence for MLM training # (with probability `self.mlm_probability`) if modality_masking is not None: if modality_masking == "full": probability_matrix = torch.full(labels.shape, 1.) elif modality_masking == "no": probability_matrix = torch.full(labels.shape, 0.) elif modality_masking.startswith("textgen"): # [CLS] [SEP] <s> ... inputs, labels = self.textgen(inputs) if "mask" not in modality_masking: return inputs, labels inputs = self.mask_input(inputs, special_tokens_mask) return inputs, labels elif modality_masking == "mask": inputs = self.mask_input(inputs, special_tokens_mask) labels = torch.full(inputs.shape, -100) return inputs, labels elif modality_masking == "inverse": probability_matrix = torch.full(labels.shape, 1. 
- self.mlm_probability) else: raise ValueError("unknown modality masking.", modality_masking) else: probability_matrix = torch.full(labels.shape, self.mlm_probability) if special_tokens_mask is None: special_tokens_mask = self.get_special_tokens_mask( labels.tolist(), already_has_special_tokens=True ) special_tokens_mask = torch.tensor( special_tokens_mask, dtype=torch.bool) else: special_tokens_mask = special_tokens_mask.bool() probability_matrix.masked_fill_(special_tokens_mask, value=0.0) masked_indices = torch.bernoulli(probability_matrix).bool() labels[~masked_indices] = -100 # We only compute loss on masked tokens # 80% of the time, # we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = ( torch.bernoulli( torch.full(labels.shape, 0.8)).bool() & masked_indices ) inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids( self.tokenizer.mask_token ) # 10% of the time, we replace masked input tokens with random word indices_random = ( torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced ) random_words = torch.randint( len(self.tokenizer), labels.shape, dtype=torch.long ) inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input # tokens unchanged return inputs, labels def mask_input(self, inputs, special_tokens_mask=None): # the following is new with masked autoregressive. probability_matrix = torch.full( inputs.shape, self.mlm_probability) if special_tokens_mask is None: special_tokens_mask = self.get_special_tokens_mask( inputs.tolist(), already_has_special_tokens=True ) special_tokens_mask = torch.tensor( special_tokens_mask, dtype=torch.bool) else: special_tokens_mask = special_tokens_mask.bool() probability_matrix.masked_fill_(special_tokens_mask, value=0.0) masked_indices = torch.bernoulli(probability_matrix).bool() indices_replaced = ( torch.bernoulli( torch.full(inputs.shape, 0.8)).bool() & masked_indices ) inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids( self.tokenizer.mask_token ) # 10% of the time, we replace masked input tokens with random word indices_random = ( torch.bernoulli(torch.full(inputs.shape, 0.5)).bool() & masked_indices & ~indices_replaced ) random_words = torch.randint( len(self.tokenizer), inputs.shape, dtype=torch.long ) inputs[indices_random] = random_words[indices_random] return inputs def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Note: the version from transformers do not consider pad as special tokens. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if" "the provided sequence of " "ids is already formated with special tokens " "for the model." ) return list(map(lambda x: 1 if x in [ self.tokenizer.sep_token_id, self.tokenizer.cls_token_id, self.tokenizer.pad_token_id] else 0, token_ids_0)) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] class TextClipSamplingProcessor(Processor): def __init__(self, max_text_len, keep_prob=1.0): self.max_text_len = max_text_len self.max_video_len = 256 # always hold. self.keep_prob = keep_prob def __call__( self, text_feature, centerclip_idx=None, sampled_max_text_len=None, sampled_max_video_len=None, ): # Let's use all caps for now and see if 256 can cover all of them. 
if sampled_max_text_len is not None: max_text_len = sampled_max_text_len else: max_text_len = self.max_text_len if sampled_max_video_len is not None: max_video_len = sampled_max_video_len else: max_video_len = self.max_video_len t_num_clips = len(text_feature["start"]) if centerclip_idx is None: centerclip_idx = random.randint(0, t_num_clips - 1) start_idx, end_idx = centerclip_idx, centerclip_idx + 1 text_clip_indexs = deque() text_clip_indexs.append(start_idx) text_len = len(text_feature["cap"][start_idx]) video_len = max( 0, text_feature["end"][start_idx] - text_feature["start"][start_idx], ) while ( (start_idx > 0 or end_idx < t_num_clips) and text_len < max_text_len and video_len < max_video_len ): if random.random() > 0.5 and end_idx < t_num_clips: # skip the next one? if random.random() > self.keep_prob and (end_idx + 1) < t_num_clips: end_idx = end_idx + 1 text_clip_indexs.append(end_idx) text_len += len(text_feature["cap"][end_idx]) end_idx += 1 elif start_idx > 0: if random.random() > self.keep_prob and (start_idx - 1) > 0: start_idx = start_idx - 1 start_idx -= 1 text_clip_indexs.insert(0, start_idx) text_len += len(text_feature["cap"][start_idx]) else: if end_idx < t_num_clips: if random.random() > self.keep_prob and (end_idx + 1) < t_num_clips: end_idx = end_idx + 1 text_clip_indexs.append(end_idx) text_len += len(text_feature["cap"][end_idx]) end_idx += 1 else: return text_clip_indexs video_len = max( 0, text_feature["end"][text_clip_indexs[-1]] - text_feature["start"][text_clip_indexs[0]], ) return text_clip_indexs class VideoClipSamplingProcessor(Processor): def __call__(self, video_len, max_video_len, center): """ `video_len`: length of the video. `max_video_len`: maximum video tokens allowd in a sequence. `center`: initial starting index. """ assert center >= 0 and center < video_len t_clip_len = 0 start, end = center, center while (start > 0 or end < video_len) and t_clip_len < max_video_len: # decide the direction to grow. if start <= 0: end += 1 elif end >= video_len: start -= 1 elif random.random() > 0.5: end += 1 else: start -= 1 t_clip_len += 1 return {"start": [start], "end": [end]} class How2MILNCEAligner(FixedLenAligner): """reference: `antoine77340/MIL-NCE_HowTo100M/video_loader.py`""" def __init__(self, config): super().__init__(config) self.num_candidates = 4 self.min_time = 5.0 self.num_sec = 3.2 # self.num_sec = self.num_frames / float(self.fps) num_frames=16 / fps = 5 # self.num_frames = 16 def sampling( self, video_id, video_feature, text_feature, centerclip_idx=None, # will be ignored. sampled_max_text_len=None # will be ignored. 
): text, start, end = self._get_text(text_feature) video = self._get_video(video_feature, start, end) vfeats = torch.zeros((self.max_video_len, video_feature.shape[1])) vmasks = torch.zeros((self.max_video_len,), dtype=torch.bool) vfeats[: video.shape[0]] = torch.from_numpy(np.array(video)) vmasks[: video.shape[0]] = 1 caps, cmasks = [], [] for words in text: cap, cmask = self._build_text_seq(text_feature, words) caps.append(cap) cmasks.append(cmask) caps = torch.stack(caps) cmasks = torch.stack(cmasks) # video of shape: (video_len) # text of shape (num_candidates, max_text_len) return { "caps": caps, "cmasks": cmasks, "vfeats": vfeats, "vmasks": vmasks, # "video_id": video_id, } def _get_video(self, video_feature, start, end): start_seek = random.randint(start, int(max(start, end - self.num_sec))) # duration = self.num_sec + 0.1 return video_feature[start_seek : int(start_seek + self.num_sec)] def _get_text(self, cap): ind = random.randint(0, len(cap["start"]) - 1) if self.num_candidates == 1: words = [ind] else: words = [] cap_start = self._find_nearest_candidates(cap, ind) for i in range(self.num_candidates): words.append([max(0, min(len(cap["cap"]) - 1, cap_start + i))]) start, end = cap["start"][ind], cap["end"][ind] # TODO: May need to be improved for edge cases. # expand the min time. if end - start < self.min_time: diff = self.min_time - end + start start = max(0, start - diff / 2) end = start + self.min_time return words, int(start), int(end) def _find_nearest_candidates(self, caption, ind): """find the range of the clips.""" start, end = ind, ind #diff = caption["end"][end] - caption["start"][start] n_candidate = 1 while n_candidate < self.num_candidates: # the first clip if start == 0: return 0 # we add () in the following condition to fix the bug. elif end == (len(caption["start"]) - 1): return start - (self.num_candidates - n_candidate) elif (caption["end"][end] - caption["start"][start - 1]) < ( caption["end"][end + 1] - caption["start"][start] ): start -= 1 else: end += 1 n_candidate += 1 return start class PKLJSONStrTextProcessor(TextProcessor): """`caption.json` from howto100m are preprocessed as a dict `[video_id, json_str]`. Json parsing tokenization are conducted on-the-fly and cached into dict. """ def __init__(self, config, max_clip_text_len=96): print("[Warning] PKLJSONStrTextProcessor is slow for num_workers > 0.") self.caption_pkl_path = str(config.caption_pkl_path) with open(self.caption_pkl_path, "rb") as fd: self.data = pickle.load(fd) self.max_clip_text_len = max_clip_text_len from transformers import AutoTokenizer self.tokenizer = AutoTokenizer.from_pretrained( str(config.bert_name), use_fast=config.use_fast ) def __call__(self, video_id): caption = self.data[video_id] if isinstance(caption, str): import json caption = json.loads(caption) cap = [] for clip_idx, text_clip in enumerate(caption["text"]): clip_ids = [] if isinstance(text_clip, str): clip_ids = self.tokenizer( text_clip[: self.max_clip_text_len], add_special_tokens=False )["input_ids"] cap.append(clip_ids) caption["cap"] = cap caption.pop("text") # save space. self.data[video_id] = caption return caption
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/processors/how2processor.py
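A minimal sketch of the 80/10/10 masking scheme implemented by `TextMaskingProcessor` above, written against a toy vocabulary so it runs without a HuggingFace tokenizer; the vocab size, mask-token id and special-token positions below are made-up illustration values, not values from the original config.

import torch


def mask_tokens(inputs, special_tokens_mask, mask_token_id, vocab_size, mlm_probability=0.15):
    """Toy re-implementation of the 80/10/10 MLM masking used above."""
    labels = inputs.clone()
    probability_matrix = torch.full(labels.shape, mlm_probability)
    probability_matrix.masked_fill_(special_tokens_mask.bool(), value=0.0)
    masked_indices = torch.bernoulli(probability_matrix).bool()
    labels[~masked_indices] = -100  # loss is only computed on masked positions

    # 80% of masked positions become the mask token.
    indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
    inputs[indices_replaced] = mask_token_id

    # 10% become a random token (half of the remaining 20%).
    indices_random = (
        torch.bernoulli(torch.full(labels.shape, 0.5)).bool()
        & masked_indices & ~indices_replaced
    )
    random_words = torch.randint(vocab_size, labels.shape, dtype=torch.long)
    inputs[indices_random] = random_words[indices_random]
    # the final 10% keep their original token.
    return inputs, labels


if __name__ == "__main__":
    toy_inputs = torch.randint(5, 100, (2, 12))   # fake token ids
    toy_special = torch.zeros_like(toy_inputs)
    toy_special[:, 0] = 1                          # pretend [CLS]
    toy_special[:, -1] = 1                         # pretend [SEP]
    masked, labels = mask_tokens(toy_inputs.clone(), toy_special, mask_token_id=103, vocab_size=100)
    print(masked)
    print(labels)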
# This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """Contains a PyTorch definition for Gated Separable 3D network (S3D-G) with a text module for computing joint text-video embedding from raw text and video input. The following code will enable you to load the HowTo100M pretrained S3D Text-Video model from: A. Miech, J.-B. Alayrac, L. Smaira, I. Laptev, J. Sivic and A. Zisserman, End-to-End Learning of Visual Representations from Uncurated Instructional Videos. https://arxiv.org/abs/1912.06430. S3D-G was proposed by: S. Xie, C. Sun, J. Huang, Z. Tu and K. Murphy, Rethinking Spatiotemporal Feature Learning For Video Understanding. https://arxiv.org/abs/1712.04851. Tensorflow code: https://github.com/tensorflow/models/blob/master/research/slim/nets/s3dg.py The S3D architecture was slightly modified with a space to depth trick for TPU optimization. """ import torch as th import torch.nn.functional as F import torch.nn as nn import numpy as np import re class InceptionBlock(nn.Module): def __init__( self, input_dim, num_outputs_0_0a, num_outputs_1_0a, num_outputs_1_0b, num_outputs_2_0a, num_outputs_2_0b, num_outputs_3_0b, gating=True, ): super(InceptionBlock, self).__init__() self.conv_b0 = STConv3D(input_dim, num_outputs_0_0a, [1, 1, 1]) self.conv_b1_a = STConv3D(input_dim, num_outputs_1_0a, [1, 1, 1]) self.conv_b1_b = STConv3D( num_outputs_1_0a, num_outputs_1_0b, [3, 3, 3], padding=1, separable=True ) self.conv_b2_a = STConv3D(input_dim, num_outputs_2_0a, [1, 1, 1]) self.conv_b2_b = STConv3D( num_outputs_2_0a, num_outputs_2_0b, [3, 3, 3], padding=1, separable=True ) self.maxpool_b3 = th.nn.MaxPool3d((3, 3, 3), stride=1, padding=1) self.conv_b3_b = STConv3D(input_dim, num_outputs_3_0b, [1, 1, 1]) self.gating = gating self.output_dim = ( num_outputs_0_0a + num_outputs_1_0b + num_outputs_2_0b + num_outputs_3_0b ) if gating: self.gating_b0 = SelfGating(num_outputs_0_0a) self.gating_b1 = SelfGating(num_outputs_1_0b) self.gating_b2 = SelfGating(num_outputs_2_0b) self.gating_b3 = SelfGating(num_outputs_3_0b) def forward(self, input): """Inception block """ b0 = self.conv_b0(input) b1 = self.conv_b1_a(input) b1 = self.conv_b1_b(b1) b2 = self.conv_b2_a(input) b2 = self.conv_b2_b(b2) b3 = self.maxpool_b3(input) b3 = self.conv_b3_b(b3) if self.gating: b0 = self.gating_b0(b0) b1 = self.gating_b1(b1) b2 = self.gating_b2(b2) b3 = self.gating_b3(b3) return th.cat((b0, b1, b2, b3), dim=1) class SelfGating(nn.Module): def __init__(self, input_dim): super(SelfGating, self).__init__() self.fc = nn.Linear(input_dim, input_dim) def forward(self, input_tensor): """Feature gating as used in S3D-G. 
""" spatiotemporal_average = th.mean(input_tensor, dim=[2, 3, 4]) weights = self.fc(spatiotemporal_average) weights = th.sigmoid(weights) return weights[:, :, None, None, None] * input_tensor class STConv3D(nn.Module): def __init__( self, input_dim, output_dim, kernel_size, stride=1, padding=0, separable=False ): super(STConv3D, self).__init__() self.separable = separable self.relu = nn.ReLU(inplace=True) assert len(kernel_size) == 3 if separable and kernel_size[0] != 1: spatial_kernel_size = [1, kernel_size[1], kernel_size[2]] temporal_kernel_size = [kernel_size[0], 1, 1] if isinstance(stride, list) and len(stride) == 3: spatial_stride = [1, stride[1], stride[2]] temporal_stride = [stride[0], 1, 1] else: spatial_stride = [1, stride, stride] temporal_stride = [stride, 1, 1] if isinstance(padding, list) and len(padding) == 3: spatial_padding = [0, padding[1], padding[2]] temporal_padding = [padding[0], 0, 0] else: spatial_padding = [0, padding, padding] temporal_padding = [padding, 0, 0] if separable: self.conv1 = nn.Conv3d( input_dim, output_dim, kernel_size=spatial_kernel_size, stride=spatial_stride, padding=spatial_padding, bias=False, ) self.bn1 = nn.BatchNorm3d(output_dim) self.conv2 = nn.Conv3d( output_dim, output_dim, kernel_size=temporal_kernel_size, stride=temporal_stride, padding=temporal_padding, bias=False, ) self.bn2 = nn.BatchNorm3d(output_dim) else: self.conv1 = nn.Conv3d( input_dim, output_dim, kernel_size=kernel_size, stride=stride, padding=padding, bias=False, ) self.bn1 = nn.BatchNorm3d(output_dim) def forward(self, input): out = self.relu(self.bn1(self.conv1(input))) if self.separable: out = self.relu(self.bn2(self.conv2(out))) return out class MaxPool3dTFPadding(th.nn.Module): def __init__(self, kernel_size, stride=None, padding="SAME"): super(MaxPool3dTFPadding, self).__init__() if padding == "SAME": padding_shape = self._get_padding_shape(kernel_size, stride) self.padding_shape = padding_shape self.pad = th.nn.ConstantPad3d(padding_shape, 0) self.pool = th.nn.MaxPool3d(kernel_size, stride, ceil_mode=True) def _get_padding_shape(self, filter_shape, stride): def _pad_top_bottom(filter_dim, stride_val): pad_along = max(filter_dim - stride_val, 0) pad_top = pad_along // 2 pad_bottom = pad_along - pad_top return pad_top, pad_bottom padding_shape = [] for filter_dim, stride_val in zip(filter_shape, stride): pad_top, pad_bottom = _pad_top_bottom(filter_dim, stride_val) padding_shape.append(pad_top) padding_shape.append(pad_bottom) depth_top = padding_shape.pop(0) depth_bottom = padding_shape.pop(0) padding_shape.append(depth_top) padding_shape.append(depth_bottom) return tuple(padding_shape) def forward(self, inp): inp = self.pad(inp) out = self.pool(inp) return out class Sentence_Embedding(nn.Module): def __init__( self, embd_dim, num_embeddings=66250, word_embedding_dim=300, token_to_word_path="dict.npy", max_words=16, output_dim=2048, ): super(Sentence_Embedding, self).__init__() self.word_embd = nn.Embedding(num_embeddings, word_embedding_dim) self.fc1 = nn.Linear(word_embedding_dim, output_dim) self.fc2 = nn.Linear(output_dim, embd_dim) self.word_to_token = {} self.max_words = max_words token_to_word = np.load(token_to_word_path) for i, t in enumerate(token_to_word): self.word_to_token[t] = i + 1 def _zero_pad_tensor_token(self, tensor, size): if len(tensor) >= size: return tensor[:size] else: zero = th.zeros(size - len(tensor)).long() return th.cat((tensor, zero), dim=0) def _split_text(self, sentence): w = re.findall(r"[\w']+", str(sentence)) return w def 
_words_to_token(self, words): words = [ self.word_to_token[word] for word in words if word in self.word_to_token ] if words: we = self._zero_pad_tensor_token(th.LongTensor(words), self.max_words) return we else: return th.zeros(self.max_words).long() def _words_to_ids(self, x): split_x = [self._words_to_token(self._split_text(sent.lower())) for sent in x] return th.stack(split_x, dim=0) def forward(self, x): x = self._words_to_ids(x) x = self.word_embd(x) x = F.relu(self.fc1(x)) x = th.max(x, dim=1)[0] x = self.fc2(x) return {'text_embedding': x} class S3D(nn.Module): def __init__(self, dict_path, num_classes=512, gating=True, space_to_depth=True): super(S3D, self).__init__() self.num_classes = num_classes self.gating = gating self.space_to_depth = space_to_depth if space_to_depth: self.conv1 = STConv3D( 24, 64, [2, 4, 4], stride=1, padding=(1, 2, 2), separable=False ) else: self.conv1 = STConv3D( 3, 64, [3, 7, 7], stride=2, padding=(1, 3, 3), separable=False ) self.conv_2b = STConv3D(64, 64, [1, 1, 1], separable=False) self.conv_2c = STConv3D(64, 192, [3, 3, 3], padding=1, separable=True) self.gating = SelfGating(192) self.maxpool_2a = MaxPool3dTFPadding( kernel_size=(1, 3, 3), stride=(1, 2, 2), padding="SAME" ) self.maxpool_3a = MaxPool3dTFPadding( kernel_size=(1, 3, 3), stride=(1, 2, 2), padding="SAME" ) self.mixed_3b = InceptionBlock(192, 64, 96, 128, 16, 32, 32) self.mixed_3c = InceptionBlock( self.mixed_3b.output_dim, 128, 128, 192, 32, 96, 64 ) self.maxpool_4a = MaxPool3dTFPadding( kernel_size=(3, 3, 3), stride=(2, 2, 2), padding="SAME" ) self.mixed_4b = InceptionBlock( self.mixed_3c.output_dim, 192, 96, 208, 16, 48, 64 ) self.mixed_4c = InceptionBlock( self.mixed_4b.output_dim, 160, 112, 224, 24, 64, 64 ) self.mixed_4d = InceptionBlock( self.mixed_4c.output_dim, 128, 128, 256, 24, 64, 64 ) self.mixed_4e = InceptionBlock( self.mixed_4d.output_dim, 112, 144, 288, 32, 64, 64 ) self.mixed_4f = InceptionBlock( self.mixed_4e.output_dim, 256, 160, 320, 32, 128, 128 ) self.maxpool_5a = self.maxPool3d_5a_2x2 = MaxPool3dTFPadding( kernel_size=(2, 2, 2), stride=(2, 2, 2), padding="SAME" ) self.mixed_5b = InceptionBlock( self.mixed_4f.output_dim, 256, 160, 320, 32, 128, 128 ) self.mixed_5c = InceptionBlock( self.mixed_5b.output_dim, 384, 192, 384, 48, 128, 128 ) self.fc = nn.Linear(self.mixed_5c.output_dim, num_classes) self.text_module = Sentence_Embedding(num_classes, token_to_word_path=dict_path) def _space_to_depth(self, input): """3D space to depth trick for TPU optimization. """ B, C, T, H, W = input.shape input = input.view(B, C, T // 2, 2, H // 2, 2, W // 2, 2) input = input.permute(0, 3, 5, 7, 1, 2, 4, 6) input = input.contiguous().view(B, 8 * C, T // 2, H // 2, W // 2) return input def forward(self, inputs): """Defines the S3DG base architecture.""" if self.space_to_depth: inputs = self._space_to_depth(inputs) net = self.conv1(inputs) if self.space_to_depth: # we need to replicate 'SAME' tensorflow padding net = net[:, :, 1:, 1:, 1:] net = self.maxpool_2a(net) net = self.conv_2b(net) net = self.conv_2c(net) if self.gating: net = self.gating(net) net = self.maxpool_3a(net) net = self.mixed_3b(net) net = self.mixed_3c(net) net = self.maxpool_4a(net) net = self.mixed_4b(net) net = self.mixed_4c(net) net = self.mixed_4d(net) net = self.mixed_4e(net) net = self.mixed_4f(net) net = self.maxpool_5a(net) net = self.mixed_5b(net) net = self.mixed_5c(net) net = th.mean(net, dim=[2, 3, 4]) return {'video_embedding': self.fc(net), 'mixed_5c': net}
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/processors/models/s3dg.py
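A small standalone sketch of the 3D space-to-depth reshaping used by `S3D._space_to_depth` above; the clip dimensions are arbitrary example values, chosen so the folded tensor matches the 24-channel `conv1` of the space-to-depth branch.

import torch as th


def space_to_depth_3d(x):
    """Fold each 2x2x2 block of (T, H, W) into the channel dim, as in S3D._space_to_depth."""
    B, C, T, H, W = x.shape
    x = x.view(B, C, T // 2, 2, H // 2, 2, W // 2, 2)
    x = x.permute(0, 3, 5, 7, 1, 2, 4, 6)
    return x.contiguous().view(B, 8 * C, T // 2, H // 2, W // 2)


if __name__ == "__main__":
    clip = th.randn(2, 3, 16, 224, 224)   # (batch, rgb, frames, height, width)
    folded = space_to_depth_3d(clip)
    print(folded.shape)                   # torch.Size([2, 24, 8, 112, 112]) -> feeds the 24-channel conv1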
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import numpy as np import pickle import time try: import faiss except ImportError: pass from collections import defaultdict from ..utils import get_local_rank class VectorRetriever(object): """ How2 Video Retriver. Reference usage of FAISS: https://github.com/fairinternal/fairseq-py/blob/paraphrase_pretraining/fairseq/data/multilingual_faiss_dataset.py """ def __init__(self, hidden_size, cent, db_type, examples_per_cent_to_train): if db_type == "flatl2": quantizer = faiss.IndexFlatL2(hidden_size) # the other index self.db = faiss.IndexIVFFlat( quantizer, hidden_size, cent, faiss.METRIC_L2) elif db_type == "pq": self.db = faiss.index_factory( hidden_size, f"IVF{cent}_HNSW32,PQ32" ) else: raise ValueError("unknown type of db", db_type) self.train_thres = cent * examples_per_cent_to_train self.train_cache = [] self.train_len = 0 self.videoid_to_vectoridx = {} self.vectoridx_to_videoid = None self.make_direct_maps_done = False def make_direct_maps(self): faiss.downcast_index(self.db).make_direct_map() def __len__(self): return self.db.ntotal def save(self, out_dir): faiss.write_index( self.db, os.path.join(out_dir, "faiss_idx") ) with open( os.path.join( out_dir, "videoid_to_vectoridx.pkl"), "wb") as fw: pickle.dump( self.videoid_to_vectoridx, fw, protocol=pickle.HIGHEST_PROTOCOL ) def load(self, out_dir): fn = os.path.join(out_dir, "faiss_idx") self.db = faiss.read_index(fn) with open( os.path.join(out_dir, "videoid_to_vectoridx.pkl"), "rb") as fr: self.videoid_to_vectoridx = pickle.load(fr) def add(self, hidden_states, video_ids, last=False): assert len(hidden_states) == len(video_ids), "{}, {}".format( str(len(hidden_states)), str(len(video_ids))) assert len(hidden_states.shape) == 2 assert hidden_states.dtype == np.float32 valid_idx = [] for idx, video_id in enumerate(video_ids): if video_id not in self.videoid_to_vectoridx: valid_idx.append(idx) self.videoid_to_vectoridx[video_id] = \ len(self.videoid_to_vectoridx) hidden_states = hidden_states[valid_idx] if not self.db.is_trained: self.train_cache.append(hidden_states) self.train_len += hidden_states.shape[0] if self.train_len < self.train_thres: return self.finalize_training() else: self.db.add(hidden_states) def finalize_training(self): hidden_states = np.concatenate(self.train_cache, axis=0) del self.train_cache local_rank = get_local_rank() if local_rank == 0: start = time.time() print("training db on", self.train_thres, "/", self.train_len) self.db.train(hidden_states[:self.train_thres]) if local_rank == 0: print("training db for", time.time() - start) self.db.add(hidden_states) def search( self, query_hidden_states, orig_dist, ): if len(self.videoid_to_vectoridx) != self.db.ntotal: raise ValueError( "cannot search: size mismatch in-between index and db", len(self.videoid_to_vectoridx), self.db.ntotal ) if self.vectoridx_to_videoid is None: self.vectoridx_to_videoid = { self.videoid_to_vectoridx[videoid]: videoid for videoid in self.videoid_to_vectoridx } assert len(self.vectoridx_to_videoid) \ == len(self.videoid_to_vectoridx) # MultilingualFaissDataset uses the following; not sure the purpose. 
# faiss.ParameterSpace().set_index_parameter(self.db, "nprobe", 10) queried_dist, index = self.db.search(query_hidden_states, 1) queried_dist, index = queried_dist[:, 0], index[:, 0] outputs = np.array( [self.vectoridx_to_videoid[_index] if _index != -1 else (-1, -1, -1) for _index in index], dtype=np.int32) outputs[queried_dist <= orig_dist] = -1 return outputs def search_by_video_ids( self, video_ids, retri_factor ): if len(self.videoid_to_vectoridx) != self.db.ntotal: raise ValueError( len(self.videoid_to_vectoridx), self.db.ntotal ) if not self.make_direct_maps_done: self.make_direct_maps() if self.vectoridx_to_videoid is None: self.vectoridx_to_videoid = { self.videoid_to_vectoridx[videoid]: videoid for videoid in self.videoid_to_vectoridx } assert len(self.vectoridx_to_videoid) \ == len(self.videoid_to_vectoridx) query_hidden_states = [] vector_ids = [] for video_id in video_ids: vector_id = self.videoid_to_vectoridx[video_id] vector_ids.append(vector_id) query_hidden_state = self.db.reconstruct(vector_id) query_hidden_states.append(query_hidden_state) query_hidden_states = np.stack(query_hidden_states) # MultilingualFaissDataset uses the following; not sure the reason. # faiss.ParameterSpace().set_index_parameter(self.db, "nprobe", 10) _, index = self.db.search(query_hidden_states, retri_factor) outputs = [] for sample_idx, sample in enumerate(index): # the first video_id is always the video itself. cands = [video_ids[sample_idx]] for vector_idx in sample: if vector_idx >= 0 \ and vector_ids[sample_idx] != vector_idx: cands.append( self.vectoridx_to_videoid[vector_idx] ) outputs.append(cands) return outputs class VectorRetrieverDM(VectorRetriever): """ with direct map. How2 Video Retriver. Reference usage of FAISS: https://github.com/fairinternal/fairseq-py/blob/paraphrase_pretraining/fairseq/data/multilingual_faiss_dataset.py """ def __init__( self, hidden_size, cent, db_type, examples_per_cent_to_train ): super().__init__( hidden_size, cent, db_type, examples_per_cent_to_train) self.make_direct_maps_done = False def make_direct_maps(self): faiss.downcast_index(self.db).make_direct_map() self.make_direct_maps_done = True def search( self, query_hidden_states, orig_dist, ): if len(self.videoid_to_vectoridx) != self.db.ntotal: raise ValueError( len(self.videoid_to_vectoridx), self.db.ntotal ) if not self.make_direct_maps_done: self.make_direct_maps() if self.vectoridx_to_videoid is None: self.vectoridx_to_videoid = { self.videoid_to_vectoridx[videoid]: videoid for videoid in self.videoid_to_vectoridx } assert len(self.vectoridx_to_videoid) \ == len(self.videoid_to_vectoridx) # MultilingualFaissDataset uses the following; not sure the reason. 
# faiss.ParameterSpace().set_index_parameter(self.db, "nprobe", 10) queried_dist, index = self.db.search(query_hidden_states, 1) outputs = [] for sample_idx, sample in enumerate(index): # and queried_dist[sample_idx] < thres \ if sample >= 0 \ and queried_dist[sample_idx] < orig_dist[sample_idx]: outputs.append(self.vectoridx_to_videoid[sample]) else: outputs.append(None) return outputs def search_by_video_ids( self, video_ids, retri_factor=8 ): if len(self.videoid_to_vectoridx) != self.db.ntotal: raise ValueError( len(self.videoid_to_vectoridx), self.db.ntotal ) if not self.make_direct_maps_done: self.make_direct_maps() if self.vectoridx_to_videoid is None: self.vectoridx_to_videoid = { self.videoid_to_vectoridx[videoid]: videoid for videoid in self.videoid_to_vectoridx } assert len(self.vectoridx_to_videoid) \ == len(self.videoid_to_vectoridx) query_hidden_states = [] vector_ids = [] for video_id in video_ids: vector_id = self.videoid_to_vectoridx[video_id] vector_ids.append(vector_id) query_hidden_state = self.db.reconstruct(vector_id) query_hidden_states.append(query_hidden_state) query_hidden_states = np.stack(query_hidden_states) # MultilingualFaissDataset uses the following; not sure the reason. # faiss.ParameterSpace().set_index_parameter(self.db, "nprobe", 10) _, index = self.db.search(query_hidden_states, retri_factor) outputs = [] for sample_idx, sample in enumerate(index): # the first video_id is always the video itself. cands = [video_ids[sample_idx]] for vector_idx in sample: if vector_idx >= 0 \ and vector_ids[sample_idx] != vector_idx: cands.append( self.vectoridx_to_videoid[vector_idx] ) outputs.append(cands) return outputs class MMVectorRetriever(VectorRetrieverDM): """ multimodal vector retriver: text retrieve video or video retrieve text. 
""" def __init__(self, hidden_size, cent, db_type, examples_per_cent_to_train): super().__init__( hidden_size, cent, db_type, examples_per_cent_to_train) video_db = self.db super().__init__( hidden_size, cent, db_type, examples_per_cent_to_train) text_db = self.db self.db = {"video": video_db, "text": text_db} self.video_to_videoid = defaultdict(list) def __len__(self): assert self.db["video"].ntotal == self.db["text"].ntotal return self.db["video"].ntotal def make_direct_maps(self): faiss.downcast_index(self.db["video"]).make_direct_map() faiss.downcast_index(self.db["text"]).make_direct_map() def save(self, out_dir): faiss.write_index( self.db["video"], os.path.join(out_dir, "video_faiss_idx") ) faiss.write_index( self.db["text"], os.path.join(out_dir, "text_faiss_idx") ) with open( os.path.join( out_dir, "videoid_to_vectoridx.pkl"), "wb") as fw: pickle.dump( self.videoid_to_vectoridx, fw, protocol=pickle.HIGHEST_PROTOCOL ) def load(self, out_dir): fn = os.path.join(out_dir, "video_faiss_idx") video_db = faiss.read_index(fn) fn = os.path.join(out_dir, "text_faiss_idx") text_db = faiss.read_index(fn) self.db = {"video": video_db, "text": text_db} with open( os.path.join(out_dir, "videoid_to_vectoridx.pkl"), "rb") as fr: self.videoid_to_vectoridx = pickle.load(fr) self.video_to_videoid = defaultdict(list) def add(self, hidden_states, video_ids): """hidden_states is a pair `(video, text)`""" assert len(hidden_states) == len(video_ids), "{}, {}".format( str(len(hidden_states)), str(len(video_ids))) assert len(hidden_states.shape) == 3 assert len(self.video_to_videoid) == 0 valid_idx = [] for idx, video_id in enumerate(video_ids): if video_id not in self.videoid_to_vectoridx: valid_idx.append(idx) self.videoid_to_vectoridx[video_id] = \ len(self.videoid_to_vectoridx) batch_size = hidden_states.shape[0] hidden_states = hidden_states[valid_idx] hidden_states = np.transpose(hidden_states, (1, 0, 2)).copy() if not self.db["video"].is_trained: self.train_cache.append(hidden_states) train_len = batch_size * len(self.train_cache) if train_len < self.train_thres: return hidden_states = np.concatenate(self.train_cache, axis=1) del self.train_cache self.db["video"].train(hidden_states[0, :self.train_thres]) self.db["text"].train(hidden_states[1, :self.train_thres]) self.db["video"].add(hidden_states[0]) self.db["text"].add(hidden_states[1]) def get_clips_by_video_id(self, video_id): if not self.video_to_videoid: for video_id, video_clip, text_clip in self.videoid_to_vectoridx: self.video_to_videoid[video_id].append( (video_id, video_clip, text_clip)) return self.video_to_videoid[video_id] def search( self, video_ids, target_modality, retri_factor=8 ): if len(self.videoid_to_vectoridx) != len(self): raise ValueError( len(self.videoid_to_vectoridx), len(self) ) if not self.make_direct_maps_done: self.make_direct_maps() if self.vectoridx_to_videoid is None: self.vectoridx_to_videoid = { self.videoid_to_vectoridx[videoid]: videoid for videoid in self.videoid_to_vectoridx } assert len(self.vectoridx_to_videoid) \ == len(self.videoid_to_vectoridx) src_modality = "text" if target_modality == "video" else "video" query_hidden_states = [] vector_ids = [] for video_id in video_ids: vector_id = self.videoid_to_vectoridx[video_id] vector_ids.append(vector_id) query_hidden_state = self.db[src_modality].reconstruct(vector_id) query_hidden_states.append(query_hidden_state) query_hidden_states = np.stack(query_hidden_states) # MultilingualFaissDataset uses the following; not sure the reason. 
# faiss.ParameterSpace().set_index_parameter(self.db, "nprobe", 10) _, index = self.db[target_modality].search( query_hidden_states, retri_factor) outputs = [] for sample_idx, sample in enumerate(index): cands = [] for vector_idx in sample: if vector_idx >= 0: cands.append( self.vectoridx_to_videoid[vector_idx] ) outputs.append(cands) return outputs
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/modules/retri.py
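Roughly how the retrievers above drive FAISS (buffer, train, add, then search); a standalone sketch with random vectors that assumes the faiss package is installed, with made-up dimensions, centroid count and video ids.

import faiss
import numpy as np

hidden_size, n_centroids = 64, 16
quantizer = faiss.IndexFlatL2(hidden_size)
db = faiss.IndexIVFFlat(quantizer, hidden_size, n_centroids, faiss.METRIC_L2)

# mirror VectorRetriever.add / finalize_training: collect enough vectors,
# train the coarse quantizer once, then add everything.
vectors = np.random.rand(2048, hidden_size).astype("float32")
video_ids = ["video_%d" % i for i in range(len(vectors))]
assert not db.is_trained
db.train(vectors)
db.add(vectors)

# nearest neighbour per query, as in VectorRetriever.search.
queries = vectors[:5] + 0.01
dist, idx = db.search(queries, 1)
print([video_ids[i] if i != -1 else None for i in idx[:, 0]])

# enable reconstruct() so search_by_video_ids-style lookups by stored id work.
faiss.downcast_index(db).make_direct_map()
print(db.reconstruct(0)[:4])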
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. from .mm import * try: from .expmm import * except ImportError: pass
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/modules/__init__.py
# Copyright (c) Facebook, Inc. All Rights Reserved import torch import os import numpy as np import pickle from . import retri from ..utils import get_local_rank class VectorPool(object): """ Base class of retrieval space. """ def __init__(self, config): from transformers import AutoConfig self.hidden_size = AutoConfig.from_pretrained( config.dataset.bert_name).hidden_size self.retriever_cls = getattr(retri, config.retriever_cls) def __call__(self, sample, **kwargs): raise NotImplementedError def build_retriver( self, retriever_cls=None, hidden_size=None, centroids=512, db_type="flatl2", examples_per_cent_to_train=48 ): """merge results from multiple gpus and return a retriver..""" self.retriver = retriever_cls( hidden_size, centroids, db_type, examples_per_cent_to_train) return self.retriver def __repr__(self): if hasattr(self, "retriver"): retriver_name = str(len(self.retriver)) else: retriver_name = "no retriver field yet" return self.__class__.__name__ \ + "(" + retriver_name + ")" class VideoVectorPool(VectorPool): """ average clips of a video as video representation. """ def __init__(self, config): super().__init__(config) self.build_retriver(self.retriever_cls, self.hidden_size) def __call__(self, sample, subsampling, **kwargs): hidden_states = ( sample["pooled_video"] + sample["pooled_text"]) / 2. hidden_states = hidden_states.view( -1, subsampling, hidden_states.size(-1)) hidden_states = torch.mean(hidden_states, dim=1) hidden_states = hidden_states.cpu().detach().numpy() video_ids = [] for offset_idx, video_id in enumerate(sample["video_id"]): if isinstance(video_id, tuple) and len(video_id) == 3: # a sharded video_id. video_id = video_id[0] video_ids.append(video_id) assert len(video_ids) == len(hidden_states) self.retriver.add( hidden_states.astype("float32"), video_ids ) class DistributedVectorPool(VectorPool): """ support sync of multiple gpus/nodes. """ def __init__(self, config): super().__init__(config) self.out_dir = os.path.join( config.fairseq.checkpoint.save_dir, "retri") os.makedirs(self.out_dir, exist_ok=True) self.hidden_states = [] self.video_ids = [] def build_retriver( self, retriever_cls=None, hidden_size=None, centroids=4096, db_type="flatl2", examples_per_cent_to_train=48 ): if retriever_cls is None: retriever_cls = self.retriever_cls if hidden_size is None: hidden_size = self.hidden_size """merge results from multiple gpus and return a retriver..""" if torch.distributed.is_initialized(): self.save() # sync saving. torch.distributed.barrier() world_size = torch.distributed.get_world_size() else: world_size = 1 self.retriver = retriever_cls( hidden_size, centroids, db_type, examples_per_cent_to_train) # each gpu process has its own retriever. 
for local_rank in range(world_size): if get_local_rank() == 0: print("load local_rank", local_rank) hidden_states, video_ids = self.load(local_rank) hidden_states = hidden_states.astype("float32") self.retriver.add(hidden_states, video_ids) return self.retriver def load(self, local_rank): hidden_states = np.load( os.path.join( self.out_dir, "hidden_state" + str(local_rank) + ".npy" ) ) with open( os.path.join( self.out_dir, "video_id" + str(local_rank) + ".pkl"), "rb") as fr: video_ids = pickle.load(fr) return hidden_states, video_ids def save(self): hidden_states = np.vstack(self.hidden_states) assert len(hidden_states) == len(self.video_ids), "{}, {}".format( len(hidden_states), len(self.video_ids) ) local_rank = torch.distributed.get_rank() \ if torch.distributed.is_initialized() else 0 np.save( os.path.join( self.out_dir, "hidden_state" + str(local_rank) + ".npy"), hidden_states) with open( os.path.join( self.out_dir, "video_id" + str(local_rank) + ".pkl"), "wb") as fw: pickle.dump( self.video_ids, fw, protocol=pickle.HIGHEST_PROTOCOL ) class DistributedVideoVectorPool(DistributedVectorPool): """ average clips of a video as video representation. """ def __call__(self, sample, subsampling, **kwargs): hidden_states = ( sample["pooled_video"] + sample["pooled_text"]) / 2. hidden_states = hidden_states.view( -1, subsampling, hidden_states.size(-1)) hidden_states = torch.mean(hidden_states, dim=1) hidden_states = hidden_states.cpu().detach().numpy() video_ids = [] for offset_idx, video_id in enumerate(sample["video_id"]): if isinstance(video_id, tuple) and len(video_id) == 3: # a sharded video_id. video_id = video_id[0] video_ids.append(video_id) assert len(video_ids) == len(hidden_states) self.hidden_states.append(hidden_states) self.video_ids.extend(video_ids) # ------------ the following are deprecated -------------- class TextClipVectorPool(VectorPool): def __init__(self, config): from transformers import AutoConfig hidden_size = AutoConfig.from_pretrained( config.dataset.bert_name).hidden_size retriever_cls = getattr(retri, config.retriever_cls) self.build_retriver(retriever_cls, hidden_size) def __call__(self, sample, **kwargs): clip_meta = sample["clip_meta"].cpu() assert torch.all(torch.le(clip_meta[:, 4], clip_meta[:, 5])) text_meta = [tuple(item.tolist()) for item in clip_meta[:, 3:]] if hasattr(self, "retriver"): # build_retriver is called. self.retriver.add( sample["pooled_text"].cpu().numpy().astype("float32"), text_meta ) else: raise NotImplementedError class MMClipVectorPool(VectorPool): """ Multimodal Clip-level vector pool. 
""" def __init__(self, out_dir): """use hidden_states to store `(video, text)`.""" """use video_ids to store `(video_id, start, end)`.""" super().__init__(out_dir) def __call__(self, sample, **kwargs): pooled_video = sample["pooled_video"].cpu().unsqueeze(1).numpy() pooled_text = sample["pooled_text"].cpu().unsqueeze(1).numpy() self.hidden_states.append( np.concatenate([pooled_video, pooled_text], axis=1) ) video_starts = sample["video_start"].cpu() video_ends = sample["video_end"].cpu() assert torch.all(torch.le(video_starts, video_ends)) text_starts = sample["text_start"].cpu() text_ends = sample["text_end"].cpu() assert torch.all(torch.le(text_starts, text_ends)) subsample_size = sample["pooled_video"].size(0) // len(sample["video_id"]) video_ids = [video_id for video_id in sample["video_id"] for _ in range(subsample_size) ] for video_id, video_start, video_end, text_start, text_end in zip( video_ids, video_starts, video_ends, text_starts, text_ends): self.video_ids.append(( video_id, (int(video_start), int(video_end)), (int(text_start), int(text_end)) ))
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/modules/vectorpool.py
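The pooling done by `VideoVectorPool`/`DistributedVideoVectorPool` above reduces to averaging the subsampled clip vectors of each video; a shape-only sketch with arbitrary batch size, subsampling factor and hidden size.

import torch

batch_videos, subsampling, hidden = 4, 3, 8
pooled_video = torch.randn(batch_videos * subsampling, hidden)
pooled_text = torch.randn(batch_videos * subsampling, hidden)

# fuse the two modalities, then average the `subsampling` clips of each video.
hidden_states = (pooled_video + pooled_text) / 2.
hidden_states = hidden_states.view(-1, subsampling, hidden_states.size(-1))
video_vectors = torch.mean(hidden_states, dim=1)
print(video_vectors.shape)  # torch.Size([4, 8]): one vector per video, ready for self.retriver.add(...)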
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright (c) Facebook, Inc. All Rights Reserved import torch from torch import nn try: from transformers.modeling_bert import ( BertEmbeddings, ACT2FN, ) except ImportError: pass class VideoTokenMLP(nn.Module): def __init__(self, config): super().__init__() input_dim = config.input_dim if hasattr(config, "input_dim") else 512 self.linear1 = nn.Linear(input_dim, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size) self.activation = ACT2FN[config.hidden_act] self.linear2 = nn.Linear(config.hidden_size, config.hidden_size) def forward(self, hidden_states): hidden_states = self.linear1(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.LayerNorm(hidden_states) hidden_states = self.linear2(hidden_states) return hidden_states class MMBertEmbeddings(BertEmbeddings): def __init__(self, config): super().__init__(config) self.max_video_len = config.max_video_len if hasattr(config, "use_seg_emb") and config.use_seg_emb: """the original VLM paper uses seg_embeddings for temporal space. although not used it changed the randomness of initialization. we keep it for reproducibility. """ self.seg_embeddings = nn.Embedding(256, config.hidden_size) def forward( self, input_ids, input_video_embeds, token_type_ids=None, position_ids=None, inputs_embeds=None, ): input_tensor = input_ids if input_ids is not None else inputs_embeds if input_video_embeds is not None: input_shape = ( input_tensor.size(0), input_tensor.size(1) + input_video_embeds.size(1), ) else: input_shape = (input_tensor.size(0), input_tensor.size(1)) if position_ids is None: """ Auto skip position embeddings for text only case. use cases: (1) action localization and segmentation: feed in len-1 dummy video token needs text part to skip input_video_embeds.size(1) for the right position_ids for video [SEP] and rest text tokens. (2) MMFusionShare for two forward passings: in `forward_text`: input_video_embeds is None. need to skip video [SEP] token. # video_len + 1: [CLS] + video_embed # self.max_video_len + 1: [SEP] for video. # self.max_video_len + 2: [SEP] for video. # self.max_video_len + input_ids.size(1): rest for text. """ if input_video_embeds is not None: video_len = input_video_embeds.size(1) starting_offset = self.max_video_len + 1 # video [SEP] ending_offset = self.max_video_len + input_ids.size(1) else: video_len = 0 starting_offset = self.max_video_len + 2 # first text token. ending_offset = self.max_video_len + input_ids.size(1) + 1 position_ids = torch.cat([ self.position_ids[:, :video_len + 1], self.position_ids[:, starting_offset:ending_offset] ], dim=1) if token_type_ids is None: token_type_ids = torch.zeros( input_shape, dtype=torch.long, device=self.position_ids.device ) """ the format of input_ids is [CLS] [SEP] caption [SEP] padding. 
the goal is to build [CLS] video tokens [SEP] caption [SEP] . """ if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if input_video_embeds is not None: inputs_mm_embeds = torch.cat([ inputs_embeds[:, :1], input_video_embeds, inputs_embeds[:, 1:] ], dim=1) else: # text only for `MMFusionShare`. inputs_mm_embeds = inputs_embeds position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_mm_embeds + position_embeddings embeddings += token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class AlignHead(nn.Module): """this will load pre-trained weights for NSP, which is desirable.""" def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, dropout_pooled_output): logits = self.seq_relationship(dropout_pooled_output) return logits
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt/modules/mm.py
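A shape-only sketch of the token layout built by `MMBertEmbeddings.forward` above, which splices the projected video tokens between [CLS] and the caption; all sizes here are made up.

import torch

batch, text_len, video_len, hidden = 2, 10, 6, 16
inputs_embeds = torch.randn(batch, text_len, hidden)        # embeds of "[CLS] [SEP] caption [SEP] pad ..."
input_video_embeds = torch.randn(batch, video_len, hidden)  # projected video features (VideoTokenMLP output)

# splice the video tokens right after [CLS], as MMBertEmbeddings.forward does.
inputs_mm_embeds = torch.cat(
    [inputs_embeds[:, :1], input_video_embeds, inputs_embeds[:, 1:]], dim=1
)
print(inputs_mm_embeds.shape)  # torch.Size([2, 16, 16]) == (batch, video_len + text_len, hidden)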
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import numpy as np import os import pickle from mmpt.utils import ShardedTensor class Shard(object): def __init__( self, vfeat_dir, tfeat_dir, target_dir, file_paths, shard_size=4096 ): self.vfeat_dir = vfeat_dir self.tfeat_dir = tfeat_dir self.target_dir = target_dir self.video_ids = {} for split, file_path in zip(["train", "val"], file_paths): with open(file_path) as fr: self.video_ids[split] = [ line.strip() for line in fr.readlines()] self.shard_size = shard_size def __call__(self, split="train"): for split in ["train", "val"]: meta = {} for shard_idx, shard_offset in enumerate( range(0, len(self.video_ids[split]), self.shard_size) ): print(shard_idx) meta_shard = [] video_shard = [] for video_id in self.video_ids[split][shard_offset:shard_offset+self.shard_size]: meta_shard.append(video_id) npy_file = os.path.join(self.vfeat_dir, video_id + ".npy") video_shard.append(np.load(npy_file)) meta[shard_idx] = meta_shard video_shard = ShardedTensor.from_list(video_shard) target_path = os.path.join( self.target_dir, split + "_" + str(shard_idx)) video_shard.save(target_path) target_path = os.path.join(self.target_dir, split + "_meta") with open(target_path + ".pkl", "wb") as fw: pickle.dump(meta, fw, pickle.HIGHEST_PROTOCOL) if __name__ == "__main__": shard = Shard( "data/feat/feat_how2_s3d", "data/how2/raw_caption_dedup.bert-base-uncased", "data/feat/feat_how2_s3d_shard_small", ["data/how2/how2_s3d_train.lst", "data/how2/how2_s3d_val.lst"] ) shard()
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/scripts/video_feature_extractor/shard_feature.py
# Copyright Howto100M authors. # Copyright (c) Facebook, Inc. All Rights Reserved import torch as th import pandas as pd import os import numpy as np import ffmpeg import random from torch.utils.data import Dataset class VideoLoader(Dataset): """modified from how2's video_feature_extractor.""" def __init__( self, csv=None, video_dict=None, framerate=1, size=112, centercrop=False, hflip=False, **kwargs ): if csv is None and video_dict is None: raise ValueError("csv and video_dict cannot be both None.") if csv is not None: self.csv = pd.read_csv(csv) if video_dict is not None: self.csv = pd.DataFrame.from_dict(video_dict) self.centercrop = centercrop self.size = size self.framerate = framerate self.hflip = hflip def __len__(self): return len(self.csv) def _get_video_dim(self, video_path): probe = ffmpeg.probe(video_path) video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None) width = int(video_stream['width']) height = int(video_stream['height']) return height, width def _get_video_info(self, video_path): probe = ffmpeg.probe(video_path) video_stream = next((stream for stream in probe['streams'] if stream['codec_type'] == 'video'), None) return video_stream def _get_output_dim(self, h, w): if isinstance(self.size, tuple) and len(self.size) == 2: return self.size elif h >= w: return int(h * self.size / w), self.size else: return self.size, int(w * self.size / h) def __getitem__(self, idx): video_path = self.csv['video_path'].values[idx] output_file = self.csv['feature_path'].values[idx] return self._decode(output_file, video_path) def _decode(self, output_file, video_path): if not(os.path.isfile(output_file)) and os.path.isfile(video_path): try: h, w = self._get_video_dim(video_path) except Exception: print('ffprobe failed at: {}'.format(video_path)) return {'video': th.zeros(1), 'input': video_path, 'output': output_file} try: os.makedirs(os.path.dirname(output_file), exist_ok=True) height, width = self._get_output_dim(h, w) cmd = ( ffmpeg .input(video_path) .filter('fps', fps=self.framerate) .filter('scale', width, height) ) if self.hflip: cmd = cmd.filter('hflip') if self.centercrop: x = int((width - self.size) / 2.0) y = int((height - self.size) / 2.0) cmd = cmd.crop(x, y, self.size, self.size) video = self._run(cmd, output_file) except Exception: video = th.zeros(1) else: video = th.zeros(1) return {'video': video, 'input': video_path, 'output': output_file} def _run(self, cmd, output_file): out, _ = ( cmd.output('pipe:', format='rawvideo', pix_fmt='rgb24') .run(capture_stdout=True, quiet=True) ) if self.centercrop and isinstance(self.size, int): height, width = self.size, self.size video = np.frombuffer(out, np.uint8).reshape([-1, height, width, 3]) video = th.from_numpy(video.astype('float32')) return video.permute(0, 3, 1, 2) class VideoVerifier(VideoLoader): def __getitem__(self, idx): video_path = self.csv['video_path'].values[idx] try: return self._get_video_info(video_path) except Exception: # print('ffprobe failed at: {}'.format(video_path)) return None class VideoCompressor(VideoLoader): def __init__( self, csv=None, video_dict=None, framerate=1, size=112, centercrop=False, hflip=False, crf=32, **kwargs ): super().__init__( csv, video_dict, framerate, size, centercrop, hflip ) self.crf = crf def _run(self, cmd, output_file): out, _ = ( cmd.output(filename=output_file, crf=self.crf) .run(quiet=True) ) video = None return video class VideoDownloader(VideoCompressor): """download""" def __getitem__(self, idx): video_path = 
self.csv['video_path'].values[idx] output_file = self.csv['feature_path'].values[idx] if not(os.path.isfile(output_file)): os.makedirs(os.path.dirname(output_file), exist_ok=True) cmd = "wget -O" + output_file + " " + video_path # import subprocess # subprocess.check_output( # cmd, # stderr=subprocess.STDOUT, shell=True) os.system(cmd) return {'video': None, 'input': video_path, 'output': output_file} class AvKeyframeVideoCompressor(VideoLoader): """extract keyframes from a video and save it as jpg. TODO: consider to merge with `CodecProcessor`. """ def __init__( self, csv=None, video_dict=None, framerate=1, size=112, centercrop=False, max_num_frames=5, **kwargs ): super().__init__(csv, video_dict, framerate, size, centercrop) self.max_num_frames = max_num_frames def _get_video_dim(self, video_fn): """decord cannot probe the size of a video, we use pyav instead.""" import av with av.open(video_fn) as container: height = container.streams.video[0].codec_context.height width = container.streams.video[0].codec_context.width return height, width def _get_output_dim(self, height, width): """ keep the shorter side be `self.size`, strech the other. """ if height >= width: return int(height * self.size / width), self.size else: return self.size, int(width * self.size / height) def __getitem__(self, idx): import av video_path = self.csv['video_path'].values[idx] output_file = self.csv['feature_path'].values[idx] if not(os.path.isdir(output_file)) and os.path.isfile(video_path): try: h, w = self._get_video_dim(video_path) except Exception: print('probe failed at: {}'.format(video_path)) return {'video': th.zeros(1), 'input': video_path, 'output': output_file} try: height, width = self._get_output_dim(h, w) # new for av. with av.open(video_path) as container: container.streams.video[0].thread_type = "AUTO" container.streams.video[0].codec_context.height = height container.streams.video[0].codec_context.width = width if self.framerate == 0: # keyframe. container.streams.video[0].codec_context.skip_frame = 'NONKEY' frames = [] for frame in container.decode(video=0): frames.append(frame) frames = random.sample(frames, self.max_num_frames) os.makedirs(output_file, exist_ok=True) for frame in frames: frame.to_image().save( os.path.join( output_file, "%04d.jpg" % frame.index)) except Exception: print('extract failed at: {}'.format(video_path)) return {'video': th.zeros(1), 'input': video_path, 'output': output_file} video = th.zeros(1) return {'video': video, 'input': video_path, 'output': output_file}
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/scripts/video_feature_extractor/videoreader.py
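A hedged usage sketch for `VideoLoader` above; it assumes ffmpeg-python and pandas are installed and that this script folder is importable (extract.py below imports it the same way), and the video/feature paths are placeholders.

from torch.utils.data import DataLoader

from videoreader import VideoLoader

video_dict = {
    "video_path": ["data/videos/example.mp4"],    # hypothetical input video
    "feature_path": ["data/feat/example.npy"],    # hypothetical output feature file
}
dataset = VideoLoader(video_dict=video_dict, framerate=30, size=224, centercrop=True)
loader = DataLoader(dataset, batch_size=1, num_workers=0)

for sample in loader:
    frames = sample["video"][0]
    if frames.dim() == 4:        # (num_frames, 3, H, W) when decoding succeeded
        print(sample["input"][0], frames.shape)
    else:                        # a placeholder zeros(1) tensor when the file is missing or broken
        print("skipped", sample["input"][0])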
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import urllib.parse import json from tqdm import tqdm # TODO: extending to other datasets. supported_formats = {} class PathBuilder(object): @classmethod def build(cls, video_dirs, feature_dir, ext, shards=0, split=None): meta_fn = os.path.join(feature_dir, "meta_plan.json") os.makedirs(feature_dir, exist_ok=True) if os.path.isfile(meta_fn): with open(meta_fn) as fr: meta = json.load(fr) return meta print("searching videos...") video_id_to_path = {} for video_dir in video_dirs.split(","): # TODO: add supports of recursive listdir. if video_dir in supported_formats: supported_formats[video_dir].load(video_dir, video_id_to_path) else: for idx, fn in enumerate(tqdm(os.listdir(video_dir))): video_fn = os.path.join(video_dir, fn) if os.path.isfile(video_fn): video_id = os.path.splitext(fn)[0] video_id_to_path[video_id] = video_fn elif os.path.isdir(video_fn): # shards of folders. shard_dir = video_fn for idx, fn in enumerate(os.listdir(shard_dir)): video_fn = os.path.join(shard_dir, fn) if os.path.isfile(video_fn): video_id = os.path.splitext(fn)[0] video_id_to_path[video_id] = video_fn video_path, feature_path = [], [] valid_ext = set() for idx, video_id in enumerate(video_id_to_path): video_path.append(video_id_to_path[video_id]) if ext is None: # use original file ext for format compatibility. video_id_to_path[video_id] path = urllib.parse.urlparse(video_id_to_path[video_id]).path ext = os.path.splitext(path)[1] if ext not in valid_ext: valid_ext.add(ext) print("adding", ext) if shards: shard_id = str(idx % shards) feature_fn = os.path.join( feature_dir, shard_id, video_id + ext) else: feature_fn = os.path.join( feature_dir, video_id + ext) feature_path.append(feature_fn) print("targeting", len(feature_path), "videos") meta = { "video_path": video_path, "feature_path": feature_path} with open(meta_fn, "w") as fw: json.dump(meta, fw) if split is not None: splits = split.split("/") assert len(splits) == 2 cur, total = int(splits[0]), int(splits[1]) assert cur < total import math chunk = math.ceil(len(meta["video_path"]) / total) start = cur * chunk end = (cur + 1) * chunk meta = { "video_path": meta["video_path"][start:end], "feature_path": meta["feature_path"][start:end] } return meta
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/scripts/video_feature_extractor/pathbuilder.py
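A possible way to call `PathBuilder.build` above; the directories are placeholders, and the call caches its plan as meta_plan.json inside the feature directory.

from pathbuilder import PathBuilder

# scan one or more comma-separated video folders and plan matching feature paths.
meta = PathBuilder.build(
    video_dirs="data/videos",                  # placeholder input folder(s)
    feature_dir="data/feat/feat_how2_s3d",     # placeholder output folder
    ext=".npy",
    shards=0,
)
print(len(meta["video_path"]), "videos planned")
print(meta["video_path"][:2])
print(meta["feature_path"][:2])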
# Copyright (c) Howto100M authors and Facebook, Inc. All Rights Reserved import torch as th from torch import nn class GlobalAvgPool(nn.Module): def __init__(self): super(GlobalAvgPool, self).__init__() def forward(self, x): return th.mean(x, dim=[-2, -1]) def get_model(args): assert args.type in ['2d', '3d', 'vmz', 's3d', 'vae'] if args.type == '2d': print('Loading 2D-ResNet-152 ...') import torchvision.models as models model = models.resnet152(pretrained=True) model = nn.Sequential(*list(model.children())[:-2], GlobalAvgPool()) model = model.cuda() elif args.type == 'vmz': print('Loading VMZ ...') from vmz34 import r2plus1d_34 model = r2plus1d_34(pretrained_path=args.vmz_model_path, pretrained_num_classes=487) model = model.cuda() elif args.type == 's3d': # we use one copy of s3d instead of dup another one for feature extraction. from mmpt.processors.models.s3dg import S3D model = S3D('pretrained_models/s3d_dict.npy', 512) model.load_state_dict(th.load('pretrained_models/s3d_howto100m.pth')) model = model.cuda() elif args.type == '3d': print('Loading 3D-ResneXt-101 ...') from videocnn.models import resnext model = resnext.resnet101( num_classes=400, shortcut_type='B', cardinality=32, sample_size=112, sample_duration=16, last_fc=False) model = model.cuda() model_data = th.load(args.resnext101_model_path) model.load_state_dict(model_data) elif args.type == 'vae': from openaivae import OpenAIParallelDiscreteVAE model = OpenAIParallelDiscreteVAE() model = model.cuda() else: raise ValueError("model not supported yet.") model.eval() print('loaded') return model
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/scripts/video_feature_extractor/model.py
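A hedged usage sketch for `get_model` above using the most self-contained backend ('2d', torchvision ResNet-152 plus global average pooling); it needs a CUDA device and the torchvision weights, and the frame batch is random example data.

import argparse

import torch as th

from model import get_model

args = argparse.Namespace(type="2d")       # '2d' only needs the torchvision weights, no extra checkpoints
model = get_model(args)                    # note: get_model moves the model to CUDA and sets eval mode
with th.no_grad():
    frames = th.randn(4, 3, 224, 224).cuda()
    feats = model(frames)
    print(feats.shape)                     # torch.Size([4, 2048]): pooled ResNet-152 features per frame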
# Copyright Howto100m authors. # Copyright (c) Facebook, Inc. All Rights Reserved import torch as th class Normalize(object): def __init__(self, mean, std): self.mean = th.FloatTensor(mean).view(1, 3, 1, 1) self.std = th.FloatTensor(std).view(1, 3, 1, 1) def __call__(self, tensor): tensor = (tensor - self.mean) / (self.std + 1e-8) return tensor class Preprocessing(object): def __init__(self, type): self.type = type if type == '2d': self.norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) elif type == '3d': self.norm = Normalize(mean=[110.6, 103.2, 96.3], std=[1.0, 1.0, 1.0]) elif type == 'vmz': self.norm = Normalize(mean=[110.201, 100.64, 95.997], std=[58.1489, 56.4701, 55.3324]) def _zero_pad(self, tensor, size): n = size - len(tensor) % size if n == size: return tensor else: z = th.zeros(n, tensor.shape[1], tensor.shape[2], tensor.shape[3]) return th.cat((tensor, z), 0) def __call__(self, tensor): if self.type == '2d': tensor = tensor / 255.0 tensor = self.norm(tensor) elif self.type == 'vmz': #tensor = self._zero_pad(tensor, 8) tensor = self._zero_pad(tensor, 10) tensor = self.norm(tensor) #tensor = tensor.view(-1, 8, 3, 112, 112) tensor = tensor.view(-1, 10, 3, 112, 112) tensor = tensor.transpose(1, 2) elif self.type == '3d': tensor = self._zero_pad(tensor, 16) tensor = self.norm(tensor) tensor = tensor.view(-1, 16, 3, 112, 112) tensor = tensor.transpose(1, 2) elif self.type == 's3d': tensor = tensor / 255.0 tensor = self._zero_pad(tensor, 30) tensor = tensor.view(-1, 30, 3, 224, 224) # N x 30 x 3 x H x W tensor = tensor.transpose(1, 2) # N x 3 x 30 x H x W # for vae do nothing return tensor
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/scripts/video_feature_extractor/preprocessing.py
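A quick sketch of what `Preprocessing("s3d")` above does to a decoded frame tensor; it assumes this script folder is importable, and the frame count is an arbitrary example.

import torch as th

from preprocessing import Preprocessing

preprocess = Preprocessing("s3d")
frames = th.randint(0, 256, (75, 3, 224, 224)).float()   # 75 decoded RGB frames (~2.5s at 30 fps)
clips = preprocess(frames)
# frames are scaled to [0, 1], zero-padded to a multiple of 30 and regrouped
# into 1-second clips shaped (n_clips, 3, 30, 224, 224) for the S3D trunk.
print(clips.shape)   # torch.Size([3, 3, 30, 224, 224])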
# Copyright Howto100M authors.
# Copyright (c) Facebook, Inc. All Rights Reserved

import torch as th
import torch.nn.functional as F
import math
import numpy as np
import argparse

from torch.utils.data import DataLoader
from model import get_model
from preprocessing import Preprocessing
from random_sequence_shuffler import RandomSequenceSampler

from tqdm import tqdm
from pathbuilder import PathBuilder
from videoreader import VideoLoader


parser = argparse.ArgumentParser(description='Easy video feature extractor')

parser.add_argument('--vdir', type=str)
parser.add_argument('--fdir', type=str)
parser.add_argument('--hflip', type=int, default=0)
parser.add_argument('--batch_size', type=int, default=64, help='batch size')
parser.add_argument('--type', type=str, default='2d', help='CNN type')
parser.add_argument('--half_precision', type=int, default=0,
                    help='output half precision float')
parser.add_argument('--num_decoding_thread', type=int, default=4,
                    help='Num parallel thread for video decoding')
parser.add_argument('--l2_normalize', type=int, default=1,
                    help='l2 normalize feature')
parser.add_argument('--resnext101_model_path', type=str,
                    default='model/resnext101.pth',
                    help='Resnext model path')
parser.add_argument('--vmz_model_path', type=str,
                    default='model/r2plus1d_34_clip8_ig65m_from_scratch-9bae36ae.pth',
                    help='vmz model path')
args = parser.parse_args()


# TODO: refactor all args into config. (current code is from different people.)
CONFIGS = {
    "2d": {
        "fps": 1,
        "size": 224,
        "centercrop": False,
        "shards": 0,
    },
    "3d": {
        "fps": 24,
        "size": 112,
        "centercrop": True,
        "shards": 0,
    },
    "s3d": {
        "fps": 30,
        "size": 224,
        "centercrop": True,
        "shards": 0,
    },
    "vmz": {
        "fps": 24,
        "size": 112,
        "centercrop": True,
        "shards": 0,
    },
    "vae": {
        "fps": 2,
        "size": 256,
        "centercrop": True,
        "shards": 100,
    }
}

config = CONFIGS[args.type]

video_dirs = args.vdir
feature_dir = args.fdir

video_dict = PathBuilder.build(video_dirs, feature_dir, ".npy", config["shards"])

dataset = VideoLoader(
    video_dict=video_dict,
    framerate=config["fps"],
    size=config["size"],
    centercrop=config["centercrop"],
    hflip=args.hflip
)
n_dataset = len(dataset)
sampler = RandomSequenceSampler(n_dataset, 10)
loader = DataLoader(
    dataset,
    batch_size=1,
    shuffle=False,
    num_workers=args.num_decoding_thread,
    sampler=sampler if n_dataset > 10 else None,
)
preprocess = Preprocessing(args.type)
model = get_model(args)

with th.no_grad():
    for k, data in tqdm(enumerate(loader), total=loader.__len__(), ascii=True):
        input_file = data['input'][0]
        output_file = data['output'][0]
        if len(data['video'].shape) > 3:
            video = data['video'].squeeze()
            if len(video.shape) == 4:
                video = preprocess(video)
                n_chunk = len(video)
                if args.type == 'vmz':
                    n_chunk = math.ceil(n_chunk / float(3))
                    features = th.cuda.FloatTensor(n_chunk, 512).fill_(0)
                elif args.type == 's3d':
                    features = th.cuda.FloatTensor(n_chunk, 512).fill_(0)
                elif args.type == "vae":
                    features = th.cuda.LongTensor(n_chunk, 1024).fill_(0)
                else:
                    features = th.cuda.FloatTensor(n_chunk, 2048).fill_(0)
                n_iter = int(math.ceil(n_chunk / float(args.batch_size)))
                for i in range(n_iter):
                    factor = 1
                    if args.type == 'vmz':
                        factor = 3
                    min_ind = factor * i * args.batch_size
                    max_ind = factor * (i + 1) * args.batch_size
                    video_batch = video[min_ind:max_ind:factor].cuda()
                    if args.type == '2d':
                        batch_features = model(video_batch)  # (51, 487), (51, 512)
                    elif args.type == 's3d':
                        batch_features = model(video_batch)
                        batch_features = batch_features['video_embedding']
                    elif args.type == "vae":
                        # image_code.
                        batch_features = model(video_batch)
                    else:
                        batch_pred, batch_features = model(video_batch)  # (51, 487), (51, 512)
                    if args.l2_normalize:
                        batch_features = F.normalize(batch_features, dim=1)
                    features[i * args.batch_size:(i + 1) * args.batch_size] = batch_features
                features = features.cpu().numpy()
                if args.half_precision:
                    if args.type == "vae":
                        features = features.astype(np.int16)
                    else:
                        features = features.astype('float16')
                else:
                    if args.type == "vae":
                        features = features.astype(np.int32)
                    else:
                        features = features.astype('float32')
                np.save(output_file, features)
        else:
            print('Video {} error.'.format(input_file))
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/scripts/video_feature_extractor/extract.py
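The vmz branch of the extractor above subsamples decoded clips with a stride of 3 while writing into a proportionally smaller feature buffer; the following standalone sketch (made-up sizes, numpy stand-ins for the CUDA tensors) illustrates that indexing pattern.

# Illustrative sketch only: strided chunk batching as in the 'vmz' branch above.
import math
import numpy as np

n_chunk_raw = 20        # number of decoded clips before subsampling (made-up)
factor = 3              # 'vmz' keeps every 3rd clip
batch_size = 4
n_chunk = math.ceil(n_chunk_raw / float(factor))   # rows in the feature buffer (7)

video = np.arange(n_chunk_raw)                     # stand-in for the decoded clip tensor
features = np.zeros(n_chunk, dtype=np.int64)       # stand-in for the CUDA feature buffer

n_iter = int(math.ceil(n_chunk / float(batch_size)))
for i in range(n_iter):
    min_ind = factor * i * batch_size
    max_ind = factor * (i + 1) * batch_size
    batch = video[min_ind:max_ind:factor]          # strided slice, at most batch_size items
    features[i * batch_size:i * batch_size + len(batch)] = batch

print(features)  # [ 0  3  6  9 12 15 18] -- every 3rd clip index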
# Copyright (c) Facebook, Inc. All Rights Reserved

import numpy as np

from torch.utils.data.sampler import Sampler


class RandomSequenceSampler(Sampler):

    def __init__(self, n_sample, seq_len):
        self.n_sample = n_sample
        self.seq_len = seq_len

    def _pad_ind(self, ind):
        zeros = np.zeros(self.seq_len - self.n_sample % self.seq_len)
        ind = np.concatenate((ind, zeros))
        return ind

    def __iter__(self):
        idx = np.arange(self.n_sample)
        if self.n_sample % self.seq_len != 0:
            idx = self._pad_ind(idx)
        idx = np.reshape(idx, (-1, self.seq_len))
        np.random.shuffle(idx)
        idx = np.reshape(idx, (-1))
        return iter(idx.astype(int))

    def __len__(self):
        return self.n_sample + (self.seq_len - self.n_sample % self.seq_len)
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/scripts/video_feature_extractor/random_sequence_shuffler.py
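RandomSequenceSampler above pads the index list with zeros up to a multiple of seq_len, shuffles whole blocks, and flattens the result; here is a small self-contained sketch of that behavior with toy sizes assumed for illustration.

# Illustrative sketch only: block-wise shuffling as in RandomSequenceSampler above.
import numpy as np

n_sample, seq_len = 7, 3
idx = np.arange(n_sample)
if n_sample % seq_len != 0:
    idx = np.concatenate((idx, np.zeros(seq_len - n_sample % seq_len)))
idx = idx.reshape(-1, seq_len)      # [[0 1 2], [3 4 5], [6 0 0]]
np.random.shuffle(idx)              # shuffles rows only, keeping each block contiguous
print(idx.reshape(-1).astype(int))  # e.g. [3 4 5 6 0 0 0 1 2]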
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import pickle
import os
import argparse
import numpy as np

from torch.utils.data import Dataset, DataLoader

from mmpt.processors import PKLJSONStrTextProcessor
from mmpt.utils import ShardedTensor, recursive_config


class TokenizerDataset(Dataset):
    def __init__(self, config):
        self.text_processor = PKLJSONStrTextProcessor(config)
        self.video_ids = list(self.text_processor.data.keys())

    def __getitem__(self, idx):
        video_id = self.video_ids[idx]
        return video_id, self.text_processor(video_id)

    def __len__(self):
        return len(self.video_ids)


def numpify(shard_idx, video_ids, captions, target_dir, split, prefix, max_cap_len=32):
    startends = []
    caps_ids = []
    for video_id in video_ids:
        caption = captions[video_id]
        startend = []
        cap_ids = []
        for start, end, cap in zip(
                caption["start"], caption["end"], caption["cap"]):
            startend.append(np.array([start, end]).astype("float32"))
            cap_id = np.full((max_cap_len,), -1, dtype=np.int32)
            cap = cap[:max_cap_len]
            cap_id[:len(cap)] = cap
            cap_ids.append(cap_id)
        startends.append(np.stack(startend))
        caps_ids.append(np.stack(cap_ids))

    startends = ShardedTensor.from_list(startends)
    target_path = os.path.join(
        target_dir,
        prefix + split + "_" + str(shard_idx)
    )
    print("save to", target_path)
    startends.save(target_path + ".startends")
    caps_ids = ShardedTensor.from_list(caps_ids)
    caps_ids.save(target_path + ".caps_ids")


def sharding(config, out_file):
    with open(out_file, "rb") as fr:
        captions = pickle.load(fr)
    target_dir = config.target_dir
    prefix = os.path.basename(
        os.path.splitext(config.caption_pkl_path)[0]
    ) + "." + config.bert_name + "."
    for split in ["train", "val"]:
        target_path = os.path.join(target_dir, split + "_meta")
        with open(target_path + ".pkl", "rb") as fr:
            meta = pickle.load(fr)
        print("load meta", target_path, len(meta))
        for shard_id in meta:
            numpify(
                shard_id, meta[shard_id], captions,
                target_dir, split, prefix
            )


def tokenize(config, out_file):
    def collator(samples):
        return samples

    dataset = TokenizerDataset(config)
    data = {}
    for idx, batch in enumerate(
            DataLoader(dataset, collate_fn=collator, num_workers=16)):
        for video_id, caption in batch:
            data[video_id] = caption
        if idx % 5000 == 0:
            print(idx)
    with open(out_file, "wb") as fw:
        pickle.dump(data, fw, pickle.HIGHEST_PROTOCOL)


def main(args):
    config = recursive_config(args.config).dataset

    out_file = os.path.splitext(config.caption_pkl_path)[0] \
        + "." + config.bert_name + ".pkl"
    if not os.path.isfile(out_file):
        tokenize(config, out_file)
    sharding(config, out_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="pretokenize (raw_)caption.json into pkl.")
    parser.add_argument('config', type=str)
    args = parser.parse_args()
    main(args)
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/scripts/text_token_extractor/pretokenization.py
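numpify() above stores each caption as a fixed-length int32 vector, truncated to max_cap_len and right-padded with -1; a minimal sketch of that padding scheme with made-up token ids follows.

# Illustrative sketch only: caption padding as in numpify() above.
import numpy as np

max_cap_len = 8
cap = [101, 2023, 2003, 1037, 102]      # made-up BERT token ids for one caption
cap_id = np.full((max_cap_len,), -1, dtype=np.int32)
cap = cap[:max_cap_len]                 # truncate overly long captions
cap_id[:len(cap)] = cap                 # right-pad the rest with -1
print(cap_id)                           # [ 101 2023 2003 1037  102   -1   -1   -1]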
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import glob
import argparse
import pprint
import omegaconf

from omegaconf import OmegaConf
from torch.utils.data import DataLoader

from mmpt.utils import load_config
from mmpt.evaluators import Evaluator
from mmpt.evaluators import predictor as predictor_path
from mmpt.tasks import Task
from mmpt import processors
from mmpt.datasets import MMDataset


def get_dataloader(config):
    meta_processor_cls = getattr(processors, config.dataset.meta_processor)
    video_processor_cls = getattr(processors, config.dataset.video_processor)
    text_processor_cls = getattr(processors, config.dataset.text_processor)
    aligner_cls = getattr(processors, config.dataset.aligner)

    meta_processor = meta_processor_cls(config.dataset)
    video_processor = video_processor_cls(config.dataset)
    text_processor = text_processor_cls(config.dataset)
    aligner = aligner_cls(config.dataset)

    test_data = MMDataset(
        meta_processor,
        video_processor,
        text_processor,
        aligner,
    )
    print("test_len", len(test_data))
    output = test_data[0]
    test_data.print_example(output)

    test_dataloader = DataLoader(
        test_data,
        batch_size=config.fairseq.dataset.batch_size,
        shuffle=False,
        num_workers=6,
        collate_fn=test_data.collater,
    )
    return test_dataloader


def main(args):
    config = load_config(args)

    if isinstance(config, omegaconf.dictconfig.DictConfig):
        print(OmegaConf.to_yaml(config))
    else:
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(config)  # PrettyPrinter exposes pprint(), not print().

    mmtask = Task.config_task(config)
    mmtask.build_model()

    test_dataloader = get_dataloader(config)
    checkpoint_search_path = os.path.dirname(config.eval.save_path)
    results = []

    prefix = os.path.basename(args.taskconfig)
    if prefix.startswith("test"):
        # loop all checkpoints for datasets without a validation set.
        if "best" not in config.fairseq.common_eval.path:
            print("eval each epoch.")
            for checkpoint in glob.glob(checkpoint_search_path + "/checkpoint*"):
                model = mmtask.load_checkpoint(checkpoint)
                ckpt = os.path.basename(checkpoint)
                evaluator = Evaluator(config)
                output = evaluator.evaluate(
                    model, test_dataloader, ckpt + "_merged")
                results.append((checkpoint, output))
        # use the one specified by the config lastly.
        model = mmtask.load_checkpoint(config.fairseq.common_eval.path)
        evaluator = Evaluator(config)
        output = evaluator.evaluate(model, test_dataloader)
        results.append((config.fairseq.common_eval.path, output))

        best_result = None
        best_metric = 0.
        for checkpoint, result in results:
            print(checkpoint)
            evaluator.metric.print_computed_metrics(result)
            best_score = evaluator.metric.best_metric(result)
            if best_score > best_metric:
                best_result = (checkpoint, result)
                best_metric = best_score
        print("best results:")
        print(best_result[0])
        evaluator.metric.print_computed_metrics(best_result[1])

    elif prefix.startswith("vis"):
        model = mmtask.load_checkpoint(config.fairseq.common_eval.path)
        predictor_cls = getattr(predictor_path, config.predictor)
        predictor = predictor_cls(config)
        predictor.predict_loop(model, test_dataloader, mmtask, None)
    else:
        raise ValueError("unknown prefix of the config file", args.taskconfig)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("taskconfig", type=str)
    args = parser.parse_args()
    main(args)
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt_cli/predict.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import os

from mmpt.utils import recursive_config


class BaseJob(object):
    def __init__(self, yaml_file, dryrun=False):
        self.yaml_file = yaml_file
        self.config = recursive_config(yaml_file)
        self.dryrun = dryrun

    def submit(self, **kwargs):
        raise NotImplementedError

    def _normalize_cmd(self, cmd_list):
        cmd_list = list(cmd_list)
        yaml_index = cmd_list.index("[yaml]")
        cmd_list[yaml_index] = self.yaml_file
        return cmd_list


class LocalJob(BaseJob):

    CMD_CONFIG = {
        "local_single": [
            "fairseq-train", "[yaml]", "--user-dir", "mmpt",
            "--task", "mmtask", "--arch", "mmarch",
            "--criterion", "mmloss",
        ],
        "local_small": [
            "fairseq-train", "[yaml]", "--user-dir", "mmpt",
            "--task", "mmtask", "--arch", "mmarch",
            "--criterion", "mmloss",
            "--distributed-world-size", "2"
        ],
        "local_big": [
            "fairseq-train", "[yaml]", "--user-dir", "mmpt",
            "--task", "mmtask", "--arch", "mmarch",
            "--criterion", "mmloss",
            "--distributed-world-size", "8"
        ],
        "local_predict": ["python", "mmpt_cli/predict.py", "[yaml]"],
    }

    def __init__(self, yaml_file, job_type=None, dryrun=False):
        super().__init__(yaml_file, dryrun)
        if job_type is None:
            self.job_type = "local_single"
            if self.config.task_type is not None:
                self.job_type = self.config.task_type
        else:
            self.job_type = job_type
        if self.job_type in ["local_single", "local_small"]:
            if self.config.fairseq.dataset.batch_size > 32:
                print("decreasing batch_size to 32 for local testing?")

    def submit(self):
        cmd_list = self._normalize_cmd(LocalJob.CMD_CONFIG[self.job_type])
        if "predict" not in self.job_type:
            # append fairseq args.
            from mmpt.utils import load_config

            config = load_config(config_file=self.yaml_file)
            for field in config.fairseq:
                for key in config.fairseq[field]:
                    if key in ["fp16", "reset_optimizer", "reset_dataloader", "reset_meters"]:
                        # a list of binary flags.
                        param = ["--" + key.replace("_", "-")]
                    else:
                        if key == "lr":
                            value = str(config.fairseq[field][key][0])
                        elif key == "adam_betas":
                            value = "'" + str(config.fairseq[field][key]) + "'"
                        else:
                            value = str(config.fairseq[field][key])
                        param = [
                            "--" + key.replace("_", "-"),
                            value
                        ]
                    cmd_list.extend(param)

        print("launching", " ".join(cmd_list))
        if not self.dryrun:
            os.system(" ".join(cmd_list))
        return JobStatus("12345678")


class JobStatus(object):
    def __init__(self, job_id):
        self.job_id = job_id

    def __repr__(self):
        return self.job_id

    def __str__(self):
        return self.job_id

    def done(self):
        return False

    def running(self):
        return False

    def result(self):
        if self.done():
            return "{} is done.".format(self.job_id)
        else:
            return "{} is running.".format(self.job_id)

    def stderr(self):
        return self.result()

    def stdout(self):
        return self.result()
KosmosX-API-main
kosmosX/fairseq/examples/MMPT/mmpt_cli/localjob.py
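LocalJob.submit() above flattens the nested fairseq config into command-line flags, with special handling for binary flags, lr, and adam_betas; a minimal sketch of that conversion using a hypothetical config dict follows.

# Illustrative sketch only: config-to-flag conversion as in LocalJob.submit() above.
fairseq_cfg = {  # hypothetical nested config, stand-in for config.fairseq
    "common": {"fp16": True, "log_interval": 1000},
    "optimization": {"lr": [0.00005], "adam_betas": "(0.9, 0.98)"},
}

cmd_list = ["fairseq-train", "config.yaml"]
for field in fairseq_cfg:
    for key, val in fairseq_cfg[field].items():
        if key in ["fp16", "reset_optimizer", "reset_dataloader", "reset_meters"]:
            param = ["--" + key.replace("_", "-")]          # binary flag, no value
        elif key == "lr":
            param = ["--lr", str(val[0])]                   # single float expected here
        elif key == "adam_betas":
            param = ["--adam-betas", "'" + str(val) + "'"]  # quoted tuple string
        else:
            param = ["--" + key.replace("_", "-"), str(val)]
        cmd_list.extend(param)

print(" ".join(cmd_list))
# fairseq-train config.yaml --fp16 --log-interval 1000 --lr 5e-05 --adam-betas '(0.9, 0.98)'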
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import rxf_src  # noqa
KosmosX-API-main
kosmosX/fairseq/examples/rxf/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

from . import label_smoothed_cross_entropy_r3f, sentence_prediction_r3f  # noqa
KosmosX-API-main
kosmosX/fairseq/examples/rxf/rxf_src/__init__.py