text (stringlengths 5–22M) | id (stringlengths 12–177) | metadata (dict) | __index_level_0__ (int64 0–1.37k)
---|---|---|---|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
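# Requires the MOSES and FASTBPE environment variables to point to the
# mosesdecoder and fastBPE installations used for tokenization and BPE below.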
DATA_DIR=../../data/PubMedQA
prefix=pqal_qcl_ansis
RAW_DATA_DIR=${DATA_DIR}/raw
OUTPUT_DIR=${DATA_DIR}/${prefix}-bin
if [ -d "${OUTPUT_DIR}" ]; then
rm -rf ${OUTPUT_DIR}
fi
python rebuild_data.py ${RAW_DATA_DIR} ${prefix}
cp ${DATA_DIR}/../dict.txt ${RAW_DATA_DIR}/
cp ${DATA_DIR}/../bpecodes ${RAW_DATA_DIR}/
SPLIT=(train valid test)
for ff in ${SPLIT[@]}; do
if [ -f "${RAW_DATA_DIR}/${prefix}_$ff.y" ]; then
echo "Preprocessing ${ff}"
perl ${MOSES}/scripts/tokenizer/tokenizer.perl -l en -a -threads 8 < ${RAW_DATA_DIR}/${prefix}_$ff.x > ${RAW_DATA_DIR}/${prefix}_$ff.tok.x
perl ${MOSES}/scripts/tokenizer/tokenizer.perl -l en -a -threads 8 < ${RAW_DATA_DIR}/${prefix}_$ff.y > ${RAW_DATA_DIR}/${prefix}_$ff.tok.y
${FASTBPE}/fast applybpe ${RAW_DATA_DIR}/${prefix}_$ff.tok.bpe.x ${RAW_DATA_DIR}/${prefix}_$ff.tok.x ${RAW_DATA_DIR}/bpecodes
${FASTBPE}/fast applybpe ${RAW_DATA_DIR}/${prefix}_$ff.tok.bpe.y ${RAW_DATA_DIR}/${prefix}_$ff.tok.y ${RAW_DATA_DIR}/bpecodes
rm ${RAW_DATA_DIR}/${prefix}_$ff.tok.x ${RAW_DATA_DIR}/${prefix}_$ff.tok.y
fi
done
# do binarize
fairseq-preprocess \
-s x -t y --workers 8 \
--joined-dictionary \
--trainpref ${RAW_DATA_DIR}/${prefix}_train.tok.bpe \
--validpref ${RAW_DATA_DIR}/${prefix}_valid.tok.bpe \
--testpref ${RAW_DATA_DIR}/${prefix}_test.tok.bpe \
--destdir ${OUTPUT_DIR} \
--srcdict ${RAW_DATA_DIR}/dict.txt
|
BioGPT/examples/QA-PubMedQA/preprocess.sh/0
|
{
"file_path": "BioGPT/examples/QA-PubMedQA/preprocess.sh",
"repo_id": "BioGPT",
"token_count": 725
}
| 143 |
# Relation Extraction on KD-DTI
## Data
Following the original [KD-DTI dataset](https://github.com/bert-nmt/BERT-DTI), you should first register a DrugBank account, download the XML dataset, and replace the entity IDs with the corresponding entity names from DrugBank (see the sketch below).
Then, you can process the data by:
``` bash
bash preprocess.sh
```
For more details, please see [here](https://github.com/bert-nmt/BERT-DTI).
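As a rough illustration, the ID-to-name replacement could look like the minimal sketch below. The file names, the ID pattern handling, and the DrugBank XML namespace are assumptions for illustration, not the official preprocessing:
``` python
import re
import xml.etree.ElementTree as ET

# Assumed paths; adjust to where the DrugBank XML and the KD-DTI files live.
DRUGBANK_XML = "full_database.xml"
NS = "{http://www.drugbank.ca}"  # namespace used by recent DrugBank releases

# Build a DrugBank-ID -> drug-name mapping by streaming the (large) XML file.
id2name = {}
for _, elem in ET.iterparse(DRUGBANK_XML):
    if elem.tag == f"{NS}drug":
        drug_id = elem.findtext(f"{NS}drugbank-id")
        name = elem.findtext(f"{NS}name")
        if drug_id and name:
            id2name[drug_id] = name
        elem.clear()  # keep memory bounded

# Replace IDs such as DB00316 with their names in a (hypothetical) raw file.
with open("kd_dti_train.txt") as fin, open("kd_dti_train.named.txt", "w") as fout:
    for line in fin:
        fout.write(re.sub(r"DB\d{5}", lambda m: id2name.get(m.group(0), m.group(0)), line))
```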
## Training
You can fine-tune the pre-trained BioGPT on the task by:
``` bash
bash train.sh
```
## Model Checkpoint
We provide our fine-tuned model for this task; see [here](../../README.md#pre-trained-models).
## Inference and Evaluation
You can run inference and evaluate the model on the test set by:
``` bash
bash infer.sh
```
|
BioGPT/examples/RE-DTI/README.md/0
|
{
"file_path": "BioGPT/examples/RE-DTI/README.md",
"repo_id": "BioGPT",
"token_count": 235
}
| 144 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
from dataclasses import dataclass, field
from typing import Optional, Dict, List, Tuple
from argparse import Namespace
import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
from fairseq import options, utils
from fairseq.dataclass import ChoiceEnum, FairseqDataclass
from fairseq.models import (
FairseqLanguageModel,
register_model,
register_model_architecture,
)
from fairseq.models.transformer import (
DEFAULT_MIN_PARAMS_TO_WRAP, Embedding, TransformerDecoder
)
from fairseq.modules import AdaptiveInput, CharacterTokenEmbedder
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.models.transformer_lm import (
TransformerLanguageModelConfig,
TransformerLanguageModel,
transformer_lm_gpt2_small,
transformer_lm_gpt2_big,
)
from omegaconf import II, DictConfig
logger = logging.getLogger(__name__)
@register_model("transformer_lm_prompt", dataclass=TransformerLanguageModelConfig)
class TransformerLanguageModelPrompt(TransformerLanguageModel):
def load_state_dict(
self,
state_dict,
strict=True,
model_cfg: Optional[DictConfig] = None,
args: Optional[Namespace] = None,
):
"""Copies parameters and buffers from *state_dict* into this module and
its descendants.
Overrides the method in :class:`nn.Module`. Compared with that method
this additionally "upgrades" *state_dicts* from old checkpoints.
"""
if model_cfg is None and args is not None:
            logger.warning("using 'args' is deprecated, please update your code to use dataclass config")
model_cfg = convert_namespace_to_omegaconf(args).model
self.upgrade_state_dict(state_dict)
device = state_dict["decoder.embed_tokens.weight"].device
if self.decoder.embed_tokens.weight.shape[0] > state_dict["decoder.embed_tokens.weight"].shape[0]:
shape = state_dict["decoder.embed_tokens.weight"].shape
state_dict["decoder.embed_tokens.weight"] = torch.cat(
[state_dict["decoder.embed_tokens.weight"],
self.decoder.embed_tokens.weight[shape[0]:].to(device)]
)
if self.decoder.output_projection.weight.shape[0] > state_dict["decoder.output_projection.weight"].shape[0]:
shape = state_dict["decoder.output_projection.weight"].shape
device = state_dict["decoder.output_projection.weight"].device
state_dict["decoder.output_projection.weight"] = torch.cat(
[state_dict["decoder.output_projection.weight"],
self.decoder.output_projection.weight[shape[0]:].to(device)]
)
from fairseq.checkpoint_utils import prune_state_dict
new_state_dict = prune_state_dict(state_dict, model_cfg)
return super().load_state_dict(new_state_dict, strict)
@register_model_architecture("transformer_lm_prompt", "transformer_lm_prompt_biogpt")
def transformer_lm_prompt_biogpt(args):
transformer_lm_gpt2_small(args)
@register_model_architecture("transformer_lm_prompt", "transformer_lm_prompt_biogpt_large")
def transformer_lm_prompt_gpt2_big(args):
transformer_lm_gpt2_big(args)
|
BioGPT/src/transformer_lm_prompt.py/0
|
{
"file_path": "BioGPT/src/transformer_lm_prompt.py",
"repo_id": "BioGPT",
"token_count": 1294
}
| 145 |
---
license: mit
---
This is a BitBLAS implementation of the reproduced 1.58-bit model from [1bitLLM/bitnet_b1_58-3B](https://huggingface.co/1bitLLM/bitnet_b1_58-3B). We replaced the original simulated Int8x3bit quantized inference kernel with the BitBLAS INT8xINT2 kernel. We also evaluated the model's correctness and performance through `eval_correctness.py` and `benchmark_inference_latency.py`.
## BitBLAS Results
### Performance
**Note:** To reproduce the BitBLAS results, please check out `benchmark_inference_latency.py` (a generic CUDA-event timing sketch follows the table below). To reproduce the results of the original model, please check out the [1bitLLM/bitnet_b1_58-3B](https://huggingface.co/1bitLLM/bitnet_b1_58-3B) repo.
| Model | Device | batchsize | in_seq | model | bitnet-1.58b-3b-huggingface | bitnet-1.58b-3b-bitblas |
|:---------------:|:------:|:---------:|:------:|:--------:|:---------------------------:|:-----------------------:|
| bitnet_b1_58-3B | A100 | 1 | 1 | LLAMA-3B | 177.6729107 | 64.17962909 |
| bitnet_b1_58-3B | A100 | 128 | 1 | LLAMA-3B | 188.6145592 | 63.48158518 |
| bitnet_b1_58-3B | A100 | 1 | 2048 | LLAMA-3B | 348.7066031 | 202.6877999 |
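Latency of this kind is typically measured with CUDA events, roughly as in the minimal sketch below. This is not `benchmark_inference_latency.py` itself; `model` and `input_ids` are assumed to be prepared elsewhere:
``` python
import torch

@torch.inference_mode()
def measure_forward_latency(model, input_ids, warmup=10, iters=100):
    """Average forward-pass latency in milliseconds, measured with CUDA events."""
    for _ in range(warmup):          # exclude one-time compilation/allocation cost
        model(input_ids)
    torch.cuda.synchronize()
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    start.record()
    for _ in range(iters):
        model(input_ids)
    end.record()
    torch.cuda.synchronize()
    return start.elapsed_time(end) / iters
```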
### On-the-Fly GPU Memory Footprint
We measured the GPU memory footprint with the `nvidia-smi` command. Please check out `nvidia_measure_memory.sh` to record the real-time GPU memory usage, then start a `benchmark_model_10k_loops.py` workload to measure the overall GPU memory usage (a polling sketch is shown after the table below).
| **Model** | **Device** | **batchsize** | **in_seq** | **bitnet-1.58b-3b-huggingface** | **bitnet-1.58b-3b-bitblas** |
|:---------------:|:----------:|:-------------:|:----------:|:-------------------------------:|:---------------------------:|
| bitnet_b1_58-3B | A100 | 1 | 1 | 7595 MB | 1729 MB |
| bitnet_b1_58-3B | A100 | 128 | 1 | 7677 MB | 1789 MB |
| bitnet_b1_58-3B | A100 | 1 | 2048 | 8731 MB | 3163 MB |
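A rough Python equivalent of the polling loop (a minimal sketch, not the repo's `nvidia_measure_memory.sh`; it only relies on standard `nvidia-smi` query flags):
``` python
import subprocess
import time

def poll_gpu_memory(interval_s: float = 1.0, iterations: int = 60) -> int:
    """Poll per-GPU memory usage and return the peak value (in MB) observed."""
    peak = 0
    for _ in range(iterations):
        out = subprocess.check_output([
            "nvidia-smi",
            "--query-gpu=memory.used",
            "--format=csv,noheader,nounits",
        ]).decode()
        used_mb = max(int(v) for v in out.split())
        peak = max(peak, used_mb)
        print(f"used: {used_mb} MB (peak so far: {peak} MB)")
        time.sleep(interval_s)
    return peak
```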
## PPL and Zero-shot Accuracy
The numbers are reported from [1bitLLM/bitnet_b1_58-3B](https://huggingface.co/1bitLLM/bitnet_b1_58-3B); please check out `eval_ppl.py`.
PPL and zero-shot accuracy:
| Models | PPL | ARCe | ARCc | HS | BQ | OQ | PQ | WGe | Avg |
|-------|-------|-------|-------|-------|-------|-------|-------|-------|-------|
| FP16 700M (reported) | 12.33 | 54.7 | 23.0 | 37.0 | 60.0 | 20.2 | 68.9 | 54.8 | 45.5 |
| BitNet b1.58 700M (reported) | 12.87 | 51.8 | 21.4 | 35.1 | 58.2 | 20.0 | 68.1 | 55.2 | 44.3 |
| BitNet b1.58 700M (reproduced) | 12.78 | 51.4 | 21.8 | 35.0 | 59.6 | 20.6 | 67.5 | 55.4 | 44.5 |
| FP16 1.3B (reported) | 11.25 | 56.9 | 23.5 | 38.5 | 59.1 | 21.6 | 70.0 | 53.9 | 46.2 |
| BitNet b1.58 1.3B (reported) | 11.29 | 54.9 | 24.2 | 37.7 | 56.7 | 19.6 | 68.8 | 55.8 | 45.4 |
| BitNet b1.58 1.3B (reproduced) | 11.19 | 55.8 | 23.7 | 37.6 | 59.0 | 20.2 | 69.2 | 56.0 | 45.9 |
| FP16 3B (reported) | 10.04 | 62.1 | 25.6 | 43.3 | 61.8 | 24.6 | 72.1 | 58.2 | 49.7 |
| BitNet b1.58 3B (reported) | 9.91 | 61.4 | 28.3 | 42.9 | 61.5 | 26.6 | 71.5 | 59.3 | 50.2 |
| BitNet b1.58 3B (reproduced) | 9.88 | 60.9 | 28.0 | 42.3 | 58.3 | 26.0 | 71.4 | 60.3 | 49.6 |
The differences between the reported numbers and the reproduced results most likely stem from variance in training data processing, seeds, or other random factors.
## Citations
```bibtex
@article{ma2024era,
title={The Era of 1-bit LLMs: All Large Language Models are in 1.58 Bits},
author={Ma, Shuming and Wang, Hongyu and Ma, Lingxiao and Wang, Lei and Wang, Wenhui and Huang, Shaohan and Dong, Li and Wang, Ruiping and Xue, Jilong and Wei, Furu},
journal={arXiv preprint arXiv:2402.17764},
year={2024}
}
```
|
BitBLAS/integration/BitNet/README.md/0
|
{
"file_path": "BitBLAS/integration/BitNet/README.md",
"repo_id": "BitBLAS",
"token_count": 1775
}
| 146 |
#!/bin/bash
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
echo "Check MIT License boilerplate..."
PWD="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# To source code root
pushd "${PWD}/../../" > /dev/null
EXITCODE=0
for SRC_FILE in $(find . -path './3rdparty' -prune -false -o -path './build' -prune -false -o -type f -not -name '*apply_mit_license.sh' \
-not -name '*check_mit_license.sh' -and \( -name 'CMakeLists.txt' -or -name '*.cpp' -or -name '*.cu' -or -name '*.h' -or -name '*.hpp' \
-or -name '*.py' -or -name '*.sh' -or -name '*.dockerfile' -or -name '*.yaml' \) ); do
# Skip files that already contain the Apache License
if grep -q "Apache License" "${SRC_FILE}"; then
continue
fi
if !(grep -q "Copyright (c) Microsoft Corporation." "${SRC_FILE}") || !(grep -q "Licensed under the MIT License." "${SRC_FILE}") \
|| (grep -q -i -P "Microsoft( |)\(c\)" "${SRC_FILE}"); then
echo "[ERROR] Require: MIT License boilerplate" "${SRC_FILE}"
EXITCODE=1
fi
done
echo "Done."
popd > /dev/null
exit $EXITCODE
|
BitBLAS/maint/scripts/check_mit_license.sh/0
|
{
"file_path": "BitBLAS/maint/scripts/check_mit_license.sh",
"repo_id": "BitBLAS",
"token_count": 464
}
| 147 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""Benifit For BitBLAS Schedule"""
class Block:
def __init__(self, start, end, is_free):
self.start = start
self.end = end
self.is_free = is_free
def size(self) -> int:
return self.end - self.start
def merge(self, other):
assert self.is_free == other.is_free
self.start = min(self.start, other.start)
self.end = max(self.end, other.end)
def __repr__(self) -> str:
return "<Block offset={} size={}>".format(self.start, self.size())
class BestFit:
def __init__(self, align=32):
self.limit = 0
self.list = []
self.align = align
def malloc(self, size) -> Block:
size = (size + self.align - 1) // self.align * self.align
found = None
for block in self.list:
if block.is_free and block.size() >= size:
if not found or found.size() > block.size():
found = block
if found:
found.is_free = False
remain = found.size() - size
if remain != 0:
found.end -= remain
self.list.insert(
self.list.index(found) + 1, Block(found.end, found.end + remain, True)
)
return found
elif len(self.list) > 0 and self.list[-1].is_free:
add = size - self.list[-1].size()
self.list[-1].end += add
self.limit = self.list[-1].end
self.list[-1].is_free = False
return self.list[-1]
else:
block = Block(self.limit, self.limit + size, False)
self.list.append(block)
self.limit += size
return block
def free(self, block: Block) -> None:
assert not block.is_free
idx = self.list.index(block)
self.list[idx] = Block(block.start, block.end, True)
if idx + 1 < len(self.list) and self.list[idx + 1].is_free:
self.list[idx].merge(self.list[idx + 1])
self.list.pop(idx + 1)
if idx - 1 >= 0 and self.list[idx - 1].is_free:
self.list[idx].merge(self.list[idx - 1])
self.list.pop(idx - 1)
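# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the library): allocate two
# buffers, free the first, and let a later request reuse the freed block.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    allocator = BestFit(align=32)
    a = allocator.malloc(100)   # rounded up to 128 bytes at offset 0
    b = allocator.malloc(200)   # rounded up to 224 bytes at offset 128
    allocator.free(a)           # leaves a 128-byte free block at offset 0
    c = allocator.malloc(64)    # best fit: reuses part of the freed block
    print(allocator.list)       # [<Block offset=0 size=64>, <Block offset=64 size=64>, <Block offset=128 size=224>]
    print(allocator.limit)      # 352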
|
BitBLAS/python/bitblas/base/roller/bestfit.py/0
|
{
"file_path": "BitBLAS/python/bitblas/base/roller/bestfit.py",
"repo_id": "BitBLAS",
"token_count": 1117
}
| 148 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
class BitBLASGenerator:
def __init__(self):
# Initialize the generator with configuration
pass
def generate_cuda_code(self):
pass
def generate_header(self):
pass
|
BitBLAS/python/bitblas/generator.py/0
|
{
"file_path": "BitBLAS/python/bitblas/generator.py",
"repo_id": "BitBLAS",
"token_count": 104
}
| 149 |
# Copyright 2018 The apache/tvm Authors. All Rights Reserved.
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Modifications Copyright (c) Microsoft.
# The code below is mostly copied from apache/tvm rmsnorm.py in dlight.
# pylint: disable=missing-docstring
"""A RMS norm schedule rule for GPU operators."""
import tvm
from tvm import tir
from tvm.tir import Block, BufferStore
from tvm.tir.expr import Cast, BufferLoad, Call
from tvm.target import Target
from ..base import ScheduleRule
def identify_cast_or_load_block(block: Block) -> bool:
if len(block.reads) != 1 or len(block.writes) != 1:
return False
if not isinstance(block.body, BufferStore):
return False
store = block.body
# check types
if isinstance(store.value, BufferLoad):
load = store.value
elif isinstance(store.value, Cast):
load = store.value.value
if not isinstance(load, BufferLoad):
return False
else:
return False
# check indices
if len(load.indices) != len(store.indices):
return False
for lhs, rhs in zip(load.indices, store.indices):
if not lhs.same_as(rhs):
return False
return True
def identify_rsqrt_block(block: Block) -> bool:
if len(block.reads) != 1 or len(block.writes) != 1:
return False
if not isinstance(block.body, BufferStore):
return False
store = block.body
if not isinstance(store.value, Call):
return False
call = store.value
op = call.op
return op == tvm.ir.op.Op.get("tir.rsqrt")
class RMSNorm(ScheduleRule):
"""A rule for RMS norm."""
def apply( # pylint: disable=too-many-locals,missing-docstring
self,
func: tir.PrimFunc,
target: Target,
_: bool,
) -> tir.Schedule:
if target.kind.name == "cuda":
num_tx = 512
else:
num_tx = 64
sch = tir.Schedule(func)
root = sch.get_block(name="root", func_name="main")
blocks = sch.get_child_blocks(root)
if not any([identify_rsqrt_block(sch.get(block)) for block in blocks]):
return None
read = sch.cache_read(block=blocks[0], read_buffer_index=0, storage_scope="local")
write = sch.cache_write(block=blocks[-1], write_buffer_index=0, storage_scope="local")
for block in blocks:
if identify_cast_or_load_block(sch.get(block)):
sch.compute_inline(block)
blocks = sch.get_child_blocks(root)
read, sqr, redsum, rsqrt, norm, write = blocks
if not identify_rsqrt_block(sch.get(rsqrt)):
return None
for name in [read, sqr, redsum, rsqrt, norm, write]:
loops = sch.get_loops(name)
sch.fuse(*loops[:-1])
block_loop, loops = sch.get_loops(block=read)
thread_loop, _, _ = sch.split(
loop=loops, factors=[num_tx, None, 8], preserve_unit_iters=True
)
sch.bind(block_loop, thread_axis="blockIdx.x")
sch.bind(thread_loop, thread_axis="threadIdx.x")
sch.vectorize(sch.get_loops(block=read)[-1])
sch.reverse_compute_at(block=sqr, loop=thread_loop)
sch.reverse_compute_at(block=redsum, loop=thread_loop)
sch.reverse_compute_at(block=rsqrt, loop=block_loop, index=-1)
sch.reverse_compute_at(block=norm, loop=block_loop, index=-1)
block_loop, loops = sch.get_loops(block=norm)
thread_loop, _, _ = sch.split(
loop=loops, factors=[num_tx, None, 8], preserve_unit_iters=True
)
sch.bind(thread_loop, thread_axis="threadIdx.x")
sch.reverse_compute_at(block=write, loop=thread_loop, index=-1)
sch.vectorize(sch.get_loops(block=write)[-1])
sch.set_scope(block=sqr, buffer_index=0, storage_scope="local")
sch.set_scope(block=redsum, buffer_index=0, storage_scope="local")
sch.set_scope(block=rsqrt, buffer_index=0, storage_scope="shared")
sch.set_scope(block=norm, buffer_index=0, storage_scope="local")
return sch
|
BitBLAS/python/bitblas/gpu/rmsnorm.py/0
|
{
"file_path": "BitBLAS/python/bitblas/gpu/rmsnorm.py",
"repo_id": "BitBLAS",
"token_count": 1961
}
| 150 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import tvm
from tvm.target import Target
from bitblas.base.roller.arch.cuda import CUDA
from typing import Any, List, Literal, Optional, Tuple, Union
from .operator import Operator, TransformKind
from .impl.matmul_dequantize_impl import select_implementation
from ..base.utils import tensor_replace_dp4a, tensor_remove_make_int4
from bitblas.utils.tensor_adapter import tvm_tensor_to_torch
from dataclasses import dataclass
from .ladder_permutate import LadderPermutate, LadderPermutateConfig
from .lop3_permutate import LOP3Permutate, LOP3PermutateConfig
import logging
logger = logging.getLogger(__name__)
class OPExecutorCPU:
def __init__(self, operators: Optional[List[Operator]] = None):
if operators is None:
operators = []
self.operators = operators
def append(self, op):
self.operators.append(op)
def is_none(self):
return len(self.operators) == 0
def forward(self, weight):
inputs = [weight]
for op in self.operators:
inputs.append(tvm_tensor_to_torch(op.get_profile_tensors()[-1]).cpu())
inputs = [op.forward(*inputs)]
return inputs[-1]
def __call__(self, *args: Any, **kwds: Any) -> Any:
return self.forward(*args, **kwds)
@property
def size(self):
return len(self.operators)
@dataclass(frozen=True)
class MatmulWeightOnlyDequantizeConfig:
M: Union[int, Tuple[int]]
N: int
K: int
in_dtype: str = "float16"
out_dtype: str = "float16"
accum_dtype: str = "float16"
bit: int = 4
storage_dtype: str = "int8"
# documents for source_format:
# the format of the source data, which can be "int", "uint", "fp", "nf"
# "int": dequantize_weight = (target)((int)(quantize_weight - fixed_zero_point)) * scale
# where the fixed_zero_point is 2^(bit - 1) - 1
# "uint": dequantize_weight = (target)((uint)(quantize_weight - zero_point)) * scale
# where the zero_point is manually set by zeros tensor
# "fp": dequantize_weight = (quantize_weight - zero_point) * scale
# "nf": dequantize_weight = (lut[quantize_weight] - zero_point) * scale
source_format: Literal["int", "uint", "fp", "nf"] = "int"
with_scaling: bool = False
with_zeros: bool = False
group_size: int = -1
fast_decoding: bool = False
with_bias: bool = False
propagate_a: TransformKind = TransformKind.NonTransform
propagate_b: TransformKind = TransformKind.NonTransform
layout: str = "nt"
# documents for zeros_mode:
# original: target = (dequantize_weight - zero_point) * scale
# rescale: target = dequantize_weight * scale - zero_point
# quantized: target = (dequantize_weight - dequantize_zeros) * scale
# The auto-gptq framework prefer "quantized" and "original" for alignment with cuda.
zeros_mode: Literal["original", "rescale", "quantized"] = "original"
def __post_init__(self):
# set M to tuple if it is list
# otherwise, M is not hashable
object.__setattr__(self, "M", tuple(self.M) if isinstance(self.M, list) else self.M)
if isinstance(self.propagate_a, bool):
object.__setattr__(
self,
"propagate_a",
(TransformKind.IntraWarpTransform
if self.propagate_a else TransformKind.NonTransform),
)
elif isinstance(self.propagate_a, int):
object.__setattr__(self, "propagate_a", TransformKind(self.propagate_a))
if isinstance(self.propagate_b, bool):
object.__setattr__(
self,
"propagate_b",
(TransformKind.IntraWarpTransform
if self.propagate_b else TransformKind.NonTransform),
)
elif isinstance(self.propagate_b, int):
object.__setattr__(self, "propagate_b", TransformKind(self.propagate_b))
class MatmulWeightOnlyDequantize(Operator):
def __init__(
self,
config: MatmulWeightOnlyDequantizeConfig,
name: str = "matmul_weight_only_dequantize",
target: Target = "cuda",
enable_tuning: bool = False,
from_database: bool = False,
):
super().__init__(name, config, target)
target = self.target
if target.kind.name != "cuda":
raise ValueError("Currently only support cuda target")
self.arch = CUDA(target)
if isinstance(self.M, Tuple):
self.dynamic_range = {"m": self.M}
self.prim_func_mod["main"] = self.prim_func_mod["main"].with_attrs(
{"opt_shapes": self.dynamic_range})
else:
self.dynamic_range = None
if not from_database:
self._build_default_module(target)
if self.propagate_a:
ladder_permutate_config = LadderPermutateConfig(
M=self.M,
N=self.K,
datatype=self.in_dtype,
storage_dtype=self.in_dtype,
propagate_kind="A",
transpose_matrix=False,
transform_kind=self.propagate_a,
)
self.ladder_permutate_a = LadderPermutate(
config=ladder_permutate_config,
target=tvm.target.Target("llvm"),
)
else:
self.ladder_permutate_a = None
if self.propagate_b:
ladder_permutate_config = LadderPermutateConfig(
M=self.N,
N=self.K,
datatype=self.in_dtype,
dequantize_bits=self.bit,
storage_dtype=self.storage_dtype,
propagate_kind="B",
transpose_matrix=self.layout == "nt",
transform_kind=self.propagate_b,
)
self.ladder_permutate_b = LadderPermutate(
config=ladder_permutate_config,
target=tvm.target.Target("llvm"),
)
else:
self.ladder_permutate_b = None
if self.fast_decoding:
lop3_permutate_config = LOP3PermutateConfig(
M=self.N,
N=self.K,
datatype=self.in_dtype,
dequantize_bits=self.bit,
storage_dtype=self.storage_dtype,
)
self.lop3_permutate = LOP3Permutate(
config=lop3_permutate_config,
target=tvm.target.Target("llvm"),
)
else:
self.lop3_permutate = None
input_executors = OPExecutorCPU()
if self.ladder_permutate_a is not None:
input_executors.append(self.ladder_permutate_a)
self.input_executors = input_executors
weight_executors = OPExecutorCPU()
if self.lop3_permutate is not None:
weight_executors.append(self.lop3_permutate)
if self.ladder_permutate_b is not None:
weight_executors.append(self.ladder_permutate_b)
self.weight_executors = weight_executors
if enable_tuning:
self.hardware_aware_finetune()
def _build_default_module(self, target: Target):
try:
self.optimized_func = self.apply_default_schedule(self.prim_func_mod, target)
except Exception:
self.optimized_func = None
logger.warning(
"[BitBLAS][Warning] Apply default schedule failed, should do hardware-aware optimization manually."
)
self._build_runtime_module(target)
def _select_implementation(self):
return select_implementation(
M=self.M,
N=self.N,
K=self.K,
in_dtype=self.in_dtype,
out_dtype=self.out_dtype,
accum_dtype=self.accum_dtype,
bit=self.bit,
storage_dtype=self.storage_dtype,
source_format=self.source_format,
with_scaling=self.with_scaling,
with_zeros=self.with_zeros,
group_size=self.group_size,
fast_decoding=self.fast_decoding,
with_bias=self.with_bias,
layout=self.layout,
zeros_mode=self.zeros_mode,
propagate_a=self.propagate_a,
propagate_b=self.propagate_b,
)
def post_process(self, code: str) -> str:
code = tensor_replace_dp4a(code)
code = tensor_remove_make_int4(code)
return code
def retrieve_weight_shape(self):
return [int(i) for i in self.prim_func.buffer_map[self.prim_func.params[1]].shape]
def forward(self, *args) -> Any:
if self.lib is None:
self._forward_from_torch_func(*args)
dynamic_symbolic = []
if self.dynamic_range is not None:
# assume we only have one dynamic range
m = args[0].shape[0]
dynamic_symbolic.append(m)
self._forward_from_prebuild_lib(*args, *dynamic_symbolic)
@property
def M(self):
return self.config.M
@property
def N(self):
return self.config.N
@property
def K(self):
return self.config.K
@property
def in_dtype(self):
return self.config.in_dtype
@property
def out_dtype(self):
return self.config.out_dtype
@property
def accum_dtype(self):
return self.config.accum_dtype
@property
def bit(self):
return self.config.bit
@property
def storage_dtype(self):
return self.config.storage_dtype
@property
def source_format(self):
return self.config.source_format
@property
def with_scaling(self):
return self.config.with_scaling
@property
def with_zeros(self):
return self.config.with_zeros
@property
def group_size(self):
return self.config.group_size
@property
def fast_decoding(self):
return self.config.fast_decoding
@property
def with_bias(self):
return self.config.with_bias
@property
def propagate_a(self):
return self.config.propagate_a
@property
def propagate_b(self):
return self.config.propagate_b
@property
def layout(self):
return self.config.layout
@property
def zeros_mode(self):
return self.config.zeros_mode
@property
def input_transform(self):
return self.input_executors if self.input_executors.size else None
@property
def weight_transform(self):
return self.weight_executors if self.weight_executors.size else None
|
BitBLAS/python/bitblas/ops/matmul_dequantize.py/0
|
{
"file_path": "BitBLAS/python/bitblas/ops/matmul_dequantize.py",
"repo_id": "BitBLAS",
"token_count": 4993
}
| 151 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import tvm
from typing import Optional, List, Dict, Union
from tvm import IRModule
from bitblas import TileDevice
from tvm.runtime import ndarray
from bitblas.utils import match_global_kernel
import re
import ctypes
import os
import tempfile
import subprocess
import logging
from tvm.driver import lower
from tvm.target import Target
logger = logging.getLogger(__name__)
_TYPE_MAP = {
"float32": "float",
"float16": "half",
"bfloat16": "__nv_bfloat162",
"e4m3_float8": "__nv_fp8_e4m3",
"e5m2_float8": "__nv_fp8_e5m2",
"float64": "double",
"int64": "int64_t",
"int32": "int",
"uint32": "unsigned int",
"bool": "int8_t",
"int8": "int8_t",
"uint8": "uint8_t",
"int16": "int16_t",
"uchar": "uint8_t",
}
def get_annotated_device_mod(mod: IRModule, target: Target):
"""
Lower the given IRModule and create a device module for the specified target.
Parameters:
- mod: The input IRModule.
- target: The compilation target.
Returns:
- A device module ready for execution.
"""
input_mod = lower(mod)
target_input_mod = {target: input_mod}
annotated_mods = {}
runtime = None
target_host = None
for tgt, mod in target_input_mod.items():
if not isinstance(tgt, (str, Target)):
raise ValueError("The key of inputs must be str or "
"Target when inputs is dict.")
if not isinstance(mod, tvm.IRModule):
raise ValueError("inputs must be Schedule, IRModule, "
"or dict of str to IRModule.")
annotated_mods[tgt] = mod.with_attr("runtime", runtime)
annotated_mods, target_host = Target.canon_target_map_and_host(annotated_mods, target_host)
if not target_host:
for tar, _ in annotated_mods.items():
device_type = ndarray.device(tar.kind.name, 0).device_type
if device_type == ndarray.cpu(0).device_type:
target_host = tar
break
if not target_host:
target_host = "llvm" if tvm.runtime.enabled("llvm") else "stackvm"
annotated_mods, target_host = Target.canon_target_map_and_host(annotated_mods, target_host)
for target, mod in annotated_mods.items():
mixed_mod_passes = tvm.get_global_func("driver.mixed_mod_passes")
device_mod_passes = tvm.get_global_func("driver.device_mod_passes")
mod = mixed_mod_passes(mod, target)(mod)
device_mod = device_mod_passes(mod, target)(mod)
return device_mod
def get_thread_block_information(mod: IRModule):
"""
Extracts the thread block and grid dimensions for the reduction block within a given IRModule.
Parameters:
- mod: The input IRModule from which to extract thread block and grid information.
Returns:
A tuple containing two lists:
- The first list contains the dimensions of the thread block (threadIdx.x, threadIdx.y, threadIdx.z).
- The second list contains the dimensions of the grid (blockIdx.x, blockIdx.y, blockIdx.z).
"""
# Initialize the schedule from the IRModule
sch = tvm.tir.Schedule(mod)
# Get the root block and its child blocks
root_block = sch.get_block("root")
child_blocks = sch.get_child_blocks(root_block)
# Initialize default block and grid dimensions (1, 1, 1)
block_dims, grid_dims = [1, 1, 1], [1, 1, 1]
for block in child_blocks:
# Get the loops surrounding the main block
loops = sch.get_loops(block)
# Iterate over each loop to extract thread and block bindings
for loop in loops:
stmt = sch.get(loop)
thread_binding = stmt.thread_binding
extent = int(stmt.extent)
# Skip loops without thread binding
if thread_binding:
if "threadIdx" in thread_binding.thread_tag:
block_dims["xyz".index(thread_binding.thread_tag[-1])] = extent
elif "blockIdx" in thread_binding.thread_tag:
grid_dims["xyz".index(thread_binding.thread_tag[-1])] = extent
return block_dims, grid_dims
class CUDASourceWrapper(object):
def __init__(self, optimized_mod: IRModule, source: str, arch: TileDevice):
self.mod = optimized_mod
self.arch = arch
self.source = source
self.function_name: Optional[str] = None
self.dynamic_smem_buf: Optional[int] = None
self.block_info: Union[List[int], Dict] = [1, 1, 1]
self.grid_info: Union[List[int], Dict] = [1, 1, 1]
self.parse_source_information()
self.src_name: Optional[str] = None
self.lib_name: Optional[str] = None
self.lib_code: Optional[str] = self.update_lib_code(source)
def load_lib(self):
return ctypes.CDLL(self.lib_name)
def remove_lib(self):
if self.lib_name:
os.remove(self.lib_name)
self.lib_name = None
def compile_lib(self, timeout: float = None):
arch = self.arch
src = tempfile.NamedTemporaryFile(mode="w", suffix=".cu", delete=False)
compute_version = arch.compute_capability
lib_name = src.name.replace(".cu", ".so")
command = [
"nvcc",
"-std=c++17",
"-Xcudafe",
"--diag_suppress=177",
"--compiler-options",
"'-fPIC'",
"-lineinfo",
"--shared",
src.name,
"-lcuda",
f"-gencode=arch=compute_{compute_version},code=compute_{compute_version}",
"-o",
lib_name,
]
src.write(self.lib_code)
src.flush()
try:
ret = subprocess.run(command, timeout=timeout)
except subprocess.TimeoutExpired:
logger.warning(f"Compilation Timeout! {command}")
return None
if ret.returncode != 0:
logger.warning(f"Compilation Failed! {command}")
return None
self.src_name = src.name
self.lib_name = lib_name
def parse_source_information(self):
device_mod = get_annotated_device_mod(self.mod, self.arch.target)
assert (len(device_mod.functions) == 1
), "Only support one function in the module for static shape kernel."
for g_var, func in device_mod.functions.items():
self.function_name = g_var.name_hint
attrs = func.attrs
if "dyn_shared_memory_buf" in attrs:
self.dynamic_smem_buf = int(attrs["dyn_shared_memory_buf"])
if "thread_extent" in attrs:
thread_extent = attrs["thread_extent"]
for tag, extent in thread_extent.items():
if "threadIdx" in tag:
self.block_info["xyz".index(tag[-1])] = extent
elif "blockIdx" in tag:
self.grid_info["xyz".index(tag[-1])] = extent
def get_dynamic_symbolic_set(self, prim_func):
# Determine the set of dynamic symbols used in the function
dynamic_symbolic_set = set()
for param in prim_func.params:
buffer = prim_func.buffer_map[param]
for dim in buffer.shape:
if isinstance(dim, tvm.tir.Var):
dynamic_symbolic_set.add(dim.name)
return dynamic_symbolic_set
def get_cuda_init_func(self):
# Initialize an empty string for the CUDA function call
call_str = """"""
# If dynamic shared memory buffer is specified, prepare the cudaFuncSetAttribute call
if self.dynamic_smem_buf is not None:
call_str = """
cudaFuncSetAttribute({},
cudaFuncAttributeMaxDynamicSharedMemorySize, {});
""".format(self.function_name, self.dynamic_smem_buf)
# Format the initialization function using the call_str
init_funcs = """
extern "C" void init() {{
{}
}}
""".format(call_str)
return init_funcs
def update_lib_code(self, code: str):
# Update the library code with the given code string
self.lib_code = code
# Find the index of the global kernel function in the code
index = match_global_kernel(code)
# Extract the declaration of the function starting from the found index
declaration = code[index:].split(";")[0]
function_name = self.function_name
# Get the CUDA initialization function
init_func = self.get_cuda_init_func()
# Locate the opening brace of the function to insert arguments
index = code.index("{", index)
function_args = []
# Populate the function arguments from the primary function's parameters and buffers
for param in self.prim_func.params:
buffer = self.prim_func.buffer_map[param]
function_args.append({
"name": buffer.name,
"type": _TYPE_MAP[buffer.dtype] + "* __restrict__",
})
dynamic_symbolic_set = self.get_dynamic_symbolic_set(self.prim_func)
# Add dynamic symbolic parameters as integers to the function arguments
for dyn_sym in dynamic_symbolic_set:
function_args.append({"name": dyn_sym, "type": "int"})
function_args.append({"name": "stream=cudaStreamDefault", "type": "cudaStream_t"},)
# Format the function arguments for declaration
def_args = ", ".join([f"{arg['type']} {arg['name']}" for arg in function_args])
def func_call_args(s, function_args):
# Extract the function call arguments matching the function definition
pattern = r"[,\s]*(?:\w+\s*\*+\s*__restrict__\s+)?(\w+)"
matches = re.findall(pattern, s)
call_args = []
for match in matches:
for arg in function_args:
if arg["name"] == match:
call_args.append(match)
return call_args
call_args = ", ".join(func_call_args(declaration, function_args))
block_info, grid_info = self.block_info, self.grid_info
def legalize_c(p):
# Convert TIR expressions to legal C expressions
# Directly convert to string since the special case handling
# does not alter the string representation for `tvm.tir.Var` and `IntImm`.
# Replace Python's floor division operator with C's division operator
if isinstance(p, tvm.tir.IntImm):
p = int(p)
return str(p).replace("//", "/")
# Prepare the block and grid dimensions for the CUDA kernel launch
block_str = "dim3({}, {}, {})".format(
legalize_c(block_info[0]),
legalize_c(block_info[1]),
legalize_c(block_info[2]),
)
grid_str = "dim3({}, {}, {})".format(
legalize_c(grid_info[0]), legalize_c(grid_info[1]), legalize_c(grid_info[2]))
# Determine the shared memory size, defaulting to 0 if not specified
smem_str = 0 if self.dynamic_smem_buf is None else self.dynamic_smem_buf
# Format the CUDA kernel launch string
call_str = "{}<<<{}, {}, {}, 0>>>({});".format(function_name, grid_str, block_str, smem_str,
call_args)
# Create the host function wrapper for the CUDA kernel
host_func = """
extern "C" void call({}) {{
{}
}}
""".format(def_args, call_str)
# Combine the source, initialization function, and host function to form the complete library code
lib_code = self.source + init_func + host_func
return lib_code
@property
def prim_func(self):
return self.mod["main"]
class CUDASourceWrapperWithDynamic(CUDASourceWrapper):
def __init__(self, optimized_mod: IRModule, source: str, arch: TileDevice):
super().__init__(optimized_mod, source, arch)
def get_cuda_init_func(self):
# Initialize an empty string to accumulate CUDA function calls for setting dynamic shared memory
call_str = """"""
# Iterate over functions and their dynamic shared memory requirements
for function_name, dynamic_smem_buf in self.dynamic_smem_buf.items():
if dynamic_smem_buf is not None:
# Format the cudaFuncSetAttribute call for dynamic shared memory
call_str += """
cudaFuncSetAttribute({},
cudaFuncAttributeMaxDynamicSharedMemorySize, {});
""".format(function_name, dynamic_smem_buf)
# Define the init function that will set the attributes for each kernel
init_funcs = """
extern "C" void init() {{
{}
}}
""".format(call_str)
return init_funcs
def create_dispatch_func(self, code, function_informations):
# Extract the set of dynamic symbolic names used in the primary function
dynamic_symbolic_set = self.get_dynamic_symbolic_set(self.prim_func)
# Find the location of the global kernel function in the code
index = match_global_kernel(code)
# Analyze the function declaration to prepare for argument extraction
dummy_declaration = code[index:].split(";")[0]
function_name = self.function_name
# Identify the start of the function body to insert arguments
index = code.index("{", index)
function_args = []
# Collect function arguments based on primary function's parameters and buffer mappings
for param in self.prim_func.params:
buffer = self.prim_func.buffer_map[param]
function_args.append({
"name": buffer.name,
"type": _TYPE_MAP[buffer.dtype] + "* __restrict__",
})
# Add dynamic symbols as integer arguments
for dyn_sym in dynamic_symbolic_set:
function_args.append({"name": dyn_sym, "type": "int"})
function_args.append({"name": "stream=cudaStreamDefault", "type": "cudaStream_t"},)
# Format the argument definitions for function declaration
def_args = ", ".join([f"{arg['type']} {arg['name']}" for arg in function_args])
def func_call_args(s: str, function_args):
# Extract and clean the function call arguments to match the declaration
pattern = r"[,\s]*(?:\w+\s*\*+\s*__restrict__\s+)?(\w+)"
matches = re.findall(pattern, s)
call_args = []
for match in matches:
match = re.sub(r"\d+", "", match) # Remove numbers
match = re.sub(r"_", "", match) # Remove underscores
for arg in function_args:
if arg["name"] == match:
call_args.append(match)
return call_args
call_args = ", ".join(func_call_args(dummy_declaration, function_args))
def legalize_c(p):
# Convert TIR expressions to legal C expressions
# Directly convert to string since the special case handling
# does not alter the string representation for `tvm.tir.Var` and `IntImm`.
# Replace Python's floor division operator with C's division operator
if isinstance(p, tvm.tir.IntImm):
p = int(p)
return str(p).replace("//", "/")
last_range = 0
num_items = len(function_informations)
_call_str = """"""
for function_name, info in function_informations.items():
# Prepare block and grid configurations for kernel launches
block_info, grid_info = info["block_info"], info["grid_info"]
block_str = "dim3({}, {}, {})".format(
legalize_c(block_info[0]),
legalize_c(block_info[1]),
legalize_c(block_info[2]),
)
grid_str = "dim3({}, {}, {})".format(
legalize_c(grid_info[0]),
legalize_c(grid_info[1]),
legalize_c(grid_info[2]),
)
# Handle dynamic shared memory specification
smem_str = (0 if info["dynamic_smem_buf"] is None else info["dynamic_smem_buf"])
opt_shapes = info["opt_shapes"]
# Generate conditional kernel launch code based on dynamic symbolic ranges
(symbolic,) = list(dynamic_symbolic_set)
range_str = opt_shapes[symbolic]
if last_range == 0:
call_str = "if ({} <= {}) {{\n\t\t\t {}<<<{}, {}, {}, stream>>>({}); \n\t\t}}\n".format(
symbolic,
range_str,
function_name,
grid_str,
block_str,
smem_str,
call_args,
)
else:
call_str = "\t\telse if ({} <= {}) {{\n\t\t\t {}<<<{}, {}, {}, stream>>>({}); \n\t\t}}\n".format(
symbolic,
range_str,
function_name,
grid_str,
block_str,
smem_str,
call_args,
)
if last_range == num_items - 1:
call_str += (
"\t\telse {{\n\t\t\t {}<<<{}, {}, {}, stream>>>({}); \n\t\t}}\n".format(
function_name, grid_str, block_str, smem_str, call_args))
last_range += 1
_call_str += call_str
# Wrap the kernel dispatch logic in an external C function
host_func = """
extern "C" void call({}) {{
{}
}}
""".format(def_args, _call_str)
return host_func
def parse_source_information(self):
# Parse device module to extract execution configurations for each function
device_mod = get_annotated_device_mod(self.mod, self.arch.target)
block_info_map = {}
grid_info_map = {}
dynamic_smem_buf_map = {}
for g_var, func in device_mod.functions.items():
# Default block and grid configurations
block_info = [1, 1, 1]
grid_info = [1, 1, 1]
function_name = g_var.name_hint
attrs = func.attrs
dynamic_smem_buf = None
if "dyn_shared_memory_buf" in attrs:
dynamic_smem_buf = int(attrs["dyn_shared_memory_buf"])
if "thread_extent" in attrs:
# Extract block and grid sizes from thread extents
thread_extent = attrs["thread_extent"]
for tag, extent in thread_extent.items():
if "threadIdx" in tag:
block_info["xyz".index(tag[-1])] = extent
elif "blockIdx" in tag:
grid_info["xyz".index(tag[-1])] = extent
# Map the extracted configurations to each function
block_info_map[function_name] = block_info
grid_info_map[function_name] = grid_info
dynamic_smem_buf_map[function_name] = dynamic_smem_buf
# Store the mappings for use in code generation
self.block_info = block_info_map
self.grid_info = grid_info_map
self.dynamic_smem_buf = dynamic_smem_buf_map
def update_lib_code(self, code: str):
# Organize function information for code generation
function_informations = {}
for g_var, func in self.mod.functions.items():
if g_var.name_hint == "main":
continue
function_name = g_var.name_hint
attrs = func.attrs
assert "opt_shapes" in attrs
opt_shapes = attrs["opt_shapes"]
function_informations[function_name] = {
"function_name": function_name,
"opt_shapes": opt_shapes,
"block_info": self.block_info[function_name],
"grid_info": self.grid_info[function_name],
"dynamic_smem_buf": self.dynamic_smem_buf[function_name],
}
def compare_map_objects(map_obj):
comparable_representation = list(map_obj.values())
return comparable_representation
function_informations = dict(
sorted(
function_informations.items(),
key=lambda item: compare_map_objects(item[1]["opt_shapes"])))
self.lib_code = code
# Generate the initialization and dispatch functions
init_func = self.get_cuda_init_func()
host_func = self.create_dispatch_func(code, function_informations)
# Concatenate source code with generated code segments
lib_code = self.source + init_func + host_func
return lib_code
@property
def prim_func(self):
return self.mod["main"]
|
BitBLAS/python/bitblas/wrapper/general.py/0
|
{
"file_path": "BitBLAS/python/bitblas/wrapper/general.py",
"repo_id": "BitBLAS",
"token_count": 9565
}
| 152 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import pytest
import bitblas
from bitblas.ops.ladder_permutate import LadderPermutate, LadderPermutateConfig
import tvm
target = tvm.target.Target("llvm")
# fmt: off
@pytest.mark.parametrize(
"M,N,datatype,dequantize_bits,storage_dtype,propagate_kind,transpose_matrix,transform_kind,target_instruction",
[
(1024, 1024, "float16", -1, "float16", "B", True, 0, "nvidia-mma"),
(1024, 1024, "float16", -1, "float16", "B", True, 1, "nvidia-mma"),
(1024, 1024, "float16", -1, "float16", "B", True, 2, "nvidia-mma"),
# dequantize propagation
(1024, 1024, "float16", 4, "uint32", "B", True, 2, "nvidia-mma"),
])
def test_ladder_permutate_profile_latency(
M,
N,
datatype,
dequantize_bits,
storage_dtype,
propagate_kind,
transpose_matrix,
transform_kind,
target_instruction,
):
ladder_permutate_config = LadderPermutateConfig(
M=M,
N=N,
datatype=datatype,
dequantize_bits=dequantize_bits,
storage_dtype=storage_dtype,
propagate_kind=propagate_kind,
transpose_matrix=transpose_matrix,
transform_kind=transform_kind,
target_instruction=target_instruction,
)
ladder_permutate = LadderPermutate(
config=ladder_permutate_config,
target=target,
)
latency = ladder_permutate.profile_latency()
assert latency
@pytest.mark.parametrize(
"M,N,datatype,dequantize_bits,storage_dtype,propagate_kind,transpose_matrix,transform_kind,target_instruction",
[
(1024, 1024, "float16", -1, "float16", "A", True, 0, "nvidia-mma"),
(1024, 1024, "float16", -1, "float16", "A", True, 1, "nvidia-mma"),
(1024, 1024, "float16", -1, "float16", "A", True, 2, "nvidia-mma"),
# dequantize propagation
(1024, 1024, "float16", 4, "uint32", "A", True, 2, "nvidia-mma"),
])
def test_ladder_permutate_profile_latency_cuda(
M,
N,
datatype,
dequantize_bits,
storage_dtype,
propagate_kind,
transpose_matrix,
transform_kind,
target_instruction,
):
ladder_permutate_config = LadderPermutateConfig(
M=M,
N=N,
datatype=datatype,
dequantize_bits=dequantize_bits,
storage_dtype=storage_dtype,
propagate_kind=propagate_kind,
transpose_matrix=transpose_matrix,
transform_kind=transform_kind,
target_instruction=target_instruction,
)
ladder_permutate = LadderPermutate(
config=ladder_permutate_config,
target="cuda",
)
# ladder_permutate.hardware_aware_finetune()
latency = ladder_permutate.profile_latency()
print(latency)
assert latency
# fmt: on
if __name__ == "__main__":
bitblas.testing.main()
|
BitBLAS/testing/python/operators/test_ladder_permutate_ops.py/0
|
{
"file_path": "BitBLAS/testing/python/operators/test_ladder_permutate_ops.py",
"repo_id": "BitBLAS",
"token_count": 1273
}
| 153 |
from ..datasets import SNLIDataset
from .datamodule_base import BaseDataModule
from collections import defaultdict
class SNLIDataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return SNLIDataset
@property
def dataset_name(self):
return "snli"
|
BridgeTower/src/datamodules/snli_datamodule.py/0
|
{
"file_path": "BridgeTower/src/datamodules/snli_datamodule.py",
"repo_id": "BridgeTower",
"token_count": 146
}
| 154 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""
import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
import torch.utils.checkpoint
from packaging import version
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from transformers.activations import ACT2FN
from transformers.file_utils import (
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
MaskedLMOutput,
MultipleChoiceModelOutput,
NextSentencePredictorOutput,
QuestionAnsweringModelOutput,
SequenceClassifierOutput,
TokenClassifierOutput,
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from transformers.models.bert.configuration_bert import BertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "bert-base-uncased"
_CONFIG_FOR_DOC = "BertConfig"
_TOKENIZER_FOR_DOC = "BertTokenizer"
BERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"bert-base-uncased",
"bert-large-uncased",
"bert-base-cased",
"bert-large-cased",
"bert-base-multilingual-uncased",
"bert-base-multilingual-cased",
"bert-base-chinese",
"bert-base-german-cased",
"bert-large-uncased-whole-word-masking",
"bert-large-cased-whole-word-masking",
"bert-large-uncased-whole-word-masking-finetuned-squad",
"bert-large-cased-whole-word-masking-finetuned-squad",
"bert-base-cased-finetuned-mrpc",
"bert-base-german-dbmdz-cased",
"bert-base-german-dbmdz-uncased",
"cl-tohoku/bert-base-japanese",
"cl-tohoku/bert-base-japanese-whole-word-masking",
"cl-tohoku/bert-base-japanese-char",
"cl-tohoku/bert-base-japanese-char-whole-word-masking",
"TurkuNLP/bert-base-finnish-cased-v1",
"TurkuNLP/bert-base-finnish-uncased-v1",
"wietsedv/bert-base-dutch-cased",
# See all BERT models at https://huggingface.co/models?filter=bert
]
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
"""Load tf checkpoints in a pytorch model."""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}")
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
if any(
n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
for n in name
):
logger.info(f"Skipping {'/'.join(name)}")
continue
pointer = model
for m_name in name:
if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
scope_names = re.split(r"_(\d+)", m_name)
else:
scope_names = [m_name]
if scope_names[0] == "kernel" or scope_names[0] == "gamma":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
pointer = getattr(pointer, "bias")
elif scope_names[0] == "output_weights":
pointer = getattr(pointer, "weight")
elif scope_names[0] == "squad":
pointer = getattr(pointer, "classifier")
else:
try:
pointer = getattr(pointer, scope_names[0])
except AttributeError:
logger.info(f"Skipping {'/'.join(name)}")
continue
if len(scope_names) >= 2:
num = int(scope_names[1])
pointer = pointer[num]
if m_name[-11:] == "_embeddings":
pointer = getattr(pointer, "weight")
elif m_name == "kernel":
array = np.transpose(array)
try:
if pointer.shape != array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
        except ValueError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info(f"Initialize PyTorch weight {name}")
pointer.data = torch.from_numpy(array)
return model
class BertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
# position_ids (1, len position emb) is contiguous in memory and exported when serialized
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
if version.parse(torch.__version__) > version.parse("1.6.0"):
self.register_buffer(
"token_type_ids",
torch.zeros(self.position_ids.size(), dtype=torch.long),
persistent=False,
)
def forward(
self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
if position_ids is None:
position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length]
        # Set token_type_ids to the buffer registered in the constructor (all zeros), which is the usual
        # case when it is auto-generated; the registered buffer lets users trace the model without passing
        # token_type_ids and solves issue #5664
if token_type_ids is None:
if hasattr(self, "token_type_ids"):
buffered_token_type_ids = self.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = inputs_embeds + token_type_embeddings
if self.position_embedding_type == "absolute":
position_embeddings = self.position_embeddings(position_ids)
embeddings += position_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = position_embedding_type or getattr(
config, "position_embedding_type", "absolute"
)
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def save_attn_gradients(self, attn_gradients):
self.attn_gradients = attn_gradients
def get_attn_gradients(self):
return self.attn_gradients
def save_attention_map(self, attention_map):
self.attention_map = attention_map
def get_attention_map(self):
return self.attention_map
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in BertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# Optionally stash the attention map and register a gradient hook for visualization (disabled by default).
if False:
self.save_attention_map(attention_probs)
attention_probs.register_hook(self.save_attn_gradients)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config, position_embedding_type=None):
super().__init__()
self.self = BertSelfAttention(config, position_embedding_type=position_embedding_type)
self.output = BertSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
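# BertCrossLayer: a transformer block that first self-attends over `hidden_states` and then
# cross-attends to `encoder_hidden_states` (e.g. the other stream in a two-modality encoder),
# followed by the usual feed-forward sub-layer. Unlike BertLayer it never caches key/values.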
class BertCrossLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
self.crossattention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states,
encoder_hidden_states,
attention_mask=None,
encoder_attention_mask=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = None  # BertCrossLayer never reuses cached self-attention key/values
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask=None,
output_attentions=output_attentions,
past_key_value=None,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
None,
encoder_hidden_states,
encoder_attention_mask,
None,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
self.crossattention = BertAttention(config, position_embedding_type="absolute")
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
)
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if self.gradient_checkpointing and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
)
use_cache = False
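# Gradient checkpointing: the layer's forward is re-run during backward instead of storing its
# intermediate activations, trading compute for memory; the closure below threads the extra
# (non-tensor) arguments into torch.utils.checkpoint.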
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class BertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = BertConfig
load_tf_weights = load_tf_weights_in_bert
base_model_prefix = "bert"
supports_gradient_checkpointing = True
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def _set_gradient_checkpointing(self, module, value=False):
if isinstance(module, BertEncoder):
module.gradient_checkpointing = value
@dataclass
class BertForPreTrainingOutput(ModelOutput):
"""
Output type of [`BertForPreTraining`].
Args:
loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`):
Total loss as the sum of the masked language modeling loss and the next sequence prediction
(classification) loss.
prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[torch.FloatTensor] = None
prediction_logits: torch.FloatTensor = None
seq_relationship_logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
BERT_START_DOCSTRING = r"""
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
and behavior.
Parameters:
config ([`BertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`torch.LongTensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare Bert Model transformer outputting raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING,
)
class BertModel(BertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in [Attention is
all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both the `is_decoder` argument and
`add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
"""
def __init__(self, config, add_pooling_layer=True):
super().__init__(config)
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
`decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past_key_values`).
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if self.config.is_decoder:
use_cache = use_cache if use_cache is not None else self.config.use_cache
else:
use_cache = False
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
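# When decoding with a cache, only the new tokens are fed in; the cached length offsets the
# position ids and extends the attention mask so new tokens can attend to the cached ones.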
if attention_mask is None:
attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
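# get_extended_attention_mask converts the [batch, seq] 0/1 padding mask into an additive mask
# (0.0 for visible positions, a large negative value for masked ones) broadcastable to
# [batch, num_heads, from_seq, to_seq]; for decoder configs it also folds in the causal triangle.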
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
past_key_values=encoder_outputs.past_key_values,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""
Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
sentence prediction (classification)` head.
""",
BERT_START_DOCSTRING,
)
class BertForPreTraining(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
next_sentence_label=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked),
the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence
pair (see `input_ids` docstring) Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
kwargs (`Dict[str, any]`, optional, defaults to *{}*):
Used to hide legacy arguments that have been deprecated.
Returns:
Example:
```python
>>> from transformers import BertTokenizer, BertForPreTraining
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = BertForPreTraining.from_pretrained("bert-base-uncased")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.prediction_logits
>>> seq_relationship_logits = outputs.seq_relationship_logits
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
if not return_dict:
output = (prediction_scores, seq_relationship_score) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return BertForPreTrainingOutput(
loss=total_loss,
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""Bert Model with a `language modeling` head on top for CLM fine-tuning.""", BERT_START_DOCSTRING
)
class BertLMHeadModel(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`")
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be
in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100`
are ignored (masked), the loss is only computed for the tokens with labels in `[0, ...,
config.vocab_size]`
past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
decoding.
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
use_cache (`bool`, *optional*):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
(see `past_key_values`).
Returns:
Example:
```python
>>> from transformers import BertTokenizer, BertLMHeadModel, BertConfig
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
>>> config = BertConfig.from_pretrained("bert-base-cased")
>>> config.is_decoder = True
>>> model = BertLMHeadModel.from_pretrained("bert-base-cased", config=config)
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> prediction_logits = outputs.logits
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
use_cache = False
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
lm_loss = None
if labels is not None:
# we are doing next-token prediction; shift prediction scores and input ids by one
shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
labels = labels[:, 1:].contiguous()
loss_fct = CrossEntropyLoss()
lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((lm_loss,) + output) if lm_loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=lm_loss,
logits=prediction_scores,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_shape)
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past}
def _reorder_cache(self, past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
@add_start_docstrings("""Bert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING)
class BertForMaskedLM(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
_keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
def __init__(self, config):
super().__init__(config)
if config.is_decoder:
logger.warning(
"If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.bert = BertModel(config, add_pooling_layer=False)
self.cls = BertOnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss() # -100 index = padding token
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return MaskedLMOutput(
loss=masked_lm_loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs):
input_shape = input_ids.shape
effective_batch_size = input_shape[0]
# add a dummy token
if self.config.pad_token_id is None:
raise ValueError("The PAD token should be defined for generation")
attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1)
dummy_token = torch.full(
(effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device
)
input_ids = torch.cat([input_ids, dummy_token], dim=1)
return {"input_ids": input_ids, "attention_mask": attention_mask}
@add_start_docstrings(
"""Bert Model with a `next sentence prediction (classification)` head on top.""",
BERT_START_DOCSTRING,
)
class BertForNextSentencePrediction(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
self.cls = BertOnlyNSPHead(config)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
**kwargs,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
(see `input_ids` docstring). Indices should be in `[0, 1]`:
- 0 indicates sequence B is a continuation of sequence A,
- 1 indicates sequence B is a random sequence.
Returns:
Example:
```python
>>> from transformers import BertTokenizer, BertForNextSentencePrediction
>>> import torch
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = BertForNextSentencePrediction.from_pretrained("bert-base-uncased")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")
>>> outputs = model(**encoding, labels=torch.LongTensor([1]))
>>> logits = outputs.logits
>>> assert logits[0, 0] < logits[0, 1] # next sentence was random
```
"""
if "next_sentence_label" in kwargs:
warnings.warn(
"The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.",
FutureWarning,
)
labels = kwargs.pop("next_sentence_label")
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
seq_relationship_scores = self.cls(pooled_output)
next_sentence_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1))
if not return_dict:
output = (seq_relationship_scores,) + outputs[2:]
return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
return NextSentencePredictorOutput(
loss=next_sentence_loss,
logits=seq_relationship_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
output) e.g. for GLUE tasks.
""",
BERT_START_DOCSTRING,
)
class BertForSequenceClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.bert = BertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=SequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
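# Infer the problem type once from the labels: a single output unit means regression (MSE),
# integer labels mean single-label classification (cross-entropy), anything else is treated as
# multi-label classification (BCE with logits).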
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
BERT_START_DOCSTRING,
)
class BertForMultipleChoice(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.bert = BertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, 1)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=MultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
`input_ids` above)
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
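# Multiple-choice inputs arrive as [batch, num_choices, seq_len]; they are flattened to
# [batch * num_choices, seq_len] for the encoder, and the per-choice logits are reshaped back
# to [batch, num_choices] before the cross-entropy over choices.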
num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
inputs_embeds = (
inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
if inputs_embeds is not None
else None
)
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
if not return_dict:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return MultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
BERT_START_DOCSTRING,
)
class BertForTokenClassification(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@add_start_docstrings(
"""
Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
BERT_START_DOCSTRING,
)
class BertForQuestionAnswering(BertPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=QuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, the split can add an extra dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
|
BridgeTower/src/modules/bert_model.py/0
|
{
"file_path": "BridgeTower/src/modules/bert_model.py",
"repo_id": "BridgeTower",
"token_count": 35244
}
| 155 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from data.base_dataset import BaseDataset, get_params, get_transform
from PIL import Image
import util.util as util
import os
import torch
class FaceTestDataset(BaseDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
parser.add_argument(
"--no_pairing_check",
action="store_true",
help="If specified, skip sanity check of correct label-image file pairing",
)
# parser.set_defaults(contain_dontcare_label=False)
# parser.set_defaults(no_instance=True)
return parser
def initialize(self, opt):
self.opt = opt
image_path = os.path.join(opt.dataroot, opt.old_face_folder)
label_path = os.path.join(opt.dataroot, opt.old_face_label_folder)
image_list = os.listdir(image_path)
image_list = sorted(image_list)
# image_list=image_list[:opt.max_dataset_size]
self.label_paths = label_path  # root directory containing the per-part label masks
self.image_paths = image_list  # sorted file names of all input images
self.parts = [
"skin",
"hair",
"l_brow",
"r_brow",
"l_eye",
"r_eye",
"eye_g",
"l_ear",
"r_ear",
"ear_r",
"nose",
"mouth",
"u_lip",
"l_lip",
"neck",
"neck_l",
"cloth",
"hat",
]
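# The entries above name the per-component face masks; for each image, a mask file
# "<name>_<part>.png" is looked up and contributes one channel of the label tensor built in __getitem__.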
size = len(self.image_paths)
self.dataset_size = size
def __getitem__(self, index):
params = get_params(self.opt, (-1, -1))
image_name = self.image_paths[index]
image_path = os.path.join(self.opt.dataroot, self.opt.old_face_folder, image_name)
image = Image.open(image_path)
image = image.convert("RGB")
transform_image = get_transform(self.opt, params)
image_tensor = transform_image(image)
img_name = image_name[:-4]
transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
full_label = []
cnt = 0
for each_part in self.parts:
part_name = img_name + "_" + each_part + ".png"
part_url = os.path.join(self.label_paths, part_name)
if os.path.exists(part_url):
label = Image.open(part_url).convert("RGB")
label_tensor = transform_label(label) ## 3 channels and pixel [0,1]
full_label.append(label_tensor[0])
else:
current_part = torch.zeros((self.opt.load_size, self.opt.load_size))
full_label.append(current_part)
cnt += 1
full_label_tensor = torch.stack(full_label, 0)
input_dict = {
"label": full_label_tensor,
"image": image_tensor,
"path": image_path,
}
return input_dict
def __len__(self):
return self.dataset_size
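# ---------------------------------------------------------------------------
# Hedged illustration (added for clarity; not part of the upstream file):
# __getitem__ stacks one binary mask per entry of `self.parts`, so the
# returned "label" tensor has shape (len(parts), load_size, load_size) and
# channel k corresponds to parts[k]. The toy tensors below are stand-ins.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    parts = ["skin", "hair", "l_brow"]  # abbreviated part list for the sketch
    load_size = 4
    masks = [torch.zeros(load_size, load_size) for _ in parts]
    masks[parts.index("hair")][0, :] = 1.0  # pretend the top row is hair
    full_label_tensor = torch.stack(masks, 0)
    print(full_label_tensor.shape)                       # torch.Size([3, 4, 4])
    print(full_label_tensor[parts.index("hair")].sum())  # tensor(4.)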
|
Bringing-Old-Photos-Back-to-Life/Face_Enhancement/data/face_dataset.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Face_Enhancement/data/face_dataset.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 1513
}
| 156 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn.parallel
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
class Downsample(nn.Module):
# https://github.com/adobe/antialiased-cnns
def __init__(self, pad_type="reflect", filt_size=3, stride=2, channels=None, pad_off=0):
super(Downsample, self).__init__()
self.filt_size = filt_size
self.pad_off = pad_off
self.pad_sizes = [
int(1.0 * (filt_size - 1) / 2),
int(np.ceil(1.0 * (filt_size - 1) / 2)),
int(1.0 * (filt_size - 1) / 2),
int(np.ceil(1.0 * (filt_size - 1) / 2)),
]
self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
self.stride = stride
self.off = int((self.stride - 1) / 2.0)
self.channels = channels
# print('Filter size [%i]'%filt_size)
if self.filt_size == 1:
a = np.array([1.0,])
elif self.filt_size == 2:
a = np.array([1.0, 1.0])
elif self.filt_size == 3:
a = np.array([1.0, 2.0, 1.0])
elif self.filt_size == 4:
a = np.array([1.0, 3.0, 3.0, 1.0])
elif self.filt_size == 5:
a = np.array([1.0, 4.0, 6.0, 4.0, 1.0])
elif self.filt_size == 6:
a = np.array([1.0, 5.0, 10.0, 10.0, 5.0, 1.0])
elif self.filt_size == 7:
a = np.array([1.0, 6.0, 15.0, 20.0, 15.0, 6.0, 1.0])
filt = torch.Tensor(a[:, None] * a[None, :])
filt = filt / torch.sum(filt)
self.register_buffer("filt", filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))
self.pad = get_pad_layer(pad_type)(self.pad_sizes)
def forward(self, inp):
if self.filt_size == 1:
if self.pad_off == 0:
return inp[:, :, :: self.stride, :: self.stride]
else:
return self.pad(inp)[:, :, :: self.stride, :: self.stride]
else:
return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])
def get_pad_layer(pad_type):
if pad_type in ["refl", "reflect"]:
PadLayer = nn.ReflectionPad2d
elif pad_type in ["repl", "replicate"]:
PadLayer = nn.ReplicationPad2d
elif pad_type == "zero":
PadLayer = nn.ZeroPad2d
else:
print("Pad type [%s] not recognized" % pad_type)
return PadLayer
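# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the upstream file):
# Downsample blurs each channel with a fixed binomial filter before striding,
# i.e. anti-aliased ("blur pool") downsampling. Shapes below are illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    x = torch.randn(2, 16, 64, 64)                  # (batch, channels, H, W)
    blur_pool = Downsample(filt_size=3, stride=2, channels=16)
    y = blur_pool(x)
    print(y.shape)                                   # torch.Size([2, 16, 32, 32])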
|
Bringing-Old-Photos-Back-to-Life/Global/detection_models/antialiasing.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Global/detection_models/antialiasing.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 1278
}
| 157 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import time
from collections import OrderedDict
from options.train_options import TrainOptions
from data.data_loader import CreateDataLoader
from models.models import create_da_model
import util.util as util
from util.visualizer import Visualizer
import os
import numpy as np
import torch
import torchvision.utils as vutils
from torch.autograd import Variable
opt = TrainOptions().parse()
if opt.debug:
opt.display_freq = 1
opt.print_freq = 1
opt.niter = 1
opt.niter_decay = 0
opt.max_dataset_size = 10
data_loader = CreateDataLoader(opt)
dataset = data_loader.load_data()
dataset_size = len(dataset) * opt.batchSize
print('#training images = %d' % dataset_size)
path = os.path.join(opt.checkpoints_dir, opt.name, 'model.txt')
visualizer = Visualizer(opt)
iter_path = os.path.join(opt.checkpoints_dir, opt.name, 'iter.txt')
if opt.continue_train:
try:
start_epoch, epoch_iter = np.loadtxt(iter_path, delimiter=',', dtype=int)
except:
start_epoch, epoch_iter = 1, 0
visualizer.print_save('Resuming from epoch %d at iteration %d' % (start_epoch - 1, epoch_iter))
else:
start_epoch, epoch_iter = 1, 0
# opt.which_epoch=start_epoch-1
model = create_da_model(opt)
fd = open(path, 'w')
fd.write(str(model.module.netG))
fd.write(str(model.module.netD))
fd.close()
total_steps = (start_epoch - 1) * dataset_size + epoch_iter
display_delta = total_steps % opt.display_freq
print_delta = total_steps % opt.print_freq
save_delta = total_steps % opt.save_latest_freq
for epoch in range(start_epoch, opt.niter + opt.niter_decay + 1):
epoch_start_time = time.time()
if epoch != start_epoch:
epoch_iter = epoch_iter % dataset_size
for i, data in enumerate(dataset, start=epoch_iter):
iter_start_time = time.time()
total_steps += opt.batchSize
epoch_iter += opt.batchSize
# whether to collect output images
save_fake = total_steps % opt.display_freq == display_delta
############## Forward Pass ######################
losses, generated = model(Variable(data['label']), Variable(data['inst']),
Variable(data['image']), Variable(data['feat']), infer=save_fake)
# sum per device losses
losses = [torch.mean(x) if not isinstance(x, int) else x for x in losses]
loss_dict = dict(zip(model.module.loss_names, losses))
# calculate final loss scalar
loss_D = (loss_dict['D_fake'] + loss_dict['D_real']) * 0.5
        loss_featD = (loss_dict['featD_fake'] + loss_dict['featD_real']) * 0.5
loss_G = loss_dict['G_GAN'] + loss_dict.get('G_GAN_Feat', 0) + loss_dict.get('G_VGG', 0) + loss_dict['G_KL'] + loss_dict['G_featD']
############### Backward Pass ####################
# update generator weights
model.module.optimizer_G.zero_grad()
loss_G.backward()
model.module.optimizer_G.step()
# update discriminator weights
model.module.optimizer_D.zero_grad()
loss_D.backward()
model.module.optimizer_D.step()
model.module.optimizer_featD.zero_grad()
loss_featD.backward()
model.module.optimizer_featD.step()
# call(["nvidia-smi", "--format=csv", "--query-gpu=memory.used,memory.free"])
############## Display results and errors ##########
### print out errors
if total_steps % opt.print_freq == print_delta:
errors = {k: v.data if not isinstance(v, int) else v for k, v in loss_dict.items()}
t = (time.time() - iter_start_time) / opt.batchSize
visualizer.print_current_errors(epoch, epoch_iter, errors, t, model.module.old_lr)
visualizer.plot_current_errors(errors, total_steps)
### display output images
if save_fake:
if not os.path.exists(opt.outputs_dir + opt.name):
os.makedirs(opt.outputs_dir + opt.name)
imgs_num = data['label'].shape[0]
imgs = torch.cat((data['label'], generated.data.cpu(), data['image']), 0)
imgs = (imgs + 1.) / 2.0
try:
image_grid = vutils.save_image(imgs, opt.outputs_dir + opt.name + '/' + str(epoch) + '_' + str(
total_steps) + '.png',
nrow=imgs_num, padding=0, normalize=True)
except OSError as err:
print(err)
if epoch_iter >= dataset_size:
break
# end of epoch
iter_end_time = time.time()
print('End of epoch %d / %d \t Time Taken: %d sec' %
(epoch, opt.niter + opt.niter_decay, time.time() - epoch_start_time))
### save model for this epoch
if epoch % opt.save_epoch_freq == 0:
print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
model.module.save('latest')
model.module.save(epoch)
np.savetxt(iter_path, (epoch + 1, 0), delimiter=',', fmt='%d')
### instead of only training the local enhancer, train the entire network after certain iterations
if (opt.niter_fix_global != 0) and (epoch == opt.niter_fix_global):
model.module.update_fixed_params()
### linearly decay learning rate after certain iterations
if epoch > opt.niter:
model.module.update_learning_rate()
|
Bringing-Old-Photos-Back-to-Life/Global/train_domain_A.py/0
|
{
"file_path": "Bringing-Old-Photos-Back-to-Life/Global/train_domain_A.py",
"repo_id": "Bringing-Old-Photos-Back-to-Life",
"token_count": 2281
}
| 158 |
# TEXT ENCODER CONFIG
text_model: 'bert-base-uncased'
text_len: 100
transformer_embed_dim: 768
freeze_text_encoder_weights: True
# AUDIO ENCODER CONFIG
audioenc_name: 'Cnn14'
out_emb: 2048
sampling_rate: 44100
duration: 5
fmin: 50
fmax: 14000
n_fft: 1028
hop_size: 320
mel_bins: 64
window_size: 1024
# PROJECTION SPACE CONFIG
d_proj: 1024
temperature: 0.003
# TRAINING AND EVALUATION CONFIG
num_classes: 527
batch_size: 1024
demo: False
|
CLAP/msclap/configs/config_2022.yml/0
|
{
"file_path": "CLAP/msclap/configs/config_2022.yml",
"repo_id": "CLAP",
"token_count": 178
}
| 159 |
.. _Command-line Tools:
Command-line Tools
==================
Fairseq provides several command-line tools for training and evaluating models:
- :ref:`fairseq-preprocess`: Data pre-processing: build vocabularies and binarize training data
- :ref:`fairseq-train`: Train a new model on one or multiple GPUs
- :ref:`fairseq-generate`: Translate pre-processed data with a trained model
- :ref:`fairseq-interactive`: Translate raw text with a trained model
- :ref:`fairseq-score`: BLEU scoring of generated translations against reference translations
- :ref:`fairseq-eval-lm`: Language model evaluation
.. _fairseq-preprocess:
fairseq-preprocess
~~~~~~~~~~~~~~~~~~
.. automodule:: fairseq_cli.preprocess
.. argparse::
:module: fairseq.options
:func: get_preprocessing_parser
:prog: fairseq-preprocess
.. _fairseq-train:
fairseq-train
~~~~~~~~~~~~~
.. automodule:: fairseq_cli.train
.. argparse::
:module: fairseq.options
:func: get_training_parser
:prog: fairseq-train
.. _fairseq-generate:
fairseq-generate
~~~~~~~~~~~~~~~~
.. automodule:: fairseq_cli.generate
.. argparse::
:module: fairseq.options
:func: get_generation_parser
:prog: fairseq-generate
.. _fairseq-interactive:
fairseq-interactive
~~~~~~~~~~~~~~~~~~~
.. automodule:: fairseq_cli.interactive
.. argparse::
:module: fairseq.options
:func: get_interactive_generation_parser
:prog: fairseq-interactive
.. _fairseq-score:
fairseq-score
~~~~~~~~~~~~~
.. automodule:: fairseq_cli.score
.. argparse::
:module: fairseq_cli.score
:func: get_parser
:prog: fairseq-score
.. _fairseq-eval-lm:
fairseq-eval-lm
~~~~~~~~~~~~~~~
.. automodule:: fairseq_cli.eval_lm
.. argparse::
:module: fairseq.options
:func: get_eval_lm_parser
:prog: fairseq-eval-lm
|
COCO-LM/fairseq/docs/command_line_tools.rst/0
|
{
"file_path": "COCO-LM/fairseq/docs/command_line_tools.rst",
"repo_id": "COCO-LM",
"token_count": 714
}
| 160 |
#!/bin/bash
SCRIPTS=mosesdecoder/scripts
TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl
NORM_PUNC=$SCRIPTS/tokenizer/normalize-punctuation.perl
REM_NON_PRINT_CHAR=$SCRIPTS/tokenizer/remove-non-printing-char.perl
BPEROOT=subword-nmt/subword_nmt
BPE_CODE=wmt18_en_de/code
SUBSAMPLE_SIZE=25000000
LANG=de
OUTDIR=wmt18_${LANG}_mono
orig=orig
tmp=$OUTDIR/tmp
mkdir -p $OUTDIR $tmp
URLS=(
"http://www.statmt.org/wmt14/training-monolingual-news-crawl/news.2007.de.shuffled.gz"
"http://www.statmt.org/wmt14/training-monolingual-news-crawl/news.2008.de.shuffled.gz"
"http://www.statmt.org/wmt14/training-monolingual-news-crawl/news.2009.de.shuffled.gz"
"http://www.statmt.org/wmt14/training-monolingual-news-crawl/news.2010.de.shuffled.gz"
"http://www.statmt.org/wmt14/training-monolingual-news-crawl/news.2011.de.shuffled.gz"
"http://www.statmt.org/wmt14/training-monolingual-news-crawl/news.2012.de.shuffled.gz"
"http://www.statmt.org/wmt14/training-monolingual-news-crawl/news.2013.de.shuffled.gz"
"http://www.statmt.org/wmt15/training-monolingual-news-crawl-v2/news.2014.de.shuffled.v2.gz"
"http://data.statmt.org/wmt16/translation-task/news.2015.de.shuffled.gz"
"http://data.statmt.org/wmt17/translation-task/news.2016.de.shuffled.gz"
"http://data.statmt.org/wmt18/translation-task/news.2017.de.shuffled.deduped.gz"
)
FILES=(
"news.2007.de.shuffled.gz"
"news.2008.de.shuffled.gz"
"news.2009.de.shuffled.gz"
"news.2010.de.shuffled.gz"
"news.2011.de.shuffled.gz"
"news.2012.de.shuffled.gz"
"news.2013.de.shuffled.gz"
"news.2014.de.shuffled.v2.gz"
"news.2015.de.shuffled.gz"
"news.2016.de.shuffled.gz"
"news.2017.de.shuffled.deduped.gz"
)
cd $orig
for ((i=0;i<${#URLS[@]};++i)); do
file=${FILES[i]}
if [ -f $file ]; then
echo "$file already exists, skipping download"
else
url=${URLS[i]}
wget "$url"
fi
done
cd ..
if [ -f $tmp/monolingual.${SUBSAMPLE_SIZE}.${LANG} ]; then
echo "found monolingual sample, skipping shuffle/sample/tokenize"
else
gzip -c -d -k $(for FILE in "${FILES[@]}"; do echo $orig/$FILE; done) \
| shuf -n $SUBSAMPLE_SIZE \
| perl $NORM_PUNC $LANG \
| perl $REM_NON_PRINT_CHAR \
| perl $TOKENIZER -threads 8 -a -l $LANG \
> $tmp/monolingual.${SUBSAMPLE_SIZE}.${LANG}
fi
if [ -f $tmp/bpe.monolingual.${SUBSAMPLE_SIZE}.${LANG} ]; then
echo "found BPE monolingual sample, skipping BPE step"
else
python $BPEROOT/apply_bpe.py -c $BPE_CODE \
< $tmp/monolingual.${SUBSAMPLE_SIZE}.${LANG} \
> $tmp/bpe.monolingual.${SUBSAMPLE_SIZE}.${LANG}
fi
if [ -f $tmp/bpe.monolingual.dedup.${SUBSAMPLE_SIZE}.${LANG} ]; then
echo "found deduplicated monolingual sample, skipping deduplication step"
else
python deduplicate_lines.py $tmp/bpe.monolingual.${SUBSAMPLE_SIZE}.${LANG} \
> $tmp/bpe.monolingual.dedup.${SUBSAMPLE_SIZE}.${LANG}
fi
if [ -f $OUTDIR/bpe.monolingual.dedup.00.de ]; then
echo "found sharded data, skipping sharding step"
else
split --lines 1000000 --numeric-suffixes \
--additional-suffix .${LANG} \
$tmp/bpe.monolingual.dedup.${SUBSAMPLE_SIZE}.${LANG} \
$OUTDIR/bpe.monolingual.dedup.
fi
|
COCO-LM/fairseq/examples/backtranslation/prepare-de-monolingual.sh/0
|
{
"file_path": "COCO-LM/fairseq/examples/backtranslation/prepare-de-monolingual.sh",
"repo_id": "COCO-LM",
"token_count": 1519
}
| 161 |
# Convolutional Sequence to Sequence Learning (Gehring et al., 2017)
## Pre-trained models
Description | Dataset | Model | Test set(s)
---|---|---|---
Convolutional <br> ([Gehring et al., 2017](https://arxiv.org/abs/1705.03122)) | [WMT14 English-French](http://statmt.org/wmt14/translation-task.html#Download) | [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt14.v2.en-fr.fconv-py.tar.bz2) | newstest2014: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.v2.en-fr.newstest2014.tar.bz2) <br> newstest2012/2013: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.v2.en-fr.ntst1213.tar.bz2)
Convolutional <br> ([Gehring et al., 2017](https://arxiv.org/abs/1705.03122)) | [WMT14 English-German](http://statmt.org/wmt14/translation-task.html#Download) | [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt14.en-de.fconv-py.tar.bz2) | newstest2014: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt14.en-de.newstest2014.tar.bz2)
Convolutional <br> ([Gehring et al., 2017](https://arxiv.org/abs/1705.03122)) | [WMT17 English-German](http://statmt.org/wmt17/translation-task.html#Download) | [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/models/wmt17.v2.en-de.fconv-py.tar.bz2) | newstest2014: <br> [download (.tar.bz2)](https://dl.fbaipublicfiles.com/fairseq/data/wmt17.v2.en-de.newstest2014.tar.bz2)
## Example usage
See the [translation README](../translation/README.md) for instructions on reproducing results for WMT'14 En-De and
WMT'14 En-Fr using the `fconv_wmt_en_de` and `fconv_wmt_en_fr` model architectures.
## Citation
```bibtex
@inproceedings{gehring2017convs2s,
title = {Convolutional Sequence to Sequence Learning},
  author = {Gehring, Jonas and Auli, Michael and Grangier, David and Yarats, Denis and Dauphin, Yann N},
booktitle = {Proc. of ICML},
year = 2017,
}
```
|
COCO-LM/fairseq/examples/conv_seq2seq/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/conv_seq2seq/README.md",
"repo_id": "COCO-LM",
"token_count": 786
}
| 162 |
# Fully Sharded Data Parallel (FSDP)
## Overview
Recent work by [Microsoft](https://arxiv.org/abs/1910.02054) and
[Google](https://arxiv.org/abs/2004.13336) has shown that data parallel
training can be made significantly more efficient by sharding the model
parameters and optimizer state across data parallel workers. These ideas are
encapsulated in the new **`FullyShardedDataParallel` (FSDP)** wrapper provided
by [fairscale](https://github.com/facebookresearch/fairscale/).
Compared to PyTorch DDP:
* FSDP produces identical results to PyTorch DDP (it's still synchronous data parallel training)
* FSDP shards parameters (FP16 + FP32) and optimizer state across data parallel GPUs
* FSDP is faster than PyTorch DDP because the optimizer step is sharded, and the communication can be overlapped with the forward pass
* FSDP enables training 13B parameter models on 8 GPUs and 175B parameter models on 128 GPUs
FSDP is fully supported in fairseq via the following new arguments:
* `--ddp-backend=fully_sharded`: enables full sharding via FSDP
* `--cpu-offload`: offloads the optimizer state and FP32 model copy to CPU (combine with `--optimizer=cpu_adam`)
* `--no-reshard-after-forward`: increases training speed for large models (1B+ params) and is similar to ZeRO stage 2
* other popular options (`--fp16`, `--update-freq`, `--checkpoint-activations`, `--offload-activations`, etc.) continue to work as normal
<details><summary>Limitations</summary><p>
FSDP currently has several limitations compared to fairseq's default DDP backend (PyTorch DDP):
* while FSDP is fully compatible with pointwise Optimizers (e.g., Adam, AdamW, Adadelta, Adamax, SGD, etc.), it is not currently compatible with non-pointwise Optimizers (e.g., Adagrad, Adafactor, LAMB, etc.)
* FSDP depends on flattening the parameters, so models that currently require `--fp16-no-flatten-grads` may not be supported
See the [fairscale docs](https://fairscale.readthedocs.io/en/latest/api/nn/fsdp_tips.html) for a more detailed
explanation of these and other limitations.
</p></details>
<details><summary>How it works</summary><p>
<img width="800" alt="Fully Sharded Data Parallel" src="https://user-images.githubusercontent.com/231798/110406775-c2de0000-8050-11eb-9718-fbfc4510a76a.png">
See the [fairscale docs](https://fairscale.readthedocs.io/en/latest/api/nn/fsdp_tips.html) for a more detailed
explanation of how FSDP works.
</p></details>
## Example usage
The following examples illustrate how to train a very large language model with
13 billion parameters on 1 GPU by offloading parameters and optimizer states to
CPU, or on 8 GPUs by fully sharding the params and optimizer states across GPUs.
These examples use the WikiText-103 dataset for demonstration purposes, but
in practice a much larger dataset will be needed to achieve good results.
Follow the [instructions here](https://github.com/pytorch/fairseq/blob/master/examples/roberta/README.pretraining.md#1-preprocess-the-data)
to preprocess the WikiText-103 dataset using the GPT-2/RoBERTa vocabulary.
### 13B params on 1 V100 GPU (with CPU offloading)
The following command trains a 13B parameter GPT-3 model on a single V100 GPU
using the `--cpu-offload` feature to offload parameters and optimizer states to
CPU. In this setting, the optimizer step (Adam) happens on CPU. We also use the
`--checkpoint-activations` feature (sometimes called [gradient checkpointing](https://pytorch.org/docs/stable/checkpoint.html)),
which further saves memory in exchange for a small increase in computation.
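Outside of fairseq, activation checkpointing is exposed in plain PyTorch through `torch.utils.checkpoint`; the minimal sketch below (illustrative only, not how fairseq wires it internally) shows the memory/compute trade-off on a single layer:

```python
import torch
from torch.utils.checkpoint import checkpoint

layer = torch.nn.Linear(1024, 1024)
x = torch.randn(8, 1024, requires_grad=True)

# Activations inside `layer` are not kept for backward; they are recomputed,
# trading a second forward pass for lower peak memory.
y = checkpoint(layer, x)
y.sum().backward()
```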
**Requirements:**
- Install the latest master version of fairscale: `pip install git+https://github.com/facebookresearch/fairscale.git@master`
- You'll need 32GB of GPU memory and ~256GB of system memory to train the 13B param model.
- If you have less system memory, the 6.7B param model can be trained with ~128GB of system memory, just set `--arch transformer_lm_gpt3_6_7`
- We use the CPU Adam optimizer from [DeepSpeed](https://github.com/microsoft/DeepSpeed), so you'll need to `pip install deepspeed` before running the command.
**Notes:**
- The command will take ~5 minutes to start training, during which time it will appear to be hung, since randomly initializing 13B weights can be slow.
- The `--cpu-offload` feature requires training in mixed precision (`--fp16`).
- Tune the `OMP_NUM_THREADS` env variable for best performance with CPU offloading.
- The example command below stops training after 10 steps (`--max-update 10`) and does not save checkpoints (`--no-save`).
```bash
OMP_NUM_THREADS=20 CUDA_VISIBLE_DEVICES=0 \
fairseq-train data-bin/wikitext-103-roberta-bpe-bin \
--ddp-backend fully_sharded --fp16 --fp16-init-scale 4 \
--cpu-offload --checkpoint-activations \
--task language_modeling --tokens-per-sample 2048 --batch-size 8 \
--arch transformer_lm_gpt3_13 \
--optimizer cpu_adam --adam-betas "(0.9,0.98)" \
--lr 0.0001 --lr-scheduler polynomial_decay --warmup-updates 5 --total-num-update 10 \
--max-update 10 --no-save --log-format json --log-interval 1
```
<details><summary>Example output</summary><p>
```
(...)
2021-03-08 12:29:51 | INFO | fairseq_cli.train | num. model params: 13,110,865,920 (num. trained: 13,110,865,920)
(...)
2021-03-08 12:29:51 | INFO | fairseq_cli.train | training on 1 devices (GPUs/TPUs)
2021-03-08 12:29:51 | INFO | fairseq_cli.train | max tokens per GPU = None and batch size per GPU = 8
(...)
Adam Optimizer #0 is created with AVX2 arithmetic capability.
Config: alpha=0.000100, betas=(0.900000, 0.980000), weight_decay=0.000000, adam_w=1
(...)
2021-03-08 12:31:36 | INFO | train_inner | {"epoch": 1, "update": 0.0, "loss": "16.475", "ppl": "91120.8", "wps": "0", "ups": "0", "wpb": "16384", "bsz": "8", "num_updates": "1", "lr": "2e-05", "gnorm": "20.751", "loss_scale": "4", "train_wall": "99", "gb_free": "9.3", "wall": "105"}
2021-03-08 12:32:33 | INFO | train_inner | {"epoch": 1, "update": 0.0, "loss": "16.446", "ppl": "89281.6", "wps": "288.7", "ups": "0.02", "wpb": "16384", "bsz": "8", "num_updates": "2", "lr": "4e-05", "gnorm": "19.777", "loss_scale": "4", "train_wall": "57", "gb_free": "9.3", "wall": "161"}
2021-03-08 12:33:12 | INFO | fairseq.trainer | NOTE: gradient overflow detected, ignoring gradient, setting loss scale to: 2.0
2021-03-08 12:33:51 | INFO | fairseq.trainer | NOTE: gradient overflow detected, ignoring gradient, setting loss scale to: 1.0
2021-03-08 12:34:45 | INFO | train_inner | {"epoch": 1, "update": 0.001, "loss": "25.22", "ppl": "3.90691e+07", "wps": "123.4", "ups": "0.01", "wpb": "16384", "bsz": "8", "num_updates": "3", "lr": "6e-05", "gnorm": "131.281", "loss_scale": "1", "train_wall": "133", "gb_free": "9.3", "wall": "294"}
2021-03-08 12:35:43 | INFO | train_inner | {"epoch": 1, "update": 0.001, "loss": "18.079", "ppl": "276809", "wps": "285.5", "ups": "0.02", "wpb": "16384", "bsz": "8", "num_updates": "4", "lr": "8e-05", "gnorm": "13.776", "loss_scale": "1", "train_wall": "57", "gb_free": "9.3", "wall": "351"}
2021-03-08 12:36:35 | INFO | train_inner | {"epoch": 1, "update": 0.001, "loss": "23.729", "ppl": "1.39088e+07", "wps": "316.7", "ups": "0.02", "wpb": "16384", "bsz": "8", "num_updates": "5", "lr": "0.0001", "gnorm": "72.774", "loss_scale": "1", "train_wall": "52", "gb_free": "9.3", "wall": "403"}
2021-03-08 12:37:28 | INFO | train_inner | {"epoch": 1, "update": 0.001, "loss": "20.429", "ppl": "1.41203e+06", "wps": "307.6", "ups": "0.02", "wpb": "16384", "bsz": "8", "num_updates": "6", "lr": "8e-05", "gnorm": "60.846", "loss_scale": "1", "train_wall": "53", "gb_free": "9.3", "wall": "456"}
2021-03-08 12:38:27 | INFO | train_inner | {"epoch": 1, "update": 0.001, "loss": "18.965", "ppl": "511684", "wps": "279.4", "ups": "0.02", "wpb": "16384", "bsz": "8", "num_updates": "7", "lr": "6e-05", "gnorm": "22.687", "loss_scale": "1", "train_wall": "59", "gb_free": "9.3", "wall": "515"}
2021-03-08 12:39:18 | INFO | train_inner | {"epoch": 1, "update": 0.001, "loss": "18.345", "ppl": "332887", "wps": "319.1", "ups": "0.02", "wpb": "16384", "bsz": "8", "num_updates": "8", "lr": "4e-05", "gnorm": "8.451", "loss_scale": "1", "train_wall": "51", "gb_free": "9.3", "wall": "566"}
2021-03-08 12:40:11 | INFO | train_inner | {"epoch": 1, "update": 0.002, "loss": "18.262", "ppl": "314336", "wps": "305.9", "ups": "0.02", "wpb": "16384", "bsz": "8", "num_updates": "9", "lr": "2e-05", "gnorm": "6.457", "loss_scale": "1", "train_wall": "54", "gb_free": "9.3", "wall": "620"}
2021-03-08 12:41:04 | INFO | train_inner | {"epoch": 1, "update": 0.002, "loss": "17.556", "ppl": "192686", "wps": "311.8", "ups": "0.02", "wpb": "16384", "bsz": "8", "num_updates": "10", "lr": "0", "gnorm": "5.796", "loss_scale": "1", "train_wall": "53", "gb_free": "9.3", "wall": "673"}
2021-03-08 12:41:04 | INFO | fairseq_cli.train | Stopping training due to num_updates: 10 >= max_update: 10
2021-03-08 12:41:04 | INFO | fairseq_cli.train | begin validation on "valid" subset
2021-03-08 12:43:15 | INFO | valid | {"epoch": 1, "valid_loss": "17.953", "valid_ppl": "253807", "valid_wps": "1868.4", "valid_wpb": "15400.2", "valid_bsz": "7.6", "valid_num_updates": "10"}
2021-03-08 12:43:15 | INFO | fairseq_cli.train | end of epoch 1 (average epoch stats below)
2021-03-08 12:43:15 | INFO | train | {"epoch": 1, "train_loss": "19.351", "train_ppl": "668509", "train_wps": "210.9", "train_ups": "0.01", "train_wpb": "16384", "train_bsz": "8", "train_num_updates": "10", "train_lr": "0", "train_gnorm": "36.26", "train_loss_scale": "1", "train_train_wall": "667", "train_gb_free": "9.3", "train_wall": "804"}
2021-03-08 12:43:15 | INFO | fairseq_cli.train | done training in 798.6 seconds
```
</p></details>
### 13B params on 8 V100 GPUs (with full parameter + optimizer state sharding)
FSDP can also shard the parameters and optimizer states across multiple GPUs,
reducing memory requirements significantly. On 8 x 32GB GPUs, sharding enables
training the same 13B parameter model *without offloading the parameters to
CPU*. However, without CPU offloading we'd only be able to fit a batch size of
1 per GPU, which would cause training speed to suffer.
We obtain the best performance on 8 GPUs by combining full sharding and CPU
offloading. The following command trains the same 13B parameter GPT-3 model as
before on 8 x 32GB V100 GPUs; training speed increases superlinearly from ~310
words per second to ~3200 words per second.
```bash
OMP_NUM_THREADS=20 CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
fairseq-train data-bin/wikitext-103-roberta-bpe-bin \
--ddp-backend fully_sharded --fp16 --fp16-init-scale 4 \
--cpu-offload --checkpoint-activations \
--task language_modeling --tokens-per-sample 2048 --batch-size 8 \
--arch transformer_lm_gpt3_13 \
--optimizer cpu_adam --adam-betas "(0.9,0.98)" \
--lr 0.0001 --lr-scheduler polynomial_decay --warmup-updates 5 --total-num-update 10 \
--max-update 10 --no-save --log-format json --log-interval 1
```
<details><summary>Example output</summary><p>
```
(...)
2021-03-08 18:04:09 | INFO | fairseq_cli.train | num. model params: 13,110,865,920 (num. trained: 13,110,865,920)
(...)
2021-03-08 18:04:09 | INFO | fairseq_cli.train | training on 8 devices (GPUs/TPUs)
2021-03-08 18:04:09 | INFO | fairseq_cli.train | max tokens per GPU = None and batch size per GPU = 8
(...)
Adam Optimizer #0 is created with AVX2 arithmetic capability.
Config: alpha=0.000100, betas=(0.900000, 0.980000), weight_decay=0.000000, adam_w=1
(...)
2021-03-08 18:05:06 | INFO | train_inner | {"epoch": 1, "update": 0.001, "loss": "16.408", "ppl": "86945.6", "wps": "0", "ups": "0", "wpb": "131072", "bsz": "64", "num_updates": "1", "lr": "2e-05", "gnorm": "18.27", "loss_scale": "4", "train_wall": "47", "gb_free": "9.3", "wall": "56"}
2021-03-08 18:05:45 | INFO | train_inner | {"epoch": 1, "update": 0.002, "loss": "16.352", "ppl": "83644.3", "wps": "3283.4", "ups": "0.03", "wpb": "131072", "bsz": "64", "num_updates": "2", "lr": "4e-05", "gnorm": "18.411", "loss_scale": "4", "train_wall": "40", "gb_free": "9.3", "wall": "96"}
2021-03-08 18:06:21 | INFO | fairseq.trainer | NOTE: gradient overflow detected, ignoring gradient, setting loss scale to: 2.0
2021-03-08 18:06:56 | INFO | fairseq.trainer | NOTE: gradient overflow detected, ignoring gradient, setting loss scale to: 1.0
2021-03-08 18:07:37 | INFO | train_inner | {"epoch": 1, "update": 0.006, "loss": "23.682", "ppl": "1.34537e+07", "wps": "1176.6", "ups": "0.01", "wpb": "131072", "bsz": "64", "num_updates": "3", "lr": "6e-05", "gnorm": "119.682", "loss_scale": "1", "train_wall": "111", "gb_free": "9.3", "wall": "208"}
2021-03-08 18:08:18 | INFO | train_inner | {"epoch": 1, "update": 0.007, "loss": "18.988", "ppl": "519921", "wps": "3189.1", "ups": "0.02", "wpb": "131072", "bsz": "64", "num_updates": "4", "lr": "8e-05", "gnorm": "14.934", "loss_scale": "1", "train_wall": "41", "gb_free": "9.3", "wall": "249"}
2021-03-08 18:08:59 | INFO | train_inner | {"epoch": 1, "update": 0.008, "loss": "20.08", "ppl": "1.10798e+06", "wps": "3223.1", "ups": "0.02", "wpb": "131072", "bsz": "64", "num_updates": "5", "lr": "0.0001", "gnorm": "59.92", "loss_scale": "1", "train_wall": "41", "gb_free": "9.3", "wall": "289"}
2021-03-08 18:09:39 | INFO | train_inner | {"epoch": 1, "update": 0.009, "loss": "18.323", "ppl": "327980", "wps": "3256.6", "ups": "0.02", "wpb": "131072", "bsz": "64", "num_updates": "6", "lr": "8e-05", "gnorm": "37.425", "loss_scale": "1", "train_wall": "40", "gb_free": "9.3", "wall": "330"}
2021-03-08 18:10:20 | INFO | train_inner | {"epoch": 1, "update": 0.01, "loss": "17.264", "ppl": "157354", "wps": "3188.7", "ups": "0.02", "wpb": "131072", "bsz": "64", "num_updates": "7", "lr": "6e-05", "gnorm": "10.824", "loss_scale": "1", "train_wall": "41", "gb_free": "9.3", "wall": "371"}
2021-03-08 18:11:01 | INFO | train_inner | {"epoch": 1, "update": 0.011, "loss": "16.794", "ppl": "113647", "wps": "3230", "ups": "0.02", "wpb": "131072", "bsz": "64", "num_updates": "8", "lr": "4e-05", "gnorm": "5.616", "loss_scale": "1", "train_wall": "41", "gb_free": "9.3", "wall": "411"}
2021-03-08 18:11:39 | INFO | train_inner | {"epoch": 1, "update": 0.012, "loss": "16.706", "ppl": "106938", "wps": "3384", "ups": "0.03", "wpb": "131072", "bsz": "64", "num_updates": "9", "lr": "2e-05", "gnorm": "5.318", "loss_scale": "1", "train_wall": "39", "gb_free": "9.3", "wall": "450"}
2021-03-08 18:12:19 | INFO | train_inner | {"epoch": 1, "update": 0.013, "loss": "16.548", "ppl": "95796.2", "wps": "3274.4", "ups": "0.02", "wpb": "131072", "bsz": "64", "num_updates": "10", "lr": "0", "gnorm": "5.22", "loss_scale": "1", "train_wall": "40", "gb_free": "9.3", "wall": "490"}
2021-03-08 18:12:19 | INFO | fairseq_cli.train | Stopping training due to num_updates: 10 >= max_update: 10
2021-03-08 18:12:19 | INFO | fairseq_cli.train | begin validation on "valid" subset
2021-03-08 18:12:45 | INFO | valid | {"epoch": 1, "valid_loss": "16.624", "valid_ppl": "101000", "valid_wps": "10855.9", "valid_wpb": "123202", "valid_bsz": "60.5", "valid_num_updates": "10"}
2021-03-08 18:12:45 | INFO | fairseq_cli.train | end of epoch 1 (average epoch stats below)
2021-03-08 18:12:45 | INFO | train | {"epoch": 1, "train_loss": "18.114", "train_ppl": "283776", "train_wps": "2567.8", "train_ups": "0.02", "train_wpb": "131072", "train_bsz": "64", "train_num_updates": "10", "train_lr": "0", "train_gnorm": "29.562", "train_loss_scale": "1", "train_train_wall": "480", "train_gb_free": "9.3", "train_wall": "516"}
2021-03-08 18:12:45 | INFO | fairseq_cli.train | done training in 509.9 seconds
```
</p></details>
|
COCO-LM/fairseq/examples/fully_sharded_data_parallel/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/fully_sharded_data_parallel/README.md",
"repo_id": "COCO-LM",
"token_count": 6076
}
| 163 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.quant_noise import quant_noise
from torch import Tensor, nn
from torch.nn import Parameter
@with_incremental_state
class MultiheadLinearAttention(nn.Module):
"""Multi-headed linformer attention.
    Projects the keys and values down to the compressed dimension before computing self-attention.
See "Linformer: Self-Attention with Linear Complexity" for more details.
"""
def __init__(
self,
embed_dim,
num_heads,
kdim=None,
vdim=None,
dropout=0.0,
bias=True,
add_bias_kv=False,
add_zero_attn=False,
self_attention=False,
encoder_decoder_attention=False,
q_noise=0.0,
qn_block_size=8,
compressed=1,
max_seq_len=256,
shared_kv_compressed=0,
shared_compress_layer=None,
freeze_compress=0,
):
super().__init__()
self.embed_dim = embed_dim
self.kdim = kdim if kdim is not None else embed_dim
self.vdim = vdim if vdim is not None else embed_dim
self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), "embed_dim must be divisible by num_heads"
self.scaling = self.head_dim ** -0.5
self.self_attention = self_attention
self.encoder_decoder_attention = encoder_decoder_attention
assert not self.self_attention or self.qkv_same_dim, (
"Self-attention requires query, key and " "value to be of the same size"
)
self.k_proj = quant_noise(
nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size
)
self.v_proj = quant_noise(
nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
)
self.q_proj = quant_noise(
nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
)
# used for compress sequence to subsequence
if shared_compress_layer is None:
self.compress_seq_len = max_seq_len // compressed
self.compress_k = nn.Linear(max_seq_len, self.compress_seq_len, bias=False)
if shared_kv_compressed == 0:
self.compress_v = nn.Linear(
max_seq_len, self.compress_seq_len, bias=False
)
self.layerwise_sharing = False
else:
self.compress_k = shared_compress_layer
if shared_kv_compressed == 0:
self.compress_v = shared_compress_layer
self.layerwise_sharing = True
self.shared_kv_compressed = shared_kv_compressed
self.out_proj = quant_noise(
nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
)
if add_bias_kv:
self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
else:
self.bias_k = self.bias_v = None
self.add_zero_attn = add_zero_attn
self.reset_parameters()
if freeze_compress == 1:
self.compress_k.weight.requires_grad = False
if shared_kv_compressed == 0:
self.compress_v.weight.requires_grad = False
self.onnx_trace = False
def prepare_for_onnx_export_(self):
self.onnx_trace = True
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
if (
not self.layerwise_sharing
): # otherwise, we already initialize the parameters
nn.init.xavier_uniform_(self.compress_k.weight, gain=1 / math.sqrt(2))
if self.shared_kv_compressed == 0:
nn.init.xavier_uniform_(
self.compress_v.weight, gain=1 / math.sqrt(2)
)
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
if (
not self.layerwise_sharing
): # otherwise, we already initialize the parameters
nn.init.xavier_uniform_(self.compress_k.weight)
if self.shared_kv_compressed == 0:
nn.init.xavier_uniform_(self.compress_v.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
def forward(
self,
query,
key: Optional[Tensor],
value: Optional[Tensor],
key_padding_mask: Optional[Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
need_weights: bool = True,
static_kv: bool = False,
attn_mask: Optional[Tensor] = None,
before_softmax: bool = False,
need_head_weights: bool = False,
) -> Tuple[Tensor, Optional[Tensor]]:
"""Input shape: Time x Batch x Channel
Args:
key_padding_mask (ByteTensor, optional): mask to exclude
keys that are pads, of shape `(batch, src_len)`, where
padding elements are indicated by 1s.
need_weights (bool, optional): return the attention weights,
                averaged over heads (default: True).
attn_mask (ByteTensor, optional): typically used to
implement causal attention, where the mask prevents the
attention from looking forward in time (default: None).
before_softmax (bool, optional): return the raw attention
weights and values before the attention softmax.
need_head_weights (bool, optional): return the attention
weights for each head. Implies *need_weights*. Default:
return the average attention weights over all heads.
"""
if need_head_weights:
need_weights = True
tgt_len, bsz, embed_dim = query.size()
assert embed_dim == self.embed_dim
assert list(query.size()) == [tgt_len, bsz, embed_dim]
if incremental_state is not None:
saved_state = self._get_input_buffer(incremental_state)
if saved_state is not None and "prev_key" in saved_state:
# previous time steps are cached - no need to recompute
# key and value if they are static
if static_kv:
assert self.encoder_decoder_attention and not self.self_attention
key = value = None
else:
saved_state = None
if self.self_attention:
q = self.q_proj(query)
k_input = query.permute(1, 2, 0).contiguous() # B * C * T
k_input = (
F.linear(k_input, self.compress_k.weight[:, 0:tgt_len])
.permute(2, 0, 1)
.contiguous()
)
k = self.k_proj(k_input)
v_input = query.permute(1, 2, 0).contiguous() # B * C * T
if self.shared_kv_compressed == 0:
v_input = (
F.linear(v_input, self.compress_v.weight[:, 0:tgt_len])
.permute(2, 0, 1)
.contiguous()
)
if self.shared_kv_compressed == 1: # use shared kv compressed linear layer
v_input = (
F.linear(v_input, self.compress_k.weight[:, 0:tgt_len])
.permute(2, 0, 1)
.contiguous()
)
v = self.v_proj(v_input)
elif self.encoder_decoder_attention:
# encoder-decoder attention
q = self.q_proj(query)
if key is None:
assert value is None
k = v = None
else:
k = self.k_proj(key)
v = self.v_proj(key)
else:
assert key is not None and value is not None
q = self.q_proj(query)
k = self.k_proj(key)
v = self.v_proj(value)
q *= self.scaling
if self.bias_k is not None:
assert self.bias_v is not None
k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
if key_padding_mask is not None:
key_padding_mask = torch.cat(
[
key_padding_mask,
key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
],
dim=1,
)
q = (
q.contiguous()
.view(tgt_len, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if k is not None:
k = (
k.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if v is not None:
v = (
v.contiguous()
.view(-1, bsz * self.num_heads, self.head_dim)
.transpose(0, 1)
)
if saved_state is not None:
# saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
if "prev_key" in saved_state:
_prev_key = saved_state["prev_key"]
assert _prev_key is not None
prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
k = prev_key
else:
assert k is not None
k = torch.cat([prev_key, k], dim=1)
if "prev_value" in saved_state:
_prev_value = saved_state["prev_value"]
assert _prev_value is not None
prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
if static_kv:
v = prev_value
else:
assert v is not None
v = torch.cat([prev_value, v], dim=1)
prev_key_padding_mask: Optional[Tensor] = None
if "prev_key_padding_mask" in saved_state:
prev_key_padding_mask = saved_state["prev_key_padding_mask"]
assert k is not None and v is not None
key_padding_mask = MultiheadLinearAttention._append_prev_key_padding_mask(
key_padding_mask=key_padding_mask,
prev_key_padding_mask=prev_key_padding_mask,
batch_size=bsz,
src_len=k.size(1),
static_kv=static_kv,
)
saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
saved_state["prev_key_padding_mask"] = key_padding_mask
# In this branch incremental_state is never None
assert incremental_state is not None
incremental_state = self._set_input_buffer(incremental_state, saved_state)
assert k is not None
src_len = k.size(1)
if self.add_zero_attn:
assert v is not None
src_len += 1
k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
if attn_mask is not None:
attn_mask = torch.cat(
[attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
)
attn_weights = torch.bmm(q, k.transpose(1, 2))
attn_weights = MultiheadLinearAttention.apply_sparse_mask(
attn_weights, tgt_len, src_len, bsz
)
assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]
if attn_mask is not None:
attn_mask = attn_mask.unsqueeze(0)
if self.onnx_trace:
attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
attn_weights += attn_mask
if before_softmax:
return attn_weights, v
attn_weights_float = utils.softmax(
attn_weights, dim=-1, onnx_trace=self.onnx_trace
)
attn_weights = attn_weights_float.type_as(attn_weights)
attn_probs = F.dropout(
attn_weights,
p=self.dropout,
training=self.training,
)
assert v is not None
attn = torch.bmm(attn_probs, v)
assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
if self.onnx_trace and attn.size(1) == 1:
# when ONNX tracing a single decoder step (sequence length == 1)
# the transpose is a no-op copy before view, thus unnecessary
attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
else:
attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
attn = self.out_proj(attn)
attn_weights: Optional[Tensor] = None
if need_weights:
attn_weights = attn_weights_float.view(
bsz, self.num_heads, tgt_len, src_len
).transpose(1, 0)
if not need_head_weights:
# average attention weights over heads
attn_weights = attn_weights.mean(dim=0)
return attn, attn_weights
@staticmethod
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - prev_key_padding_mask.size(1)),
device=prev_key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
elif key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - key_padding_mask.size(1)),
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
@torch.jit.export
def reorder_incremental_state(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
new_order: Tensor,
):
"""Reorder buffered internal state (for incremental generation)."""
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
if self.encoder_decoder_attention and input_buffer_k.size(
0
) == new_order.size(0):
break
input_buffer[k] = input_buffer_k.index_select(0, new_order)
incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return incremental_state
def _get_input_buffer(
self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
) -> Dict[str, Optional[Tensor]]:
result = self.get_incremental_state(incremental_state, "attn_state")
if result is not None:
return result
else:
empty_result: Dict[str, Optional[Tensor]] = {}
return empty_result
def _set_input_buffer(
self,
incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
buffer: Dict[str, Optional[Tensor]],
):
return self.set_incremental_state(incremental_state, "attn_state", buffer)
def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):
return attn_weights
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
items_to_add = {}
keys_to_remove = []
for k in state_dict.keys():
if k.endswith(prefix + "in_proj_weight"):
# in_proj_weight used to be q + k + v with same dimensions
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]
keys_to_remove.append(k)
k_bias = prefix + "in_proj_bias"
if k_bias in state_dict.keys():
dim = int(state_dict[k].shape[0] / 3)
items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
dim : 2 * dim
]
items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]
keys_to_remove.append(prefix + "in_proj_bias")
for k in keys_to_remove:
del state_dict[k]
for key, value in items_to_add.items():
state_dict[key] = value
|
COCO-LM/fairseq/examples/linformer/linformer_src/modules/multihead_linear_attention.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/linformer/linformer_src/modules/multihead_linear_attention.py",
"repo_id": "COCO-LM",
"token_count": 9896
}
| 164 |
# Megatron-11b
Megatron-11b is a unidirectional language model with `11B` parameters based on [Megatron-LM](https://arxiv.org/pdf/1909.08053.pdf). Following the original Megatron work, we trained the model using intra-layer model parallelism with each layer's parameters split across 8 GPUs.
Megatron-11b is trained on the same data and uses the same byte-pair encoding (BPE) as [RoBERTa](https://arxiv.org/pdf/1907.11692.pdf).
## Pre-trained models
Model | Description | # params | # filesize | Download
---|---|---|---|---
`megatron_11b` | megatron_11b unidirectional language model | 11B | 19Gb | [megatron_11b.tar.gz](https://dl.fbaipublicfiles.com/fairseq/models/model_parallel/megatron_11b.tar.gz)
#### Architecture:
Param | Value
---|---
embed_dim | 3072
ffn_dim | 3072 * 6
layers | 72
attention heads | 32
#### Training details:
Param | value
---|---
bsz | 512
num_updates | 300,000
peak_lr | 1.5e-04
lr scheduler | inverse_sqrt
clip norm | 0.0
## Example training command (model parallel)
Megatron-11b contains too many parameters to train on a single GPU. Following
the original Megatron work, we adopt an intra-layer model parallel training
approach in which each layer's parameters are split across multiple GPUs and
activations and gradients are communicated during the forward/backward pass,
respectively. We similarly split the loss computation using the
`vocab_parallel_cross_entropy` criterion.
The following training command illustrates how to do model parallel training in
fairseq. We assume that each machine (node) has 8 GPUs among which to split the
model parameters (`--model-parallel-size 8`). If you have access to multiple
nodes, you may combine this with data parallel training by increasing
`--distributed-world-size`.
To train Megatron-11b on a single node:
```bash
fairseq-train <DATA_PATH> \
--distributed-world-size 8 \
--memory-efficient-fp16 \
--num-workers 2 \
--model-parallel-size 8 \
--criterion vocab_parallel_cross_entropy \
--task language_modeling \
--sample-break-mode none \
--tokens-per-sample 1024 \
--arch transformer_lm_megatron_11b \
--share-decoder-input-output-embed \
--optimizer adam --adam-betas "(0.9, 0.98)" --adam-eps 1e-08 --clip-norm 0.0 \
--lr-scheduler inverse_sqrt --lr 0.00015 \
--warmup-updates 3000 --weight-decay 0.01 \
--dropout 0.1 --attention-dropout 0.1 \
--batch-size 2 \
--max-update 300000;
```
Note: the above was tested on a `DGX-1` box with `8xV100-32Gb` GPUs.
## Results
**[Wikitext103](https://blog.einstein.ai/the-wikitext-long-term-dependency-language-modeling-dataset/)**
Model | Valid perplexity | Test perplexity
---|---|---
`megatron_11b` | 10.64 | 10.54
## Evaluating `megatron_11b` on Wikitext-103
#### 1. Downloading Megatron-11b
```bash
# WARNING: this file is 19GB
wget https://dl.fbaipublicfiles.com/fairseq/models/model_parallel/megatron_11b.tar.gz
tar -xzvf megatron_11b.tar.gz
```
#### 2. Download Wikitext-103
```bash
wget https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-103-raw-v1.zip
unzip wikitext-103-raw-v1.zip
```
#### 3. Detokenize test tokens
Megatron-11b uses a byte-level BPE that expects raw (untokenized) input. Since
the wikitext-103 dataset comes tokenized, we apply a simple detokenization
process to restore the untokenized test set:
```bash
python -m examples.megatron_11b.detok wikitext-103-raw/wiki.test.raw > wikitext-103-raw/wiki.test.detok
```
#### 4. BPE encoding
```bash
wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json'
wget -N 'https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe'
python -m examples.roberta.multiprocessing_bpe_encoder \
--encoder-json encoder.json \
--vocab-bpe vocab.bpe \
--inputs "wikitext-103-raw/wiki.test.detok" \
--outputs "wikitext-103-raw/wiki.test.bpe" \
--workers 60;
```
#### 5. Fairseq binarize
```bash
fairseq-preprocess \
--only-source \
--testpref wikitext-103-raw/wiki.test.bpe \
--srcdict megatron_11b/dict.txt \
--destdir wikitext103-bin;
```
#### 6. Evaluating perplexity.
We can now evaluate perplexity on the test set. Note that because we've modified
the test set (via detokenization and BPE), the perplexity reported by
`fairseq-eval-lm` needs to be renormalized.
Compute unnormalized perplexity:
```bash
DATA_PATH=wikitext103-bin/
fairseq-eval-lm \
$DATA_PATH \
--path megatron_11b/model.pt \
--task language_modeling \
--gen-subset test \
--batch-size 8 \
--criterion cross_entropy \
--context-window 992 \
--distributed-world-size 8 \
--model-parallel-size 8;
# Expected PPL (unnormalized_ppl): [8.46]
# Note: the eval command needs to run on 8 GPUs for the released model
```
Renormalizing formula: `2 ^ ( log_2(unnormalized_PPL) * (270847 / 245566))`.
PPL after normalization: `10.54`
To renormalize the perplexity, we must account for the change in token count
after detokenizing and applying BPE. The formula for this is:
`2 ^ ( log_2(unnormalized_PPL) * (new_token_cnt / orig_token_cnt))`
For the wikitext-103 test set, the original token count is `245566` and the
token count after detokenization and applying BPE is `270847`.
The perplexity after renormalization is:
`2 ^ ( log_2(8.46) * (270847 / 245566)) = 10.54`
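As a quick sanity check, the renormalization is a one-liner in Python (numbers taken from this README, so the result matches up to rounding of the inputs):

```python
import math

unnormalized_ppl = 8.46
orig_token_cnt = 245566   # tokenized wikitext-103 test set
new_token_cnt = 270847    # after detokenization + byte-level BPE

renormalized = 2 ** (math.log2(unnormalized_ppl) * (new_token_cnt / orig_token_cnt))
print(round(renormalized, 2))  # ~10.54
```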
|
COCO-LM/fairseq/examples/megatron_11b/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/megatron_11b/README.md",
"repo_id": "COCO-LM",
"token_count": 1857
}
| 165 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import os
import csv
from collections import defaultdict
from six.moves import zip
import io
import wget
import sys
from subprocess import check_call, check_output
# scripts and data locations
CWD = os.getcwd()
UTILS = f"{CWD}/utils"
MOSES = f"{UTILS}/mosesdecoder"
WORKDIR_ROOT = os.environ.get('WORKDIR_ROOT', None)
if WORKDIR_ROOT is None or not WORKDIR_ROOT.strip():
    print('please specify your working directory root in the OS environment variable WORKDIR_ROOT. Exiting...')
sys.exit(-1)
# please download mosesdecoder here:
detok_cmd = f'{MOSES}/scripts/tokenizer/detokenizer.perl'
def call(cmd):
print(f"Executing: {cmd}")
check_call(cmd, shell=True)
class MultiLingualAlignedCorpusReader(object):
"""A class to read TED talk dataset
"""
def __init__(self, corpus_path, delimiter='\t',
target_token=True, bilingual=True, corpus_type='file',
lang_dict={'source': ['fr'], 'target': ['en']},
eval_lang_dict=None, zero_shot=False,
detok=True,
):
self.empty_line_flag = 'NULL'
self.corpus_path = corpus_path
self.delimiter = delimiter
self.bilingual = bilingual
self.lang_dict = lang_dict
self.lang_set = set()
self.target_token = target_token
self.zero_shot = zero_shot
self.eval_lang_dict = eval_lang_dict
self.corpus_type = corpus_type
self.detok = detok
for list_ in self.lang_dict.values():
for lang in list_:
self.lang_set.add(lang)
self.data = dict()
self.data['train'] = self.read_aligned_corpus(split_type='train')
self.data['test'] = self.read_aligned_corpus(split_type='test')
self.data['dev'] = self.read_aligned_corpus(split_type='dev')
def read_data(self, file_loc_):
data_list = list()
with io.open(file_loc_, 'r', encoding='utf8') as fp:
for line in fp:
try:
text = line.strip()
except IndexError:
text = self.empty_line_flag
data_list.append(text)
return data_list
def filter_text(self, dict_):
if self.target_token:
field_index = 1
else:
field_index = 0
data_dict = defaultdict(list)
list1 = dict_['source']
list2 = dict_['target']
for sent1, sent2 in zip(list1, list2):
try:
src_sent = ' '.join(sent1.split()[field_index: ])
except IndexError:
src_sent = 'NULL'
if src_sent.find(self.empty_line_flag) != -1 or len(src_sent) == 0:
continue
elif sent2.find(self.empty_line_flag) != -1 or len(sent2) == 0:
continue
else:
data_dict['source'].append(sent1)
data_dict['target'].append(sent2)
return data_dict
def read_file(self, split_type, data_type):
return self.data[split_type][data_type]
def save_file(self, path_, split_type, data_type, lang):
tok_file = tok_file_name(path_, lang)
with io.open(tok_file, 'w', encoding='utf8') as fp:
for line in self.data[split_type][data_type]:
fp.write(line + '\n')
if self.detok:
de_tok(tok_file, lang)
def add_target_token(self, list_, lang_id):
new_list = list()
token = '__' + lang_id + '__'
for sent in list_:
new_list.append(token + ' ' + sent)
return new_list
def read_from_single_file(self, path_, s_lang, t_lang):
data_dict = defaultdict(list)
with io.open(path_, 'r', encoding='utf8') as fp:
reader = csv.DictReader(fp, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
data_dict['source'].append(row[s_lang])
data_dict['target'].append(row[t_lang])
if self.target_token:
text = self.add_target_token(data_dict['source'], t_lang)
data_dict['source'] = text
return data_dict['source'], data_dict['target']
def read_aligned_corpus(self, split_type='train'):
data_dict = defaultdict(list)
iterable = []
s_list = []
t_list = []
if self.zero_shot:
if split_type == "train":
iterable = zip(self.lang_dict['source'], self.lang_dict['target'])
else:
iterable = zip(self.eval_lang_dict['source'], self.eval_lang_dict['target'])
elif self.bilingual:
iterable = itertools.product(self.lang_dict['source'], self.lang_dict['target'])
for s_lang, t_lang in iterable:
if s_lang == t_lang:
continue
if self.corpus_type == 'file':
split_type_file_path = os.path.join(self.corpus_path,
"all_talks_{}.tsv".format(split_type))
s_list, t_list = self.read_from_single_file(split_type_file_path,
s_lang=s_lang,
t_lang=t_lang)
data_dict['source'] += s_list
data_dict['target'] += t_list
new_data_dict = self.filter_text(data_dict)
return new_data_dict
def read_langs(corpus_path):
split_type_file_path = os.path.join(corpus_path, 'extracted',
"all_talks_dev.tsv")
with io.open(split_type_file_path, 'r', encoding='utf8') as fp:
reader = csv.DictReader(fp, delimiter='\t', quoting=csv.QUOTE_NONE)
header = next(reader)
return [k for k in header.keys() if k != 'talk_name']
def extra_english(corpus_path, split):
split_type_file_path = os.path.join(corpus_path,
f"all_talks_{split}.tsv")
output_split_type_file_path = os.path.join(corpus_path,
f"all_talks_{split}.en")
with io.open(split_type_file_path, 'r', encoding='utf8') as fp, io.open(output_split_type_file_path, 'w', encoding='utf8') as fw:
reader = csv.DictReader(fp, delimiter='\t', quoting=csv.QUOTE_NONE)
for row in reader:
line = row['en']
fw.write(line + '\n')
de_tok(output_split_type_file_path, 'en')
def tok_file_name(filename, lang):
seps = filename.split('.')
seps.insert(-1, 'tok')
tok_file = '.'.join(seps)
return tok_file
def de_tok(tok_file, lang):
# seps = tok_file.split('.')
# seps.insert(-1, 'detok')
# de_tok_file = '.'.join(seps)
de_tok_file = tok_file.replace('.tok.', '.')
cmd = 'perl {detok_cmd} -l {lang} < {tok_file} > {de_tok_file}'.format(
detok_cmd=detok_cmd, tok_file=tok_file,
de_tok_file=de_tok_file, lang=lang[:2])
call(cmd)
def extra_bitex(
ted_data_path,
lsrc_lang,
ltrg_lang,
target_token,
output_data_path,
):
def get_ted_lang(lang):
long_langs = ['pt-br', 'zh-cn', 'zh-tw', 'fr-ca']
if lang[:5] in long_langs:
return lang[:5]
        elif lang[:4] == 'calv':
return lang[:5]
elif lang in ['pt_BR', 'zh_CN', 'zh_TW', 'fr_CA']:
return lang.lower().replace('_', '-')
return lang[:2]
src_lang = get_ted_lang(lsrc_lang)
trg_lang = get_ted_lang(ltrg_lang)
train_lang_dict={'source': [src_lang], 'target': [trg_lang]}
eval_lang_dict = {'source': [src_lang], 'target': [trg_lang]}
obj = MultiLingualAlignedCorpusReader(corpus_path=ted_data_path,
lang_dict=train_lang_dict,
target_token=target_token,
corpus_type='file',
eval_lang_dict=eval_lang_dict,
zero_shot=False,
bilingual=True)
os.makedirs(output_data_path, exist_ok=True)
lsrc_lang = lsrc_lang.replace('-', '_')
ltrg_lang = ltrg_lang.replace('-', '_')
obj.save_file(output_data_path + f"/train.{lsrc_lang}-{ltrg_lang}.{lsrc_lang}",
split_type='train', data_type='source', lang=src_lang)
obj.save_file(output_data_path + f"/train.{lsrc_lang}-{ltrg_lang}.{ltrg_lang}",
split_type='train', data_type='target', lang=trg_lang)
obj.save_file(output_data_path + f"/test.{lsrc_lang}-{ltrg_lang}.{lsrc_lang}",
split_type='test', data_type='source', lang=src_lang)
obj.save_file(output_data_path + f"/test.{lsrc_lang}-{ltrg_lang}.{ltrg_lang}",
split_type='test', data_type='target', lang=trg_lang)
obj.save_file(output_data_path + f"/valid.{lsrc_lang}-{ltrg_lang}.{lsrc_lang}",
split_type='dev', data_type='source', lang=src_lang)
obj.save_file(output_data_path + f"/valid.{lsrc_lang}-{ltrg_lang}.{ltrg_lang}",
split_type='dev', data_type='target', lang=trg_lang)
def bar_custom(current, total, width=80):
print("Downloading: %d%% [%d / %d] Ks" % (current / total * 100, current / 1000, total / 1000), end='\r')
def download_and_extract(download_to, extract_to):
url = 'http://phontron.com/data/ted_talks.tar.gz'
filename = f"{download_to}/ted_talks.tar.gz"
if os.path.exists(filename):
print(f'{filename} has already been downloaded so skip')
else:
filename = wget.download(url, filename, bar=bar_custom)
if os.path.exists(f'{extract_to}/all_talks_train.tsv'):
print(f'Already extracted so skip')
else:
extract_cmd = f'tar xzfv "{filename}" -C "{extract_to}"'
call(extract_cmd)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--ted_data_path', type=str, default=WORKDIR_ROOT, required=False)
parser.add_argument(
'--direction-list',
type=str,
# default=None,
#for ML50
default=(
"bn_IN-en_XX,he_IL-en_XX,fa_IR-en_XX,id_ID-en_XX,sv_SE-en_XX,pt_XX-en_XX,ka_GE-en_XX,ka_GE-en_XX,th_TH-en_XX,"
"mr_IN-en_XX,hr_HR-en_XX,uk_UA-en_XX,az_AZ-en_XX,mk_MK-en_XX,gl_ES-en_XX,sl_SI-en_XX,mn_MN-en_XX,"
#non-english directions
# "fr_XX-de_DE," # replaced with wmt20
# "ja_XX-ko_KR,es_XX-pt_XX,ru_RU-sv_SE,hi_IN-bn_IN,id_ID-ar_AR,cs_CZ-pl_PL,ar_AR-tr_TR"
),
required=False)
parser.add_argument('--target-token', action='store_true', default=False)
parser.add_argument('--extract-all-english', action='store_true', default=False)
args = parser.parse_args()
import sys
import json
# TED Talks data directory
ted_data_path = args.ted_data_path
download_to = f'{ted_data_path}/downloads'
extract_to = f'{ted_data_path}/extracted'
#DESTDIR=${WORKDIR_ROOT}/ML50/raw/
output_path = f'{ted_data_path}/ML50/raw'
os.makedirs(download_to, exist_ok=True)
os.makedirs(extract_to, exist_ok=True)
os.makedirs(output_path, exist_ok=True)
download_and_extract(download_to, extract_to)
if args.extract_all_english:
for split in ['train', 'dev', 'test']:
extra_english(ted_data_path, split)
exit(0)
if args.direction_list is not None:
directions = args.direction_list.strip().split(',')
directions = [tuple(d.strip().split('-', 1)) for d in directions if d]
else:
langs = read_langs(ted_data_path)
# directions = [
# '{}.{}'.format(src, tgt)
# for src in langs
# for tgt in langs
# if src < tgt
# ]
directions = [('en', tgt) for tgt in langs if tgt != 'en']
print(f'num directions={len(directions)}: {directions}')
for src_lang, trg_lang in directions:
print('--working on {}-{}'.format(src_lang, trg_lang))
extra_bitex(
extract_to,
src_lang,
trg_lang,
target_token=args.target_token,
output_data_path=output_path
)
|
COCO-LM/fairseq/examples/multilingual/data_scripts/download_ted_and_extract.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/multilingual/data_scripts/download_ted_and_extract.py",
"repo_id": "COCO-LM",
"token_count": 6376
}
| 166 |
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Generate n-best translations using a trained model.
"""
import os
import subprocess
from contextlib import redirect_stdout
from fairseq import options
from fairseq_cli import generate, preprocess
from examples.noisychannel import rerank_options, rerank_utils
def gen_and_reprocess_nbest(args):
if args.score_dict_dir is None:
args.score_dict_dir = args.data
if args.prefix_len is not None:
assert (
args.right_to_left1 is False
), "prefix length not compatible with right to left models"
assert (
args.right_to_left2 is False
), "prefix length not compatible with right to left models"
if args.nbest_list is not None:
assert args.score_model2 is None
if args.backwards1:
scorer1_src = args.target_lang
scorer1_tgt = args.source_lang
else:
scorer1_src = args.source_lang
scorer1_tgt = args.target_lang
store_data = (
os.path.join(os.path.dirname(__file__)) + "/rerank_data/" + args.data_dir_name
)
if not os.path.exists(store_data):
os.makedirs(store_data)
(
pre_gen,
left_to_right_preprocessed_dir,
right_to_left_preprocessed_dir,
backwards_preprocessed_dir,
lm_preprocessed_dir,
) = rerank_utils.get_directories(
args.data_dir_name,
args.num_rescore,
args.gen_subset,
args.gen_model_name,
args.shard_id,
args.num_shards,
args.sampling,
args.prefix_len,
args.target_prefix_frac,
args.source_prefix_frac,
)
assert not (
args.right_to_left1 and args.backwards1
), "backwards right to left not supported"
assert not (
args.right_to_left2 and args.backwards2
), "backwards right to left not supported"
assert not (
args.prefix_len is not None and args.target_prefix_frac is not None
), "target prefix frac and target prefix len incompatible"
# make directory to store generation results
if not os.path.exists(pre_gen):
os.makedirs(pre_gen)
rerank1_is_gen = (
args.gen_model == args.score_model1 and args.source_prefix_frac is None
)
rerank2_is_gen = (
args.gen_model == args.score_model2 and args.source_prefix_frac is None
)
if args.nbest_list is not None:
rerank2_is_gen = True
# make directories to store preprossed nbest list for reranking
if not os.path.exists(left_to_right_preprocessed_dir):
os.makedirs(left_to_right_preprocessed_dir)
if not os.path.exists(right_to_left_preprocessed_dir):
os.makedirs(right_to_left_preprocessed_dir)
if not os.path.exists(lm_preprocessed_dir):
os.makedirs(lm_preprocessed_dir)
if not os.path.exists(backwards_preprocessed_dir):
os.makedirs(backwards_preprocessed_dir)
score1_file = rerank_utils.rescore_file_name(
pre_gen,
args.prefix_len,
args.model1_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards1,
)
if args.score_model2 is not None:
score2_file = rerank_utils.rescore_file_name(
pre_gen,
args.prefix_len,
args.model2_name,
target_prefix_frac=args.target_prefix_frac,
source_prefix_frac=args.source_prefix_frac,
backwards=args.backwards2,
)
predictions_bpe_file = pre_gen + "/generate_output_bpe.txt"
using_nbest = args.nbest_list is not None
if using_nbest:
print("Using predefined n-best list from interactive.py")
predictions_bpe_file = args.nbest_list
else:
if not os.path.isfile(predictions_bpe_file):
print("STEP 1: generate predictions using the p(T|S) model with bpe")
print(args.data)
param1 = [
args.data,
"--path",
args.gen_model,
"--shard-id",
str(args.shard_id),
"--num-shards",
str(args.num_shards),
"--nbest",
str(args.num_rescore),
"--batch-size",
str(args.batch_size),
"--beam",
str(args.num_rescore),
"--batch-size",
str(args.num_rescore),
"--gen-subset",
args.gen_subset,
"--source-lang",
args.source_lang,
"--target-lang",
args.target_lang,
]
if args.sampling:
param1 += ["--sampling"]
gen_parser = options.get_generation_parser()
input_args = options.parse_args_and_arch(gen_parser, param1)
print(input_args)
with open(predictions_bpe_file, "w") as f:
with redirect_stdout(f):
generate.main(input_args)
gen_output = rerank_utils.BitextOutputFromGen(
predictions_bpe_file,
bpe_symbol=args.post_process,
nbest=using_nbest,
prefix_len=args.prefix_len,
target_prefix_frac=args.target_prefix_frac,
)
if args.diff_bpe:
rerank_utils.write_reprocessed(
gen_output.no_bpe_source,
gen_output.no_bpe_hypo,
gen_output.no_bpe_target,
pre_gen + "/source_gen_bpe." + args.source_lang,
pre_gen + "/target_gen_bpe." + args.target_lang,
pre_gen + "/reference_gen_bpe." + args.target_lang,
)
bitext_bpe = args.rescore_bpe_code
bpe_src_param = [
"-c",
bitext_bpe,
"--input",
pre_gen + "/source_gen_bpe." + args.source_lang,
"--output",
pre_gen + "/rescore_data." + args.source_lang,
]
bpe_tgt_param = [
"-c",
bitext_bpe,
"--input",
pre_gen + "/target_gen_bpe." + args.target_lang,
"--output",
pre_gen + "/rescore_data." + args.target_lang,
]
subprocess.call(
[
"python",
os.path.join(
os.path.dirname(__file__), "subword-nmt/subword_nmt/apply_bpe.py"
),
]
+ bpe_src_param,
shell=False,
)
subprocess.call(
[
"python",
os.path.join(
os.path.dirname(__file__), "subword-nmt/subword_nmt/apply_bpe.py"
),
]
+ bpe_tgt_param,
shell=False,
)
if (not os.path.isfile(score1_file) and not rerank1_is_gen) or (
args.score_model2 is not None
and not os.path.isfile(score2_file)
and not rerank2_is_gen
):
print(
"STEP 2: process the output of generate.py so we have clean text files with the translations"
)
rescore_file = "/rescore_data"
if args.prefix_len is not None:
prefix_len_rescore_file = rescore_file + "prefix" + str(args.prefix_len)
if args.target_prefix_frac is not None:
target_prefix_frac_rescore_file = (
rescore_file + "target_prefix_frac" + str(args.target_prefix_frac)
)
if args.source_prefix_frac is not None:
source_prefix_frac_rescore_file = (
rescore_file + "source_prefix_frac" + str(args.source_prefix_frac)
)
if not args.right_to_left1 or not args.right_to_left2:
if not args.diff_bpe:
rerank_utils.write_reprocessed(
gen_output.source,
gen_output.hypo,
gen_output.target,
pre_gen + rescore_file + "." + args.source_lang,
pre_gen + rescore_file + "." + args.target_lang,
pre_gen + "/reference_file",
bpe_symbol=args.post_process,
)
if args.prefix_len is not None:
bw_rescore_file = prefix_len_rescore_file
rerank_utils.write_reprocessed(
gen_output.source,
gen_output.hypo,
gen_output.target,
pre_gen + prefix_len_rescore_file + "." + args.source_lang,
pre_gen + prefix_len_rescore_file + "." + args.target_lang,
pre_gen + "/reference_file",
prefix_len=args.prefix_len,
bpe_symbol=args.post_process,
)
elif args.target_prefix_frac is not None:
bw_rescore_file = target_prefix_frac_rescore_file
rerank_utils.write_reprocessed(
gen_output.source,
gen_output.hypo,
gen_output.target,
pre_gen
+ target_prefix_frac_rescore_file
+ "."
+ args.source_lang,
pre_gen
+ target_prefix_frac_rescore_file
+ "."
+ args.target_lang,
pre_gen + "/reference_file",
bpe_symbol=args.post_process,
target_prefix_frac=args.target_prefix_frac,
)
else:
bw_rescore_file = rescore_file
if args.source_prefix_frac is not None:
fw_rescore_file = source_prefix_frac_rescore_file
rerank_utils.write_reprocessed(
gen_output.source,
gen_output.hypo,
gen_output.target,
pre_gen
+ source_prefix_frac_rescore_file
+ "."
+ args.source_lang,
pre_gen
+ source_prefix_frac_rescore_file
+ "."
+ args.target_lang,
pre_gen + "/reference_file",
bpe_symbol=args.post_process,
source_prefix_frac=args.source_prefix_frac,
)
else:
fw_rescore_file = rescore_file
if args.right_to_left1 or args.right_to_left2:
rerank_utils.write_reprocessed(
gen_output.source,
gen_output.hypo,
gen_output.target,
pre_gen + "/right_to_left_rescore_data." + args.source_lang,
pre_gen + "/right_to_left_rescore_data." + args.target_lang,
pre_gen + "/right_to_left_reference_file",
right_to_left=True,
bpe_symbol=args.post_process,
)
print("STEP 3: binarize the translations")
if (
not args.right_to_left1
or args.score_model2 is not None
and not args.right_to_left2
or not rerank1_is_gen
):
if args.backwards1 or args.backwards2:
if args.backwards_score_dict_dir is not None:
bw_dict = args.backwards_score_dict_dir
else:
bw_dict = args.score_dict_dir
bw_preprocess_param = [
"--source-lang",
scorer1_src,
"--target-lang",
scorer1_tgt,
"--trainpref",
pre_gen + bw_rescore_file,
"--srcdict",
bw_dict + "/dict." + scorer1_src + ".txt",
"--tgtdict",
bw_dict + "/dict." + scorer1_tgt + ".txt",
"--destdir",
backwards_preprocessed_dir,
]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(bw_preprocess_param)
preprocess.main(input_args)
preprocess_param = [
"--source-lang",
scorer1_src,
"--target-lang",
scorer1_tgt,
"--trainpref",
pre_gen + fw_rescore_file,
"--srcdict",
args.score_dict_dir + "/dict." + scorer1_src + ".txt",
"--tgtdict",
args.score_dict_dir + "/dict." + scorer1_tgt + ".txt",
"--destdir",
left_to_right_preprocessed_dir,
]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(preprocess_param)
preprocess.main(input_args)
if args.right_to_left1 or args.right_to_left2:
preprocess_param = [
"--source-lang",
scorer1_src,
"--target-lang",
scorer1_tgt,
"--trainpref",
pre_gen + "/right_to_left_rescore_data",
"--srcdict",
args.score_dict_dir + "/dict." + scorer1_src + ".txt",
"--tgtdict",
args.score_dict_dir + "/dict." + scorer1_tgt + ".txt",
"--destdir",
right_to_left_preprocessed_dir,
]
preprocess_parser = options.get_preprocessing_parser()
input_args = preprocess_parser.parse_args(preprocess_param)
preprocess.main(input_args)
return gen_output
def cli_main():
parser = rerank_options.get_reranking_parser()
args = options.parse_args_and_arch(parser)
gen_and_reprocess_nbest(args)
if __name__ == "__main__":
cli_main()
|
COCO-LM/fairseq/examples/noisychannel/rerank_generate.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/noisychannel/rerank_generate.py",
"repo_id": "COCO-LM",
"token_count": 7803
}
| 167 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from itertools import zip_longest
def replace_oovs(source_in, target_in, vocabulary, source_out, target_out):
"""Replaces out-of-vocabulary words in source and target text with <unk-N>,
    where N is the position of the word in the source sequence.
"""
def format_unk(pos):
return "<unk-{}>".format(pos)
if target_in is None:
target_in = []
for seq_num, (source_seq, target_seq) in enumerate(
zip_longest(source_in, target_in)
):
source_seq_out = []
target_seq_out = []
word_to_pos = dict()
for position, token in enumerate(source_seq.strip().split()):
if token in vocabulary:
token_out = token
else:
if token in word_to_pos:
oov_pos = word_to_pos[token]
else:
word_to_pos[token] = position
oov_pos = position
token_out = format_unk(oov_pos)
source_seq_out.append(token_out)
source_out.write(" ".join(source_seq_out) + "\n")
if target_seq is not None:
for token in target_seq.strip().split():
if token in word_to_pos:
token_out = format_unk(word_to_pos[token])
else:
token_out = token
target_seq_out.append(token_out)
if target_out is not None:
target_out.write(" ".join(target_seq_out) + "\n")
def main():
parser = argparse.ArgumentParser(
description="Replaces out-of-vocabulary words in both source and target "
"sequences with tokens that indicate the position of the word "
"in the source sequence."
)
parser.add_argument(
"--source", type=str, help="text file with source sequences", required=True
)
parser.add_argument(
"--target", type=str, help="text file with target sequences", default=None
)
parser.add_argument("--vocab", type=str, help="vocabulary file", required=True)
parser.add_argument(
"--source-out",
type=str,
help="where to write source sequences with <unk-N> entries",
required=True,
)
parser.add_argument(
"--target-out",
type=str,
help="where to write target sequences with <unk-N> entries",
default=None,
)
args = parser.parse_args()
with open(args.vocab, encoding="utf-8") as vocab:
vocabulary = vocab.read().splitlines()
target_in = (
open(args.target, "r", encoding="utf-8") if args.target is not None else None
)
target_out = (
open(args.target_out, "w", encoding="utf-8")
if args.target_out is not None
else None
)
with open(args.source, "r", encoding="utf-8") as source_in, open(
args.source_out, "w", encoding="utf-8"
) as source_out:
replace_oovs(source_in, target_in, vocabulary, source_out, target_out)
if target_in is not None:
target_in.close()
if target_out is not None:
target_out.close()
if __name__ == "__main__":
main()
|
COCO-LM/fairseq/examples/pointer_generator/preprocess.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/pointer_generator/preprocess.py",
"repo_id": "COCO-LM",
"token_count": 1473
}
| 168 |
# Finetuning RoBERTa on Winograd Schema Challenge (WSC) data
The following instructions can be used to finetune RoBERTa on the WSC training
data provided by [SuperGLUE](https://super.gluebenchmark.com/).
Note that there is high variance in the results. For our GLUE/SuperGLUE
submission we swept over the learning rate (1e-5, 2e-5, 3e-5), batch size (16,
32, 64) and total number of updates (500, 1000, 2000, 3000), as well as the
random seed. Out of ~100 runs we chose the best 7 models and ensembled them.
**Approach:** The instructions below use a slightly different loss function than
what's described in the original RoBERTa arXiv paper. In particular,
[Kocijan et al. (2019)](https://arxiv.org/abs/1905.06290) introduce a margin
ranking loss between `(query, candidate)` pairs with tunable hyperparameters
alpha and beta. This is supported in our code as well with the `--wsc-alpha` and
`--wsc-beta` arguments. However, we achieved slightly better (and more robust)
results on the development set by instead using a single cross entropy loss term
over the log-probabilities for the query and all mined candidates. **The
candidates are mined using spaCy from each input sentence in isolation, so the
approach remains strictly pointwise.** This reduces the number of
hyperparameters and our best model achieved 92.3% development set accuracy,
compared to ~90% accuracy for the margin loss. Later versions of the RoBERTa
arXiv paper will describe this updated formulation.
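As an illustrative sketch only (the real logic lives in the `wsc` criterion), the
single cross-entropy term can be pictured as scoring the query span against all
mined candidate spans; `query_lprob` and `cand_lprobs` below are stand-ins for the
span log-probabilities produced by the model:
```python
import torch
import torch.nn.functional as F

def wsc_cross_entropy(query_lprob, cand_lprobs):
    # treat the query as the correct "class" among {query} + mined candidates
    scores = torch.cat([query_lprob.view(1), cand_lprobs.view(-1)])
    target = torch.tensor([0])  # index 0 is the query
    return F.cross_entropy(scores.unsqueeze(0), target)
```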
### 1) Download the WSC data from the SuperGLUE website:
```bash
wget https://dl.fbaipublicfiles.com/glue/superglue/data/v2/WSC.zip
unzip WSC.zip
# we also need to copy the RoBERTa dictionary into the same directory
wget -O WSC/dict.txt https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/dict.txt
```
### 2) Finetune over the provided training data:
```bash
TOTAL_NUM_UPDATES=2000 # Total number of training steps.
WARMUP_UPDATES=250 # Linearly increase LR over this many steps.
LR=2e-05 # Peak LR for polynomial LR scheduler.
MAX_SENTENCES=16 # Batch size per GPU.
SEED=1 # Random seed.
ROBERTA_PATH=/path/to/roberta/model.pt
# we use the --user-dir option to load the task and criterion
# from the examples/roberta/wsc directory:
FAIRSEQ_PATH=/path/to/fairseq
FAIRSEQ_USER_DIR=${FAIRSEQ_PATH}/examples/roberta/wsc
CUDA_VISIBLE_DEVICES=0,1,2,3 fairseq-train WSC/ \
--restore-file $ROBERTA_PATH \
--reset-optimizer --reset-dataloader --reset-meters \
--no-epoch-checkpoints --no-last-checkpoints --no-save-optimizer-state \
--best-checkpoint-metric accuracy --maximize-best-checkpoint-metric \
--valid-subset val \
--fp16 --ddp-backend legacy_ddp \
--user-dir $FAIRSEQ_USER_DIR \
--task wsc --criterion wsc --wsc-cross-entropy \
--arch roberta_large --bpe gpt2 --max-positions 512 \
--dropout 0.1 --attention-dropout 0.1 --weight-decay 0.01 \
--optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-06 \
--lr-scheduler polynomial_decay --lr $LR \
--warmup-updates $WARMUP_UPDATES --total-num-update $TOTAL_NUM_UPDATES \
--batch-size $MAX_SENTENCES \
--max-update $TOTAL_NUM_UPDATES \
--log-format simple --log-interval 100 \
--seed $SEED
```
The above command assumes training on 4 GPUs, but you can achieve the same
results on a single GPU by adding `--update-freq=4`.
### 3) Evaluate
```python
from fairseq.models.roberta import RobertaModel
from examples.roberta.wsc import wsc_utils # also loads WSC task and criterion
roberta = RobertaModel.from_pretrained('checkpoints', 'checkpoint_best.pt', 'WSC/')
roberta.cuda()
nsamples, ncorrect = 0, 0
for sentence, label in wsc_utils.jsonl_iterator('WSC/val.jsonl', eval=True):
pred = roberta.disambiguate_pronoun(sentence)
nsamples += 1
if pred == label:
ncorrect += 1
print('Accuracy: ' + str(ncorrect / float(nsamples)))
# Accuracy: 0.9230769230769231
```
## RoBERTa training on WinoGrande dataset
We also provide a `winogrande` task and criterion for finetuning on
[WinoGrande](https://mosaic.allenai.org/projects/winogrande)-like datasets,
where there are always exactly two candidates and one is correct.
This is a more efficient implementation for that special case.
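As a point of reference, here is a rough sketch of the margin objective selected by
the `--wsc-margin-alpha`/`--wsc-margin-beta` flags in the command below (illustrative
only, not the exact `winogrande` criterion code):
```python
def winogrande_margin_loss(query_lprob: float, cand_lprob: float,
                           alpha: float = 5.0, beta: float = 0.4) -> float:
    # push the correct candidate ("query") above the alternative by a margin beta
    return -query_lprob + alpha * max(0.0, cand_lprob - query_lprob + beta)
```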
```bash
TOTAL_NUM_UPDATES=23750 # Total number of training steps.
WARMUP_UPDATES=2375 # Linearly increase LR over this many steps.
LR=1e-05 # Peak LR for polynomial LR scheduler.
MAX_SENTENCES=32 # Batch size per GPU.
SEED=1 # Random seed.
ROBERTA_PATH=/path/to/roberta/model.pt
# we use the --user-dir option to load the task and criterion
# from the examples/roberta/wsc directory:
FAIRSEQ_PATH=/path/to/fairseq
FAIRSEQ_USER_DIR=${FAIRSEQ_PATH}/examples/roberta/wsc
cd fairseq
CUDA_VISIBLE_DEVICES=0 fairseq-train winogrande_1.0/ \
--restore-file $ROBERTA_PATH \
--reset-optimizer --reset-dataloader --reset-meters \
--no-epoch-checkpoints --no-last-checkpoints --no-save-optimizer-state \
--best-checkpoint-metric accuracy --maximize-best-checkpoint-metric \
--valid-subset val \
--fp16 --ddp-backend legacy_ddp \
--user-dir $FAIRSEQ_USER_DIR \
--task winogrande --criterion winogrande \
--wsc-margin-alpha 5.0 --wsc-margin-beta 0.4 \
--arch roberta_large --bpe gpt2 --max-positions 512 \
--dropout 0.1 --attention-dropout 0.1 --weight-decay 0.01 \
--optimizer adam --adam-betas '(0.9, 0.98)' --adam-eps 1e-06 \
--lr-scheduler polynomial_decay --lr $LR \
--warmup-updates $WARMUP_UPDATES --total-num-update $TOTAL_NUM_UPDATES \
--batch-size $MAX_SENTENCES \
--max-update $TOTAL_NUM_UPDATES \
--log-format simple --log-interval 100
```
|
COCO-LM/fairseq/examples/roberta/wsc/README.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/roberta/wsc/README.md",
"repo_id": "COCO-LM",
"token_count": 2057
}
| 169 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.modules import LayerNorm, TransformerDecoderLayer, TransformerEncoderLayer
from . import build_monotonic_attention
class TransformerMonotonicEncoderLayer(TransformerEncoderLayer):
def forward(self, x, encoder_padding_mask):
seq_len, _, _ = x.size()
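        # strictly upper-triangular -inf mask: each source position may only attend
        # to itself and earlier positions, as required for simultaneous translation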
attn_mask = x.new_ones([seq_len, seq_len]).triu(1)
attn_mask = attn_mask.masked_fill(attn_mask.bool(), float("-inf"))
return super().forward(x, encoder_padding_mask, attn_mask)
class TransformerMonotonicDecoderLayer(TransformerDecoderLayer):
def __init__(
self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False
):
super().__init__(
args,
no_encoder_attn=True,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
)
assert args.simul_type is not None, "A --simul-type is needed."
self.encoder_attn = build_monotonic_attention(args)
self.encoder_attn_layer_norm = LayerNorm(
self.embed_dim, export=getattr(args, "char_inputs", False)
)
def get_head_steps(self, incremental_state):
return self.encoder_attn._get_monotonic_buffer(incremental_state).get(
"head_step"
)
def prune_incremental_state(self, incremental_state):
def prune(module):
input_buffer = module._get_input_buffer(incremental_state)
for key in ["prev_key", "prev_value"]:
if input_buffer[key].size(2) > 1:
input_buffer[key] = input_buffer[key][:, :, :-1, :]
else:
input_buffer = {}
break
module._set_input_buffer(incremental_state, input_buffer)
prune(self.self_attn)
def get_steps(self, incremental_state):
return self.encoder_attn._get_monotonic_buffer(incremental_state).get("step", 0)
|
COCO-LM/fairseq/examples/simultaneous_translation/modules/monotonic_transformer_layer.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/simultaneous_translation/modules/monotonic_transformer_layer.py",
"repo_id": "COCO-LM",
"token_count": 932
}
| 170 |
#!/usr/bin/env bash
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Prepare librispeech dataset
base_url=www.openslr.org/resources/12
train_dir=train_960
if [ "$#" -ne 2 ]; then
echo "Usage: $0 <download_dir> <out_dir>"
echo "e.g.: $0 /tmp/librispeech_raw/ ~/data/librispeech_final"
exit 1
fi
download_dir=${1%/}
out_dir=${2%/}
fairseq_root=~/fairseq-py/
mkdir -p ${out_dir}
cd ${out_dir} || exit
nbpe=5000
bpemode=unigram
if [ ! -d "$fairseq_root" ]; then
echo "$0: Please set correct fairseq_root"
exit 1
fi
echo "Data Download"
for part in dev-clean test-clean dev-other test-other train-clean-100 train-clean-360 train-other-500; do
url=$base_url/$part.tar.gz
if ! wget -P $download_dir $url; then
echo "$0: wget failed for $url"
exit 1
fi
if ! tar -C $download_dir -xvzf $download_dir/$part.tar.gz; then
echo "$0: error un-tarring archive $download_dir/$part.tar.gz"
exit 1
fi
done
echo "Merge all train packs into one"
mkdir -p ${download_dir}/LibriSpeech/${train_dir}/
for part in train-clean-100 train-clean-360 train-other-500; do
mv ${download_dir}/LibriSpeech/${part}/* $download_dir/LibriSpeech/${train_dir}/
done
echo "Merge train text"
find ${download_dir}/LibriSpeech/${train_dir}/ -name '*.txt' -exec cat {} \; >> ${download_dir}/LibriSpeech/${train_dir}/text
# Use combined dev-clean and dev-other as validation set
find ${download_dir}/LibriSpeech/dev-clean/ ${download_dir}/LibriSpeech/dev-other/ -name '*.txt' -exec cat {} \; >> ${download_dir}/LibriSpeech/valid_text
find ${download_dir}/LibriSpeech/test-clean/ -name '*.txt' -exec cat {} \; >> ${download_dir}/LibriSpeech/test-clean/text
find ${download_dir}/LibriSpeech/test-other/ -name '*.txt' -exec cat {} \; >> ${download_dir}/LibriSpeech/test-other/text
dict=data/lang_char/${train_dir}_${bpemode}${nbpe}_units.txt
encoded=data/lang_char/${train_dir}_${bpemode}${nbpe}_encoded.txt
fairseq_dict=data/lang_char/${train_dir}_${bpemode}${nbpe}_fairseq_dict.txt
bpemodel=data/lang_char/${train_dir}_${bpemode}${nbpe}
echo "dictionary: ${dict}"
echo "Dictionary preparation"
mkdir -p data/lang_char/
echo "<unk> 3" > ${dict}
echo "</s> 2" >> ${dict}
echo "<pad> 1" >> ${dict}
cut -f 2- -d" " ${download_dir}/LibriSpeech/${train_dir}/text > data/lang_char/input.txt
spm_train --input=data/lang_char/input.txt --vocab_size=${nbpe} --model_type=${bpemode} --model_prefix=${bpemodel} --input_sentence_size=100000000 --unk_id=3 --eos_id=2 --pad_id=1 --bos_id=-1 --character_coverage=1
spm_encode --model=${bpemodel}.model --output_format=piece < data/lang_char/input.txt > ${encoded}
cat ${encoded} | tr ' ' '\n' | sort | uniq | awk '{print $0 " " NR+3}' >> ${dict}
cat ${encoded} | tr ' ' '\n' | sort | uniq -c | awk '{print $2 " " $1}' > ${fairseq_dict}
wc -l ${dict}
echo "Prepare train and test jsons"
for part in train_960 test-other test-clean; do
python ${fairseq_root}/examples/speech_recognition/datasets/asr_prep_json.py --audio-dirs ${download_dir}/LibriSpeech/${part} --labels ${download_dir}/LibriSpeech/${part}/text --spm-model ${bpemodel}.model --audio-format flac --dictionary ${fairseq_dict} --output ${part}.json
done
# fairseq expects to find train.json and valid.json during training
mv train_960.json train.json
echo "Prepare valid json"
python ${fairseq_root}/examples/speech_recognition/datasets/asr_prep_json.py --audio-dirs ${download_dir}/LibriSpeech/dev-clean ${download_dir}/LibriSpeech/dev-other --labels ${download_dir}/LibriSpeech/valid_text --spm-model ${bpemodel}.model --audio-format flac --dictionary ${fairseq_dict} --output valid.json
cp ${fairseq_dict} ./dict.txt
cp ${bpemodel}.model ./spm.model
|
COCO-LM/fairseq/examples/speech_recognition/datasets/prepare-librispeech.sh/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_recognition/datasets/prepare-librispeech.sh",
"repo_id": "COCO-LM",
"token_count": 1499
}
| 171 |
[[Back]](..)
# S2T Example: ST on CoVoST
We replicate the experiments in
[CoVoST 2 and Massively Multilingual Speech-to-Text Translation (Wang et al., 2020)](https://arxiv.org/abs/2007.10310).
## Data Preparation
[Download](https://commonvoice.mozilla.org/en/datasets) and unpack Common Voice v4 to a path
`${COVOST_ROOT}/${SOURCE_LANG_ID}`, then preprocess it with
```bash
# additional Python packages for S2T data processing/model training
pip install pandas torchaudio sentencepiece
# En ASR
python examples/speech_to_text/prep_covost_data.py \
--data-root ${COVOST_ROOT} --vocab-type char --src-lang en
# ST
python examples/speech_to_text/prep_covost_data.py \
--data-root ${COVOST_ROOT} --vocab-type char \
--src-lang fr --tgt-lang en
```
The generated files (manifest, features, vocabulary and data configuration) will be added to
`${COVOST_ROOT}/${SOURCE_LANG_ID}`.
Download our vocabulary files if you want to use our pre-trained models:
- ASR: [En](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_asr_vocab_char.zip)
- ST: [Fr-En](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_fr_en_st_vocab_char.zip), [De-En](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_de_en_st_vocab_char.zip), [Es-En](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_es_en_st_vocab_char.zip), [Ca-En](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_ca_en_st_vocab_char.zip), [En-De](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_de_st_vocab_char.zip), [En-Ca](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_ca_st_vocab_char.zip), [En-Fa](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_fa_st_vocab_char.zip), [En-Et](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_et_st_vocab_char.zip)
## ASR
#### Training
We train an En ASR model for encoder pre-training of all ST models:
```bash
fairseq-train ${COVOST_ROOT}/en \
--config-yaml config_asr_en.yaml --train-subset train_asr_en --valid-subset dev_asr_en \
--save-dir ${ASR_SAVE_DIR} --num-workers 4 --max-tokens 40000 --max-update 60000 \
--task speech_to_text --criterion label_smoothed_cross_entropy --report-accuracy \
--arch s2t_transformer_s --optimizer adam --lr 2e-3 --lr-scheduler inverse_sqrt \
--warmup-updates 10000 --clip-norm 10.0 --seed 1 --update-freq 8
```
where `ASR_SAVE_DIR` is the checkpoint root path. We set `--update-freq 8` to simulate 8 GPUs with 1 GPU.
You may want to update it accordingly when using more than 1 GPU.
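As a quick, illustrative sanity check of what `--update-freq 8` buys you (this
calculation is not part of the recipe itself):
```python
# tokens consumed per optimizer step ~= max_tokens * num_gpus * update_freq
max_tokens, num_gpus, update_freq = 40000, 1, 8
print(max_tokens * num_gpus * update_freq)  # 320000, same budget as 8 GPUs with update_freq=1
```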
#### Inference & Evaluation
```bash
CHECKPOINT_FILENAME=avg_last_10_checkpoint.pt
python scripts/average_checkpoints.py \
--inputs ${ASR_SAVE_DIR} --num-epoch-checkpoints 10 \
--output "${ASR_SAVE_DIR}/${CHECKPOINT_FILENAME}"
fairseq-generate ${COVOST_ROOT}/en \
--config-yaml config_asr_en.yaml --gen-subset test_asr_en --task speech_to_text \
--path ${ASR_SAVE_DIR}/${CHECKPOINT_FILENAME} --max-tokens 50000 --beam 5 \
--scoring wer --wer-tokenizer 13a --wer-lowercase --wer-remove-punct
```
#### Results (WER)
| --arch | Params | En | Model |
|---|---|---|---|
| s2t_transformer_s | 31M | 25.6 | [Download](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_asr_transformer_s.pt) |
## ST
#### Training
Fr-En as example:
```bash
fairseq-train ${COVOST_ROOT}/fr \
--config-yaml config_st_fr_en.yaml --train-subset train_st_fr_en --valid-subset dev_st_fr_en \
--save-dir ${ST_SAVE_DIR} --num-workers 4 --max-tokens 40000 --max-update 60000 \
--task speech_to_text --criterion label_smoothed_cross_entropy --report-accuracy \
--arch s2t_transformer_s --optimizer adam --lr 2e-3 --lr-scheduler inverse_sqrt \
--warmup-updates 10000 --clip-norm 10.0 --seed 1 --update-freq 8 \
--load-pretrained-encoder-from ${ASR_SAVE_DIR}/${CHECKPOINT_FILENAME}
```
where `ST_SAVE_DIR` is the checkpoint root path. The ST encoder is pre-trained by En ASR for faster training and better
performance: `--load-pretrained-encoder-from <ASR checkpoint path>`. We set `--update-freq 8` to simulate 8 GPUs with 1 GPU.
You may want to update it accordingly when using more than 1 GPU.
#### Inference & Evaluation
Average the last 10 checkpoints and evaluate on test split:
```bash
CHECKPOINT_FILENAME=avg_last_10_checkpoint.pt
python scripts/average_checkpoints.py \
--inputs ${ST_SAVE_DIR} --num-epoch-checkpoints 10 \
--output "${ST_SAVE_DIR}/${CHECKPOINT_FILENAME}"
fairseq-generate ${COVOST_ROOT}/fr \
--config-yaml config_st_fr_en.yaml --gen-subset test_st_fr_en --task speech_to_text \
--path ${ST_SAVE_DIR}/${CHECKPOINT_FILENAME} \
--max-tokens 50000 --beam 5 --scoring sacrebleu
```
## Interactive Decoding
Launch the interactive console via
```bash
fairseq-interactive ${COVOST_ROOT}/fr --config-yaml config_st_fr_en.yaml \
--task speech_to_text --path ${SAVE_DIR}/${CHECKPOINT_FILENAME} \
--max-tokens 50000 --beam 5
```
Type in WAV/FLAC/OGG audio paths (one per line) after the prompt.
#### Results (BLEU)
| --arch | Params | Fr-En | De-En | Es-En | Ca-En | En-De | En-Ca | En-Fa | En-Et | Model |
|---|---|---|---|---|---|---|---|---|---|---|
| s2t_transformer_s | 31M | [26.3](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_fr_en_st_transformer_s.pt) | [17.1](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_de_en_st_transformer_s.pt) | [23.0](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_es_en_st_transformer_s.pt) | [18.8](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_ca_en_st_transformer_s.pt) | [16.3](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_de_st_transformer_s.pt) | [21.8](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_ca_st_transformer_s.pt) | [13.0](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_fa_st_transformer_s.pt) | [13.2](https://dl.fbaipublicfiles.com/fairseq/s2t/covost2_en_et_st_transformer_s.pt) | (<-Download) |
[[Back]](..)
|
COCO-LM/fairseq/examples/speech_to_text/docs/covost_example.md/0
|
{
"file_path": "COCO-LM/fairseq/examples/speech_to_text/docs/covost_example.md",
"repo_id": "COCO-LM",
"token_count": 2329
}
| 172 |
# @package _group_
common:
fp16: true
log_format: json
log_interval: 200
checkpoint:
no_epoch_checkpoints: true
best_checkpoint_metric: wer
task:
_name: audio_pretraining
data: ???
normalize: false
labels: ltr
dataset:
num_workers: 6
max_tokens: 3200000
skip_invalid_size_inputs_valid_test: true
valid_subset: dev_other
distributed_training:
ddp_backend: legacy_ddp
distributed_world_size: 2
criterion:
_name: ctc
zero_infinity: true
optimization:
max_update: 80000
lr: [0.00003]
sentence_avg: true
update_freq: [4]
optimizer:
_name: adam
adam_betas: (0.9,0.98)
adam_eps: 1e-08
lr_scheduler:
_name: tri_stage
phase_ratio: [0.1, 0.4, 0.5]
final_lr_scale: 0.05
model:
_name: wav2vec_ctc
w2v_path: ???
apply_mask: true
mask_prob: 0.65
mask_channel_prob: 0.5
mask_channel_length: 64
layerdrop: 0.1
activation_dropout: 0.1
feature_grad_mult: 0.0
freeze_finetune_updates: 0
|
COCO-LM/fairseq/examples/wav2vec/config/finetuning/base_100h.yaml/0
|
{
"file_path": "COCO-LM/fairseq/examples/wav2vec/config/finetuning/base_100h.yaml",
"repo_id": "COCO-LM",
"token_count": 419
}
| 173 |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a flashlight (previously called wav2letter++) dataset
"""
import argparse
import glob
import os
from shutil import copy
import h5py
import numpy as np
import soundfile as sf
import torch
import tqdm
import fairseq
from torch import nn
def read_audio(fname):
""" Load an audio file and return PCM along with the sample rate """
wav, sr = sf.read(fname)
assert sr == 16e3
return wav, 16e3
class PretrainedWav2VecModel(nn.Module):
def __init__(self, fname):
super().__init__()
model, cfg, task = fairseq.checkpoint_utils.load_model_ensemble_and_task([fname])
model = model[0]
model.eval()
self.model = model
def forward(self, x):
with torch.no_grad():
z = self.model.feature_extractor(x)
if isinstance(z, tuple):
z = z[0]
c = self.model.feature_aggregator(z)
return z, c
class EmbeddingWriterConfig(argparse.ArgumentParser):
def __init__(self):
super().__init__("Pre-compute embeddings for flashlight datasets")
kwargs = {"action": "store", "type": str, "required": True}
self.add_argument("--input", "-i", help="Input Directory", **kwargs)
self.add_argument("--output", "-o", help="Output Directory", **kwargs)
self.add_argument("--model", help="Path to model checkpoint", **kwargs)
self.add_argument("--split", help="Dataset Splits", nargs="+", **kwargs)
self.add_argument(
"--ext", default="wav", required=False, help="Audio file extension"
)
self.add_argument(
"--no-copy-labels",
action="store_true",
help="Do not copy label files. Useful for large datasets, use --targetdir in flashlight then.",
)
self.add_argument(
"--use-feat",
action="store_true",
help="Use the feature vector ('z') instead of context vector ('c') for features",
)
self.add_argument("--gpu", help="GPU to use", default=0, type=int)
class Prediction:
""" Lightweight wrapper around a fairspeech embedding model """
def __init__(self, fname, gpu=0):
self.gpu = gpu
self.model = PretrainedWav2VecModel(fname).cuda(gpu)
def __call__(self, x):
x = torch.from_numpy(x).float().cuda(self.gpu)
with torch.no_grad():
z, c = self.model(x.unsqueeze(0))
return z.squeeze(0).cpu().numpy(), c.squeeze(0).cpu().numpy()
class H5Writer:
""" Write features as hdf5 file in flashlight compatible format """
def __init__(self, fname):
self.fname = fname
os.makedirs(os.path.dirname(self.fname), exist_ok=True)
def write(self, data):
channel, T = data.shape
with h5py.File(self.fname, "w") as out_ds:
data = data.T.flatten()
out_ds["features"] = data
out_ds["info"] = np.array([16e3 // 160, T, channel])
class EmbeddingDatasetWriter(object):
"""Given a model and a flashlight dataset, pre-compute and store embeddings
Args:
input_root, str :
Path to the flashlight dataset
output_root, str :
Desired output directory. Will be created if non-existent
split, str :
Dataset split
"""
def __init__(
self,
input_root,
output_root,
split,
model_fname,
extension="wav",
gpu=0,
verbose=False,
use_feat=False,
):
assert os.path.exists(model_fname)
self.model_fname = model_fname
self.model = Prediction(self.model_fname, gpu)
self.input_root = input_root
self.output_root = output_root
self.split = split
self.verbose = verbose
self.extension = extension
self.use_feat = use_feat
assert os.path.exists(self.input_path), "Input path '{}' does not exist".format(
self.input_path
)
def _progress(self, iterable, **kwargs):
if self.verbose:
return tqdm.tqdm(iterable, **kwargs)
return iterable
def require_output_path(self, fname=None):
path = self.get_output_path(fname)
os.makedirs(path, exist_ok=True)
@property
def input_path(self):
return self.get_input_path()
@property
def output_path(self):
return self.get_output_path()
def get_input_path(self, fname=None):
if fname is None:
return os.path.join(self.input_root, self.split)
return os.path.join(self.get_input_path(), fname)
def get_output_path(self, fname=None):
if fname is None:
return os.path.join(self.output_root, self.split)
return os.path.join(self.get_output_path(), fname)
def copy_labels(self):
self.require_output_path()
labels = list(
filter(
lambda x: self.extension not in x, glob.glob(self.get_input_path("*"))
)
)
for fname in tqdm.tqdm(labels):
copy(fname, self.output_path)
@property
def input_fnames(self):
return sorted(glob.glob(self.get_input_path("*.{}".format(self.extension))))
def __len__(self):
return len(self.input_fnames)
def write_features(self):
paths = self.input_fnames
fnames_context = map(
lambda x: os.path.join(
self.output_path, x.replace("." + self.extension, ".h5context")
),
map(os.path.basename, paths),
)
for name, target_fname in self._progress(
zip(paths, fnames_context), total=len(self)
):
wav, sr = read_audio(name)
z, c = self.model(wav)
feat = z if self.use_feat else c
writer = H5Writer(target_fname)
writer.write(feat)
def __repr__(self):
return "EmbeddingDatasetWriter ({n_files} files)\n\tinput:\t{input_root}\n\toutput:\t{output_root}\n\tsplit:\t{split})".format(
n_files=len(self), **self.__dict__
)
if __name__ == "__main__":
args = EmbeddingWriterConfig().parse_args()
for split in args.split:
writer = EmbeddingDatasetWriter(
input_root=args.input,
output_root=args.output,
split=split,
model_fname=args.model,
gpu=args.gpu,
extension=args.ext,
use_feat=args.use_feat,
)
print(writer)
writer.require_output_path()
print("Writing Features...")
writer.write_features()
print("Done.")
if not args.no_copy_labels:
print("Copying label data...")
writer.copy_labels()
print("Done.")
|
COCO-LM/fairseq/examples/wav2vec/wav2vec_featurize.py/0
|
{
"file_path": "COCO-LM/fairseq/examples/wav2vec/wav2vec_featurize.py",
"repo_id": "COCO-LM",
"token_count": 3207
}
| 174 |
/**
* Copyright 2017-present, Facebook, Inc.
* All rights reserved.
*
* This source code is licensed under the license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <Python.h>
static PyMethodDef method_def[] = {
{NULL, NULL, 0, NULL}
};
static struct PyModuleDef module_def = {
PyModuleDef_HEAD_INIT,
"libbleu", /* name of module */
NULL, /* module documentation, may be NULL */
-1, /* size of per-interpreter state of the module,
or -1 if the module keeps state in global variables. */
method_def
};
#if PY_MAJOR_VERSION == 2
PyMODINIT_FUNC init_libbleu()
#else
PyMODINIT_FUNC PyInit_libbleu()
#endif
{
PyObject *m = PyModule_Create(&module_def);
if (!m) {
return NULL;
}
return m;
}
|
COCO-LM/fairseq/fairseq/clib/libbleu/module.cpp/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/clib/libbleu/module.cpp",
"repo_id": "COCO-LM",
"token_count": 293
}
| 175 |
# @package _group_
activation: gelu
vq_type: gumbel
vq_depth: 2
combine_groups: true
|
COCO-LM/fairseq/fairseq/config/model/wav2vec/vq_wav2vec_gumbel.yaml/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/config/model/wav2vec/vq_wav2vec_gumbel.yaml",
"repo_id": "COCO-LM",
"token_count": 35
}
| 176 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
import scipy.stats as stats
import numpy as np
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("sentence_prediction")
class SentencePredictionCriterion(FairseqCriterion):
def __init__(self, task, classification_head_name, regression_target):
super().__init__(task)
self.classification_head_name = classification_head_name
self.regression_target = regression_target
@staticmethod
def add_args(parser):
# fmt: off
parser.add_argument('--classification-head-name',
default='sentence_classification_head',
help='name of the classification head to use')
# fmt: on
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert (
hasattr(model, "classification_heads")
and self.classification_head_name in model.classification_heads
), "model must provide sentence classification head for --criterion=sentence_prediction"
logits, _ = model(
**sample["net_input"],
features_only=True,
classification_head_name=self.classification_head_name,
)
targets = model.get_targets(sample, [logits]).view(-1)
sample_size = targets.numel()
num_class = logits.size(1)
if not self.regression_target:
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float32)
loss = F.nll_loss(lprobs, targets, reduction="sum")
if num_class == 2:
tp = ((logits[:, 0] <= logits[:, 1]) & (targets == 1)).long().sum()
fp = ((logits[:, 0] <= logits[:, 1]) & (targets == 0)).long().sum()
fn = ((logits[:, 0] > logits[:, 1]) & (targets == 1)).long().sum()
tn = ((logits[:, 0] > logits[:, 1]) & (targets == 0)).long().sum()
assert (tp + fp + tn + fn) == targets.size(0), "invalid size"
else:
logits = logits.view(-1).float()
targets = targets.float()
loss = F.mse_loss(logits, targets, reduction="sum")
logging_output = {
"loss": loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample_size,
"sample_size": sample_size,
}
if not self.regression_target:
preds = logits.argmax(dim=1)
logging_output["ncorrect"] = (preds == targets).sum()
if num_class == 2:
logging_output.update(tp=utils.item(tp.data) if reduce else tp.data)
logging_output.update(fp=utils.item(fp.data) if reduce else fp.data)
logging_output.update(fn=utils.item(fn.data) if reduce else fn.data)
logging_output.update(tn=utils.item(tn.data) if reduce else tn.data)
else:
logging_output.update(x=logits.detach().cpu().numpy())
logging_output.update(y=targets.detach().cpu().numpy())
return loss, sample_size, logging_output
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar("loss", loss_sum / sample_size / math.log(2), sample_size, round=3)
if sample_size != ntokens:
metrics.log_scalar("nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3)
if len(logging_outputs) > 0 and "ncorrect" in logging_outputs[0]:
ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs)
metrics.log_scalar("accuracy", float(ncorrect) / nsentences, sample_size, round=6)
tp_sum = float(sum(log.get("tp", 0) for log in logging_outputs))
fp_sum = float(sum(log.get("fp", 0) for log in logging_outputs))
fn_sum = float(sum(log.get("fn", 0) for log in logging_outputs))
tn_sum = float(sum(log.get("tn", 0) for log in logging_outputs))
if tp_sum + fp_sum + fn_sum + tn_sum > 0:
assert tp_sum + fp_sum + fn_sum + tn_sum == sample_size, "invalid size when aggregating"
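            # accuracy, F1 = 2*TP / (2*TP + FP + FN), and MCC (Matthews correlation
            # coefficient) are all derived from the aggregated confusion-matrix counts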
acc = (tp_sum + tn_sum) / sample_size
tmp = 2 * tp_sum + fp_sum + fn_sum
f1 = (2 * tp_sum) / tmp if tmp else 0
tmp = (tp_sum + fp_sum) * (tp_sum + fn_sum) * (tn_sum + fp_sum) * (tn_sum + fn_sum)
mcc = (tp_sum * tn_sum - fp_sum * fn_sum) / (tmp ** 0.5) if tmp else 0
metrics.log_scalar("sample_size", sample_size)
metrics.log_scalar("f1", f1)
metrics.log_scalar("mcc", mcc)
metrics.log_scalar("acc_f1", 0.5 * (acc + f1))
if len(logging_outputs) > 0 and "x" in logging_outputs[0]:
x = np.concatenate([log.get("x", np.array([])) for log in logging_outputs])
y = np.concatenate([log.get("y", np.array([])) for log in logging_outputs])
pearson = stats.pearsonr(x, y)[0]
spearman = stats.spearmanr(x, y)[0]
metrics.log_scalar("pearson", pearson)
metrics.log_scalar("spearman", spearman)
metrics.log_scalar("pearson_spearman", 0.5 * (pearson + spearman))
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return True
|
COCO-LM/fairseq/fairseq/criterions/sentence_prediction.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/criterions/sentence_prediction.py",
"repo_id": "COCO-LM",
"token_count": 2879
}
| 177 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torch.utils.data.dataloader import default_collate
from . import FairseqDataset
class BaseWrapperDataset(FairseqDataset):
def __init__(self, dataset):
super().__init__()
self.dataset = dataset
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
def collater(self, samples):
if hasattr(self.dataset, "collater"):
return self.dataset.collater(samples)
else:
return default_collate(samples)
@property
def sizes(self):
return self.dataset.sizes
def num_tokens(self, index):
return self.dataset.num_tokens(index)
def size(self, index):
return self.dataset.size(index)
def ordered_indices(self):
return self.dataset.ordered_indices()
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def attr(self, attr: str, index: int):
return self.dataset.attr(attr, index)
def prefetch(self, indices):
self.dataset.prefetch(indices)
def get_batch_shapes(self):
return self.dataset.get_batch_shapes()
def batch_by_size(
self,
indices,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
):
return self.dataset.batch_by_size(
indices,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
def filter_indices_by_size(self, indices, max_sizes):
return self.dataset.filter_indices_by_size(indices, max_sizes)
@property
def can_reuse_epoch_itr_across_epochs(self):
return self.dataset.can_reuse_epoch_itr_across_epochs
def set_epoch(self, epoch):
super().set_epoch(epoch)
if hasattr(self.dataset, "set_epoch"):
self.dataset.set_epoch(epoch)
|
COCO-LM/fairseq/fairseq/data/base_wrapper_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/base_wrapper_dataset.py",
"repo_id": "COCO-LM",
"token_count": 972
}
| 178 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from fairseq import file_utils
from fairseq.data.encoders import register_bpe
from fairseq.dataclass import FairseqDataclass
from .gpt2_bpe_utils import get_encoder
DEFAULT_ENCODER_JSON = "https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json"
DEFAULT_VOCAB_BPE = "https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe"
@dataclass
class GPT2BPEConfig(FairseqDataclass):
gpt2_encoder_json: str = field(
default=DEFAULT_ENCODER_JSON, metadata={"help": "path to encoder.json"}
)
gpt2_vocab_bpe: str = field(
default=DEFAULT_VOCAB_BPE, metadata={"help": "path to vocab.bpe"}
)
@register_bpe("gpt2", dataclass=GPT2BPEConfig)
class GPT2BPE(object):
def __init__(self, cfg):
encoder_json = file_utils.cached_path(cfg.gpt2_encoder_json)
vocab_bpe = file_utils.cached_path(cfg.gpt2_vocab_bpe)
self.bpe = get_encoder(encoder_json, vocab_bpe)
def encode(self, x: str) -> str:
return " ".join(map(str, self.bpe.encode(x)))
def decode(self, x: str) -> str:
return self.bpe.decode(
[int(tok) if tok not in {"<unk>", "<mask>"} else tok for tok in x.split()]
)
def is_beginning_of_word(self, x: str) -> bool:
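        # GPT-2 BPE folds the preceding space into word-initial tokens, so a decoded
        # piece that starts with " " marks the beginning of a new word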
return self.decode(x).startswith(" ")
|
COCO-LM/fairseq/fairseq/data/encoders/gpt2_bpe.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/encoders/gpt2_bpe.py",
"repo_id": "COCO-LM",
"token_count": 642
}
| 179 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .block_pair_dataset import BlockPairDataset
from .masked_lm_dataset import MaskedLMDataset
from .masked_lm_dictionary import BertDictionary, MaskedLMDictionary
__all__ = [
"BertDictionary",
"BlockPairDataset",
"MaskedLMDataset",
"MaskedLMDictionary",
]
|
COCO-LM/fairseq/fairseq/data/legacy/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/legacy/__init__.py",
"repo_id": "COCO-LM",
"token_count": 158
}
| 180 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import List
logger = logging.getLogger(__name__)
def uniform(dataset_sizes: List[int]):
return [1.0] * len(dataset_sizes)
def temperature_sampling(dataset_sizes, temp):
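    # e.g. dataset_sizes=[100, 900], temp=5 -> unnormalized weights ~[0.63, 0.98]
    # (vs. raw ratios [0.1, 0.9]): higher temperature flattens sampling toward uniform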
total_size = sum(dataset_sizes)
return [(size / total_size) ** (1.0 / temp) for size in dataset_sizes]
def make_temperature_sampling(temp=1.0):
def sampling_func(dataset_sizes):
return temperature_sampling(dataset_sizes, temp)
return sampling_func
def make_ratio_sampling(ratios):
def sampling_func(dataset_sizes):
return ratios
return sampling_func
class SamplingMethod:
@staticmethod
def add_arguments(parser):
parser.add_argument(
"--sampling-method",
choices=[
"uniform",
"temperature",
"concat",
"RoundRobin",
],
type=str,
default="concat",
help="The method to sample data per language pairs",
)
parser.add_argument(
"--sampling-temperature",
default=1.5,
type=float,
help="only work with --sampling-method temperature",
)
@staticmethod
def build_sampler(args, task):
return SamplingMethod(args, task)
def __init__(self, args, task):
self.args = args
self.task = task
def is_adaptive(self):
return False
def sampling_method_selector(self):
args = self.args
logger.info(f"selected sampler: {args.sampling_method}")
if args.sampling_method == "uniform":
return uniform
elif args.sampling_method == "temperature" or self.is_adaptive():
return make_temperature_sampling(float(args.sampling_temperature))
else:
            # default to concatenating all datasets together
return None
|
COCO-LM/fairseq/fairseq/data/multilingual/sampling_method.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/multilingual/sampling_method.py",
"repo_id": "COCO-LM",
"token_count": 905
}
| 181 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from . import BaseWrapperDataset
class SortDataset(BaseWrapperDataset):
def __init__(self, dataset, sort_order):
super().__init__(dataset)
if not isinstance(sort_order, (list, tuple)):
sort_order = [sort_order]
self.sort_order = sort_order
assert all(len(so) == len(dataset) for so in sort_order)
def ordered_indices(self):
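        # np.lexsort uses the last array in sort_order as the primary sort key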
return np.lexsort(self.sort_order)
|
COCO-LM/fairseq/fairseq/data/sort_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/data/sort_dataset.py",
"repo_id": "COCO-LM",
"token_count": 234
}
| 182 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
import logging
from hydra.core.config_store import ConfigStore
from fairseq.dataclass.configs import FairseqConfig
from omegaconf import DictConfig, OmegaConf
logger = logging.getLogger(__name__)
def hydra_init(cfg_name="config") -> None:
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=FairseqConfig)
for k in FairseqConfig.__dataclass_fields__:
v = FairseqConfig.__dataclass_fields__[k].default
try:
cs.store(name=k, node=v)
except BaseException:
logger.error(f"{k} - {v}")
raise
def add_defaults(cfg: DictConfig) -> None:
"""This function adds default values that are stored in dataclasses that hydra doesn't know about """
from fairseq.registry import REGISTRIES
from fairseq.tasks import TASK_DATACLASS_REGISTRY
from fairseq.models import ARCH_MODEL_NAME_REGISTRY, MODEL_DATACLASS_REGISTRY
from fairseq.dataclass.utils import merge_with_parent
from typing import Any
OmegaConf.set_struct(cfg, False)
for k, v in FairseqConfig.__dataclass_fields__.items():
field_cfg = cfg.get(k)
if field_cfg is not None and v.type == Any:
dc = None
if isinstance(field_cfg, str):
field_cfg = DictConfig({"_name": field_cfg})
field_cfg.__dict__["_parent"] = field_cfg.__dict__["_parent"]
name = field_cfg.get("_name")
if k == "task":
dc = TASK_DATACLASS_REGISTRY.get(name)
elif k == "model":
name = ARCH_MODEL_NAME_REGISTRY.get(name, name)
dc = MODEL_DATACLASS_REGISTRY.get(name)
elif k in REGISTRIES:
dc = REGISTRIES[k]["dataclass_registry"].get(name)
if dc is not None:
cfg[k] = merge_with_parent(dc, field_cfg)
|
COCO-LM/fairseq/fairseq/dataclass/initialize.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/dataclass/initialize.py",
"repo_id": "COCO-LM",
"token_count": 894
}
| 183 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
A standalone module for aggregating metrics.
Metrics can be logged from anywhere using the `log_*` functions defined
in this module. The logged values will be aggregated dynamically based
on the aggregation context in which the logging occurs. See the
:func:`aggregate` context manager for more details.
"""
import contextlib
import time
import uuid
from collections import OrderedDict, defaultdict
from typing import Callable, Dict, List, Optional
from .meters import *
# Aggregation contexts are considered "active" when inside the scope
# created by the :func:`aggregate` context manager.
_aggregators = OrderedDict()
_active_aggregators = OrderedDict()
_active_aggregators_cnt = defaultdict(lambda: 0)
def reset() -> None:
"""Reset all metrics aggregators."""
_aggregators.clear()
_active_aggregators.clear()
_active_aggregators_cnt.clear()
# The "default" aggregator observes all logged values.
_aggregators["default"] = MetersDict()
_active_aggregators["default"] = _aggregators["default"]
_active_aggregators_cnt["default"] = 1
reset()
@contextlib.contextmanager
def aggregate(name: Optional[str] = None, new_root: bool = False):
"""Context manager to aggregate metrics under a given name.
Aggregations can be nested. If *new_root* is ``False``, then logged
metrics will be recorded along the entire stack of nested
aggregators, including a global "default" aggregator. If *new_root*
is ``True``, then this aggregator will be the root of a new
aggregation stack, thus bypassing any parent aggregators.
Note that aggregation contexts are uniquely identified by their
*name* (e.g., train, valid). Creating a context with an existing
name will reuse the corresponding :class:`MetersDict` instance.
If no name is given, then a temporary aggregator will be created.
Usage::
with metrics.aggregate("train"):
for step, batch in enumerate(epoch):
with metrics.aggregate("train_inner") as agg:
metrics.log_scalar("loss", get_loss(batch))
if step % log_interval == 0:
print(agg.get_smoothed_value("loss"))
agg.reset()
print(metrics.get_smoothed_values("train")["loss"])
Args:
name (str): name of the aggregation. Defaults to a
random/temporary name if not given explicitly.
new_root (bool): make this aggregation the root of a new
aggregation stack.
"""
if name is None:
# generate a temporary name
name = str(uuid.uuid4())
assert name not in _aggregators
agg = MetersDict()
else:
assert name != "default"
agg = _aggregators.setdefault(name, MetersDict())
if new_root:
backup_aggregators = _active_aggregators.copy()
_active_aggregators.clear()
backup_aggregators_cnt = _active_aggregators_cnt.copy()
_active_aggregators_cnt.clear()
_active_aggregators[name] = agg
_active_aggregators_cnt[name] += 1
yield agg
_active_aggregators_cnt[name] -= 1
if _active_aggregators_cnt[name] == 0 and name in _active_aggregators:
del _active_aggregators[name]
if new_root:
_active_aggregators.clear()
_active_aggregators.update(backup_aggregators)
_active_aggregators_cnt.clear()
_active_aggregators_cnt.update(backup_aggregators_cnt)
def get_active_aggregators() -> List[MetersDict]:
return list(_active_aggregators.values())
def log_scalar(
key: str,
value: float,
weight: float = 1,
priority: int = 10,
round: Optional[int] = None,
):
"""Log a scalar value.
Args:
key (str): name of the field to log
value (float): value to log
weight (float): weight that this value contributes to the average.
A weight of 0 will always log the latest value.
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, AverageMeter(round=round), priority)
agg[key].update(value, weight)
def log_derived(key: str, fn: Callable[[MetersDict], float], priority: int = 20):
"""Log a scalar value derived from other meters.
Args:
key (str): name of the field to log
fn (Callable[[MetersDict], float]): function that takes a single
argument *meters* and returns the derived value
priority (int): smaller values are logged earlier in the output
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, MetersDict._DerivedMeter(fn), priority)
def log_speed(
key: str,
value: float,
priority: int = 30,
round: Optional[int] = None,
):
"""Log the rate of some quantity per second.
Args:
key (str): name of the field to log
value (float): value to log
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, TimeMeter(round=round), priority)
agg[key].reset() # reset meter on the first call
else:
agg[key].update(value)
def log_start_time(key: str, priority: int = 40, round: Optional[int] = None):
"""Log the duration of some event in seconds.
The duration will be computed once :func:`log_stop_time` is called.
Args:
key (str): name of the field to log
priority (int): smaller values are logged earlier in the output
round (Optional[int]): number of digits to round to when displaying
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, StopwatchMeter(round=round), priority)
agg[key].start()
def log_stop_time(key: str, weight: float = 0.0, prehook=None):
"""Log the duration of some event in seconds.
The duration will be computed since :func:`log_start_time` was called.
Set weight > 0 to report the average time instead of the sum.
Args:
key (str): name of the field to log
weight (float): weight that this time contributes to the average
prehook (function, no arguments): will be called before the timer
is stopped. For example, use prehook=torch.cuda.synchronize to
make sure all gpu operations are done before timer is stopped.
"""
for agg in get_active_aggregators():
if key in agg:
agg[key].stop(weight, prehook)
def log_custom(
new_meter_fn: Callable[[], Meter],
key: str,
*args,
priority: int = 50,
**kwargs,
):
"""Log using a custom Meter.
Any extra *args* or *kwargs* will be passed through to the Meter's
*update* method.
Args:
new_meter_fn (Callable[[], Meter]): function that returns a new
Meter instance
key (str): name of the field to log
priority (int): smaller values are logged earlier in the output
"""
for agg in get_active_aggregators():
if key not in agg:
agg.add_meter(key, new_meter_fn(), priority)
agg[key].update(*args, **kwargs)
def reset_meter(name: str, key: str) -> None:
"""Reset Meter instance aggregated under a given *name* and *key*."""
meter = get_meter(name, key)
if meter is not None:
meter.reset()
def reset_meters(name: str) -> None:
"""Reset Meter instances aggregated under a given *name*."""
meters = get_meters(name)
if meters is not None:
meters.reset()
def get_meter(name: str, key: str) -> Meter:
"""Get a single Meter instance aggregated under *name* and *key*.
Returns:
Meter or None if no metrics have been logged under *name* and *key*.
"""
if name not in _aggregators:
return None
return _aggregators[name].get(key, None)
def get_meters(name: str) -> MetersDict:
"""Get Meter instances aggregated under a given *name*.
Returns:
MetersDict or None if no metrics have been logged under *name*.
"""
return _aggregators.get(name, None)
def get_smoothed_value(name: str, key: str) -> float:
"""Get a single smoothed value.
Raises:
KeyError: if no metrics have been logged under *name* and *key*.
"""
return _aggregators[name].get_smoothed_value(key)
def get_smoothed_values(name: str) -> Dict[str, float]:
"""Get smoothed values aggregated under a given *name*.
Raises:
KeyError: if no metrics have been logged under *name*.
"""
return _aggregators[name].get_smoothed_values()
def state_dict():
return OrderedDict([(name, agg.state_dict()) for name, agg in _aggregators.items()])
def load_state_dict(state_dict):
for name, agg_state in state_dict.items():
_aggregators[name] = MetersDict()
_aggregators[name].load_state_dict(agg_state)
def xla_metrics_report():
try:
import torch_xla.debug.metrics as met
print(met.metrics_report())
except ImportError:
return
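# Minimal usage sketch combining the helpers above (values are illustrative):
#   from fairseq.logging import metrics
#   with metrics.aggregate("train"):
#       metrics.log_scalar("loss", 0.25, weight=16)
#       metrics.log_scalar("loss", 0.75, weight=16)
#   metrics.get_smoothed_values("train")["loss"]  # -> 0.5 (weighted average)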
|
COCO-LM/fairseq/fairseq/logging/metrics.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/logging/metrics.py",
"repo_id": "COCO-LM",
"token_count": 3561
}
| 184 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.model_parallel.modules import ModelParallelMultiheadAttention
from fairseq.modules import TransformerDecoderLayer, TransformerEncoderLayer
try:
from fairseq.model_parallel.megatron.mpu import (
ColumnParallelLinear,
RowParallelLinear,
)
has_megatron_submodule = True
except (ImportError, ModuleNotFoundError):
has_megatron_submodule = False
class ModelParallelTransformerEncoderLayer(TransformerEncoderLayer):
"""Encoder layer block over multiple gpus.
See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
"""
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
if q_noise > 0:
raise NotImplementedError
return ColumnParallelLinear(input_dim, output_dim, gather_output=False)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
if q_noise > 0:
raise NotImplementedError
return RowParallelLinear(input_dim, output_dim, input_is_parallel=True)
def build_self_attention(self, embed_dim, args, **unused_kwargs):
return ModelParallelMultiheadAttention(
embed_dim,
args.encoder_attention_heads,
dropout=args.attention_dropout,
self_attention=True,
)
class ModelParallelTransformerDecoderLayer(TransformerDecoderLayer):
"""Decoder layer block.
See "Megatron-LM: https://arxiv.org/pdf/1909.08053.pdf" for more details.
"""
def build_fc1(self, input_dim, output_dim, q_noise, qn_block_size):
if q_noise > 0:
raise NotImplementedError
return ColumnParallelLinear(input_dim, output_dim, gather_output=False)
def build_fc2(self, input_dim, output_dim, q_noise, qn_block_size):
if q_noise > 0:
raise NotImplementedError
return RowParallelLinear(input_dim, output_dim, input_is_parallel=True)
def build_self_attention(self, embed_dim, args, **unused_kwargs):
return ModelParallelMultiheadAttention(
embed_dim=embed_dim,
num_heads=args.decoder_attention_heads,
dropout=args.attention_dropout,
self_attention=not getattr(args, "cross_self_attention", False),
)
def build_encoder_attention(self, embed_dim, args, **unused_kwargs):
return ModelParallelMultiheadAttention(
embed_dim=embed_dim,
num_heads=args.decoder_attention_heads,
kdim=getattr(args, "encoder_embed_dim", None),
vdim=getattr(args, "encoder_embed_dim", None),
dropout=args.attention_dropout,
encoder_decoder_attention=True,
)
|
COCO-LM/fairseq/fairseq/model_parallel/modules/transformer_layer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/model_parallel/modules/transformer_layer.py",
"repo_id": "COCO-LM",
"token_count": 1192
}
| 185 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import os
# automatically import any Python files in the models/huggingface/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
model_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module("fairseq.models.huggingface." + model_name)
|
COCO-LM/fairseq/fairseq/models/huggingface/__init__.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/huggingface/__init__.py",
"repo_id": "COCO-LM",
"token_count": 258
}
| 186 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import NATransformerModel, base_architecture
from fairseq.modules import DynamicCRF
@register_model("nacrf_transformer")
class NACRFTransformerModel(NATransformerModel):
def __init__(self, args, encoder, decoder):
super().__init__(args, encoder, decoder)
self.crf_layer = DynamicCRF(
num_embedding=len(self.tgt_dict),
low_rank=args.crf_lowrank_approx,
beam_size=args.crf_beam_approx,
)
@property
def allow_ensemble(self):
return False
@staticmethod
def add_args(parser):
NATransformerModel.add_args(parser)
parser.add_argument(
"--crf-lowrank-approx",
type=int,
help="the dimension of low-rank approximation of transition",
)
parser.add_argument(
"--crf-beam-approx",
type=int,
help="the beam size for apporixmating the normalizing factor",
)
parser.add_argument(
"--word-ins-loss-factor",
type=float,
help="weights on NAT loss used to co-training with CRF loss.",
)
def forward(
self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, **kwargs
):
# encoding
encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
# length prediction
length_out = self.decoder.forward_length(
normalize=False, encoder_out=encoder_out
)
length_tgt = self.decoder.forward_length_prediction(
length_out, encoder_out, tgt_tokens
)
# decoding
word_ins_out = self.decoder(
normalize=False,
prev_output_tokens=prev_output_tokens,
encoder_out=encoder_out,
)
word_ins_tgt, word_ins_mask = tgt_tokens, tgt_tokens.ne(self.pad)
# compute the log-likelihood of CRF
crf_nll = -self.crf_layer(word_ins_out, word_ins_tgt, word_ins_mask)
crf_nll = (crf_nll / word_ins_mask.type_as(crf_nll).sum(-1)).mean()
return {
"word_ins": {
"out": word_ins_out,
"tgt": word_ins_tgt,
"mask": word_ins_mask,
"ls": self.args.label_smoothing,
"nll_loss": True,
"factor": self.args.word_ins_loss_factor,
},
"word_crf": {"loss": crf_nll},
"length": {
"out": length_out,
"tgt": length_tgt,
"factor": self.decoder.length_loss_factor,
},
}
def forward_decoder(self, decoder_out, encoder_out, decoding_format=None, **kwargs):
output_tokens = decoder_out.output_tokens
output_scores = decoder_out.output_scores
history = decoder_out.history
# execute the decoder and get emission scores
output_masks = output_tokens.ne(self.pad)
word_ins_out = self.decoder(
normalize=False, prev_output_tokens=output_tokens, encoder_out=encoder_out
)
# run viterbi decoding through CRF
_scores, _tokens = self.crf_layer.forward_decoder(word_ins_out, output_masks)
output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
output_scores.masked_scatter_(output_masks, _scores[output_masks])
if history is not None:
history.append(output_tokens.clone())
return decoder_out._replace(
output_tokens=output_tokens,
output_scores=output_scores,
attn=None,
history=history,
)
@register_model_architecture("nacrf_transformer", "nacrf_transformer")
def nacrf_base_architecture(args):
args.crf_lowrank_approx = getattr(args, "crf_lowrank_approx", 32)
args.crf_beam_approx = getattr(args, "crf_beam_approx", 64)
args.word_ins_loss_factor = getattr(args, "word_ins_loss_factor", 0.5)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
base_architecture(args)
|
COCO-LM/fairseq/fairseq/models/nat/nat_crf_transformer.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/nat/nat_crf_transformer.py",
"repo_id": "COCO-LM",
"token_count": 2094
}
| 187 |
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import logging
from collections.abc import Iterable
from itertools import repeat
from typing import List, Optional, Tuple
import torch
from torch import Tensor
# ------------------------------------------------------------------------------
# assert_equal()
# ------------------------------------------------------------------------------
def assert_equal(value1, value2, name1=None, name2=None):
"""Asserts two values are equal otherwise raise an error."""
str_name1 = "" if name1 is None else "{} ".format(name1)
str_name2 = "" if name2 is None else "{} ".format(name2)
if value1 != value2:
str_value1 = "{}" if name1 is None else "({})"
str_value1 = str_value1.format(value1)
str_value2 = "{}" if name2 is None else "({})"
str_value2 = str_value2.format(value2)
raise ValueError(
"Expected {}{} == {}{}".format(str_name1, str_value1, str_name2, str_value2)
)
def fill_config(config, key, value):
if value is not None:
if key not in config or config[key] is None:
config[key] = value
assert_equal(value, config[key], "value", f'config["{key}"]')
# ------------------------------------------------------------------------------
# check_and_return_expected()
# ------------------------------------------------------------------------------
def check_and_return_expected(value, undefined_value, expected_value, name=None):
"""
Return the expected value while checking if the given value is undefined or
equal to the expected value.
"""
if (undefined_value is None and value is None) or (undefined_value == value):
return expected_value
if value != expected_value:
str_name = "" if name is None else "{} ".format(name)
str_value = "{}" if name is None else "({})"
str_value = str_value.format(value)
raise ValueError(
"Expected {}{} == {}".format(str_name, str_value, expected_value)
)
return expected_value
# ------------------------------------------------------------------------------
# get_time_axis()
# ------------------------------------------------------------------------------
def get_time_axis(layout):
"""
Extract the time axis from the layout, for example for breaking sequence into
segments.
"""
if layout in ["TB", "TBD"]:
return 0
if layout in ["BT", "BTD"]:
return 1
if layout in ["BCTD"]:
return 2
raise ValueError("Unsupported layout = {}".format(layout))
# ------------------------------------------------------------------------------
# get_batch_axis()
# ------------------------------------------------------------------------------
def get_batch_axis(layout):
"""
Extract the batch axis from the layout
"""
if layout in ["TB", "TBD"]:
return 1
if layout in ["BT", "BTD", "BCTD"]:
return 0
raise ValueError("Unsupported layout = {}".format(layout))
# ------------------------------------------------------------------------------
# monotonically_increasing_and_bounded()
# ------------------------------------------------------------------------------
def monotonically_increasing_and_bounded(iterable, min=None, max=None):
"""
Check if the elements in the given iterable are monotonically increasing and
bounded by upper/lower bounds.
"""
if not isinstance(iterable, Iterable):
raise TypeError(
"Expected iterable to be of type Iterable, got ({})".format(
iterable.__class__.__name__
)
)
for i in range(len(iterable)):
if min is not None and iterable[i] < min:
return False
if max is not None and iterable[i] > max:
return False
if i > 0 and iterable[i] <= iterable[i - 1]:
return False
return True
# ------------------------------------------------------------------------------
# to_pair()
# ------------------------------------------------------------------------------
def to_pair(value, name):
"""Make a pair (of type tuple) of given value."""
if isinstance(value, Iterable):
if len(value) != 2:
raise ValueError(
"Expected `{}` to have exactly 2 elements, got: ({})".format(
name, value
)
)
return value
return tuple(repeat(value, 2))
# ------------------------------------------------------------------------------
# infer_conv_output_attrs()
# ------------------------------------------------------------------------------
# TODO(cfyeh): figure out if we can get `output_dim` without calling the module.
def infer_conv_output_attrs(
module, input_channels, input_dim, batch_size=1, max_length=8
):
"""Get output attributes of a module with input."""
input = torch.randn(batch_size, input_channels, max_length, input_dim)
output = module(input)
output_channels = output.shape[1]
output_dim = output.shape[-1]
return output_channels, output_dim
# ------------------------------------------------------------------------------
# NoOp
# ------------------------------------------------------------------------------
class NoOp(torch.nn.Module):
"""
NoOp simply passes the input as the output.
"""
def __init__(self):
super().__init__()
def forward(self, input: Tensor) -> Tensor:
return input
# ------------------------------------------------------------------------------
# Permute: a torch.nn.Module applies permutation on the input tensor.
# ------------------------------------------------------------------------------
class Permute(torch.nn.Module):
def __init__(self, dims):
super().__init__()
self.dims = dims
def forward(self, input: Tensor) -> Tensor:
return input.permute(self.dims).contiguous()
# ------------------------------------------------------------------------------
# lengths_to_padding_mask()
# ------------------------------------------------------------------------------
def lengths_to_padding_mask(lengths: Tensor) -> Tensor:
"""Convert lengths of shape (B, ) to padding mask."""
batch_size = lengths.shape[0]
max_length = int(torch.max(lengths).item())
padding_mask = torch.arange( # [0, ..., T-1]
max_length, device=lengths.device, dtype=lengths.dtype
).expand(batch_size, max_length) >= lengths.unsqueeze(1)
return padding_mask
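# Worked example:
#   lengths_to_padding_mask(torch.tensor([2, 4]))
#   # -> tensor([[False, False,  True,  True],
#   #            [False, False, False, False]])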
# ------------------------------------------------------------------------------
# lengths_to_attention_mask()
# ------------------------------------------------------------------------------
def lengths_to_attention_mask(
lengths: Tensor,
left_context: Optional[int] = None,
right_context: Optional[int] = None,
) -> Optional[Tensor]:
"""
Generate attention mask based on (lengths, left_context, right_context).
left_context is None means unlimited left context.
right_context is None means unlimited right context.
"""
if left_context is None and right_context is None:
return None
max_length = int(torch.max(lengths).item())
# For example, with `max_length` == 5,
# indices = tensor([
# [ 0, 1, 2, 3, 4, 5],
# [-1, 0, 1, 2, 3, 4],
# [-2, -1, 0, 1, 2, 3],
# [-3, -2, -1, 0, 1, 2],
# [-4, -3, -2, -1, 0, 1],
# [-5, -4, -3, -2, -1, 0],
# ])
# In some cases the second torch.arange is created on cpu which causes a
# failure. Adding the device option to guard against it.
indices = torch.arange(
max_length, device=lengths.device, dtype=lengths.dtype
).expand(max_length, max_length) - torch.arange(
max_length, device=lengths.device
).view(
max_length, -1
)
# For example, with `max_length` == 5,
# bool_mask = tensor([
# [True, True, True, True, True],
# [True, True, True, True, True],
# [True, True, True, True, True],
# [True, True, True, True, True],
# [True, True, True, True, True],
# ])
bool_mask = (
torch.tensor([True]).to(device=lengths.device).expand(max_length, max_length)
)
# For example, with `max_length` == 5, left_context == 2
# left_mask = tensor([
# [ True, True, True, True, True],
# [ True, True, True, True, True],
# [ True, True, True, True, True],
# [False, True, True, True, True],
# [False, False, True, True, True],
# ])
if left_context is not None:
left_mask = indices >= -left_context
bool_mask = bool_mask & left_mask
# For example, with `max_length` == 5, right_context == 1
# right_mask = tensor([
# [True, True, False, False, False],
# [True, True, True, False, False],
# [True, True, True, True, False],
# [True, True, True, True, True],
# [True, True, True, True, True],
# ])
if right_context is not None:
right_mask = indices <= right_context
bool_mask = bool_mask & right_mask
bool_mask = (~bool_mask).to(device=lengths.device)
return bool_mask
# ------------------------------------------------------------------------------
# infer_output_norm()
# ------------------------------------------------------------------------------
def infer_output_norm(module, output_norm=None):
"""
    Infer the output norm (string and module) needed on the module given the desired
output normalization.
"""
if output_norm == module.output_norm():
# output_norm already matches module.output_norm().
return (None, NoOp())
if output_norm is None and module.output_norm() is not None:
logger = logging.getLogger("infer_output_norm()")
logger.warning(
"trying to set output_norm ({}) ".format(output_norm)
+ "but got module.output_norm() ({}), ".format(module.output_norm())
+ "the combined output_norm() will be ({})".format(module.output_norm())
)
return (None, NoOp())
if output_norm == "log_softmax":
if module.output_norm() is not None:
raise ValueError(
"incompatible output_norm ({}) ".format(output_norm)
+ "and module.output_norm() ({})".format(module.output_norm())
)
else:
return ("log_softmax", torch.nn.LogSoftmax(dim=-1))
if output_norm == "softmax":
if module.output_norm() is not None:
raise ValueError(
"incompatible output_norm ({}) ".format(output_norm)
+ "and module.output_norm() ({})".format(module.output_norm())
)
else:
return ("softmax", torch.nn.Softmax(dim=-1))
raise ValueError(
"output_norm ({}) not in ".format(output_norm)
+ "supported list = [None, softmax, log_softmax]"
)
# ------------------------------------------------------------------------------
# infer_channels_from_layout()
# ------------------------------------------------------------------------------
def infer_channels_from_layout(layout, channels):
"""Extract the number of channels from the layout."""
if layout in ("TBD", "BTD"):
if channels is not None and channels != 1:
raise ValueError(
"Expected channels ({}) to be 1 for layout = {}".format(
channels, layout
)
)
if channels is None:
return 1
return channels
# ------------------------------------------------------------------------------
# pad_sequence()
# ------------------------------------------------------------------------------
@torch.jit.export
def pad_sequence(
sequence: Tensor,
time_axis: int,
extra_left_context: int = 0,
extra_right_context: int = 0,
) -> Tensor:
"""Pad extra left/right contexts to the sequence."""
if extra_left_context == 0 and extra_right_context == 0:
return sequence
tensors_to_concat = []
if extra_left_context:
size = (extra_left_context,)
fill_value = 0
indices = torch.full(
size=size,
fill_value=fill_value,
dtype=torch.long,
device=sequence.device,
)
left_padding = torch.index_select(sequence, time_axis, indices)
tensors_to_concat.append(left_padding)
tensors_to_concat.append(sequence)
    # NOTE(cfyeh): for efficiency reasons we pad 0 instead of the last frame for
# extra right contexts.
if extra_right_context:
size = list(sequence.shape)
size[time_axis] = extra_right_context
right_padding = torch.zeros(size, dtype=sequence.dtype, device=sequence.device)
tensors_to_concat.append(right_padding)
padded_sequence = torch.cat(tensors_to_concat, dim=time_axis)
return padded_sequence
# ------------------------------------------------------------------------------
# sequence_to_segments()
# ------------------------------------------------------------------------------
@torch.jit.export
def sequence_to_segments(
sequence: Tensor,
time_axis: int,
lengths: Tensor,
segment_size: Optional[int] = None,
extra_left_context: int = 0,
extra_right_context: int = 0,
) -> List[Tuple[Tensor, Tensor]]:
"""Breaks sequence into segments."""
sequence = pad_sequence(
sequence=sequence,
time_axis=time_axis,
extra_left_context=extra_left_context,
extra_right_context=extra_right_context,
)
lengths = lengths + extra_left_context + extra_right_context
segments: List[Tuple[Tensor, Tensor]] = []
if segment_size is None:
segments.append((sequence, lengths))
return segments
offset = 0
end = sequence.shape[time_axis]
step = segment_size
size = extra_left_context + segment_size + extra_right_context
while offset + extra_left_context + extra_right_context < end:
clamped_size = min(size, end - offset)
segment_lengths = torch.clamp(lengths - offset, min=0, max=clamped_size)
indices = torch.arange(
start=offset,
end=(offset + clamped_size),
step=1,
dtype=torch.long,
device=sequence.device,
)
segment_tensor = torch.index_select(sequence, time_axis, indices)
segments.append((segment_tensor, segment_lengths))
offset = offset + step
return segments
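# Minimal call sketch (illustrative shapes):
#   seq = torch.randn(10, 2, 8)                      # T x B x D
#   lens = torch.tensor([10, 7])
#   segs = sequence_to_segments(seq, time_axis=0, lengths=lens, segment_size=4)
#   # -> 3 (segment, segment_lengths) tuples of 4, 4 and 2 frames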
# ------------------------------------------------------------------------------
# segments_to_sequence()
# ------------------------------------------------------------------------------
@torch.jit.export
def segments_to_sequence(
segments: List[Tuple[Tensor, Tensor]], time_axis: int
) -> Tuple[Tensor, Tensor]:
"""Concatenate segments into a full sequence."""
if len(segments) == 1:
return segments[0]
tensors_to_concat: List[Tensor] = []
lengths_to_stack: List[Tensor] = []
for tensor, lengths in segments:
tensors_to_concat.append(tensor)
lengths_to_stack.append(lengths)
sequence = torch.cat(tensors_to_concat, dim=time_axis)
lengths = torch.stack(lengths_to_stack, dim=0)
lengths = torch.sum(lengths, dim=0)
return sequence, lengths
def lengths_to_encoder_padding_mask(lengths, batch_first: bool = False):
"""
convert lengths (a 1-D Long/Int tensor) to 2-D binary tensor
Args:
lengths: a (B, )-shaped tensor
batch_first: whether to return a (B, T) tensor
Return:
max_length: maximum length of B sequences
encoder_padding_mask: a (max_length, B) binary mask, where
[t, b] = False for t < lengths[b] and True otherwise
TODO:
kernelize this function if benchmarking shows this function is slow
"""
max_lengths = torch.max(lengths).item()
bsz = lengths.size(0)
encoder_padding_mask = torch.arange(
max_lengths
).to( # a (T, ) tensor with [0, ..., T-1]
lengths.device
).view( # move to the right device
1, max_lengths
).expand( # reshape to (1, T)-shaped tensor
bsz, -1
    ) >= lengths.view(  # expand to (B, T)-shaped tensor
bsz, 1
).expand(
-1, max_lengths
)
if not batch_first:
return encoder_padding_mask.t(), max_lengths
else:
return encoder_padding_mask, max_lengths
# ------------------------------------------------------------------------------
# attention suppression
# ------------------------------------------------------------------------------
def attention_suppression(attention_weights: Tensor, scale: float):
# B, H, qlen, klen -> B, H, qlen, 1
attention_prob = torch.nn.functional.softmax(attention_weights.float(), dim=-1)
attention_nozeros = attention_prob.to(torch.bool)
nozeros_sum = torch.sum(attention_nozeros.to(torch.float), dim=-1, keepdim=True)
    # For very sparse attention, we need to work around the zero entries
key_sum = torch.sum(attention_prob, dim=-1, keepdim=True)
    # nozeros_sum should be > 1
key_mean = key_sum / (nozeros_sum + 1e-8)
# std calculation
dis = (attention_prob - key_mean) * (attention_prob - key_mean)
    # keep dis only where attention_prob is non-zero (zero out masked positions)
dis_masked = torch.where(
attention_nozeros, dis, attention_prob.new_zeros(attention_prob.size())
)
key_var = torch.sum(dis_masked, dim=-1, keepdim=True)
key_var = key_var / (nozeros_sum - 1.0 + 1e-8)
key_std = torch.sqrt(key_var)
key_thread = key_mean - scale * key_std
    # keep attention_weights[i] where attention_prob[i] >= key_thread,
    # otherwise set it to "-inf"
inf_tensor = attention_prob.new_zeros(attention_prob.size()).detach()
inf_tensor[:] = float("-inf")
attention_weights_float = torch.where(
attention_prob < key_thread,
inf_tensor,
attention_weights.float(),
)
return attention_weights_float.type_as(attention_weights)
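# Minimal usage sketch: suppress (set to -inf) attention entries whose softmax
# probability falls more than `scale` standard deviations below the per-query
# mean probability, e.g.
#   weights = torch.randn(2, 4, 5, 5)   # B x H x qlen x klen
#   suppressed = attention_suppression(weights, scale=0.5)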
def layer_norm_backward_hook(module, grad_input, grad_output, clamp_value):
return tuple(torch.clamp(v, min=-clamp_value, max=clamp_value) for v in grad_input)
|
COCO-LM/fairseq/fairseq/models/speech_to_text/utils.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/models/speech_to_text/utils.py",
"repo_id": "COCO-LM",
"token_count": 6766
}
| 188 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
from typing import Any, Dict, List, Tuple, Union
import torch
import torch.utils.checkpoint as checkpoint
from fairseq import utils
def checkpoint_wrapper(m, offload_to_cpu=False):
"""
A friendlier wrapper for performing activation checkpointing.
Compared to the PyTorch version, this version:
- wraps an nn.Module, so that all subsequent calls will use checkpointing
- handles keyword arguments in the forward
- handles non-Tensor outputs from the forward
Usage::
checkpointed_module = checkpoint_wrapper(my_module, offload_to_cpu=True)
a, b = checkpointed_module(x, y=3, z=torch.Tensor([1]))
"""
m.forward = functools.partial(
_checkpointed_forward,
m.forward, # original_forward
offload_to_cpu,
)
return m
def _checkpointed_forward(original_forward, offload_to_cpu, *args, **kwargs):
# Autograd Functions in PyTorch work best with positional args, since
# the backward must return gradients (or None) for every input argument.
# We can flatten keyword arguments to make this easier.
kwarg_keys, flat_args = pack_kwargs(*args, **kwargs)
parent_ctx_dict = {"offload": offload_to_cpu}
output = CheckpointFunction.apply(
original_forward, parent_ctx_dict, kwarg_keys, *flat_args
)
if isinstance(output, torch.Tensor):
return output
else:
packed_non_tensor_outputs = parent_ctx_dict["packed_non_tensor_outputs"]
if packed_non_tensor_outputs:
output = unpack_non_tensors(output, packed_non_tensor_outputs)
return output
def pack_kwargs(*args, **kwargs) -> Tuple[List[str], List[Any]]:
"""
Usage::
kwarg_keys, flat_args = pack_kwargs(1, 2, a=3, b=4)
args, kwargs = unpack_kwargs(kwarg_keys, flat_args)
assert args == [1, 2]
assert kwargs == {"a": 3, "b": 4}
"""
kwarg_keys = []
flat_args = list(args)
for k, v in kwargs.items():
kwarg_keys.append(k)
flat_args.append(v)
return kwarg_keys, flat_args
def unpack_kwargs(
kwarg_keys: List[str], flat_args: List[Any]
) -> Tuple[List[Any], Dict[str, Any]]:
if len(kwarg_keys) == 0:
return flat_args, {}
args = flat_args[: -len(kwarg_keys)]
kwargs = {k: v for k, v in zip(kwarg_keys, flat_args[-len(kwarg_keys) :])}
return args, kwargs
def split_non_tensors(
mixed: Union[torch.Tensor, Tuple[Any]]
) -> Tuple[Tuple[torch.Tensor], Dict[str, List[Any]]]:
"""
Usage::
x = torch.Tensor([1])
y = torch.Tensor([2])
tensors, packed_non_tensors = split_non_tensors((x, y, None, 3))
recon = unpack_non_tensors(tensors, packed_non_tensors)
assert recon == (x, y, None, 3)
"""
if isinstance(mixed, torch.Tensor):
return (mixed,), None
tensors = []
packed_non_tensors = {"is_tensor": [], "objects": []}
for o in mixed:
if isinstance(o, torch.Tensor):
packed_non_tensors["is_tensor"].append(True)
tensors.append(o)
else:
packed_non_tensors["is_tensor"].append(False)
packed_non_tensors["objects"].append(o)
return tuple(tensors), packed_non_tensors
def unpack_non_tensors(
tensors: Tuple[torch.Tensor],
packed_non_tensors: Dict[str, List[Any]],
) -> Tuple[Any]:
if packed_non_tensors is None:
return tensors
assert isinstance(packed_non_tensors, dict)
mixed = []
is_tensor_list = packed_non_tensors["is_tensor"]
objects = packed_non_tensors["objects"]
assert len(tensors) + len(objects) == len(is_tensor_list)
obj_i = tnsr_i = 0
for is_tensor in is_tensor_list:
if is_tensor:
mixed.append(tensors[tnsr_i])
tnsr_i += 1
else:
mixed.append(objects[obj_i])
obj_i += 1
return tuple(mixed)
class CheckpointFunction(torch.autograd.Function):
"""Similar to the torch version, but support non-Tensor outputs.
The caller is expected to provide a dict (*parent_ctx_dict*) that will hold
the non-Tensor outputs. These should be combined with the Tensor *outputs*
by calling ``unpack_non_tensors``.
"""
@staticmethod
def forward(ctx, run_function, parent_ctx_dict, kwarg_keys, *args):
if torch.is_grad_enabled(): # grad may be disabled, e.g., during validation
checkpoint.check_backward_validity(args)
ctx.run_function = run_function
ctx.kwarg_keys = kwarg_keys
ctx.fwd_rng_state = utils.get_rng_state()
tensor_inputs, packed_non_tensor_inputs = split_non_tensors(args)
if parent_ctx_dict["offload"]:
ctx.fwd_device = tuple(x.device for x in tensor_inputs)
ctx.grad_requirements = tuple(x.requires_grad for x in tensor_inputs)
tensor_inputs = tuple(x.cpu() for x in tensor_inputs)
else:
ctx.fwd_device, ctx.grad_requirements = None, None
ctx.save_for_backward(*tensor_inputs)
ctx.packed_non_tensor_inputs = packed_non_tensor_inputs
with torch.no_grad():
unpacked_args, unpacked_kwargs = unpack_kwargs(kwarg_keys, args)
outputs = run_function(*unpacked_args, **unpacked_kwargs)
if isinstance(outputs, torch.Tensor):
return outputs
else:
# Autograd Functions don't like non-Tensor outputs. We can split the
# non-Tensor and Tensor outputs, returning the former by reference
# through *parent_ctx_dict* and returning the latter directly.
outputs, packed_non_tensor_outputs = split_non_tensors(outputs)
parent_ctx_dict["packed_non_tensor_outputs"] = packed_non_tensor_outputs
return outputs
@staticmethod
def backward(ctx, *args):
if not torch.autograd._is_checkpoint_valid():
raise RuntimeError(
"Checkpointing is not compatible with .grad(), please use .backward() if possible"
)
tensor_inputs: Tuple = ctx.saved_tensors
tensor_inputs = checkpoint.detach_variable(tensor_inputs)
if ctx.fwd_device is not None:
tensor_inputs = [
t.to(ctx.fwd_device[i]) for i, t in enumerate(tensor_inputs)
]
for i, need_grad in enumerate(ctx.grad_requirements):
tensor_inputs[i].requires_grad = need_grad
inputs = unpack_non_tensors(tensor_inputs, ctx.packed_non_tensor_inputs)
# Store the current states.
bwd_rng_state = utils.get_rng_state()
# Set the states to what it used to be before the forward pass.
utils.set_rng_state(ctx.fwd_rng_state)
with torch.enable_grad():
unpacked_args, unpacked_kwargs = unpack_kwargs(ctx.kwarg_keys, inputs)
outputs = ctx.run_function(*unpacked_args, **unpacked_kwargs)
tensor_outputs, _ = split_non_tensors(outputs)
# Set the states back to what it was at the start of this function.
utils.set_rng_state(bwd_rng_state)
# Run backward() with only Tensors that require grad
outputs_with_grad = []
args_with_grad = []
for i in range(len(tensor_outputs)):
if tensor_outputs[i].requires_grad:
outputs_with_grad.append(tensor_outputs[i])
args_with_grad.append(args[i])
if len(outputs_with_grad) == 0:
raise RuntimeError(
"None of the outputs have requires_grad=True, "
"this checkpoint() is not necessary"
)
torch.autograd.backward(outputs_with_grad, args_with_grad)
grads = tuple(
inp.grad if isinstance(inp, torch.Tensor) else None for inp in inputs
)
return (None, None, None) + grads
|
COCO-LM/fairseq/fairseq/modules/checkpoint_activations.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/checkpoint_activations.py",
"repo_id": "COCO-LM",
"token_count": 3538
}
| 189 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Group norm done in fp32 (for fp16 training)
"""
import torch.nn as nn
import torch.nn.functional as F
class Fp32GroupNorm(nn.GroupNorm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def forward(self, input):
output = F.group_norm(
input.float(),
self.num_groups,
self.weight.float() if self.weight is not None else None,
self.bias.float() if self.bias is not None else None,
self.eps,
)
return output.type_as(input)
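# Minimal usage sketch: a drop-in replacement for nn.GroupNorm that keeps the
# input/output dtype (e.g. fp16) while normalizing in fp32 internally:
#   gn = Fp32GroupNorm(num_groups=4, num_channels=32)
#   y = gn(torch.randn(8, 32, 100).half())   # y.dtype == torch.float16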
|
COCO-LM/fairseq/fairseq/modules/fp32_group_norm.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/fp32_group_norm.py",
"repo_id": "COCO-LM",
"token_count": 294
}
| 190 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from .conv_tbc import ConvTBC
from typing import Dict, Optional
from torch import Tensor
@with_incremental_state
class LinearizedConvolution(ConvTBC):
"""An optimized version of nn.Conv1d.
At training time, this module uses ConvTBC, which is an optimized version
of Conv1d. At inference time, it optimizes incremental generation (i.e.,
one time step at a time) by replacing the convolutions with linear layers.
Note that the input order changes from training to inference.
"""
def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
super().__init__(in_channels, out_channels, kernel_size, **kwargs)
self._linearized_weight = None
self.register_backward_hook(self._clear_linearized_weight)
def state_dict(self, destination=None, prefix="", keep_vars=False):
state = ConvTBC.state_dict(self, destination, prefix, keep_vars=keep_vars)
# don't store redundant _linearized_weight in checkpoints
if prefix + "_linearized_weight" in state:
del state[prefix + "_linearized_weight"]
return state
def upgrade_state_dict_named(self, state_dict, name):
prefix = name + "." if name != "" else ""
if prefix + "_linearized_weight" in state_dict:
del state_dict[prefix + "_linearized_weight"]
@torch.jit.export
def forward(self, input, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None):
"""
Args:
incremental_state: Used to buffer signal; if not None, then input is
expected to contain a single frame. If the input order changes
between time steps, call reorder_incremental_state.
Input:
Time x Batch x Channel during training
Batch x Time x Channel during inference
"""
if incremental_state is None:
output = self.conv_tbc(input)
if self.kernel_size[0] > 1 and self.padding[0] > 0:
# remove future timesteps added by padding
output = output[: -self.padding[0], :, :]
return output
# reshape weight
weight = self._get_linearized_weight()
kw = self.kernel_size[0]
bsz = input.size(0) # input: bsz x len x dim
if kw > 1:
input = input.data
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = input.new(bsz, kw, input.size(2)).zero_()
self._set_input_buffer(incremental_state, input_buffer)
else:
# shift buffer
input_buffer[:, :-1, :] = input_buffer[:, 1:, :].clone()
# append next input
input_buffer[:, -1, :] = input[:, -1, :]
input = input_buffer
with torch.no_grad():
output = F.linear(input.view(bsz, -1), weight, self.bias)
return output.view(bsz, 1, -1)
@torch.jit.unused
def reorder_incremental_state(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_order):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(0, new_order)
self._set_input_buffer(incremental_state, input_buffer)
@torch.jit.unused
def _get_input_buffer(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]):
return utils.get_incremental_state(self, incremental_state, "input_buffer")
@torch.jit.unused
def _set_input_buffer(self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]], new_buffer):
return utils.set_incremental_state(
self, incremental_state, "input_buffer", new_buffer
)
@torch.jit.unused
def _get_linearized_weight(self):
if self._linearized_weight is None:
kw = self.kernel_size[0]
weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous()
assert weight.size() == (self.out_channels, kw, self.in_channels)
return weight.view(self.out_channels, -1)
return self._linearized_weight
@torch.jit.unused
def _clear_linearized_weight(self, *args):
self._linearized_weight = None
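# Minimal usage sketch (assumes the decoder-style padding=kernel_size-1 setup):
#   conv = LinearizedConvolution(in_channels=8, out_channels=16, kernel_size=3, padding=2)
#   train_out = conv(torch.randn(20, 4, 8))        # T x B x C at training time
#   state = {}                                     # incremental decoding buffer
#   step_out = conv(torch.randn(4, 1, 8), state)   # B x 1 x C, one step at a time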
|
COCO-LM/fairseq/fairseq/modules/linearized_convolution.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/linearized_convolution.py",
"repo_id": "COCO-LM",
"token_count": 1912
}
| 191 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from ..ops import emulate_int
class ActivationQuantizer:
"""
Fake scalar quantization of the activations using a forward hook.
Args:
- module. a nn.Module for which we quantize the *post-activations*
- p: proportion of activations to quantize, set by default to 1
- update_step: to recompute quantization parameters
- bits: number of bits for quantization
- method: choose among {"tensor", "histogram", "channel"}
        - clamp_threshold: to prevent gradient overflow
Remarks:
- Parameters scale and zero_point are recomputed every update_step
forward pass to reduce the overhead
- For the list of quantization methods and number of bits, see ops.py
- To remove the hook from the module, simply call self.handle.remove()
- At test time, the activations are fully quantized
- We use the straight-through estimator so that the gradients
back-propagate nicely in the network, this is implemented with
the detach() trick
- The activations are hard-clamped in [-clamp_threshold, clamp_threshold]
to prevent overflow during the backward pass
"""
def __init__(
self,
module,
p=1,
update_step=1000,
bits=8,
method="histogram",
clamp_threshold=5,
):
self.module = module
self.p = p
self.update_step = update_step
self.counter = 0
self.bits = bits
self.method = method
self.clamp_threshold = clamp_threshold
self.handle = None
self.register_hook()
def register_hook(self):
# forward hook
def quantize_hook(module, x, y):
# update parameters every 1000 iterations
if self.counter % self.update_step == 0:
self.scale = None
self.zero_point = None
self.counter += 1
# train with QuantNoise and evaluate the fully quantized network
p = self.p if self.module.training else 1
# quantize activations
y_q, self.scale, self.zero_point = emulate_int(
y.detach(),
bits=self.bits,
method=self.method,
scale=self.scale,
zero_point=self.zero_point,
)
# mask to apply noise
mask = torch.zeros_like(y)
mask.bernoulli_(1 - p)
noise = (y_q - y).masked_fill(mask.bool(), 0)
# using straight-through estimator (STE)
clamp_low = -self.scale * self.zero_point
clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point)
return torch.clamp(y, clamp_low.item(), clamp_high.item()) + noise.detach()
# register hook
self.handle = self.module.register_forward_hook(quantize_hook)
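# Minimal usage sketch (`layer` stands for any nn.Module whose post-activations
# should be quantized; see the Remarks above):
#   quantizer = ActivationQuantizer(layer, p=0.5, bits=8, method="histogram")
#   ...train with quantization noise...
#   quantizer.handle.remove()   # detach the forward hook when done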
|
COCO-LM/fairseq/fairseq/modules/quantization/scalar/modules/qact.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/quantization/scalar/modules/qact.py",
"repo_id": "COCO-LM",
"token_count": 1304
}
| 192 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn.functional as F
def unfold1d(x, kernel_size, padding_l, pad_value=0):
"""unfold T x B x C to T x B x C x K"""
if kernel_size > 1:
T, B, C = x.size()
x = F.pad(
x, (0, 0, 0, 0, padding_l, kernel_size - 1 - padding_l), value=pad_value
)
x = x.as_strided((T, B, C, kernel_size), (B * C, C, 1, B * C))
else:
x = x.unsqueeze(3)
return x
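# Worked example: with x of shape (T=5, B=2, C=3), kernel_size=3 and
# padding_l=1, the time axis is padded to 7 and every step is expanded into
# its 3-frame window:
#   unfold1d(torch.randn(5, 2, 3), kernel_size=3, padding_l=1).shape
#   # -> torch.Size([5, 2, 3, 3])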
|
COCO-LM/fairseq/fairseq/modules/unfold.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/modules/unfold.py",
"repo_id": "COCO-LM",
"token_count": 263
}
| 193 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import types
import torch
def get_fused_adam_class():
"""
Look for the FusedAdam optimizer from apex. We first try to load the
"contrib" interface, which is a bit faster than the main interface,
but is technically deprecated.
"""
try:
# The "deprecated" interface in recent versions of apex is a bit
        # faster than the main interface, since we don't use the apex
# optimizer. This can be installed by passing the
# `--deprecated_fused_adam` option when building apex.
global fused_adam_cuda_v2
import importlib
fused_adam_cuda_v2 = importlib.import_module("fused_adam_cuda_v2")
return FusedAdamV1
except ImportError:
pass
return None
class FusedAdamV1(torch.optim.Optimizer):
"""
Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via
``python setup.py install --cuda_ext --cpp_ext``.
It has been proposed in `Adam: A Method for Stochastic Optimization`_.
Compared to the original version in Apex, the fairseq version casts grads
and params to FP32 internally to support ``--memory-efficient-fp16``.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups.
lr (float, optional): learning rate. (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square. (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability. (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
amsgrad (boolean, optional): whether to use the AMSGrad variant of this
algorithm from the paper `On the Convergence of Adam and Beyond`_
(default: False) NOT SUPPORTED in FusedAdam!
eps_inside_sqrt (boolean, optional): in the "update parameters" step,
adds eps to the bias-corrected second moment estimate before
evaluating square root instead of adding it to the square root of
second moment estimate as in the original paper. (default: False)
.. _Adam: A Method for Stochastic Optimization:
https://arxiv.org/abs/1412.6980
.. _On the Convergence of Adam and Beyond:
https://openreview.net/forum?id=ryQu7f-RZ
"""
def __init__(self, params,
lr=1e-3, bias_correction=True,
betas=(0.9, 0.999), eps=1e-8,
weight_decay=0., amsgrad=False):
global fused_adam_cuda_v2
import importlib
fused_adam_cuda_v2 = importlib.import_module("fused_adam_cuda_v2")
if amsgrad:
raise RuntimeError("FusedAdam does not support the AMSGrad variant.")
defaults = {
"lr": lr,
"bias_correction": bias_correction,
"betas": betas,
"eps": eps,
"weight_decay": weight_decay,
}
super().__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
@property
def supports_flat_params(self):
return True
@property
def supports_step_with_scale(self):
return True
def step(self, closure=None, scale=1.):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
scale (float, optional): factor to divide gradient tensor values
by before applying to weights. (default: 1)
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
# compute combined scale factor for this group
combined_scale = scale
bias_correction = 1 if group.get("bias_correction", 1) else 0
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
"FusedAdam does not support sparse gradients, "
"please consider SparseAdam instead"
)
p_data_fp32 = p.data.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state["step"] = 0
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p_data_fp32)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(p_data_fp32)
else:
state["exp_avg"] = state["exp_avg"].to(p_data_fp32)
state["exp_avg_sq"] = state["exp_avg_sq"].to(p_data_fp32)
exp_avg = state["exp_avg"]
exp_avg_sq = state["exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
with torch.cuda.device(p.device):
fused_adam_cuda_v2.adam(p_data_fp32,
exp_avg,
exp_avg_sq,
grad,
group["lr"],
beta1,
beta2,
group["eps"],
combined_scale,
state["step"],
bias_correction,
group["weight_decay"])
return loss
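# Minimal usage sketch (requires the apex `fused_adam_cuda_v2` extension and
# CUDA parameters; the constructor mirrors torch.optim.Adam):
#   optimizer = FusedAdamV1(model.parameters(), lr=1e-3, betas=(0.9, 0.999))
#   loss.backward()
#   optimizer.step()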
|
COCO-LM/fairseq/fairseq/optim/fused_adam.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/optim/fused_adam.py",
"repo_id": "COCO-LM",
"token_count": 2948
}
| 194 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from typing import Callable, List, Optional
import torch
from fairseq import utils
from fairseq.data.indexed_dataset import get_available_dataset_impl
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
EvalLMConfig,
GenerationConfig,
InteractiveConfig,
OptimizationConfig,
)
from fairseq.dataclass.utils import gen_parser_from_dataclass
# this import is for backward compatibility
from fairseq.utils import csv_str_list, eval_bool, eval_str_dict, eval_str_list # noqa
def get_preprocessing_parser(default_task="translation"):
parser = get_parser("Preprocessing", default_task)
add_preprocess_args(parser)
return parser
def get_training_parser(default_task="translation"):
parser = get_parser("Trainer", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser)
add_model_args(parser)
add_optimization_args(parser)
add_checkpoint_args(parser)
return parser
def get_generation_parser(interactive=False, default_task="translation"):
parser = get_parser("Generation", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_generation_args(parser)
add_checkpoint_args(parser)
if interactive:
add_interactive_args(parser)
return parser
def get_interactive_generation_parser(default_task="translation"):
return get_generation_parser(interactive=True, default_task=default_task)
def get_eval_lm_parser(default_task="language_modeling"):
parser = get_parser("Evaluate Language Model", default_task)
add_dataset_args(parser, gen=True)
add_distributed_training_args(parser, default_world_size=1)
add_eval_lm_args(parser)
return parser
def get_validation_parser(default_task=None):
parser = get_parser("Validation", default_task)
add_dataset_args(parser, train=True)
add_distributed_training_args(parser, default_world_size=1)
group = parser.add_argument_group("Evaluation")
gen_parser_from_dataclass(group, CommonEvalConfig())
return parser
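# Minimal usage sketch (flag values are illustrative and depend on the chosen
# task/model; see parse_args_and_arch below):
#   parser = get_training_parser()
#   args = parse_args_and_arch(parser, input_args=["data-bin", "--arch", "transformer"])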
def parse_args_and_arch(
parser: argparse.ArgumentParser,
input_args: List[str] = None,
parse_known: bool = False,
suppress_defaults: bool = False,
modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None,
):
"""
Args:
parser (ArgumentParser): the parser
input_args (List[str]): strings to parse, defaults to sys.argv
parse_known (bool): only parse known arguments, similar to
`ArgumentParser.parse_known_args`
suppress_defaults (bool): parse while ignoring all default values
modify_parser (Optional[Callable[[ArgumentParser], None]]):
function to modify the parser, e.g., to set default values
"""
if suppress_defaults:
# Parse args without any default values. This requires us to parse
# twice, once to identify all the necessary task/model args, and a second
# time with all defaults set to None.
args = parse_args_and_arch(
parser,
input_args=input_args,
parse_known=parse_known,
suppress_defaults=False,
)
suppressed_parser = argparse.ArgumentParser(add_help=False, parents=[parser])
suppressed_parser.set_defaults(**{k: None for k, v in vars(args).items()})
args = suppressed_parser.parse_args(input_args)
return argparse.Namespace(
**{k: v for k, v in vars(args).items() if v is not None}
)
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY, MODEL_REGISTRY
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args(input_args)
utils.import_user_module(usr_args)
if modify_parser is not None:
modify_parser(parser)
# The parser doesn't know about model/criterion/optimizer-specific args, so
# we parse twice. First we parse the model/criterion/optimizer, then we
# parse a second time after adding the *-specific arguments.
# If input_args is given, we will parse those args instead of sys.argv.
args, _ = parser.parse_known_args(input_args)
# Add model-specific args to parser.
if hasattr(args, "arch"):
model_specific_group = parser.add_argument_group(
"Model-specific configuration",
# Only include attributes which are explicitly given as command-line
# arguments or which have default values.
argument_default=argparse.SUPPRESS,
)
if args.arch in ARCH_MODEL_REGISTRY:
ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)
elif args.arch in MODEL_REGISTRY:
MODEL_REGISTRY[args.arch].add_args(model_specific_group)
else:
raise RuntimeError()
if hasattr(args, "task"):
from fairseq.tasks import TASK_REGISTRY
TASK_REGISTRY[args.task].add_args(parser)
if getattr(args, "use_bmuf", False):
# hack to support extra args for block distributed data parallelism
from fairseq.optim.bmuf import FairseqBMUF
FairseqBMUF.add_args(parser)
# Add *-specific args to parser.
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
choice = getattr(args, registry_name, None)
if choice is not None:
cls = REGISTRY["registry"][choice]
if hasattr(cls, "add_args"):
cls.add_args(parser)
elif hasattr(cls, "__dataclass"):
gen_parser_from_dataclass(parser, cls.__dataclass())
# Modify the parser a second time, since defaults may have been reset
if modify_parser is not None:
modify_parser(parser)
# Parse a second time.
if parse_known:
args, extra = parser.parse_known_args(input_args)
else:
args = parser.parse_args(input_args)
extra = None
# Post-process args.
if (
hasattr(args, "batch_size_valid") and args.batch_size_valid is None
) or not hasattr(args, "batch_size_valid"):
args.batch_size_valid = args.batch_size
if hasattr(args, "max_tokens_valid") and args.max_tokens_valid is None:
args.max_tokens_valid = args.max_tokens
if getattr(args, "memory_efficient_fp16", False):
args.fp16 = True
if getattr(args, "memory_efficient_bf16", False):
args.bf16 = True
args.tpu = getattr(args, "tpu", False)
args.bf16 = getattr(args, "bf16", False)
if args.bf16:
args.tpu = True
if args.tpu and args.fp16:
raise ValueError("Cannot combine --fp16 and --tpu, use --bf16 on TPUs")
if getattr(args, "seed", None) is None:
args.seed = 1 # default seed for training
args.no_seed_provided = True
else:
args.no_seed_provided = False
# Apply architecture configuration.
if hasattr(args, "arch") and args.arch in ARCH_CONFIG_REGISTRY:
ARCH_CONFIG_REGISTRY[args.arch](args)
if parse_known:
return args, extra
else:
return args
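# Hedged illustration (not part of the original module): a minimal sketch of how an
# entry point typically drives the two-pass parsing above; the task name, flags and
# helper composition are assumed examples only.
def _example_parse_cli(argv=None):
    parser = get_parser("Example trainer", default_task="translation")
    add_dataset_args(parser, train=True)
    add_model_args(parser)
    # First pass resolves --task/--arch, second pass picks up their specific flags.
    return parse_args_and_arch(parser, input_args=argv)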
def get_parser(desc, default_task="translation"):
# Before creating the true parser, we need to import optional user module
# in order to eagerly import custom tasks, optimizers, architectures, etc.
usr_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
usr_parser.add_argument("--user-dir", default=None)
usr_args, _ = usr_parser.parse_known_args()
utils.import_user_module(usr_args)
parser = argparse.ArgumentParser(allow_abbrev=False)
gen_parser_from_dataclass(parser, CommonConfig())
from fairseq.registry import REGISTRIES
for registry_name, REGISTRY in REGISTRIES.items():
parser.add_argument(
"--" + registry_name.replace("_", "-"),
default=REGISTRY["default"],
choices=REGISTRY["registry"].keys(),
)
# Task definitions can be found under fairseq/tasks/
from fairseq.tasks import TASK_REGISTRY
parser.add_argument(
"--task",
metavar="TASK",
default=default_task,
choices=TASK_REGISTRY.keys(),
help="task",
)
# fmt: on
return parser
def add_preprocess_args(parser):
group = parser.add_argument_group("Preprocessing")
# fmt: off
group.add_argument("-s", "--source-lang", default=None, metavar="SRC",
help="source language")
group.add_argument("-t", "--target-lang", default=None, metavar="TARGET",
help="target language")
group.add_argument("--trainpref", metavar="FP", default=None,
help="train file prefix (also used to build dictionaries)")
group.add_argument("--validpref", metavar="FP", default=None,
help="comma separated, valid file prefixes "
"(words missing from train set are replaced with <unk>)")
group.add_argument("--testpref", metavar="FP", default=None,
help="comma separated, test file prefixes "
"(words missing from train set are replaced with <unk>)")
group.add_argument("--align-suffix", metavar="FP", default=None,
help="alignment file suffix")
group.add_argument("--destdir", metavar="DIR", default="data-bin",
help="destination dir")
group.add_argument("--thresholdtgt", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--thresholdsrc", metavar="N", default=0, type=int,
help="map words appearing less than threshold times to unknown")
group.add_argument("--tgtdict", metavar="FP",
help="reuse given target dictionary")
group.add_argument("--srcdict", metavar="FP",
help="reuse given source dictionary")
group.add_argument("--nwordstgt", metavar="N", default=-1, type=int,
help="number of target words to retain")
group.add_argument("--nwordssrc", metavar="N", default=-1, type=int,
help="number of source words to retain")
group.add_argument("--alignfile", metavar="ALIGN", default=None,
help="an alignment file (optional)")
parser.add_argument('--dataset-impl', metavar='FORMAT', default='mmap',
choices=get_available_dataset_impl(),
help='output dataset implementation')
group.add_argument("--joined-dictionary", action="store_true",
help="Generate joined dictionary")
group.add_argument("--only-source", action="store_true",
help="Only process the source language")
group.add_argument("--padding-factor", metavar="N", default=8, type=int,
help="Pad dictionary size to be multiple of N")
group.add_argument("--workers", metavar="N", default=1, type=int,
help="number of parallel workers")
# fmt: on
return parser
def add_dataset_args(parser, train=False, gen=False):
group = parser.add_argument_group("dataset_data_loading")
gen_parser_from_dataclass(group, DatasetConfig())
# fmt: on
return group
def add_distributed_training_args(parser, default_world_size=None):
group = parser.add_argument_group("distributed_training")
if default_world_size is None:
default_world_size = max(1, torch.cuda.device_count())
gen_parser_from_dataclass(
group, DistributedTrainingConfig(distributed_world_size=default_world_size)
)
return group
def add_optimization_args(parser):
group = parser.add_argument_group("optimization")
# fmt: off
gen_parser_from_dataclass(group, OptimizationConfig())
# fmt: on
return group
def add_checkpoint_args(parser):
group = parser.add_argument_group("checkpoint")
# fmt: off
gen_parser_from_dataclass(group, CheckpointConfig())
# fmt: on
return group
def add_common_eval_args(group):
gen_parser_from_dataclass(group, CommonEvalConfig())
def add_eval_lm_args(parser):
group = parser.add_argument_group("LM Evaluation")
add_common_eval_args(group)
gen_parser_from_dataclass(group, EvalLMConfig())
def add_generation_args(parser):
group = parser.add_argument_group("Generation")
add_common_eval_args(group)
gen_parser_from_dataclass(group, GenerationConfig())
return group
def add_interactive_args(parser):
group = parser.add_argument_group("Interactive")
gen_parser_from_dataclass(group, InteractiveConfig())
def add_model_args(parser):
group = parser.add_argument_group("Model configuration")
# fmt: off
# Model definitions can be found under fairseq/models/
#
# The model architecture can be specified in several ways.
# In increasing order of priority:
# 1) model defaults (lowest priority)
# 2) --arch argument
# 3) --encoder/decoder-* arguments (highest priority)
from fairseq.models import ARCH_MODEL_REGISTRY
group.add_argument('--arch', '-a', metavar='ARCH',
choices=ARCH_MODEL_REGISTRY.keys(),
help='model architecture')
# fmt: on
return group
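# Hedged illustration (not part of the original module): given the priority order
# documented above, a hypothetical invocation such as
#   fairseq-train ... --arch transformer_iwslt_de_en --encoder-embed-dim 256
# starts from the defaults registered for the chosen --arch and then lets the
# explicit --encoder-embed-dim flag override them.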
|
COCO-LM/fairseq/fairseq/options.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/options.py",
"repo_id": "COCO-LM",
"token_count": 5496
}
| 195 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import warnings
from argparse import Namespace
from typing import Any, Callable, Dict, List
import torch
from fairseq import metrics, search, tokenizer, utils
from fairseq.data import Dictionary, FairseqDataset, data_utils, encoders, iterators
from fairseq.dataclass import FairseqDataclass
from fairseq.dataclass.utils import gen_parser_from_dataclass
from omegaconf import DictConfig
logger = logging.getLogger(__name__)
class StatefulContainer(object):
_state: Dict[str, Any] = dict()
_factories: Dict[str, Callable[[], Any]] = dict()
def add_factory(self, name, factory: Callable[[], Any]):
self._factories[name] = factory
def merge_state_dict(self, state_dict: Dict[str, Any]):
self._state.update(state_dict)
@property
def state_dict(self) -> Dict[str, Any]:
return self._state
def __getattr__(self, name):
if name not in self._state and name in self._factories:
self._state[name] = self._factories[name]()
if name in self._state:
return self._state[name]
raise AttributeError(f"Task state has no factory for attribute {name}")
class FairseqTask(object):
"""
Tasks store dictionaries and provide helpers for loading/iterating over
Datasets, initializing the Model/Criterion and calculating the loss.
Tasks have limited statefulness. In particular, state that needs to be
saved to/loaded from checkpoints needs to be stored in the `self.state`
:class:`StatefulContainer` object. For example::
self.state.add_factory("dictionary", self.load_dictionary)
print(self.state.dictionary) # calls self.load_dictionary()
This is necessary so that when loading checkpoints, we can properly
recreate the task state after initializing the task instance.
"""
@classmethod
def add_args(cls, parser):
"""Add task-specific arguments to the parser."""
dc = getattr(cls, "__dataclass", None)
if dc is not None:
gen_parser_from_dataclass(parser, dc())
@staticmethod
def logging_outputs_can_be_summed(criterion) -> bool:
"""
Whether the logging outputs returned by `train_step` and `valid_step` can
be summed across workers prior to calling `aggregate_logging_outputs`.
        Setting this to True will improve distributed training speed.
"""
return criterion.logging_outputs_can_be_summed()
cfg: FairseqDataclass
datasets: Dict[str, FairseqDataset]
dataset_to_epoch_iter: Dict[FairseqDataset, Any]
state: StatefulContainer = None
def __init__(self, cfg: FairseqDataclass, **kwargs):
self.cfg = cfg
self.datasets = dict()
self.dataset_to_epoch_iter = dict()
self.state = StatefulContainer()
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
return Dictionary.load(filename)
@classmethod
def build_dictionary(
cls, filenames, workers=1, threshold=-1, nwords=-1, padding_factor=8
):
"""Build the dictionary
Args:
filenames (list): list of filenames
workers (int): number of concurrent workers
threshold (int): defines the minimum word count
nwords (int): defines the total number of words in the final dictionary,
including special symbols
padding_factor (int): can be used to pad the dictionary size to be a
multiple of 8, which is important on some hardware (e.g., Nvidia
Tensor Cores).
"""
d = Dictionary()
for filename in filenames:
Dictionary.add_file_to_dictionary(
filename, d, tokenizer.tokenize_line, workers
)
d.finalize(threshold=threshold, nwords=nwords, padding_factor=padding_factor)
return d
@classmethod
def setup_task(cls, cfg: DictConfig, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
cfg (omegaconf.DictConfig): parsed command-line arguments
"""
return cls(cfg, **kwargs)
def has_sharded_data(self, split):
return os.pathsep in getattr(self.cfg, "data", "")
def load_dataset(
self,
split: str,
combine: bool = False,
task_cfg: FairseqDataclass = None,
**kwargs
):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
combine (bool): combines a split segmented into pieces into one dataset
task_cfg (FairseqDataclass): optional task configuration stored in the checkpoint that can be used
to load datasets
"""
raise NotImplementedError
def dataset(self, split):
"""
Return a loaded dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
Returns:
a :class:`~fairseq.data.FairseqDataset` corresponding to *split*
"""
from fairseq.data import FairseqDataset
if split not in self.datasets:
raise KeyError("Dataset not loaded: " + split)
if not isinstance(self.datasets[split], FairseqDataset):
raise TypeError("Datasets are expected to be of type FairseqDataset")
return self.datasets[split]
def filter_indices_by_size(
self, indices, dataset, max_positions=None, ignore_invalid_inputs=False
):
"""
Filter examples that are too large
Args:
indices (np.array): original array of sample indices
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
Returns:
np.array: array of filtered sample indices
"""
indices, ignored = dataset.filter_indices_by_size(indices, max_positions)
if len(ignored) > 0:
if not ignore_invalid_inputs:
raise Exception(
(
"Size of sample #{} is invalid (={}) since max_positions={}, "
"skip this example with --skip-invalid-size-inputs-valid-test"
).format(ignored[0], dataset.size(ignored[0]), max_positions)
)
logger.warning(
(
"{:,} samples have invalid sizes and will be skipped, "
"max_positions={}, first few sample ids={}"
).format(len(ignored), max_positions, ignored[:10])
)
return indices
def can_reuse_epoch_itr(self, dataset):
# We can reuse the epoch iterator across epochs as long as the dataset
# hasn't disabled it. We default to ``False`` here, although in practice
# this will be ``True`` for most datasets that inherit from
# ``FairseqDataset`` due to the base implementation there.
return getattr(dataset, "can_reuse_epoch_itr_across_epochs", False)
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
data_buffer_size=0,
disable_iterator_cache=False,
):
"""
Get an iterator that yields batches of data from the given dataset.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
required_batch_size_multiple (int, optional): require batch size to
be a multiple of N (default: 1).
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
(default: 1).
data_buffer_size (int, optional): number of batches to
preload (default: 0).
disable_iterator_cache (bool, optional): don't cache the
EpochBatchIterator (ignores `FairseqTask::can_reuse_epoch_itr`)
(default: False).
Returns:
~fairseq.iterators.EpochBatchIterator: a batched iterator over the
given dataset split
"""
can_reuse_epoch_itr = not disable_iterator_cache and self.can_reuse_epoch_itr(
dataset
)
if can_reuse_epoch_itr and dataset in self.dataset_to_epoch_iter:
logger.debug("reusing EpochBatchIterator for epoch {}".format(epoch))
return self.dataset_to_epoch_iter[dataset]
assert isinstance(dataset, FairseqDataset)
# initialize the dataset with the correct starting epoch
dataset.set_epoch(epoch)
# get indices ordered by example size
with data_utils.numpy_seed(seed):
indices = dataset.ordered_indices()
# filter examples that are too large
if max_positions is not None:
indices = self.filter_indices_by_size(
indices, dataset, max_positions, ignore_invalid_inputs
)
# create mini-batches with given size constraints
batch_sampler = dataset.batch_by_size(
indices,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
# return a reusable, sharded iterator
epoch_iter = iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
buffer_size=data_buffer_size,
)
if can_reuse_epoch_itr:
self.dataset_to_epoch_iter[dataset] = epoch_iter
return epoch_iter
def build_model(self, cfg: FairseqDataclass):
"""
Build the :class:`~fairseq.models.BaseFairseqModel` instance for this
task.
Args:
cfg (FairseqDataclass): configuration object
Returns:
a :class:`~fairseq.models.BaseFairseqModel` instance
"""
from fairseq import models, quantization_utils
model = models.build_model(cfg, self)
model = quantization_utils.quantize_model_scalar(model, cfg)
return model
def build_criterion(self, cfg: DictConfig):
"""
Build the :class:`~fairseq.criterions.FairseqCriterion` instance for
this task.
Args:
            cfg (omegaconf.DictConfig): configuration object
Returns:
a :class:`~fairseq.criterions.FairseqCriterion` instance
"""
from fairseq import criterions
return criterions.build_criterion(cfg, self)
def build_generator(
self, models, args, seq_gen_cls=None, extra_gen_cls_kwargs=None
):
if getattr(args, "score_reference", False):
from fairseq.sequence_scorer import SequenceScorer
return SequenceScorer(
self.target_dictionary,
compute_alignment=getattr(args, "print_alignment", False),
)
from fairseq.sequence_generator import (
SequenceGenerator,
SequenceGeneratorWithAlignment,
)
try:
from fairseq.fb_sequence_generator import FBSequenceGenerator
except ModuleNotFoundError:
pass
# Choose search strategy. Defaults to Beam Search.
sampling = getattr(args, "sampling", False)
sampling_topk = getattr(args, "sampling_topk", -1)
sampling_topp = getattr(args, "sampling_topp", -1.0)
diverse_beam_groups = getattr(args, "diverse_beam_groups", -1)
diverse_beam_strength = getattr(args, "diverse_beam_strength", 0.5)
match_source_len = getattr(args, "match_source_len", False)
diversity_rate = getattr(args, "diversity_rate", -1)
constrained = getattr(args, "constraints", False)
prefix_allowed_tokens_fn = getattr(args, "prefix_allowed_tokens_fn", None)
if (
sum(
int(cond)
for cond in [
sampling,
diverse_beam_groups > 0,
match_source_len,
diversity_rate > 0,
]
)
> 1
):
raise ValueError("Provided Search parameters are mutually exclusive.")
assert sampling_topk < 0 or sampling, "--sampling-topk requires --sampling"
assert sampling_topp < 0 or sampling, "--sampling-topp requires --sampling"
if sampling:
search_strategy = search.Sampling(
self.target_dictionary, sampling_topk, sampling_topp
)
elif diverse_beam_groups > 0:
search_strategy = search.DiverseBeamSearch(
self.target_dictionary, diverse_beam_groups, diverse_beam_strength
)
elif match_source_len:
# this is useful for tagging applications where the output
# length should match the input length, so we hardcode the
# length constraints for simplicity
search_strategy = search.LengthConstrainedBeamSearch(
self.target_dictionary,
min_len_a=1,
min_len_b=0,
max_len_a=1,
max_len_b=0,
)
elif diversity_rate > -1:
search_strategy = search.DiverseSiblingsSearch(
self.target_dictionary, diversity_rate
)
elif constrained:
search_strategy = search.LexicallyConstrainedBeamSearch(
self.target_dictionary, args.constraints
)
elif prefix_allowed_tokens_fn:
search_strategy = search.PrefixConstrainedBeamSearch(
self.target_dictionary, prefix_allowed_tokens_fn
)
else:
search_strategy = search.BeamSearch(self.target_dictionary)
extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}
if seq_gen_cls is None:
if getattr(args, "print_alignment", False):
seq_gen_cls = SequenceGeneratorWithAlignment
extra_gen_cls_kwargs["print_alignment"] = args.print_alignment
elif getattr(args, "fb_seq_gen", False):
seq_gen_cls = FBSequenceGenerator
else:
seq_gen_cls = SequenceGenerator
return seq_gen_cls(
models,
self.target_dictionary,
beam_size=getattr(args, "beam", 5),
max_len_a=getattr(args, "max_len_a", 0),
max_len_b=getattr(args, "max_len_b", 200),
min_len=getattr(args, "min_len", 1),
normalize_scores=(not getattr(args, "unnormalized", False)),
len_penalty=getattr(args, "lenpen", 1),
unk_penalty=getattr(args, "unkpen", 0),
temperature=getattr(args, "temperature", 1.0),
match_source_len=getattr(args, "match_source_len", False),
no_repeat_ngram_size=getattr(args, "no_repeat_ngram_size", 0),
search_strategy=search_strategy,
**extra_gen_cls_kwargs,
)
def train_step(
self, sample, model, criterion, optimizer, update_num, ignore_grad=False
):
"""
Do forward and backward, and return the loss as computed by *criterion*
for the given *model* and *sample*.
Args:
sample (dict): the mini-batch. The format is defined by the
:class:`~fairseq.data.FairseqDataset`.
model (~fairseq.models.BaseFairseqModel): the model
criterion (~fairseq.criterions.FairseqCriterion): the criterion
optimizer (~fairseq.optim.FairseqOptimizer): the optimizer
update_num (int): the current update
ignore_grad (bool): multiply loss by 0 if this is set to True
Returns:
tuple:
- the loss
- the sample size, which is used as the denominator for the
gradient
- logging outputs to display while training
"""
model.train()
model.set_num_updates(update_num)
with torch.autograd.profiler.record_function("forward"):
loss, sample_size, logging_output = criterion(model, sample)
if ignore_grad:
loss *= 0
with torch.autograd.profiler.record_function("backward"):
optimizer.backward(loss)
return loss, sample_size, logging_output
def valid_step(self, sample, model, criterion):
model.eval()
with torch.no_grad():
loss, sample_size, logging_output = criterion(model, sample)
return loss, sample_size, logging_output
def optimizer_step(self, optimizer, model, update_num):
optimizer.step()
def build_dataset_for_inference(
self, src_tokens: List[torch.Tensor], src_lengths: List[int], **kwargs
) -> torch.utils.data.Dataset:
raise NotImplementedError
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
return generator.generate(
models, sample, prefix_tokens=prefix_tokens, constraints=constraints
)
def begin_epoch(self, epoch, model):
"""Hook function called before the start of each epoch."""
pass
def begin_valid_epoch(self, epoch, model):
"""Hook function called before the start of each validation epoch."""
pass
def aggregate_logging_outputs(self, logging_outputs, criterion):
"""[deprecated] Aggregate logging outputs from data parallel training."""
utils.deprecation_warning(
"The aggregate_logging_outputs API is deprecated. "
"Please use the reduce_metrics API instead."
)
with metrics.aggregate() as agg:
self.reduce_metrics(logging_outputs, criterion)
return agg.get_smoothed_values()
def reduce_metrics(self, logging_outputs, criterion):
"""Aggregate logging outputs from data parallel training."""
# backward compatibility for tasks that override aggregate_logging_outputs
base_func = FairseqTask.aggregate_logging_outputs
self_func = getattr(self, "aggregate_logging_outputs").__func__
if self_func is not base_func:
utils.deprecation_warning(
"Tasks should implement the reduce_metrics API. "
"Falling back to deprecated aggregate_logging_outputs API."
)
agg_logging_outputs = self.aggregate_logging_outputs(
logging_outputs, criterion
)
for k, v in agg_logging_outputs.items():
metrics.log_scalar(k, v)
return
if not any("ntokens" in log for log in logging_outputs):
warnings.warn(
"ntokens not found in Criterion logging outputs, cannot log wpb or wps"
)
else:
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
metrics.log_scalar("wpb", ntokens, priority=180, round=1)
metrics.log_speed("wps", ntokens, priority=90, round=1)
if not any("nsentences" in log for log in logging_outputs):
warnings.warn(
"nsentences not found in Criterion logging outputs, cannot log bsz"
)
else:
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
metrics.log_scalar("bsz", nsentences, priority=190, round=1)
criterion.__class__.reduce_metrics(logging_outputs)
criterion.context_metrics(logging_outputs)
def state_dict(self):
if self.state is not None:
return self.state.state_dict
return {}
def load_state_dict(self, state_dict: Dict[str, Any]):
if self.state is not None:
self.state.merge_state_dict(state_dict)
def max_positions(self):
"""Return the max input length allowed by the task."""
return None
@property
def source_dictionary(self):
"""Return the source :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
@property
def target_dictionary(self):
"""Return the target :class:`~fairseq.data.Dictionary` (if applicable
for this task)."""
raise NotImplementedError
def build_tokenizer(self, args):
"""Build the pre-tokenizer for this task."""
return encoders.build_tokenizer(args)
def build_bpe(self, args):
"""Build the tokenizer for this task."""
return encoders.build_bpe(args)
def get_interactive_tokens_and_lengths(self, lines, encode_fn):
tokens = [
self.source_dictionary.encode_line(
encode_fn(src_str), add_if_not_exist=False
).long()
for src_str in lines
]
lengths = [t.numel() for t in tokens]
return tokens, lengths
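# Hedged end-to-end sketch (not part of the original module): the lifecycle a trainer
# typically drives on a concrete FairseqTask subclass. The cfg objects and max_tokens
# value are placeholders; real configs come from the fairseq CLI / hydra.
def _example_task_lifecycle(task_cfg, model_cfg, criterion_cfg):
    from fairseq import tasks  # imported lazily here to avoid a circular import
    task = tasks.setup_task(task_cfg)
    task.load_dataset("train")
    model = task.build_model(model_cfg)
    criterion = task.build_criterion(criterion_cfg)
    epoch_itr = task.get_batch_iterator(
        dataset=task.dataset("train"),
        max_tokens=4096,
    )
    batch_iter = epoch_itr.next_epoch_itr(shuffle=True)
    return task, model, criterion, batch_iter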
class LegacyFairseqTask(FairseqTask):
def __init__(self, args: Namespace):
self.args = args
self.datasets = {}
self.dataset_to_epoch_iter = {}
@classmethod
def setup_task(cls, args: Namespace, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
return cls(args, **kwargs)
def has_sharded_data(self, split):
return os.pathsep in getattr(self.args, "data", "")
def build_model(self, args: Namespace):
"""
Build the :class:`~fairseq.models.BaseFairseqModel` instance for this
task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.models.BaseFairseqModel` instance
"""
from fairseq import models, quantization_utils
model = models.build_model(args, self)
model = quantization_utils.quantize_model_scalar(model, args)
return model
def build_criterion(self, args: Namespace):
"""
Build the :class:`~fairseq.criterions.FairseqCriterion` instance for
this task.
Args:
args (argparse.Namespace): parsed command-line arguments
Returns:
a :class:`~fairseq.criterions.FairseqCriterion` instance
"""
from fairseq import criterions
return criterions.build_criterion(args, self)
|
COCO-LM/fairseq/fairseq/tasks/fairseq_task.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/tasks/fairseq_task.py",
"repo_id": "COCO-LM",
"token_count": 10809
}
| 196 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import logging
import time
import torch
from fairseq.data import (
FairseqDataset,
LanguagePairDataset,
ListDataset,
data_utils,
iterators,
)
from fairseq.data.multilingual.multilingual_data_manager import (
MultilingualDatasetManager,
)
from fairseq.data.multilingual.sampling_method import SamplingMethod
from fairseq.tasks import LegacyFairseqTask, register_task
from fairseq.utils import FileContentsAction
###
def get_time_gap(s, e):
return (
datetime.datetime.fromtimestamp(e) - datetime.datetime.fromtimestamp(s)
).__str__()
###
logger = logging.getLogger(__name__)
@register_task("translation_multi_simple_epoch")
class TranslationMultiSimpleEpochTask(LegacyFairseqTask):
"""
Translate from one (source) language to another (target) language.
Args:
langs (List[str]): a list of languages that are being supported
dicts (Dict[str, fairseq.data.Dictionary]): mapping from supported languages to their dictionaries
training (bool): whether the task should be configured for training or not
.. note::
The translation task is compatible with :mod:`fairseq-train`,
:mod:`fairseq-generate` and :mod:`fairseq-interactive`.
The translation task provides the following additional command-line
arguments:
.. argparse::
:ref: fairseq.tasks.translation_parser
:prog:
"""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
# fmt: off
parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
help='inference source language')
parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
help='inference target language')
parser.add_argument('--lang-pairs', default=None, metavar='PAIRS',
help='comma-separated list of language pairs (in training order): en-de,en-fr,de-fr',
action=FileContentsAction)
parser.add_argument('--keep-inference-langtok', action='store_true',
help='keep language tokens in inference output (e.g. for analysis or debugging)')
SamplingMethod.add_arguments(parser)
MultilingualDatasetManager.add_args(parser)
# fmt: on
def __init__(self, args, langs, dicts, training):
super().__init__(args)
self.langs = langs
self.dicts = dicts
self.training = training
if training:
self.lang_pairs = args.lang_pairs
else:
self.lang_pairs = ["{}-{}".format(args.source_lang, args.target_lang)]
# eval_lang_pairs for multilingual translation is usually all of the
        # lang_pairs. However, for other multitask settings, or when we want to
        # optimize for certain languages, we may want to use a different subset. Thus
# the eval_lang_pairs class variable is provided for classes that extend
# this class.
self.eval_lang_pairs = self.lang_pairs
# model_lang_pairs will be used to build encoder-decoder model pairs in
        # models.build_model(). This allows a multitask sub-class to build
        # models other than those for the input lang_pairs.
self.model_lang_pairs = self.lang_pairs
self.source_langs = [d.split("-")[0] for d in self.lang_pairs]
self.target_langs = [d.split("-")[1] for d in self.lang_pairs]
self.check_dicts(self.dicts, self.source_langs, self.target_langs)
self.sampling_method = SamplingMethod.build_sampler(args, self)
self.data_manager = MultilingualDatasetManager.setup_data_manager(
args, self.lang_pairs, langs, dicts, self.sampling_method
)
def check_dicts(self, dicts, source_langs, target_langs):
if self.args.source_dict is not None or self.args.target_dict is not None:
# no need to check whether the source side and target side are sharing dictionaries
return
src_dict = dicts[source_langs[0]]
tgt_dict = dicts[target_langs[0]]
for src_lang in source_langs:
assert (
src_dict == dicts[src_lang]
), "Diffrent dictionary are specified for different source languages; "
"TranslationMultiSimpleEpochTask only supports one shared dictionary across all source languages"
for tgt_lang in target_langs:
assert (
tgt_dict == dicts[tgt_lang]
), "Diffrent dictionary are specified for different target languages; "
"TranslationMultiSimpleEpochTask only supports one shared dictionary across all target languages"
@classmethod
def setup_task(cls, args, **kwargs):
langs, dicts, training = MultilingualDatasetManager.prepare(
cls.load_dictionary, args, **kwargs
)
return cls(args, langs, dicts, training)
def has_sharded_data(self, split):
return self.data_manager.has_sharded_data(split)
def load_dataset(self, split, epoch=1, combine=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if split in self.datasets:
dataset = self.datasets[split]
if self.has_sharded_data(split):
if self.args.virtual_epoch_size is not None:
if dataset.load_next_shard:
shard_epoch = dataset.shard_epoch
else:
                        # no need to load the next shard, so skip loading
                        # also, this avoids always loading from the beginning of the data
return
else:
shard_epoch = epoch
else:
# estimate the shard epoch from virtual data size and virtual epoch size
shard_epoch = self.data_manager.estimate_global_pass_epoch(epoch)
logger.info(f"loading data for {split} epoch={epoch}/{shard_epoch}")
logger.info(f"mem usage: {data_utils.get_mem_usage()}")
if split in self.datasets:
del self.datasets[split]
logger.info("old dataset deleted manually")
logger.info(f"mem usage: {data_utils.get_mem_usage()}")
self.datasets[split] = self.data_manager.load_dataset(
split,
self.training,
epoch=epoch,
combine=combine,
shard_epoch=shard_epoch,
**kwargs,
)
def build_dataset_for_inference(self, src_tokens, src_lengths, constraints=None):
if constraints is not None:
raise NotImplementedError(
"Constrained decoding with the multilingual_translation task is not supported"
)
src_data = ListDataset(src_tokens, src_lengths)
dataset = LanguagePairDataset(src_data, src_lengths, self.source_dictionary)
src_langtok_spec, tgt_langtok_spec = self.args.langtoks["main"]
if self.args.lang_tok_replacing_bos_eos:
dataset = self.data_manager.alter_dataset_langtok(
dataset,
src_eos=self.source_dictionary.eos(),
src_lang=self.args.source_lang,
tgt_eos=self.target_dictionary.eos(),
tgt_lang=self.args.target_lang,
src_langtok_spec=src_langtok_spec,
tgt_langtok_spec=tgt_langtok_spec,
)
else:
dataset.src = self.data_manager.src_dataset_tranform_func(
self.args.source_lang,
self.args.target_lang,
dataset=dataset.src,
spec=src_langtok_spec,
)
return dataset
def build_generator(
self,
models,
args,
seq_gen_cls=None,
extra_gen_cls_kwargs=None,
):
if not getattr(args, "keep_inference_langtok", False):
_, tgt_langtok_spec = self.args.langtoks["main"]
if tgt_langtok_spec:
tgt_lang_tok = self.data_manager.get_decoder_langtok(
self.args.target_lang, tgt_langtok_spec
)
extra_gen_cls_kwargs = extra_gen_cls_kwargs or {}
extra_gen_cls_kwargs["symbols_to_strip_from_output"] = {tgt_lang_tok}
return super().build_generator(
models, args, seq_gen_cls=None, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
def build_model(self, args):
return super().build_model(args)
def valid_step(self, sample, model, criterion):
loss, sample_size, logging_output = super().valid_step(sample, model, criterion)
return loss, sample_size, logging_output
def inference_step(
self, generator, models, sample, prefix_tokens=None, constraints=None
):
with torch.no_grad():
_, tgt_langtok_spec = self.args.langtoks["main"]
if not self.args.lang_tok_replacing_bos_eos:
if prefix_tokens is None and tgt_langtok_spec:
tgt_lang_tok = self.data_manager.get_decoder_langtok(
self.args.target_lang, tgt_langtok_spec
)
src_tokens = sample["net_input"]["src_tokens"]
bsz = src_tokens.size(0)
prefix_tokens = (
torch.LongTensor([[tgt_lang_tok]]).expand(bsz, 1).to(src_tokens)
)
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
)
else:
return generator.generate(
models,
sample,
prefix_tokens=prefix_tokens,
bos_token=self.data_manager.get_decoder_langtok(
self.args.target_lang, tgt_langtok_spec
)
if tgt_langtok_spec
else self.target_dictionary.eos(),
)
def reduce_metrics(self, logging_outputs, criterion):
super().reduce_metrics(logging_outputs, criterion)
def max_positions(self):
"""Return the max sentence length allowed by the task."""
return (self.args.max_source_positions, self.args.max_target_positions)
@property
def source_dictionary(self):
return self.data_manager.get_source_dictionary(self.source_langs[0])
@property
def target_dictionary(self):
return self.data_manager.get_target_dictionary(self.target_langs[0])
def create_batch_sampler_func(
self,
max_positions,
ignore_invalid_inputs,
max_tokens,
max_sentences,
required_batch_size_multiple=1,
seed=1,
):
def construct_batch_sampler(dataset, epoch):
splits = [
s for s, _ in self.datasets.items() if self.datasets[s] == dataset
]
split = splits[0] if len(splits) > 0 else None
# NEW implementation
if epoch is not None:
# initialize the dataset with the correct starting epoch
dataset.set_epoch(epoch)
# get indices ordered by example size
start_time = time.time()
logger.info(f"start batch sampler: mem usage: {data_utils.get_mem_usage()}")
with data_utils.numpy_seed(seed):
indices = dataset.ordered_indices()
logger.info(
f"[{split}] @batch_sampler order indices time: {get_time_gap(start_time, time.time())}"
)
logger.info(f"mem usage: {data_utils.get_mem_usage()}")
# filter examples that are too large
if max_positions is not None:
my_time = time.time()
indices = self.filter_indices_by_size(
indices, dataset, max_positions, ignore_invalid_inputs
)
logger.info(
f"[{split}] @batch_sampler filter_by_size time: {get_time_gap(my_time, time.time())}"
)
logger.info(f"mem usage: {data_utils.get_mem_usage()}")
# create mini-batches with given size constraints
my_time = time.time()
batch_sampler = dataset.batch_by_size(
indices,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
logger.info(
f"[{split}] @batch_sampler batch_by_size time: {get_time_gap(my_time, time.time())}"
)
logger.info(
f"[{split}] per epoch batch_sampler set-up time: {get_time_gap(start_time, time.time())}"
)
logger.info(f"mem usage: {data_utils.get_mem_usage()}")
return batch_sampler
return construct_batch_sampler
# we need to override get_batch_iterator because we want to reset the epoch iterator each time
def get_batch_iterator(
self,
dataset,
max_tokens=None,
max_sentences=None,
max_positions=None,
ignore_invalid_inputs=False,
required_batch_size_multiple=1,
seed=1,
num_shards=1,
shard_id=0,
num_workers=0,
epoch=1,
data_buffer_size=0,
disable_iterator_cache=False,
):
"""
Get an iterator that yields batches of data from the given dataset.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to batch
max_tokens (int, optional): max number of tokens in each batch
(default: None).
max_sentences (int, optional): max number of sentences in each
batch (default: None).
max_positions (optional): max sentence length supported by the
model (default: None).
ignore_invalid_inputs (bool, optional): don't raise Exception for
sentences that are too long (default: False).
required_batch_size_multiple (int, optional): require batch size to
be a multiple of N (default: 1).
seed (int, optional): seed for random number generator for
reproducibility (default: 1).
num_shards (int, optional): shard the data iterator into N
shards (default: 1).
shard_id (int, optional): which shard of the data iterator to
return (default: 0).
num_workers (int, optional): how many subprocesses to use for data
loading. 0 means the data will be loaded in the main process
(default: 0).
epoch (int, optional): the epoch to start the iterator from
                (default: 1).
data_buffer_size (int, optional): number of batches to
preload (default: 0).
disable_iterator_cache (bool, optional): don't cache the
EpochBatchIterator (ignores `FairseqTask::can_reuse_epoch_itr`)
(default: False).
Returns:
~fairseq.iterators.EpochBatchIterator: a batched iterator over the
given dataset split
"""
# initialize the dataset with the correct starting epoch
assert isinstance(dataset, FairseqDataset)
if dataset in self.dataset_to_epoch_iter:
return self.dataset_to_epoch_iter[dataset]
if self.args.sampling_method == "RoundRobin":
batch_iter = super().get_batch_iterator(
dataset,
max_tokens=max_tokens,
max_sentences=max_sentences,
max_positions=max_positions,
ignore_invalid_inputs=ignore_invalid_inputs,
required_batch_size_multiple=required_batch_size_multiple,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
data_buffer_size=data_buffer_size,
disable_iterator_cache=disable_iterator_cache,
)
self.dataset_to_epoch_iter[dataset] = batch_iter
return batch_iter
construct_batch_sampler = self.create_batch_sampler_func(
max_positions,
ignore_invalid_inputs,
max_tokens,
max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
seed=seed,
)
epoch_iter = iterators.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=construct_batch_sampler,
seed=seed,
num_shards=num_shards,
shard_id=shard_id,
num_workers=num_workers,
epoch=epoch,
)
return epoch_iter
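# Hedged usage note (not part of the original file): this task is normally driven from
# the fairseq CLI, e.g. (placeholder data path and language pairs):
#   fairseq-train /path/to/data-bin \
#       --task translation_multi_simple_epoch \
#       --lang-pairs en-de,en-fr \
#       --arch transformer --max-tokens 4096
# Sampling and language-token flags are contributed by SamplingMethod and
# MultilingualDatasetManager via add_args() above.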
|
COCO-LM/fairseq/fairseq/tasks/translation_multi_simple_epoch.py/0
|
{
"file_path": "COCO-LM/fairseq/fairseq/tasks/translation_multi_simple_epoch.py",
"repo_id": "COCO-LM",
"token_count": 8266
}
| 197 |
#include "ATen/ATen.h"
#include "ATen/cuda/CUDAContext.h"
#include "ATen/cuda/detail/IndexUtils.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <cmath>
#include "ATen/TensorUtils.h"
#include "ATen/AccumulateType.h"
#include <THC/THCGeneral.h>
#include "type_shim.h"
template <typename T, typename GRAD_T>
__global__ void adam_cuda_kernel(
T* __restrict__ p,
T* __restrict__ m,
T* __restrict__ v,
const GRAD_T * __restrict__ g,
const float b1,
const float b2,
const float eps,
const float grad_scale,
const float step_size,
const size_t tsize,
const float decay_size)
{
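        // Per-element update applied by the loop below (step_size and decay_size are
        // precomputed on the host; decay_size = 1 - step_size * decay):
        //   p[j]  = p[j] * decay_size
        //   g_s   = g[j] / grad_scale
        //   m[j]  = b1 * m[j] + (1 - b1) * g_s
        //   v[j]  = b2 * v[j] + (1 - b2) * g_s * g_s
        //   p[j] -= step_size * m[j] / (sqrt(v[j]) + eps)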
//Assuming 2D grids and 2D blocks
const int blockId = gridDim.x * blockIdx.y + blockIdx.x;
const int threadsPerBlock = blockDim.x * blockDim.y;
const int threadIdInBlock = threadIdx.y * blockDim.x + threadIdx.x;
const int i = (blockId * threadsPerBlock + threadIdInBlock);
const int totThreads = gridDim.x*gridDim.y*threadsPerBlock;
for (int j = i; j < tsize; j+=totThreads) {
// weight decay
p[j] = p[j] * decay_size;
T scaled_grad = static_cast<T>(g[j]) / grad_scale;
m[j] = b1*m[j] + (1-b1)*scaled_grad;
v[j] = b2*v[j] + (1-b2)*scaled_grad*scaled_grad;
const float update = m[j] / (sqrtf(v[j]) + eps);
p[j] = p[j] - (step_size*update);
}
}
void fused_adam_cuda(
at::Tensor & p,
at::Tensor & m,
at::Tensor & v,
at::Tensor & g,
float lr,
float beta1,
float beta2,
float eps,
float grad_scale,
int step,
int bias_correction,
float decay)
{
//Get tensor size
int tsize = p.numel();
//Determine #threads and #blocks
const int threadsPerBlock = 512;
const dim3 blocks((tsize+threadsPerBlock-1)/threadsPerBlock);
AT_ASSERTM(at::cuda::detail::canUse32BitIndexMath(p), "parameter tensor is too large to be indexed with int32");
//Constants
float step_size = lr;
if (bias_correction == 1) {
const double bias_correction1 = 1.0 - std::pow(static_cast<double>(beta1), step);
const double bias_correction2 = 1.0 - std::pow(static_cast<double>(beta2), step);
step_size = static_cast<float>(lr * std::sqrt(bias_correction2) / bias_correction1);
}
float decay_size = 1.0;
if (decay != 0.0) {
decay_size = 1.0 - step_size * decay;
}
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
if (g.scalar_type() == at::ScalarType::Half || g.scalar_type() == at::ScalarType::BFloat16) {
AT_ASSERTM(p.scalar_type() == at::ScalarType::Float, "expected parameter to be of float type");
using namespace at; // prevents "toString is undefined" errors
DISPATCH_FLOAT_AND_HALF_AND_BF16(g.scalar_type(), 0, "adam_cuda_kernel",
using accscalar_t = at::acc_type<scalar_t_0, true>;
adam_cuda_kernel<accscalar_t, scalar_t_0><<<blocks,threadsPerBlock, 0, stream>>>(
p.DATA_PTR<accscalar_t>(),
m.DATA_PTR<accscalar_t>(),
v.DATA_PTR<accscalar_t>(),
g.DATA_PTR<scalar_t_0>(),
beta1,
beta2,
eps,
grad_scale,
step_size,
tsize,
decay_size);
);
} else {
using namespace at;
DISPATCH_DOUBLE_AND_FLOAT(g.scalar_type(), 0, "adam_cuda_kernel",
adam_cuda_kernel<scalar_t_0, scalar_t_0><<<blocks,threadsPerBlock, 0, stream>>>(
p.DATA_PTR<scalar_t_0>(),
m.DATA_PTR<scalar_t_0>(),
v.DATA_PTR<scalar_t_0>(),
g.DATA_PTR<scalar_t_0>(),
beta1,
beta2,
eps,
grad_scale,
step_size,
tsize,
decay_size);
);
}
THCudaCheck(cudaGetLastError());
}
|
COCO-LM/fairseq/fused_ops/csrc/adam/adam_kernel.cu/0
|
{
"file_path": "COCO-LM/fairseq/fused_ops/csrc/adam/adam_kernel.cu",
"repo_id": "COCO-LM",
"token_count": 2098
}
| 198 |
import torch
from torch.utils import cpp_extension
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
import site
site.ENABLE_USER_SITE = True
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
if not torch.cuda.is_available():
print('\nWarning: Torch did not find available GPUs on this system.\n',
'If your intention is to cross-compile, this is not an error.\n'
'By default, it will cross-compile for Volta (compute capability 7.0), Turing (compute capability 7.5),\n'
'and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n'
'If you wish to cross-compile for a single specific architecture,\n'
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n')
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
_, bare_metal_major, _ = get_cuda_bare_metal_version(cpp_extension.CUDA_HOME)
if int(bare_metal_major) == 11:
os.environ["TORCH_CUDA_ARCH_LIST"] = "7.0;7.5;8.0"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 0 and TORCH_MINOR < 4:
raise RuntimeError("Requires Pytorch 0.4 or newer.\n" +
"The latest stable release can be obtained from https://pytorch.org/")
cmdclass = {}
ext_modules = []
extras = {}
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
raise RuntimeError("Cuda extensions are being compiled with a version of Cuda that does " +
"not match the version used to compile Pytorch binaries. " +
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda))
version_ge_1_1 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0):
version_ge_1_1 = ['-DVERSION_GE_1_1']
version_ge_1_3 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 2):
version_ge_1_3 = ['-DVERSION_GE_1_3']
version_ge_1_5 = []
if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 4):
version_ge_1_5 = ['-DVERSION_GE_1_5']
version_dependent_macros = version_ge_1_1 + version_ge_1_3 + version_ge_1_5
from torch.utils.cpp_extension import CUDAExtension
from torch.utils.cpp_extension import BuildExtension
cmdclass['build_ext'] = BuildExtension
if torch.utils.cpp_extension.CUDA_HOME is None:
raise RuntimeError("Nvcc was not found. Are you sure your environment has nvcc available? If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.")
check_cuda_torch_binary_vs_bare_metal(torch.utils.cpp_extension.CUDA_HOME)
ext_modules.append(
CUDAExtension(name='fused_xentropy_cuda',
sources=['csrc/xentropy/interface.cpp',
'csrc/xentropy/xentropy_kernel.cu'],
include_dirs=[os.path.join(this_dir, 'csrc')],
extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
'nvcc':['-O3'] + version_dependent_macros}))
ext_modules.append(
CUDAExtension(name='fused_layernorm_cuda',
sources=['csrc/layernorm/interface.cpp',
'csrc/layernorm/layernorm_kernel.cu'],
include_dirs=[os.path.join(this_dir, 'csrc')],
extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
'nvcc':['-maxrregcount=50', '-O3', '--use_fast_math'] + version_dependent_macros}))
ext_modules.append(
CUDAExtension(name='fused_adam_cuda_v2',
sources=['csrc/adam/interface.cpp',
'csrc/adam/adam_kernel.cu'],
include_dirs=[os.path.join(this_dir, 'csrc')],
extra_compile_args={'cxx': ['-O3'] + version_dependent_macros,
'nvcc':['-O3', '--use_fast_math'] + version_dependent_macros}))
generator_flag = []
torch_dir = torch.__path__[0]
if os.path.exists(os.path.join(torch_dir, 'include', 'ATen', 'CUDAGenerator.h')):
generator_flag = ['-DOLD_GENERATOR']
ext_modules.append(
CUDAExtension(name='fused_softmax_dropout_cuda',
sources=['csrc/softmax_dropout/interface.cpp',
'csrc/softmax_dropout/softmax_dropout_kernel.cu'],
include_dirs=[os.path.join(this_dir, 'csrc')],
extra_compile_args={'cxx': ['-O3',] + version_dependent_macros + generator_flag,
'nvcc':['-O3', '--use_fast_math',
'-gencode', 'arch=compute_70,code=sm_70',
'-gencode', 'arch=compute_80,code=sm_80',
'-U__CUDA_NO_HALF_OPERATORS__',
'-U__CUDA_NO_BFLOAT16_OPERATORS__',
'-U__CUDA_NO_HALF_CONVERSIONS__',
'-U__CUDA_NO_BFLOAT16_CONVERSIONS__',
'--expt-relaxed-constexpr',
'--expt-extended-lambda'] + version_dependent_macros + generator_flag}))
setup(
name='fused_ops',
version='0.1',
packages=find_packages(exclude=('build',
'csrc',
'include',
'tests',
'dist',
'docs',
'tests',
'examples',)),
description='Fused ops',
ext_modules=ext_modules,
cmdclass=cmdclass,
extras_require=extras,
)
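# Hedged usage note (not part of the original file): these CUDA extensions are
# typically built from this directory with a CUDA toolkit that matches the installed
# PyTorch, e.g.
#   pip install -e .
# (or `python setup.py install`); exact flags depend on the local environment.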
|
COCO-LM/fairseq/fused_ops/setup.py/0
|
{
"file_path": "COCO-LM/fairseq/fused_ops/setup.py",
"repo_id": "COCO-LM",
"token_count": 3664
}
| 199 |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
"""Reads in a fairseq output file, and verifies that the constraints
(C- lines) are present in the output (the first H- line). Assumes that
constraints are listed prior to the first hypothesis.
"""
constraints = []
found = 0
total = 0
for line in sys.stdin:
if line.startswith("C-"):
constraints.append(line.rstrip().split("\t")[1])
elif line.startswith("H-"):
text = line.split("\t")[2]
for constraint in constraints:
total += 1
if constraint in text:
found += 1
else:
print(f"No {constraint} in {text}", file=sys.stderr)
constraints = []
print(f"Found {found} / {total} = {100 * found / total:.1f}%")
|
COCO-LM/fairseq/scripts/constraints/validate.py/0
|
{
"file_path": "COCO-LM/fairseq/scripts/constraints/validate.py",
"repo_id": "COCO-LM",
"token_count": 367
}
| 200 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import functools
import random
import unittest
from multiprocessing import Manager
import torch
import torch.nn as nn
from fairseq import optim
from fairseq.distributed import utils as distributed_utils
from omegaconf import OmegaConf
class Model(nn.Module):
def __init__(self, input_size, output_size):
super(Model, self).__init__()
self.fc = nn.Linear(input_size, output_size)
def forward(self, input):
output = self.fc(input)
return output
def setup_model_loss_criterion(cfg, args, rank, is_cuda):
"""
setup model, criterion and optimizer based on input args
"""
args.distributed_rank = rank
cfg.distributed_training.distributed_rank = args.distributed_rank
if cfg.distributed_training.distributed_world_size > 1:
distributed_utils.distributed_init(cfg)
torch.manual_seed(1)
model = Model(args.input_size, args.nb_classes)
loss_fn = nn.CrossEntropyLoss()
if is_cuda:
model = model.cuda()
loss_fn = loss_fn.cuda()
optimizer = optim.sgd.SGD(args, model.parameters())
optimizer = optim.FairseqBMUF(
cfg=cfg.bmuf,
optimizer=optimizer
)
return model, loss_fn, optimizer
def train_step(input, target, model, loss_fn, optimizer, **unused):
"""Do forward, backward and parameter update."""
model.train()
output = model(input)
loss = loss_fn(output, target)
optimizer.backward(loss)
optimizer.step()
def single_gpu_training(cfg, args, rank, iterations, shared_results):
is_cuda = torch.cuda.is_available()
if is_cuda:
torch.cuda.set_device(rank)
model, loss_fn, optimizer = setup_model_loss_criterion(cfg, args, rank, is_cuda)
for _ in range(iterations):
input = torch.randn(1, args.input_size)
target = torch.empty(args.batch_size, dtype=torch.long).random_(args.nb_classes)
if is_cuda:
input = input.cuda()
target = target.cuda()
train_step(input, target, model, loss_fn, optimizer)
results = []
for param in model.parameters():
if len(results) == 0:
results = param.flatten().cpu().data
else:
results = torch.cat((results, param.flatten().cpu().data), 0)
shared_results[rank] = results
def setup_args():
args = argparse.Namespace()
args.global_sync_iter = 20
args.block_momentum = 0.875
args.block_lr = 0.5
args.input_size = 5
args.nb_classes = 2
args.batch_size = 1
args.lr = [1e-3]
args.momentum = 0
args.weight_decay = 0
args.warmup_iterations = 0
args.use_nbm = True
args.average_sync = True
args.global_sync_iter = 1
args.model_parallel_size = 1
args.distributed_backend = "gloo"
args.distributed_world_size = 2
port = random.randint(10000, 20000)
args.distributed_init_method = "tcp://localhost:{port}".format(port=port)
args.distributed_init_host = "localhost"
args.distributed_port = port + 1
args.local_world_size = args.distributed_world_size
cfg = OmegaConf.create()
cfg.optimization = OmegaConf.create()
cfg.common = OmegaConf.create()
cfg.distributed_training = OmegaConf.create()
cfg.dataset = OmegaConf.create()
cfg.bmuf = OmegaConf.create()
cfg.optimizer = OmegaConf.create()
cfg.bmuf.global_sync_iter = args.global_sync_iter
cfg.bmuf.block_momentum = args.block_momentum
cfg.bmuf.block_lr = args.block_lr
cfg.dataset.batch_size = args.batch_size
cfg.optimization.lr = args.lr
cfg.optimizer.momentum = args.momentum
cfg.optimizer.weight_decay = args.weight_decay
cfg.bmuf.warmup_iterations = args.warmup_iterations
cfg.bmuf.use_nbm = args.use_nbm
cfg.bmuf.average_sync = args.average_sync
cfg.common.model_parallel_size = args.model_parallel_size
cfg.distributed_training.distributed_backend = args.distributed_backend
cfg.distributed_training.distributed_world_size = args.distributed_world_size
cfg.bmuf.distributed_world_size = args.distributed_world_size
cfg.distributed_training.distributed_init_method = args.distributed_init_method
cfg.distributed_training.distributed_port = args.distributed_port
return cfg, args
@unittest.skipIf(torch.cuda.device_count() < 2, "test requires 2 GPUs")
class TestBMUF(unittest.TestCase):
def bmuf_process(self, cfg, args, iterations):
processes = []
results = Manager().dict()
torch.multiprocessing.spawn(
fn=functools.partial(single_gpu_training, cfg, args),
args=(iterations, results),
nprocs=args.distributed_world_size,
join=True,
)
return results
def test_bmuf_sync(self):
# Train model for 1 iteration and do bmuf sync without doing warmup
cfg, args = setup_args()
iterations = 1
results = self.bmuf_process(cfg, args, iterations)
# Make sure params in both machines are same
assert len(results) == 2
self.assertAlmostEqual(results[0], results[1])
def test_warmup_sync(self):
# Train model for 20 iteration and do warmup sync without doing bmuf sync
cfg, args = setup_args()
args.warmup_iterations = 20
cfg.bmuf.warmup_iterations = args.warmup_iterations
iterations = 20
results = self.bmuf_process(cfg, args, iterations)
# Make sure params in both machines are same
assert len(results) == 2
self.assertAlmostEqual(results[0], results[1])
def test_warmup_sync_bmuf_sync(self):
# Train model for 25 iteration and do warmup sync after 20 iteration
# and bmuf sync after 25 iteration
cfg, args = setup_args()
args.warmup_iterations = 20
args.global_sync_iter = 5
cfg.bmuf.warmup_iterations = args.warmup_iterations
cfg.bmuf.global_sync_iter = args.global_sync_iter
iterations = 25
results = self.bmuf_process(cfg, args, iterations)
# Make sure params in both machines are same
assert len(results) == 2
self.assertAlmostEqual(results[0], results[1])
def test_single_gpu_bmuf(self):
        # Train model for 20 iterations on a single GPU, with warmup sync after 5 iterations
cfg, args = setup_args()
args.distributed_world_size = 1
args.warmup_iterations = 5
cfg.distributed_training.distributed_world_size = args.distributed_world_size
cfg.bmuf.distributed_world_size = args.distributed_world_size
cfg.bmuf.warmup_iterations = args.warmup_iterations
iterations = 20
results = self.bmuf_process(cfg, args, iterations)
assert len(results) == 1
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-4)
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/distributed/test_bmuf.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/distributed/test_bmuf.py",
"repo_id": "COCO-LM",
"token_count": 2873
}
| 201 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import tests.utils as test_utils
import torch
from fairseq.data import (
BacktranslationDataset,
LanguagePairDataset,
TransformEosDataset,
)
from fairseq.sequence_generator import SequenceGenerator
class TestBacktranslationDataset(unittest.TestCase):
def setUp(self):
(
self.tgt_dict,
self.w1,
self.w2,
self.src_tokens,
self.src_lengths,
self.model,
) = test_utils.sequence_generator_setup()
dummy_src_samples = self.src_tokens
self.tgt_dataset = test_utils.TestDataset(data=dummy_src_samples)
self.cuda = torch.cuda.is_available()
def _backtranslation_dataset_helper(
self,
remove_eos_from_input_src,
remove_eos_from_output_src,
):
tgt_dataset = LanguagePairDataset(
src=self.tgt_dataset,
src_sizes=self.tgt_dataset.sizes,
src_dict=self.tgt_dict,
tgt=None,
tgt_sizes=None,
tgt_dict=None,
)
generator = SequenceGenerator(
[self.model],
tgt_dict=self.tgt_dict,
max_len_a=0,
max_len_b=200,
beam_size=2,
unk_penalty=0,
)
backtranslation_dataset = BacktranslationDataset(
tgt_dataset=TransformEosDataset(
dataset=tgt_dataset,
eos=self.tgt_dict.eos(),
# remove eos from the input src
remove_eos_from_src=remove_eos_from_input_src,
),
src_dict=self.tgt_dict,
backtranslation_fn=(
lambda sample: generator.generate([self.model], sample)
),
output_collater=TransformEosDataset(
dataset=tgt_dataset,
eos=self.tgt_dict.eos(),
# if we remove eos from the input src, then we need to add it
# back to the output tgt
append_eos_to_tgt=remove_eos_from_input_src,
remove_eos_from_src=remove_eos_from_output_src,
).collater,
cuda=self.cuda,
)
dataloader = torch.utils.data.DataLoader(
backtranslation_dataset,
batch_size=2,
collate_fn=backtranslation_dataset.collater,
)
backtranslation_batch_result = next(iter(dataloader))
eos, pad, w1, w2 = self.tgt_dict.eos(), self.tgt_dict.pad(), self.w1, self.w2
# Note that we sort by src_lengths and add left padding, so actually
# ids will look like: [1, 0]
expected_src = torch.LongTensor([[w1, w2, w1, eos], [pad, pad, w1, eos]])
if remove_eos_from_output_src:
expected_src = expected_src[:, :-1]
expected_tgt = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]])
generated_src = backtranslation_batch_result["net_input"]["src_tokens"]
tgt_tokens = backtranslation_batch_result["target"]
self.assertTensorEqual(expected_src, generated_src)
self.assertTensorEqual(expected_tgt, tgt_tokens)
def test_backtranslation_dataset_no_eos_in_output_src(self):
self._backtranslation_dataset_helper(
remove_eos_from_input_src=False,
remove_eos_from_output_src=True,
)
def test_backtranslation_dataset_with_eos_in_output_src(self):
self._backtranslation_dataset_helper(
remove_eos_from_input_src=False,
remove_eos_from_output_src=False,
)
def test_backtranslation_dataset_no_eos_in_input_src(self):
self._backtranslation_dataset_helper(
remove_eos_from_input_src=True,
remove_eos_from_output_src=False,
)
def assertTensorEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertEqual(t1.ne(t2).long().sum(), 0)
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_backtranslation_dataset.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_backtranslation_dataset.py",
"repo_id": "COCO-LM",
"token_count": 2106
}
| 202 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import unittest
import tests.utils as test_utils
import torch
from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.criterions.label_smoothed_cross_entropy import (
LabelSmoothedCrossEntropyCriterion,
)
class TestLabelSmoothing(unittest.TestCase):
def setUp(self):
# build dictionary
self.d = test_utils.dummy_dictionary(3)
vocab = len(self.d)
self.assertEqual(vocab, 4 + 3) # 4 special + 3 tokens
self.assertEqual(self.d.pad(), 1)
self.assertEqual(self.d.eos(), 2)
self.assertEqual(self.d.unk(), 3)
pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6 # noqa: F841
# build dataset
self.data = [
# the first batch item has padding
{
"source": torch.LongTensor([w1, eos]),
"target": torch.LongTensor([w1, eos]),
},
{
"source": torch.LongTensor([w1, eos]),
"target": torch.LongTensor([w1, w1, eos]),
},
]
self.sample = next(test_utils.dummy_dataloader(self.data))
# build model
self.args = argparse.Namespace()
self.args.sentence_avg = False
self.args.report_accuracy = False
self.args.probs = (
torch.FloatTensor(
[
                    # bos pad eos unk w1 w2 w3
[0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05],
[0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10],
[0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15],
]
)
.unsqueeze(0)
.expand(2, 3, 7)
) # add batch dimension
self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d)
self.model = self.task.build_model(self.args)
def test_nll_loss(self):
self.args.label_smoothing = 0.1
nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task)
smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion(
self.args, self.task
)
nll_loss, nll_sample_size, nll_logging_output = nll_crit(
self.model, self.sample
)
smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(
self.model, self.sample
)
self.assertLess(abs(nll_loss - nll_logging_output["loss"]), 1e-6)
self.assertLess(abs(nll_loss - smooth_logging_output["nll_loss"]), 1e-6)
def test_padding(self):
self.args.label_smoothing = 0.1
crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task)
loss, _, logging_output = crit(self.model, self.sample)
def get_one_no_padding(idx):
# create a new sample with just a single batch item so that there's
# no padding
sample1 = next(test_utils.dummy_dataloader([self.data[idx]]))
args1 = copy.copy(self.args)
args1.probs = args1.probs[idx, :, :].unsqueeze(0)
model1 = self.task.build_model(args1)
loss1, _, _ = crit(model1, sample1)
return loss1
loss1 = get_one_no_padding(0)
loss2 = get_one_no_padding(1)
self.assertAlmostEqual(loss, loss1 + loss2)
def test_reduction(self):
self.args.label_smoothing = 0.1
crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task)
loss, _, logging_output = crit(self.model, self.sample, reduce=True)
unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False)
self.assertAlmostEqual(loss, unreduced_loss.sum())
def test_zero_eps(self):
self.args.label_smoothing = 0.0
nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task)
smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion(
self.args, self.task
)
nll_loss, nll_sample_size, nll_logging_output = nll_crit(
self.model, self.sample
)
smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(
self.model, self.sample
)
self.assertAlmostEqual(nll_loss, smooth_loss)
def assertAlmostEqual(self, t1, t2):
self.assertEqual(t1.size(), t2.size(), "size mismatch")
self.assertLess((t1 - t2).abs().max(), 1e-6)
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_label_smoothing.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_label_smoothing.py",
"repo_id": "COCO-LM",
"token_count": 2270
}
| 203 |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import logging
import unittest
from io import StringIO
from unittest.mock import MagicMock, patch
import torch
from fairseq import checkpoint_utils, data
from omegaconf import OmegaConf
def mock_trainer(epoch, num_updates, iterations_in_epoch):
trainer = MagicMock()
trainer.load_checkpoint.return_value = {
"train_iterator": {
"epoch": epoch,
"iterations_in_epoch": iterations_in_epoch,
"shuffle": False,
},
}
trainer.get_num_updates.return_value = num_updates
return trainer
def mock_dict():
d = MagicMock()
d.pad.return_value = 1
d.eos.return_value = 2
d.unk.return_value = 3
return d
def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations_in_epoch):
tokens = torch.LongTensor(list(range(epoch_size))).view(1, -1)
tokens_ds = data.TokenBlockDataset(
tokens,
sizes=[tokens.size(-1)],
block_size=1,
pad=0,
eos=1,
include_targets=False,
)
trainer = mock_trainer(epoch, num_updates, iterations_in_epoch)
dataset = data.LanguagePairDataset(
tokens_ds, tokens_ds.sizes, mock_dict(), shuffle=False
)
epoch_itr = data.EpochBatchIterator(
dataset=dataset,
collate_fn=dataset.collater,
batch_sampler=[[i] for i in range(epoch_size)],
)
return trainer, epoch_itr
def get_mock_cfg(finetune_from_model):
cfg_mock = OmegaConf.create(
{
"checkpoint": {
"optimizer_overrides": "{}",
"reset_dataloader": False,
"reset_meters": False,
"reset_optimizer": False,
"reset_lr_scheduler": False,
"finetune_from_model": finetune_from_model,
"model_parallel_size": 1,
"restore_file": "checkpoint_last.pt",
},
"common": {
"model_parallel_size": 1,
},
}
)
return cfg_mock
class TestLoadCheckpoint(unittest.TestCase):
def setUp(self):
self.cfg_mock = get_mock_cfg(None)
self.patches = {
"os.makedirs": MagicMock(),
"os.path.join": MagicMock(),
"os.path.isfile": MagicMock(return_value=True),
"os.path.isabs": MagicMock(return_value=False),
"fairseq.file_io.PathManager.exists": MagicMock(return_value=False),
}
self.applied_patches = [patch(p, d) for p, d in self.patches.items()]
[p.start() for p in self.applied_patches]
logging.disable(logging.CRITICAL)
def tearDown(self):
patch.stopall()
logging.disable(logging.NOTSET)
def test_load_partial_checkpoint(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 200, 50)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
_, epoch_itr = checkpoint_utils.load_checkpoint(
self.cfg_mock.checkpoint, trainer
)
self.assertEqual(epoch_itr.epoch, 2)
self.assertEqual(epoch_itr.iterations_in_epoch, 50)
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertEqual(epoch_itr.epoch, 2)
self.assertEqual(epoch_itr.iterations_in_epoch, 50)
self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 50)
self.assertEqual(epoch_itr.iterations_in_epoch, 51)
for _ in range(150 - 52):
next(itr)
self.assertEqual(epoch_itr.iterations_in_epoch, 149)
self.assertTrue(itr.has_next())
next(itr)
self.assertFalse(itr.has_next())
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertTrue(itr.has_next())
self.assertEqual(epoch_itr.epoch, 3)
self.assertEqual(epoch_itr.iterations_in_epoch, 0)
def test_load_full_checkpoint(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 300, 150)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
_, epoch_itr = checkpoint_utils.load_checkpoint(
self.cfg_mock.checkpoint, trainer
)
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertEqual(epoch_itr.epoch, 3)
self.assertEqual(epoch_itr.iterations_in_epoch, 0)
self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 0)
def test_load_no_checkpoint(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
self.patches["os.path.isfile"].return_value = False
_, epoch_itr = checkpoint_utils.load_checkpoint(
self.cfg_mock.checkpoint, trainer
)
itr = epoch_itr.next_epoch_itr(shuffle=False)
self.assertEqual(epoch_itr.epoch, 1)
self.assertEqual(epoch_itr.iterations_in_epoch, 0)
self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 0)
def test_finetune_from_model_args_conflict(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
for arg in [
"reset_optimizer",
"reset_lr_scheduler",
"reset_meters",
"reset_dataloader",
]:
with self.subTest(arg=arg):
cfg_mock = get_mock_cfg("/temp/checkpoint_pretrained.pt")
cfg_mock["checkpoint"][arg] = True
with self.assertRaises(Exception) as context:
_, _ = checkpoint_utils.load_checkpoint(
cfg_mock.checkpoint, trainer
)
self.assertTrue(
"--finetune-from-model can not be set together with either --reset-optimizer"
" or reset_lr_scheduler or reset_meters or reset_dataloader"
in str(context.exception)
)
def test_finetune_from_model(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
from_model_path = "/temp/checkpoint_pretrained.pt"
def mock_finetune_exist(path):
if path == from_model_path:
return True
else:
return False
self.patches[
"fairseq.file_io.PathManager.exists"
].side_effect = mock_finetune_exist
cfg_mock = get_mock_cfg(from_model_path)
cfg_mock.checkpoint.restore_file = "checkpoint_last.pt"
_, _ = checkpoint_utils.load_checkpoint(cfg_mock.checkpoint, trainer)
(
checkpoint_path,
reset_optimizer,
reset_lr_scheduler,
optimizer_overrides,
) = trainer.load_checkpoint.call_args[0]
reset_meters = trainer.load_checkpoint.call_args[1]["reset_meters"]
self.assertTrue(reset_optimizer)
self.assertTrue(reset_lr_scheduler)
self.assertTrue(reset_meters)
def test_finetune_from_model_resume(self):
with contextlib.redirect_stdout(StringIO()):
trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0)
trainer.get_train_iterator = MagicMock(return_value=epoch_itr)
from_model_path = "/temp/checkpoint_pretrained.pt"
# launch second time
# both restore_file=checkpoint_last.pt and finetune_from_model are set
def mock_finetune_exist(path):
                if path == from_model_path or path.endswith("checkpoint_last.pt"):
return True
else:
return False
self.patches[
"fairseq.file_io.PathManager.exists"
].side_effect = mock_finetune_exist
cfg_mock = get_mock_cfg(from_model_path)
cfg_mock.checkpoint.restore_file = "checkpoint_last.pt"
_, _ = checkpoint_utils.load_checkpoint(cfg_mock.checkpoint, trainer)
(
checkpoint_path,
reset_optimizer,
reset_lr_scheduler,
optimizer_overrides,
) = trainer.load_checkpoint.call_args[0]
reset_meters = trainer.load_checkpoint.call_args[1]["reset_meters"]
self.assertFalse(reset_optimizer)
self.assertFalse(reset_lr_scheduler)
self.assertFalse(reset_meters)
if __name__ == "__main__":
unittest.main()
|
COCO-LM/fairseq/tests/test_train.py/0
|
{
"file_path": "COCO-LM/fairseq/tests/test_train.py",
"repo_id": "COCO-LM",
"token_count": 4760
}
| 204 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
## The script is largely adapted from the huggingface transformers library.
""" Load SQuAD dataset. """
from __future__ import absolute_import, division, print_function
import json
import logging
import math
import collections
from io import open
from tqdm import tqdm
from transformers.models.bert import BasicTokenizer
from transformers.models.bert.tokenization_bert import whitespace_tokenize
# Required by XLNet evaluation method to compute optimal threshold (see write_predictions_extended() method)
from utils_squad_evaluate import find_all_best_thresh_v2, make_qid_to_has_ans, get_raw_scores
logger = logging.getLogger(__name__)
class SquadExample(object):
"""
A single training/test example for the Squad dataset.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=None):
self.qas_id = qas_id
self.question_text = question_text
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (self.qas_id)
s += ", question_text: %s" % (
self.question_text)
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.end_position:
s += ", end_position: %d" % (self.end_position)
if self.is_impossible:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
cls_index,
p_mask,
paragraph_len,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.cls_index = cls_index
self.p_mask = p_mask
self.paragraph_len = paragraph_len
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def read_squad_examples(input_file, is_training, version_2_with_negative):
"""Read a SQuAD json file into a list of SquadExample."""
with open(input_file, "r", encoding='utf-8') as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
# if len(examples) > 1000:
# logger.info("Debug mode ! only load %d examples !" % len(examples))
# break
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
cls_token='[CLS]', sep_token='[SEP]', pad_token_id=0,
cls_token_at_end=False,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=0, pad_token_segment_id=0,
mask_padding_with_zero=True,
sequence_a_is_doc=False,
add_two_separators=False):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
# cnt_pos, cnt_neg = 0, 0
# max_N, max_M = 1024, 1024
# f = np.zeros((max_N, max_M), dtype=np.float32)
features = []
for (example_index, example) in enumerate(tqdm(examples)):
# if example_index % 100 == 0:
# logger.info('Converting %s/%s pos %s neg %s', example_index, len(examples), cnt_pos, cnt_neg)
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
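        # Illustrative example (hypothetical tokenization, not taken from any dataset):
        # if example.doc_tokens == ["Johanson's", "house"] and a lower-casing WordPiece
        # tokenizer splits the first word into ["johan", "##son", "'", "s"], then
        # all_doc_tokens == ["johan", "##son", "'", "s", "house"],
        # orig_to_tok_index == [0, 4] and tok_to_orig_index == [0, 0, 0, 0, 1].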
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
if add_two_separators:
max_tokens_for_doc -= 1
assert max_tokens_for_doc > 0
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
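        # Illustrative example (hypothetical numbers): with len(all_doc_tokens) == 250,
        # max_tokens_for_doc == 160 and doc_stride == 128, the loop above produces
        # doc_spans == [DocSpan(start=0, length=160), DocSpan(start=128, length=122)],
        # i.e. overlapping windows that together cover the whole document.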
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
            # p_mask: mask with 1 for tokens that cannot be in the answer (0 for tokens which can be in an answer)
            # The original TF implementation also keeps the classification token (set to 0) (not sure why...)
p_mask = []
# CLS token at the beginning
if not cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = 0
# XLNet: P SEP Q SEP CLS
# Others: CLS Q SEP P SEP
if not sequence_a_is_doc:
# Query
tokens += query_tokens
segment_ids += [sequence_a_segment_id] * len(query_tokens)
p_mask += [1] * len(query_tokens)
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
if add_two_separators:
# COCO-LM uses 2 SEP tokens between query and document
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
# Paragraph
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
if not sequence_a_is_doc:
segment_ids.append(sequence_b_segment_id)
else:
segment_ids.append(sequence_a_segment_id)
p_mask.append(0)
paragraph_len = doc_span.length
if sequence_a_is_doc:
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
if add_two_separators:
# COCO-LM uses 2 SEP tokens between query and document
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
tokens += query_tokens
segment_ids += [sequence_b_segment_id] * len(query_tokens)
p_mask += [1] * len(query_tokens)
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_b_segment_id)
p_mask.append(1)
# CLS token at the end
if cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = len(tokens) - 1 # Index of classification token
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(pad_token_id)
input_mask.append(0 if mask_padding_with_zero else 1)
segment_ids.append(pad_token_segment_id)
p_mask.append(1)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
span_is_impossible = example.is_impossible
start_position = None
end_position = None
if is_training and not span_is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
span_is_impossible = True
else:
if sequence_a_is_doc:
doc_offset = 0
else:
doc_offset = len(query_tokens) + 2
if add_two_separators:
doc_offset += 1
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and span_is_impossible:
start_position = cls_index
end_position = cls_index
if example_index < 20:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (unique_id))
logger.info("example_index: %s" % (example_index))
logger.info("doc_span_index: %s" % (doc_span_index))
logger.info("tokens: %s" % " ".join(tokens))
logger.info("token_to_orig_map: %s" % " ".join([
"%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
logger.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in token_is_max_context.items()
]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and span_is_impossible:
logger.info("impossible example")
if is_training and not span_is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
logger.info("start_position: %d" % (start_position))
logger.info("end_position: %d" % (end_position))
logger.info(
"answer: %s" % (answer_text))
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
cls_index=cls_index,
p_mask=p_mask,
paragraph_len=paragraph_len,
start_position=start_position,
end_position=end_position,
is_impossible=span_is_impossible))
unique_id += 1
return features
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
    # Question: What country is the top exporter of electronics?
    # Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tokens = tokenizer.tokenize(orig_answer_text)
tok_answer_text = tokenizer.convert_tokens_to_string(tokens)
# tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = tokenizer.convert_tokens_to_string(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
    # token can appear in multiple spans. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
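    # With the numbers above (all spans have length 5), the score of 'bought' is
    # min(4, 0) + 0.01 * 5 = 0.05 in span B and min(1, 3) + 0.01 * 5 = 1.05 in span C,
    # so span C is selected as the max-context span for that token.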
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file, verbose_logging,
version_2_with_negative, null_score_diff_threshold, tokens_to_text=None):
"""Write final predictions to the json file and log-odds of null if needed."""
logger.info("Writing predictions to: %s" % (output_prediction_file))
logger.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
if tokens_to_text:
tok_text = tokens_to_text(tok_tokens)
else:
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="",
start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could only have single null prediction.
# So we just create a nonce prediction in this case to avoid failure.
if len(nbest) == 1:
nbest.insert(0,
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
with open(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions
# For XLNet (and XLM which uses the same head)
RawResultExtended = collections.namedtuple("RawResultExtended",
["unique_id", "start_top_log_probs", "start_top_index",
"end_top_log_probs", "end_top_index", "cls_logits"])
def write_predictions_extended(all_examples, all_features, all_results, n_best_size,
max_answer_length, output_prediction_file,
output_nbest_file,
output_null_log_odds_file, orig_data_file,
start_n_top, end_n_top, version_2_with_negative,
tokenizer, do_lower_case, verbose_logging):
""" XLNet write prediction logic (more complex than Bert's).
Write final predictions to the json file and log-odds of null if needed.
Requires utils_squad_evaluate.py
"""
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index",
"start_log_prob", "end_log_prob"])
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_log_prob", "end_log_prob"])
logger.info("Writing predictions to: %s", output_prediction_file)
# logger.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
cur_null_score = result.cls_logits
# if we could have irrelevant answers, get the min score of irrelevant
score_null = min(score_null, cur_null_score)
for i in range(start_n_top):
for j in range(end_n_top):
start_log_prob = result.start_top_log_probs[i]
start_index = result.start_top_index[i]
j_index = j * start_n_top + i
end_log_prob = result.end_top_log_probs[j_index]
end_index = result.end_top_index[j_index]
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_log_prob=start_log_prob,
end_log_prob=end_log_prob))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_log_prob + x.end_log_prob),
reverse=True)
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
# XLNet un-tokenizer
# Let's keep it simple for now and see if we need all this later.
#
# tok_start_to_orig_index = feature.tok_start_to_orig_index
# tok_end_to_orig_index = feature.tok_end_to_orig_index
# start_orig_pos = tok_start_to_orig_index[pred.start_index]
# end_orig_pos = tok_end_to_orig_index[pred.end_index]
# paragraph_text = example.paragraph_text
# final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()
# Previously used Bert untokenizer
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case,
verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_log_prob=pred.start_log_prob,
end_log_prob=pred.end_log_prob))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="", start_log_prob=-1e6,
end_log_prob=-1e6))
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_log_prob + entry.end_log_prob)
if not best_non_null_entry:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_log_prob"] = entry.start_log_prob
output["end_log_prob"] = entry.end_log_prob
nbest_json.append(output)
assert len(nbest_json) >= 1
assert best_non_null_entry is not None
score_diff = score_null
scores_diff_json[example.qas_id] = score_diff
# note(zhiliny): always predict best_non_null_entry
# and the evaluation script will search for the best threshold
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
with open(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
with open(orig_data_file, "r", encoding='utf-8') as reader:
orig_data = json.load(reader)["data"]
qid_to_has_ans = make_qid_to_has_ans(orig_data)
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact_raw, f1_raw = get_raw_scores(orig_data, all_predictions)
out_eval = {}
find_all_best_thresh_v2(out_eval, all_predictions, exact_raw, f1_raw, scores_diff_json, qid_to_has_ans)
return out_eval
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logger.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
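# Quick sanity checks for the two helpers above (illustrative values only, not part of
# the original script): _get_best_indexes([0.1, 0.7, 0.3], n_best_size=2) == [1, 2],
# and _compute_softmax([1.0, 2.0]) is approximately [0.269, 0.731].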
|
COCO-LM/huggingface/utils_for_squad.py/0
|
{
"file_path": "COCO-LM/huggingface/utils_for_squad.py",
"repo_id": "COCO-LM",
"token_count": 21501
}
| 205 |
datadir: /data/CMIP6/HAMMOZ
name: 10m_v_component_of_wind
cmip_name: vas
era_name: v10
run: r1i1p1f1
version: v20190627
res:
- 1.40625
# - 5.625
|
ClimaX/snakemake_configs/HAMMOZ/config_10m_v_component_of_wind.yml/0
|
{
"file_path": "ClimaX/snakemake_configs/HAMMOZ/config_10m_v_component_of_wind.yml",
"repo_id": "ClimaX",
"token_count": 76
}
| 206 |
year_strings = [
'185001010000-186001010000',
'186001010600-187001010000',
'187001010600-188001010000',
'188001010600-189001010000',
'189001010600-190001010000',
'190001010600-191001010000',
'191001010600-192001010000',
'192001010600-193001010000',
'193001010600-194001010000',
'194001020000-195001010000',
'195001010600-196001010000',
'196001010600-197001010000',
'197001010600-198001010000',
'198001010600-199001010000',
'199001010600-200001010000',
'200001010600-201001010000',
'201001010600-201501010000'
]
print(config)
rule download:
output:
"{dataset}/raw/{name}/{name}_{year_str}_raw.nc",
shell:
"wget {config[server_prefix]}/AS-RCEC/TaiESM1/historical/{config["
"run]}/6hrPlevPt/"
"{config[cmip_name]}/gn/v20201112/"
"{config[cmip_name]}_6hrPlevPt_TaiESM1_historical_{config[run]}_gn_{wildcards.year_str}.nc "
"-O {wildcards.dataset}/raw/{config[name]}/{config[name]}_{wildcards.year_str}_raw.nc"
# http://esgf.rcec.sinica.edu.tw/thredds/fileServer/my_cmip6_dataroot/CMIP/AS-RCEC/TaiESM1/historical/r1i1p1f1/6hrPlevPt/ta/gn/v20201112/ta_6hrPlevPt_TaiESM1_historical_r1i1p1f1_gn_185001010000-186001010000.nc
# https://esgf.ceda.ac.uk/thredds/fileServer/esg_cmip6/CMIP6/CMIP/AS-RCEC/TaiESM1/historical/r1i1p1f1/6hrPlevPt/ta/gn/v20201112/ta_6hrPlevPt_TaiESM1_historical_r1i1p1f1_gn_185001010000-186001010000.nc
rule regrid:
input:
"{dataset}/raw/{name}/{name}_{year_str}_raw.nc"
output:
"{dataset}/{res}deg/{name}/{name}_{year_str}_{res}deg.nc.tmp"
shell:
"python ../../src/data_preprocessing/regrid.py \
--input_fns {input} \
--output_dir {wildcards.dataset}/{wildcards.res}deg/{wildcards.name} \
--ddeg_out {wildcards.res} \
--cmip 1 \
--rename {config[cmip_name]} {config[era_name]} \
--file_ending nc.tmp"
rule delete:
input:
expand("{{dataset}}/{res}deg/{{name}}/{{name}}_{{year_str}}_{res}deg.nc.tmp",
res=config['res']),
output:
expand("{{dataset}}/{res}deg/{{name}}/{{name}}_{{year_str}}_{res}deg.nc",
res=config['res'])
priority: 100
run:
for i, o in zip(input, output):
shell("mv {i} {o}")
# shell("rm {wildcards.dataset}/raw/{wildcards.name}/{wildcards.name}_{wildcards.year_str}_raw.nc"),
rule all:
input:
expand("{datadir}/{res}deg/{name}/{name}_{year_str}_{res}deg.nc",
datadir=config['datadir'], res=config['res'], name=config['name'], year_str=year_strings)
|
ClimaX/snakemake_configs/TaiESM1/Snakefile/0
|
{
"file_path": "ClimaX/snakemake_configs/TaiESM1/Snakefile",
"repo_id": "ClimaX",
"token_count": 1426
}
| 207 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import os
from typing import Optional
import numpy as np
import torch
import torchdata.datapipes as dp
from pytorch_lightning import LightningDataModule
from torch.utils.data import DataLoader, IterableDataset
from torchvision.transforms import transforms
from climax.pretrain.datamodule import collate_fn
from climax.pretrain.dataset import (
Forecast,
IndividualForecastDataIter,
NpyReader,
ShuffleIterableDataset,
)
class GlobalForecastDataModule(LightningDataModule):
"""DataModule for global forecast data.
Args:
root_dir (str): Root directory for sharded data.
variables (list): List of input variables.
buffer_size (int): Buffer size for shuffling.
out_variables (list, optional): List of output variables.
predict_range (int, optional): Predict range.
hrs_each_step (int, optional): Hours each step.
batch_size (int, optional): Batch size.
num_workers (int, optional): Number of workers.
pin_memory (bool, optional): Whether to pin memory.
"""
def __init__(
self,
root_dir,
variables,
buffer_size,
out_variables=None,
predict_range: int = 6,
hrs_each_step: int = 1,
batch_size: int = 64,
num_workers: int = 0,
pin_memory: bool = False,
):
super().__init__()
if num_workers > 1:
raise NotImplementedError(
"num_workers > 1 is not supported yet. Performance will likely degrage too with larger num_workers."
)
# this line allows to access init params with 'self.hparams' attribute
self.save_hyperparameters(logger=False)
if isinstance(out_variables, str):
out_variables = [out_variables]
self.hparams.out_variables = out_variables
self.lister_train = list(dp.iter.FileLister(os.path.join(root_dir, "train")))
self.lister_val = list(dp.iter.FileLister(os.path.join(root_dir, "val")))
self.lister_test = list(dp.iter.FileLister(os.path.join(root_dir, "test")))
self.transforms = self.get_normalize()
self.output_transforms = self.get_normalize(out_variables)
self.val_clim = self.get_climatology("val", out_variables)
self.test_clim = self.get_climatology("test", out_variables)
self.data_train: Optional[IterableDataset] = None
self.data_val: Optional[IterableDataset] = None
self.data_test: Optional[IterableDataset] = None
def get_normalize(self, variables=None):
if variables is None:
variables = self.hparams.variables
normalize_mean = dict(np.load(os.path.join(self.hparams.root_dir, "normalize_mean.npz")))
mean = []
for var in variables:
if var != "total_precipitation":
mean.append(normalize_mean[var])
else:
mean.append(np.array([0.0]))
normalize_mean = np.concatenate(mean)
normalize_std = dict(np.load(os.path.join(self.hparams.root_dir, "normalize_std.npz")))
normalize_std = np.concatenate([normalize_std[var] for var in variables])
return transforms.Normalize(normalize_mean, normalize_std)
def get_lat_lon(self):
lat = np.load(os.path.join(self.hparams.root_dir, "lat.npy"))
lon = np.load(os.path.join(self.hparams.root_dir, "lon.npy"))
return lat, lon
def get_climatology(self, partition="val", variables=None):
path = os.path.join(self.hparams.root_dir, partition, "climatology.npz")
clim_dict = np.load(path)
if variables is None:
variables = self.hparams.variables
clim = np.concatenate([clim_dict[var] for var in variables])
clim = torch.from_numpy(clim)
return clim
def setup(self, stage: Optional[str] = None):
# load datasets only if they're not loaded already
if not self.data_train and not self.data_val and not self.data_test:
self.data_train = ShuffleIterableDataset(
IndividualForecastDataIter(
Forecast(
NpyReader(
file_list=self.lister_train,
start_idx=0,
end_idx=1,
variables=self.hparams.variables,
out_variables=self.hparams.out_variables,
shuffle=True,
multi_dataset_training=False,
),
max_predict_range=self.hparams.predict_range,
random_lead_time=False,
hrs_each_step=self.hparams.hrs_each_step,
),
transforms=self.transforms,
output_transforms=self.output_transforms,
),
buffer_size=self.hparams.buffer_size,
)
self.data_val = IndividualForecastDataIter(
Forecast(
NpyReader(
file_list=self.lister_val,
start_idx=0,
end_idx=1,
variables=self.hparams.variables,
out_variables=self.hparams.out_variables,
shuffle=False,
multi_dataset_training=False,
),
max_predict_range=self.hparams.predict_range,
random_lead_time=False,
hrs_each_step=self.hparams.hrs_each_step,
),
transforms=self.transforms,
output_transforms=self.output_transforms,
)
self.data_test = IndividualForecastDataIter(
Forecast(
NpyReader(
file_list=self.lister_test,
start_idx=0,
end_idx=1,
variables=self.hparams.variables,
out_variables=self.hparams.out_variables,
shuffle=False,
multi_dataset_training=False,
),
max_predict_range=self.hparams.predict_range,
random_lead_time=False,
hrs_each_step=self.hparams.hrs_each_step,
),
transforms=self.transforms,
output_transforms=self.output_transforms,
)
def train_dataloader(self):
return DataLoader(
self.data_train,
batch_size=self.hparams.batch_size,
drop_last=False,
num_workers=self.hparams.num_workers,
pin_memory=self.hparams.pin_memory,
collate_fn=collate_fn,
)
def val_dataloader(self):
return DataLoader(
self.data_val,
batch_size=self.hparams.batch_size,
shuffle=False,
drop_last=False,
num_workers=self.hparams.num_workers,
pin_memory=self.hparams.pin_memory,
collate_fn=collate_fn,
)
def test_dataloader(self):
return DataLoader(
self.data_test,
batch_size=self.hparams.batch_size,
shuffle=False,
drop_last=False,
num_workers=self.hparams.num_workers,
pin_memory=self.hparams.pin_memory,
collate_fn=collate_fn,
)
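# Minimal usage sketch (the directory layout, variable names, and hyperparameters below
# are illustrative assumptions, not values shipped with this repo):
#
#   dm = GlobalForecastDataModule(
#       root_dir="/data/weatherbench/5.625deg_npz",
#       variables=["2m_temperature", "geopotential_500"],
#       out_variables=["2m_temperature"],
#       buffer_size=10000,
#       predict_range=72,
#       batch_size=32,
#   )
#   dm.setup()
#   batch = next(iter(dm.train_dataloader()))  # collated by collate_fn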
|
ClimaX/src/climax/global_forecast/datamodule.py/0
|
{
"file_path": "ClimaX/src/climax/global_forecast/datamodule.py",
"repo_id": "ClimaX",
"token_count": 3886
}
| 208 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import numpy as np
import torch
from scipy import stats
def mse(pred, y, vars, lat=None, mask=None):
"""Mean squared error
Args:
pred: [B, L, V*p*p]
y: [B, V, H, W]
vars: list of variable names
"""
loss = (pred - y) ** 2
loss_dict = {}
with torch.no_grad():
for i, var in enumerate(vars):
if mask is not None:
loss_dict[var] = (loss[:, i] * mask).sum() / mask.sum()
else:
loss_dict[var] = loss[:, i].mean()
if mask is not None:
loss_dict["loss"] = (loss.mean(dim=1) * mask).sum() / mask.sum()
else:
loss_dict["loss"] = loss.mean(dim=1).mean()
return loss_dict
def lat_weighted_mse(pred, y, vars, lat, mask=None):
"""Latitude weighted mean squared error
    Weights the loss by the cosine of the latitude to account for the difference in grid-cell area between the equator and the poles.
Args:
y: [B, V, H, W]
pred: [B, V, H, W]
vars: list of variable names
lat: H
"""
error = (pred - y) ** 2 # [N, C, H, W]
    # latitude weights
w_lat = np.cos(np.deg2rad(lat))
w_lat = w_lat / w_lat.mean() # (H, )
w_lat = torch.from_numpy(w_lat).unsqueeze(0).unsqueeze(-1).to(dtype=error.dtype, device=error.device) # (1, H, 1)
loss_dict = {}
with torch.no_grad():
for i, var in enumerate(vars):
if mask is not None:
loss_dict[var] = (error[:, i] * w_lat * mask).sum() / mask.sum()
else:
loss_dict[var] = (error[:, i] * w_lat).mean()
if mask is not None:
loss_dict["loss"] = ((error * w_lat.unsqueeze(1)).mean(dim=1) * mask).sum() / mask.sum()
else:
loss_dict["loss"] = (error * w_lat.unsqueeze(1)).mean(dim=1).mean()
return loss_dict
def lat_weighted_mse_val(pred, y, transform, vars, lat, clim, log_postfix):
"""Latitude weighted mean squared error
Args:
y: [B, V, H, W]
pred: [B, V, H, W]
vars: list of variable names
lat: H
"""
error = (pred - y) ** 2 # [B, V, H, W]
    # latitude weights
w_lat = np.cos(np.deg2rad(lat))
w_lat = w_lat / w_lat.mean() # (H, )
w_lat = torch.from_numpy(w_lat).unsqueeze(0).unsqueeze(-1).to(dtype=error.dtype, device=error.device) # (1, H, 1)
loss_dict = {}
with torch.no_grad():
for i, var in enumerate(vars):
loss_dict[f"w_mse_{var}_{log_postfix}"] = (error[:, i] * w_lat).mean()
loss_dict["w_mse"] = np.mean([loss_dict[k].cpu() for k in loss_dict.keys()])
return loss_dict
def lat_weighted_rmse(pred, y, transform, vars, lat, clim, log_postfix):
"""Latitude weighted root mean squared error
Args:
y: [B, V, H, W]
pred: [B, V, H, W]
vars: list of variable names
lat: H
"""
pred = transform(pred)
y = transform(y)
error = (pred - y) ** 2 # [B, V, H, W]
    # latitude weights
w_lat = np.cos(np.deg2rad(lat))
w_lat = w_lat / w_lat.mean() # (H, )
w_lat = torch.from_numpy(w_lat).unsqueeze(0).unsqueeze(-1).to(dtype=error.dtype, device=error.device)
loss_dict = {}
with torch.no_grad():
for i, var in enumerate(vars):
loss_dict[f"w_rmse_{var}_{log_postfix}"] = torch.mean(
torch.sqrt(torch.mean(error[:, i] * w_lat, dim=(-2, -1)))
)
loss_dict["w_rmse"] = np.mean([loss_dict[k].cpu() for k in loss_dict.keys()])
return loss_dict
def lat_weighted_acc(pred, y, transform, vars, lat, clim, log_postfix):
"""
y: [B, V, H, W]
    pred: [B, V, H, W]
vars: list of variable names
lat: H
"""
pred = transform(pred)
y = transform(y)
    # latitude weights
w_lat = np.cos(np.deg2rad(lat))
w_lat = w_lat / w_lat.mean() # (H, )
w_lat = torch.from_numpy(w_lat).unsqueeze(0).unsqueeze(-1).to(dtype=pred.dtype, device=pred.device) # [1, H, 1]
# clim = torch.mean(y, dim=(0, 1), keepdim=True)
clim = clim.to(device=y.device).unsqueeze(0)
pred = pred - clim
y = y - clim
loss_dict = {}
with torch.no_grad():
for i, var in enumerate(vars):
pred_prime = pred[:, i] - torch.mean(pred[:, i])
y_prime = y[:, i] - torch.mean(y[:, i])
loss_dict[f"acc_{var}_{log_postfix}"] = torch.sum(w_lat * pred_prime * y_prime) / torch.sqrt(
torch.sum(w_lat * pred_prime**2) * torch.sum(w_lat * y_prime**2)
)
loss_dict["acc"] = np.mean([loss_dict[k].cpu() for k in loss_dict.keys()])
return loss_dict
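# --- Illustrative sketch (editor addition, not part of the original ClimaX file) ---
# Hypothetical anomaly-correlation computation with an identity transform and a zero
# climatology; all shapes and names are made up for illustration only.
def _example_lat_weighted_acc():
    lat = np.linspace(-88.0, 88.0, 8)          # H latitude values in degrees
    pred = torch.randn(4, 1, 8, 16)            # [B, V, H, W]
    y = torch.randn(4, 1, 8, 16)               # [B, V, H, W]
    clim = torch.zeros(1, 8, 16)               # per-variable climatology [V, H, W]
    metrics = lat_weighted_acc(pred, y, transform=lambda t: t, vars=["t2m"],
                               lat=lat, clim=clim, log_postfix="72h")
    return metrics["acc"]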
def lat_weighted_nrmses(pred, y, transform, vars, lat, clim, log_postfix):
"""
y: [B, V, H, W]
    pred: [B, V, H, W]
vars: list of variable names
lat: H
"""
pred = transform(pred)
y = transform(y)
y_normalization = clim
    # latitude weights
w_lat = np.cos(np.deg2rad(lat))
w_lat = w_lat / w_lat.mean() # (H, )
w_lat = torch.from_numpy(w_lat).unsqueeze(-1).to(dtype=y.dtype, device=y.device) # (H, 1)
loss_dict = {}
with torch.no_grad():
for i, var in enumerate(vars):
pred_ = pred[:, i] # B, H, W
y_ = y[:, i] # B, H, W
error = (torch.mean(pred_, dim=0) - torch.mean(y_, dim=0)) ** 2 # H, W
error = torch.mean(error * w_lat)
loss_dict[f"w_nrmses_{var}"] = torch.sqrt(error) / y_normalization
return loss_dict
def lat_weighted_nrmseg(pred, y, transform, vars, lat, clim, log_postfix):
"""
y: [B, V, H, W]
    pred: [B, V, H, W]
vars: list of variable names
lat: H
"""
pred = transform(pred)
y = transform(y)
y_normalization = clim
    # latitude weights
w_lat = np.cos(np.deg2rad(lat))
w_lat = w_lat / w_lat.mean() # (H, )
w_lat = torch.from_numpy(w_lat).unsqueeze(0).unsqueeze(-1).to(dtype=y.dtype, device=y.device) # (1, H, 1)
loss_dict = {}
with torch.no_grad():
for i, var in enumerate(vars):
pred_ = pred[:, i] # B, H, W
pred_ = torch.mean(pred_ * w_lat, dim=(-2, -1)) # B
y_ = y[:, i] # B, H, W
y_ = torch.mean(y_ * w_lat, dim=(-2, -1)) # B
error = torch.mean((pred_ - y_) ** 2)
loss_dict[f"w_nrmseg_{var}"] = torch.sqrt(error) / y_normalization
return loss_dict
def lat_weighted_nrmse(pred, y, transform, vars, lat, clim, log_postfix):
"""
y: [B, V, H, W]
    pred: [B, V, H, W]
vars: list of variable names
lat: H
"""
nrmses = lat_weighted_nrmses(pred, y, transform, vars, lat, clim, log_postfix)
nrmseg = lat_weighted_nrmseg(pred, y, transform, vars, lat, clim, log_postfix)
loss_dict = {}
for var in vars:
loss_dict[f"w_nrmses_{var}"] = nrmses[f"w_nrmses_{var}"]
loss_dict[f"w_nrmseg_{var}"] = nrmseg[f"w_nrmseg_{var}"]
loss_dict[f"w_nrmse_{var}"] = nrmses[f"w_nrmses_{var}"] + 5 * nrmseg[f"w_nrmseg_{var}"]
return loss_dict
def remove_nans(pred: torch.Tensor, gt: torch.Tensor):
# pred and gt are two flattened arrays
pred_nan_ids = torch.isnan(pred) | torch.isinf(pred)
pred = pred[~pred_nan_ids]
gt = gt[~pred_nan_ids]
gt_nan_ids = torch.isnan(gt) | torch.isinf(gt)
pred = pred[~gt_nan_ids]
gt = gt[~gt_nan_ids]
return pred, gt
def pearson(pred, y, transform, vars, lat, log_steps, log_days, clim):
"""
y: [N, T, 3, H, W]
pred: [N, T, 3, H, W]
vars: list of variable names
lat: H
"""
pred = transform(pred)
y = transform(y)
loss_dict = {}
with torch.no_grad():
for i, var in enumerate(vars):
for day, step in zip(log_days, log_steps):
pred_, y_ = pred[:, step - 1, i].flatten(), y[:, step - 1, i].flatten()
pred_, y_ = remove_nans(pred_, y_)
loss_dict[f"pearsonr_{var}_day_{day}"] = stats.pearsonr(pred_.cpu().numpy(), y_.cpu().numpy())[0]
loss_dict["pearsonr"] = np.mean([loss_dict[k] for k in loss_dict.keys()])
return loss_dict
def lat_weighted_mean_bias(pred, y, transform, vars, lat, log_steps, log_days, clim):
"""
y: [N, T, 3, H, W]
pred: [N, T, 3, H, W]
vars: list of variable names
lat: H
"""
pred = transform(pred)
y = transform(y)
    # latitude weights
w_lat = np.cos(np.deg2rad(lat))
w_lat = w_lat / w_lat.mean() # (H, )
w_lat = torch.from_numpy(w_lat).unsqueeze(0).unsqueeze(-1).to(dtype=pred.dtype, device=pred.device) # [1, H, 1]
loss_dict = {}
with torch.no_grad():
for i, var in enumerate(vars):
for day, step in zip(log_days, log_steps):
pred_, y_ = pred[:, step - 1, i].flatten(), y[:, step - 1, i].flatten()
pred_, y_ = remove_nans(pred_, y_)
loss_dict[f"mean_bias_{var}_day_{day}"] = pred_.mean() - y_.mean()
# pred_mean = torch.mean(w_lat * pred[:, step - 1, i])
# y_mean = torch.mean(w_lat * y[:, step - 1, i])
# loss_dict[f"mean_bias_{var}_day_{day}"] = y_mean - pred_mean
loss_dict["mean_bias"] = np.mean([loss_dict[k].cpu() for k in loss_dict.keys()])
return loss_dict
|
ClimaX/src/climax/utils/metrics.py/0
|
{
"file_path": "ClimaX/src/climax/utils/metrics.py",
"repo_id": "ClimaX",
"token_count": 4576
}
| 209 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import importlib
def find_model_using_name(model_name):
# Given the option --model [modelname],
# the file "models/modelname_model.py"
# will be imported.
model_filename = "models." + model_name + "_model"
modellib = importlib.import_module(model_filename)
# In the file, the class called ModelNameModel() will
# be instantiated. It has to be a subclass of torch.nn.Module,
# and it is case-insensitive.
model = None
target_model_name = model_name.replace('_', '') + 'model'
for name, cls in modellib.__dict__.items():
if name.lower() == target_model_name.lower() \
and issubclass(cls, torch.nn.Module):
model = cls
if model is None:
print("In %s.py, there should be a subclass of torch.nn.Module with class name that matches %s in lowercase." % (model_filename, target_model_name))
exit(0)
return model
def get_option_setter(model_name):
model_class = find_model_using_name(model_name)
return model_class.modify_commandline_options
def create_model(opt):
model = find_model_using_name(opt.model)
instance = model(opt)
print("model [%s] was created" % (type(instance).__name__))
return instance
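# --- Illustrative note (editor addition, not part of the original file) ---
# Example of the naming convention above, assuming a hypothetical option value
# opt.model == "pix2pix": find_model_using_name("pix2pix") imports
# models/pix2pix_model.py and returns the class Pix2PixModel (matched
# case-insensitively), which create_model(opt) then instantiates.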
|
CoCosNet-v2/models/__init__.py/0
|
{
"file_path": "CoCosNet-v2/models/__init__.py",
"repo_id": "CoCosNet-v2",
"token_count": 479
}
| 210 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
from .base_options import BaseOptions
class TestOptions(BaseOptions):
def initialize(self, parser):
BaseOptions.initialize(self, parser)
parser.add_argument('--results_dir', type=str, default='./results/', help='saves results here.')
parser.add_argument('--which_epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
parser.add_argument('--how_many', type=int, default=float("inf"), help='how many test images to run')
parser.add_argument('--save_per_img', action='store_true', help='if specified, save per image')
parser.add_argument('--show_corr', action='store_true', help='if specified, save bilinear upsample correspondence')
parser.set_defaults(preprocess_mode='scale_width_and_crop', crop_size=256, load_size=256, display_winsize=256)
parser.set_defaults(serial_batches=True)
parser.set_defaults(no_flip=True)
parser.set_defaults(phase='test')
self.isTrain = False
return parser
|
CoCosNet-v2/options/test_options.py/0
|
{
"file_path": "CoCosNet-v2/options/test_options.py",
"repo_id": "CoCosNet-v2",
"token_count": 395
}
| 211 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import cv2
import torch
import numpy as np
import math
import random
from PIL import Image
from skimage import feature
from data.pix2pix_dataset import Pix2pixDataset
from data.base_dataset import get_params, get_transform
class DeepFashionDataset(Pix2pixDataset):
@staticmethod
def modify_commandline_options(parser, is_train):
parser = Pix2pixDataset.modify_commandline_options(parser, is_train)
parser.set_defaults(preprocess_mode='resize_and_crop')
parser.set_defaults(no_pairing_check=True)
if is_train:
parser.set_defaults(load_size=286)
else:
parser.set_defaults(load_size=256)
parser.set_defaults(crop_size=256)
parser.set_defaults(display_winsize=256)
parser.set_defaults(label_nc=20)
parser.set_defaults(contain_dontcare_label=False)
parser.set_defaults(cache_filelist_read=False)
parser.set_defaults(cache_filelist_write=False)
return parser
def get_paths(self, opt):
#root = os.path.dirname(opt.dataroot) if opt.hdfs else opt.dataroot
root = opt.dataroot
if opt.phase == 'train':
fd = open(os.path.join(root, 'train.txt'))
lines = fd.readlines()
fd.close()
elif opt.phase == 'test':
fd = open(os.path.join(root, 'val.txt'))
lines = fd.readlines()
fd.close()
image_paths = []
label_paths = []
for i in range(len(lines)):
image_paths.append(os.path.join(opt.dataroot, lines[i].strip().replace('\\', '/')))
label_path = lines[i].strip().replace('img', 'pose').replace('.jpg', '_{}.txt').replace('\\', '/')
label_paths.append(os.path.join(opt.dataroot, label_path))
return label_paths, image_paths
def get_ref_video_like(self, opt):
pair_path = './data/deepfashion_self_pair.txt'
with open(pair_path) as fd:
self_pair = fd.readlines()
self_pair = [it.strip() for it in self_pair]
key_name = {}
for it in self_pair:
items = it.split(',')
key_name[items[0]] = items[1:]
ref_name = './data/deepfashion_ref_test.txt' if opt.phase == 'test' else './data/deepfashion_ref.txt'
with open(ref_name) as fd:
ref = fd.readlines()
ref = [it.strip() for it in ref]
ref_dict = {}
#split = 'DeepFashion.zip@/' if opt.hdfs else 'DeepFashion/'
split = 'DeepFashion/'
for i in range(len(ref)):
items = ref[i].strip().split(',')
if items[0] in key_name.keys():
#ref_dict[items[0].replace('\\', '/')] = [random.choice(key_name[items[0]]).replace('\\', '/'), random.choice(self.image_paths).split(split)[-1]]
ref_dict[items[0].replace('\\', '/')] = [it.replace('\\', '/') for it in key_name[items[0]]] + [it.split(split)[-1] for it in random.sample(self.image_paths, min(len(self.image_paths), 20))]
else:
ref_dict[items[0].replace('\\', '/')] = [items[-1].replace('\\', '/')] + [it.split(split)[-1] for it in random.sample(self.image_paths, min(len(self.image_paths), 20))]
train_test_folder = ('', '')
return ref_dict, train_test_folder
def get_ref_vgg(self, opt):
extra = ''
if opt.phase == 'test':
extra = '_test'
with open('./data/deepfashion_ref{}.txt'.format(extra)) as fd:
lines = fd.readlines()
ref_dict = {}
for i in range(len(lines)):
items = lines[i].strip().split(',')
key = items[0].replace('\\', '/')
if opt.phase == 'test':
val = [it.replace('\\', '/') for it in items[1:]]
else:
val = [items[-1].replace('\\', '/'), random.choice(self.image_paths).split('DeepFashion/')[-1].replace('\\', '/')]
ref_dict[key] = val
train_test_folder = ('', '')
return ref_dict, train_test_folder
def get_ref(self, opt):
if opt.video_like:
return self.get_ref_video_like(opt)
else:
return self.get_ref_vgg(opt)
def get_edges(self, edge, t):
edge[:,1:] = edge[:,1:] | (t[:,1:] != t[:,:-1])
edge[:,:-1] = edge[:,:-1] | (t[:,1:] != t[:,:-1])
edge[1:,:] = edge[1:,:] | (t[1:,:] != t[:-1,:])
edge[:-1,:] = edge[:-1,:] | (t[1:,:] != t[:-1,:])
return edge
def get_label_tensor(self, path):
candidate = np.loadtxt(path.format('candidate'))
subset = np.loadtxt(path.format('subset'))
stickwidth = 4
limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \
[10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \
[1, 16], [16, 18], [3, 17], [6, 18]]
colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 255, 0], [170, 255, 0], [85, 255, 0], [0, 255, 0], \
[0, 255, 85], [0, 255, 170], [0, 255, 255], [0, 170, 255], [0, 85, 255], [0, 0, 255], [85, 0, 255], \
[170, 0, 255], [255, 0, 255], [255, 0, 170], [255, 0, 85]]
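        # Editor note (added): limbSeq lists the 1-indexed keypoint pairs forming each
        # limb in the OpenPose-style 18-keypoint skeleton, and colors assigns one RGB
        # colour per keypoint/limb; the loops below render the pose as coloured
        # circles (joints) and ellipses (limbs) onto a blank canvas.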
img_path = self.labelpath_to_imgpath(path)
img = cv2.imread(img_path)
canvas = np.zeros_like(img)
for i in range(18):
index = int(subset[i])
if index == -1:
continue
x, y = candidate[index][0:2]
cv2.circle(canvas, (int(x), int(y)), 4, colors[i], thickness=-1)
joints = []
for i in range(17):
index = subset[np.array(limbSeq[i]) - 1]
cur_canvas = canvas.copy()
if -1 in index:
joints.append(np.zeros_like(cur_canvas[:, :, 0]))
continue
Y = candidate[index.astype(int), 0]
X = candidate[index.astype(int), 1]
mX = np.mean(X)
mY = np.mean(Y)
length = ((X[0] - X[1]) ** 2 + (Y[0] - Y[1]) ** 2) ** 0.5
angle = math.degrees(math.atan2(X[0] - X[1], Y[0] - Y[1]))
polygon = cv2.ellipse2Poly((int(mY), int(mX)), (int(length / 2), stickwidth), int(angle), 0, 360, 1)
cv2.fillConvexPoly(cur_canvas, polygon, colors[i])
canvas = cv2.addWeighted(canvas, 0.4, cur_canvas, 0.6, 0)
joint = np.zeros_like(cur_canvas[:, :, 0])
cv2.fillConvexPoly(joint, polygon, 255)
joint = cv2.addWeighted(joint, 0.4, joint, 0.6, 0)
joints.append(joint)
pose = Image.fromarray(cv2.cvtColor(canvas, cv2.COLOR_BGR2RGB)).resize((self.opt.load_size, self.opt.load_size), resample=Image.NEAREST)
params = get_params(self.opt, pose.size)
transform_label = get_transform(self.opt, params, method=Image.NEAREST, normalize=False)
transform_img = get_transform(self.opt, params, method=Image.BILINEAR, normalize=False)
tensors_dist = 0
e = 1
for i in range(len(joints)):
im_dist = cv2.distanceTransform(255-joints[i], cv2.DIST_L1, 3)
im_dist = np.clip((im_dist / 3), 0, 255).astype(np.uint8)
tensor_dist = transform_img(Image.fromarray(im_dist))
tensors_dist = tensor_dist if e == 1 else torch.cat([tensors_dist, tensor_dist])
e += 1
tensor_pose = transform_label(pose)
label_tensor = torch.cat((tensor_pose, tensors_dist), dim=0)
return label_tensor, params
def imgpath_to_labelpath(self, path):
label_path = path.replace('\\', '/').replace('/img/', '/pose/').replace('.jpg', '_{}.txt')
return label_path
def labelpath_to_imgpath(self, path):
img_path = path.replace('\\', '/').replace('/pose/', '/img/').replace('_{}.txt', '.jpg')
return img_path
|
CoCosNet/data/deepfashion_dataset.py/0
|
{
"file_path": "CoCosNet/data/deepfashion_dataset.py",
"repo_id": "CoCosNet",
"token_count": 3952
}
| 212 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.networks.architecture import VGG19
from models.networks.correspondence import VGG19_feature_color_torchversion
# Defines the GAN loss which uses either LSGAN or the regular GAN.
# When LSGAN is used, it is basically the same as MSELoss,
# but it abstracts away the need to create the target label tensor
# that has the same size as the input.
class GANLoss(nn.Module):
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0,
tensor=torch.FloatTensor, opt=None):
super(GANLoss, self).__init__()
self.real_label = target_real_label
self.fake_label = target_fake_label
self.real_label_tensor = None
self.fake_label_tensor = None
self.zero_tensor = None
self.Tensor = tensor
self.gan_mode = gan_mode
self.opt = opt
if gan_mode == 'ls':
pass
elif gan_mode == 'original':
pass
elif gan_mode == 'w':
pass
elif gan_mode == 'hinge':
pass
else:
raise ValueError('Unexpected gan_mode {}'.format(gan_mode))
def get_target_tensor(self, input, target_is_real):
if target_is_real:
if self.real_label_tensor is None:
self.real_label_tensor = self.Tensor(1).fill_(self.real_label)
self.real_label_tensor.requires_grad_(False)
return self.real_label_tensor.expand_as(input)
else:
if self.fake_label_tensor is None:
self.fake_label_tensor = self.Tensor(1).fill_(self.fake_label)
self.fake_label_tensor.requires_grad_(False)
return self.fake_label_tensor.expand_as(input)
def get_zero_tensor(self, input):
if self.zero_tensor is None:
self.zero_tensor = self.Tensor(1).fill_(0)
self.zero_tensor.requires_grad_(False)
return self.zero_tensor.expand_as(input)
def loss(self, input, target_is_real, for_discriminator=True):
if self.gan_mode == 'original': # cross entropy loss
target_tensor = self.get_target_tensor(input, target_is_real)
loss = F.binary_cross_entropy_with_logits(input, target_tensor)
return loss
elif self.gan_mode == 'ls':
target_tensor = self.get_target_tensor(input, target_is_real)
return F.mse_loss(input, target_tensor)
elif self.gan_mode == 'hinge':
if for_discriminator:
if target_is_real:
minval = torch.min(input - 1, self.get_zero_tensor(input))
loss = -torch.mean(minval)
else:
minval = torch.min(-input - 1, self.get_zero_tensor(input))
loss = -torch.mean(minval)
else:
assert target_is_real, "The generator's hinge loss must be aiming for real"
loss = -torch.mean(input)
return loss
else:
# wgan
if target_is_real:
return -input.mean()
else:
return input.mean()
def __call__(self, input, target_is_real, for_discriminator=True):
        # computing loss is a bit complicated because |input| may not be
        # a tensor, but a list of tensors in the case of a multiscale discriminator
if isinstance(input, list):
loss = 0
for pred_i in input:
if isinstance(pred_i, list):
pred_i = pred_i[-1]
loss_tensor = self.loss(pred_i, target_is_real, for_discriminator)
bs = 1 if len(loss_tensor.size()) == 0 else loss_tensor.size(0)
new_loss = torch.mean(loss_tensor.view(bs, -1), dim=1)
loss += new_loss
return loss / len(input)
else:
return self.loss(input, target_is_real, for_discriminator)
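# --- Illustrative sketch (editor addition, not part of the original CoCosNet file) ---
# Minimal, hypothetical usage of GANLoss in hinge mode; the logit shape below is
# arbitrary and chosen only for illustration.
def _example_gan_loss_usage():
    criterion = GANLoss('hinge', tensor=torch.FloatTensor)
    fake_pred = torch.randn(2, 1, 8, 8)      # discriminator logits for generated images
    d_loss_fake = criterion(fake_pred, target_is_real=False, for_discriminator=True)
    g_loss = criterion(fake_pred, target_is_real=True, for_discriminator=False)
    return d_loss_fake, g_loss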
# Perceptual loss that uses a pretrained VGG network
class VGGLoss(nn.Module):
def __init__(self, gpu_ids, vgg_normal_correct=False):
super(VGGLoss, self).__init__()
self.vgg_normal_correct = vgg_normal_correct
if vgg_normal_correct:
self.vgg = VGG19_feature_color_torchversion(vgg_normal_correct=True).cuda()
else:
self.vgg = VGG19().cuda()
self.criterion = nn.L1Loss()
self.weights = [1.0 / 32, 1.0 / 16, 1.0 / 8, 1.0 / 4, 1.0]
def forward(self, x, y):
if self.vgg_normal_correct:
x_vgg, y_vgg = self.vgg(x, ['r11', 'r21', 'r31', 'r41', 'r51'], preprocess=True), self.vgg(y, ['r11', 'r21', 'r31', 'r41', 'r51'], preprocess=True)
else:
x_vgg, y_vgg = self.vgg(x), self.vgg(y)
loss = 0
for i in range(len(x_vgg)):
loss += self.weights[i] * self.criterion(x_vgg[i], y_vgg[i].detach())
return loss
# KL Divergence loss used in VAE with an image encoder
class KLDLoss(nn.Module):
def forward(self, mu, logvar):
return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
|
CoCosNet/models/networks/loss.py/0
|
{
"file_path": "CoCosNet/models/networks/loss.py",
"repo_id": "CoCosNet",
"token_count": 2528
}
| 213 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import os
import copy
import sys
import torch
from models.networks.sync_batchnorm import DataParallelWithCallback
from models.pix2pix_model import Pix2PixModel
from models.networks.generator import EMA
import util.util as util
class Pix2PixTrainer():
"""
Trainer creates the model and optimizers, and uses them to
updates the weights of the network while reporting losses
and the latest visuals to visualize the progress in training.
"""
def __init__(self, opt, resume_epoch=0):
self.opt = opt
self.pix2pix_model = Pix2PixModel(opt)
if len(opt.gpu_ids) > 1:
self.pix2pix_model = DataParallelWithCallback(self.pix2pix_model,
device_ids=opt.gpu_ids)
self.pix2pix_model_on_one_gpu = self.pix2pix_model.module
else:
self.pix2pix_model.to(opt.gpu_ids[0])
self.pix2pix_model_on_one_gpu = self.pix2pix_model
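        # Editor note (added): when --use_ema is set, EMA keeps exponentially-averaged
        # shadow copies of the generator and correspondence-network weights; they are
        # updated after every generator step and swapped in/out around checkpointing
        # (see run_generator_one_step() and save()).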
if opt.use_ema:
self.netG_ema = EMA(opt.ema_beta)
for name, param in self.pix2pix_model_on_one_gpu.net['netG'].named_parameters():
if param.requires_grad:
self.netG_ema.register(name, param.data)
self.netCorr_ema = EMA(opt.ema_beta)
for name, param in self.pix2pix_model_on_one_gpu.net['netCorr'].named_parameters():
if param.requires_grad:
self.netCorr_ema.register(name, param.data)
self.generated = None
if opt.isTrain:
self.optimizer_G, self.optimizer_D = \
self.pix2pix_model_on_one_gpu.create_optimizers(opt)
self.old_lr = opt.lr
if opt.continue_train and opt.which_epoch == 'latest':
checkpoint = torch.load(os.path.join(opt.checkpoints_dir, opt.name, 'optimizer.pth'))
self.optimizer_G.load_state_dict(checkpoint['G'])
self.optimizer_D.load_state_dict(checkpoint['D'])
self.last_data, self.last_netCorr, self.last_netG, self.last_optimizer_G = None, None, None, None
def run_generator_one_step(self, data, alpha=1):
self.optimizer_G.zero_grad()
g_losses, out = self.pix2pix_model(data, mode='generator', alpha=alpha)
g_loss = sum(g_losses.values()).mean()
g_loss.backward()
self.optimizer_G.step()
self.g_losses = g_losses
self.out = out
if self.opt.use_ema:
self.netG_ema(self.pix2pix_model_on_one_gpu.net['netG'])
self.netCorr_ema(self.pix2pix_model_on_one_gpu.net['netCorr'])
def run_discriminator_one_step(self, data):
self.optimizer_D.zero_grad()
GforD = {}
GforD['fake_image'] = self.out['fake_image']
GforD['adaptive_feature_seg'] = self.out['adaptive_feature_seg']
GforD['adaptive_feature_img'] = self.out['adaptive_feature_img']
d_losses = self.pix2pix_model(data, mode='discriminator', GforD=GforD)
d_loss = sum(d_losses.values()).mean()
d_loss.backward()
self.optimizer_D.step()
self.d_losses = d_losses
def get_latest_losses(self):
return {**self.g_losses, **self.d_losses}
def get_latest_generated(self):
return self.out['fake_image']
def save(self, epoch):
self.pix2pix_model_on_one_gpu.save(epoch)
if self.opt.use_ema:
self.netG_ema.assign(self.pix2pix_model_on_one_gpu.net['netG'])
util.save_network(self.pix2pix_model_on_one_gpu.net['netG'], 'G_ema', epoch, self.opt)
self.netG_ema.resume(self.pix2pix_model_on_one_gpu.net['netG'])
self.netCorr_ema.assign(self.pix2pix_model_on_one_gpu.net['netCorr'])
util.save_network(self.pix2pix_model_on_one_gpu.net['netCorr'], 'netCorr_ema', epoch, self.opt)
self.netCorr_ema.resume(self.pix2pix_model_on_one_gpu.net['netCorr'])
if epoch == 'latest':
torch.save({'G': self.optimizer_G.state_dict(),
'D': self.optimizer_D.state_dict(),
'lr': self.old_lr,
}, os.path.join(self.opt.checkpoints_dir, self.opt.name, 'optimizer.pth'))
##################################################################
# Helper functions
##################################################################
def update_learning_rate(self, epoch):
if epoch > self.opt.niter:
lrd = self.opt.lr / self.opt.niter_decay
new_lr = self.old_lr - lrd
else:
new_lr = self.old_lr
if new_lr != self.old_lr:
if self.opt.no_TTUR:
new_lr_G = new_lr
new_lr_D = new_lr
else:
new_lr_G = new_lr / 2
new_lr_D = new_lr * 2
for param_group in self.optimizer_D.param_groups:
param_group['lr'] = new_lr_D
for param_group in self.optimizer_G.param_groups:
param_group['lr'] = new_lr_G
print('update learning rate: %f -> %f' % (self.old_lr, new_lr))
self.old_lr = new_lr
def update_fixed_params(self):
for param in self.pix2pix_model_on_one_gpu.net['netCorr'].parameters():
param.requires_grad = True
G_params = [{'params': self.pix2pix_model_on_one_gpu.net['netG'].parameters(), 'lr': self.opt.lr*0.5}]
G_params += [{'params': self.pix2pix_model_on_one_gpu.net['netCorr'].parameters(), 'lr': self.opt.lr*0.5}]
if self.opt.no_TTUR:
beta1, beta2 = self.opt.beta1, self.opt.beta2
G_lr = self.opt.lr
else:
beta1, beta2 = 0, 0.9
G_lr = self.opt.lr / 2
self.optimizer_G = torch.optim.Adam(G_params, lr=G_lr, betas=(beta1, beta2), eps=1e-3)
|
CoCosNet/trainers/pix2pix_trainer.py/0
|
{
"file_path": "CoCosNet/trainers/pix2pix_trainer.py",
"repo_id": "CoCosNet",
"token_count": 2995
}
| 214 |
import random
import torch
from torch.utils.data import Dataset
import os
import pickle
import logging
import json
from tqdm import tqdm
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def _truncate_seq_pair_two_length(tokens_a, tokens_b, max_length_a, max_length_b):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length_a + max_length_b:
break
if len(tokens_b) > max_length_b:
tokens_b.pop()
else: # len(tokens_a) > max_length_a
tokens_a.pop()
class InputFeatures(object):
"""A single training/test features for a example."""
def __init__(self,
code_tokens,
trace_tokens
):
self.code_tokens = code_tokens
self.trace_tokens = trace_tokens
def convert_examples_to_features(item):
# parsing
js,tokenizer=item
code_tokens = js["code_tokens"]
trace_tokens = js["trace_tokens"]
code_tokens = tokenizer.tokenize(" ".join(code_tokens))
trace_tokens = tokenizer.tokenize(" ".join(trace_tokens))
return InputFeatures(code_tokens,trace_tokens)
class TextDataset(Dataset):
def __init__(self, tokenizer, args, filename, local_rank, world_size, logger, mode, prefix=""):
self.args = args
self.tokenizer = tokenizer
if len(prefix) > 0:
cached_features_file = os.path.join('{}'.format(args.data_cache_dir), prefix + "_word_size_"+str(world_size)+"_rank_"+str(local_rank)+'_size_'+ str(args.block_size)+'_'+mode+'.pkl')
else:
cached_features_file = os.path.join('{}'.format(args.data_cache_dir), "word_size_"+str(world_size)+"_rank_"+str(local_rank)+'_size_'+ str(args.block_size)+'_'+mode+'.pkl')
if os.path.exists(cached_features_file):
logger.warning("Loading features from cached file %s", cached_features_file)
with open(cached_features_file, 'rb') as handle1:
self.examples = pickle.load(handle1)
if 'train' in mode and local_rank==0:
for idx, example in enumerate(self.examples[:1]):
logger.warning("*** Example ***")
logger.warning("idx: %s",idx)
logger.warning("code_tokens: {}".format(' '.join(map(str, example.code_tokens))))
logger.warning("trace_tokens: {}".format(' '.join(map(str, example.trace_tokens))))
else:
self.examples = []
total_num = 0
error_num = 0
logger.info("Load and create features from dataset file at %s", filename)
num_lines = sum(1 for line in open(filename,'r'))
with open(filename,"r",encoding="utf8") as f:
for i,line in enumerate(tqdm(f,total=num_lines)):
json_line = json.loads(line)
if len(json_line['code_tokens']) != 0:
total_num += 1
if (mode == "train" and total_num % world_size == local_rank) or (mode != "train" and local_rank in [-1, 0]):
js = {}
if len(prefix) > 0:
js["code_tokens"] = ["<"+prefix+">"]
js["code_tokens"].extend(json_line["code_tokens"])
else:
js["code_tokens"] = json_line["code_tokens"]
js["trace_tokens"] = json_line["trace_tokens"]
try:
features = convert_examples_to_features((js, tokenizer))
cur_index = len(self.examples)
self.examples.append(features)
except:
error_num += 1
if mode == "train" and local_rank==0:
for idx, example in enumerate(self.examples[:1]):
logger.warning("*** Example ***")
logger.warning("idx: %s",idx)
logger.warning("code_tokens: {}".format(example.code_tokens))
logger.warning("trace_tokens: {}".format(example.trace_tokens))
logger.warning("Num examples = %d: %d", local_rank,len(self.examples))
logger.warning(f"Error num = {error_num}")
# debug
logger.warning("Saving features into cached file %s", cached_features_file)
if not os.path.exists(args.data_cache_dir):
os.makedirs(args.data_cache_dir)
with open(cached_features_file, 'wb') as handle1:
pickle.dump(self.examples, handle1, protocol=pickle.HIGHEST_PROTOCOL)
def __len__(self):
return len(self.examples)
def __getitem__(self, item):
js = self.examples[item]
# Encoder-Decoder for Trace Generation
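        # Editor note (added): the source sequence is laid out as
        #   <s> <encoder-decoder> </s> [truncated code tokens] <mask0> </s>
        # while the decoder input is primed with <mask0> followed by a placeholder,
        # and the gold trace is wrapped as <mask0> [trace tokens] </s>; all three
        # are right-padded with pad_token_id up to the configured max lengths.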
source_tokens = js.code_tokens[0:self.args.max_source_length-5]
source_tokens = ["<s>","<encoder-decoder>","</s>"] + source_tokens + ["<mask0>"] + ["</s>"]
source_ids = self.tokenizer.convert_tokens_to_ids(source_tokens)
padding_length = self.args.max_source_length - len(source_ids)
source_ids += [self.tokenizer.pad_token_id]*padding_length
target_tokens = self.tokenizer.tokenize("None") # generate
target_tokens = ["<mask0>"] + target_tokens + [self.tokenizer.sep_token]
target_ids = self.tokenizer.convert_tokens_to_ids(target_tokens)
padding_length = self.args.max_target_length - len(target_ids)
target_ids += [self.tokenizer.pad_token_id] * padding_length
gold_tokens = js.trace_tokens[:self.args.max_target_length-2]
gold_tokens = ["<mask0>"] + gold_tokens + [self.tokenizer.sep_token]
gold_ids = self.tokenizer.convert_tokens_to_ids(gold_tokens)
padding_length = self.args.max_target_length - len(gold_ids)
gold_ids += [self.tokenizer.pad_token_id] * padding_length
return (
torch.tensor(source_ids),
torch.tensor(target_ids),
torch.tensor(gold_ids),
)
|
CodeBERT/CodeExecutor/inference/dataset.py/0
|
{
"file_path": "CodeBERT/CodeExecutor/inference/dataset.py",
"repo_id": "CodeBERT",
"token_count": 3399
}
| 215 |
import os
import torch
import logging
import argparse
import random
import json
from tqdm import tqdm
import multiprocessing
import time
from itertools import cycle
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data import ConcatDataset
from torch.utils.data.distributed import DistributedSampler
from transformers import AdamW, get_linear_schedule_with_warmup
from models import build_or_load_gen_model
from configs import add_args, set_seed, set_dist
from torch.nn.parallel import DistributedDataParallel as DDP
import torch.distributed as dist
from utils import RefineDataset, SimpleRefineDataset
from evaluator.smooth_bleu import bleu_fromstr
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def get_loader(data_file, args, tokenizer, pool, eval=False):
def fn(features):
return features
global_rank = args.global_rank
if args.raw_input:
dataset = SimpleRefineDataset(tokenizer, pool, args, data_file)
else:
dataset = RefineDataset(tokenizer, pool, args, data_file)
data_len = len(dataset)
if global_rank == 0:
logger.info(f"Data length: {data_len}.")
if eval:
sampler = SequentialSampler(dataset)
else:
sampler = DistributedSampler(dataset)
dataloader = DataLoader(dataset, sampler=sampler, batch_size=args.train_batch_size, num_workers=args.cpu_count, collate_fn=fn)
return dataset, sampler, dataloader
def eval_bleu_epoch(args, eval_dataloader, model, tokenizer):
logger.info(f" ***** Running bleu evaluation on {args.eval_file} *****")
logger.info(" Batch size = %d", args.eval_batch_size)
model.eval()
if hasattr(model, "module"):
model = model.module
pred_ids, ex_ids = [], []
for step, examples in enumerate(eval_dataloader, 1):
source_ids = torch.tensor(
[ex.source_ids for ex in examples], dtype=torch.long
).to(args.local_rank)
source_mask = source_ids.ne(tokenizer.pad_id)
preds = model.generate(source_ids,
attention_mask=source_mask,
use_cache=True,
num_beams=args.beam_size,
early_stopping=True,
max_length=args.max_target_length)
top_preds = list(preds.cpu().numpy())
pred_ids.extend(top_preds)
pred_nls = [tokenizer.decode(id, skip_special_tokens=True, clean_up_tokenization_spaces=False) for id in pred_ids]
valid_file = args.dev_filename
golds = []
with open(valid_file, "r") as f:
for line in f:
golds.append(json.loads(line)["new"])
golds = golds[:len(pred_nls)]
for i in range(len(golds)):
pred_nls[i], golds[i] = RefineDataset.process_pred_gold(pred_nls[i], golds[i])
with open(os.path.join(args.model_name_or_path, "preds.txt"), "w", encoding="utf-8") as f:
for pred in pred_nls:
f.write(pred.strip() + "\n")
with open(os.path.join(args.model_name_or_path, "golds.txt"), "w", encoding="utf-8") as f:
for gold in golds:
f.write(gold.strip() + "\n")
em = 0
for pred, gold in zip(pred_nls, golds):
if " ".join(pred.split()) == " ".join(gold.split()):
em += 1
em = em / len(golds)
logger.warning(f"EM: {em}")
bleu = bleu_fromstr(pred_nls, golds, rmstop=False)
return bleu
def save_model(model, optimizer, scheduler, output_dir, config):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, "module") else model
config.save_pretrained(output_dir)
output_model_file = os.path.join(output_dir, "pytorch_model.bin")
torch.save(model_to_save.state_dict(), output_model_file)
output_optimizer_file = os.path.join(output_dir, "optimizer.pt")
torch.save(
optimizer.state_dict(),
output_optimizer_file,
_use_new_zipfile_serialization=False,
)
output_scheduler_file = os.path.join(output_dir, "scheduler.pt")
torch.save(
scheduler.state_dict(),
output_scheduler_file,
_use_new_zipfile_serialization=False,
)
def main(args):
dist.init_process_group(backend="nccl")
local_rank = dist.get_rank() % args.gpu_per_node
args.global_rank = local_rank + args.node_index * args.gpu_per_node
args.local_rank = local_rank
args.world_size = dist.get_world_size()
logger.warning("Process rank: %s, global rank: %s, world size: %s, bs: %s",
args.local_rank, args.global_rank, \
torch.distributed.get_world_size(), \
args.train_batch_size)
torch.cuda.set_device(local_rank)
set_seed(args)
config, model, tokenizer = build_or_load_gen_model(args)
model = DDP(model.cuda(), device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)
pool = multiprocessing.Pool(args.cpu_count)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
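    # Editor note (added): parameters are split into two groups so that biases and
    # LayerNorm weights are excluded from weight decay, a common fine-tuning choice.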
optimizer = AdamW(
optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon
)
args.warmup_steps = int(args.train_steps * 0.1)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=args.warmup_steps,
num_training_steps=args.train_steps,
)
if os.path.exists("{}/checkpoints-last/optimizer.pt".format(args.output_dir)):
optimizer.load_state_dict(
torch.load(
"{}/checkpoints-last/optimizer.pt".format(args.output_dir),
map_location="cpu",
)
)
scheduler.load_state_dict(
torch.load(
"{}/checkpoints-last/scheduler.pt".format(args.output_dir),
map_location="cpu",
)
)
global_step = 0
save_steps = args.save_steps
train_file = args.train_filename
valid_file = args.dev_filename
    data_tuple = get_loader(train_file, args, tokenizer, pool)  # WARNING: this is an iterator, to save memory
_, _, train_dataloader = data_tuple
data_tuple = get_loader(valid_file, args, tokenizer, pool, eval=True)
_, _, valid_dataloader = data_tuple
# bleu = eval_bleu_epoch(args, valid_dataloader, model, tokenizer)
# logger.warning("Initial bleu: {}".format(bleu))
for epoch in range(1, args.train_epochs + 1):
# set seed for reproducible data split
save_seed = args.seed
args.seed += epoch
set_seed(args)
args.seed = save_seed
model.train()
nb_tr_examples, nb_tr_steps, tr_loss = 0, 0, 0
for step, examples in enumerate(train_dataloader, 1):
if step == 1:
ex = examples[0]
logger.info(f"batch size: {len(examples)}")
logger.info(f"example source: {tokenizer.convert_ids_to_tokens(ex.source_ids)}")
# logger.info(f"example label: {tokenizer.convert_ids_to_tokens(ex.source_labels)}")
logger.info(f"example target: {tokenizer.convert_ids_to_tokens(ex.target_ids)}")
source_ids = torch.tensor(
[ex.source_ids for ex in examples], dtype=torch.long
).to(local_rank)
target_ids = torch.tensor(
[ex.target_ids for ex in examples], dtype=torch.long
).to(local_rank)
source_mask = source_ids.ne(tokenizer.pad_id)
target_mask = target_ids.ne(tokenizer.pad_id)
loss = model(
input_ids=source_ids,
input_labels=None,
decoder_input_ids=target_ids,
attention_mask=source_mask,
decoder_attention_mask=target_mask,
encoder_loss=False
)
if args.gpu_per_node > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
tr_loss += loss.item()
nb_tr_examples += source_ids.size(0)
nb_tr_steps += 1
loss.backward()
if nb_tr_steps % args.gradient_accumulation_steps == 0:
# Update parameters
optimizer.step()
optimizer.zero_grad()
scheduler.step()
global_step += 1
if args.global_rank == 0 and global_step % args.log_steps == 0:
train_loss = round(
tr_loss * args.gradient_accumulation_steps / nb_tr_steps,
4,
)
logger.info(
"step {}/{}: Train loss {}".format(
global_step,
args.train_steps,
round(train_loss, 3),
)
)
if global_step == args.train_steps and args.global_rank == 0:
# end training
bleu = eval_bleu_epoch(args, valid_dataloader, model, tokenizer)
output_dir = os.path.join(args.output_dir, "checkpoints-last" + "-" + str(bleu))
save_model(model, optimizer, scheduler, output_dir, config)
logger.info(f"Reach max steps {args.train_steps}.")
time.sleep(5)
return
if args.global_rank == 0 and \
global_step % save_steps == 0 and \
nb_tr_steps % args.gradient_accumulation_steps == 0:
bleu = eval_bleu_epoch(args, valid_dataloader, model, tokenizer)
output_dir = os.path.join(args.output_dir, "checkpoints-" + str(global_step) + "-" + str(bleu))
save_model(model, optimizer, scheduler, output_dir, config)
logger.info(
"Save the {}-step model and optimizer into {}".format(
global_step, output_dir
)
)
time.sleep(5)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
args = add_args(parser)
args.cpu_count = multiprocessing.cpu_count()
# remove long tokenization warning. ref: https://github.com/huggingface/transformers/issues/991
logging.getLogger("transformers.tokenization_utils_base").setLevel(logging.ERROR)
logger.info(args)
main(args)
logger.info("Training finished.")
# torch.multiprocessing.spawn(main, args=(args,), nprocs=torch.cuda.device_count())
|
CodeBERT/CodeReviewer/code/run_finetune_ref.py/0
|
{
"file_path": "CodeBERT/CodeReviewer/code/run_finetune_ref.py",
"repo_id": "CodeBERT",
"token_count": 5447
}
| 216 |
# Clone Detection (POJ-104)
## Data Download
```bash
cd dataset
pip install gdown
gdown https://drive.google.com/uc?id=0B2i-vWnOu7MxVlJwQXN6eVNONUU
tar -xvf programs.tar.gz
python preprocess.py
cd ..
```
## Dependency
- pip install torch
- pip install transformers
## Fine-Tune
Here we provide the fine-tuning settings for clone detection (POJ-104), whose results are reported in the paper.
```shell
# Training
python run.py \
--output_dir saved_models \
--model_name_or_path microsoft/unixcoder-base \
--do_train \
--train_data_file dataset/train.jsonl \
--eval_data_file dataset/valid.jsonl \
--test_data_file dataset/test.jsonl \
--num_train_epochs 2 \
--block_size 400 \
--train_batch_size 8 \
--eval_batch_size 16 \
--learning_rate 2e-5 \
--max_grad_norm 1.0 \
--seed 123456
# Evaluating
python run.py \
--output_dir saved_models \
--model_name_or_path microsoft/unixcoder-base \
--do_eval \
--do_test \
--eval_data_file dataset/valid.jsonl \
--test_data_file dataset/test.jsonl \
--num_train_epochs 2 \
--block_size 400 \
--train_batch_size 8 \
--eval_batch_size 16 \
--learning_rate 2e-5 \
--max_grad_norm 1.0 \
--seed 123456
```
|
CodeBERT/UniXcoder/downstream-tasks/clone-detection/POJ-104/README.md/0
|
{
"file_path": "CodeBERT/UniXcoder/downstream-tasks/clone-detection/POJ-104/README.md",
"repo_id": "CodeBERT",
"token_count": 520
}
| 217 |
# Code Summarization
## Data Download
```bash
wget https://github.com/microsoft/CodeXGLUE/raw/main/Code-Text/code-to-text/dataset.zip
unzip dataset.zip
rm dataset.zip
cd dataset
wget https://zenodo.org/record/7857872/files/python.zip
wget https://zenodo.org/record/7857872/files/java.zip
wget https://zenodo.org/record/7857872/files/ruby.zip
wget https://zenodo.org/record/7857872/files/javascript.zip
wget https://zenodo.org/record/7857872/files/go.zip
wget https://zenodo.org/record/7857872/files/php.zip
unzip python.zip
unzip java.zip
unzip ruby.zip
unzip javascript.zip
unzip go.zip
unzip php.zip
rm *.zip
rm *.pkl
python preprocess.py
rm -r */final
cd ..
```
## Dependency
- pip install torch
- pip install transformers
## Fine-Tune Setting
Here we provide the fine-tuning settings for code summarization, whose results are reported in the paper.
```shell
lang=python
# Training
python run.py \
--do_train \
--do_eval \
--model_name_or_path microsoft/unixcoder-base \
--train_filename dataset/$lang/train.jsonl \
--dev_filename dataset/$lang/valid.jsonl \
--output_dir saved_models/$lang \
--max_source_length 256 \
--max_target_length 128 \
--beam_size 10 \
--train_batch_size 48 \
--eval_batch_size 48 \
--learning_rate 5e-5 \
--gradient_accumulation_steps 2 \
--num_train_epochs 10
# Evaluating
python run.py \
--do_test \
--model_name_or_path microsoft/unixcoder-base \
--test_filename dataset/$lang/test.jsonl \
--output_dir saved_models/$lang \
--max_source_length 256 \
--max_target_length 128 \
--beam_size 10 \
--train_batch_size 48 \
--eval_batch_size 48 \
--learning_rate 5e-5 \
--gradient_accumulation_steps 2 \
--num_train_epochs 10
```
|
CodeBERT/UniXcoder/downstream-tasks/code-summarization/README.md/0
|
{
"file_path": "CodeBERT/UniXcoder/downstream-tasks/code-summarization/README.md",
"repo_id": "CodeBERT",
"token_count": 637
}
| 218 |
https://allenai.org/data/strategyqa
|
CodeT/DIVERSE/data/sqa/README.md/0
|
{
"file_path": "CodeT/DIVERSE/data/sqa/README.md",
"repo_id": "CodeT",
"token_count": 13
}
| 219 |
.PHONY: clean deps install lint pep8 pyflakes pylint test
clean:
find . -name '*.pyc' -print0 | xargs -0 rm -f
find . -name '*.swp' -print0 | xargs -0 rm -f
find . -name '__pycache__' -print0 | xargs -0 rm -rf
-rm -rf build dist *.egg-info .eggs
deps:
pip install -r requirements.txt
install:
python setup.py install
lint: pep8 pyflakes pylint
pep8:
-pep8 --statistics --count cognitive_face setup.py sample
pyflakes:
-pyflakes cognitive_face setup.py sample
pylint:
-pylint --rcfile=.pylintrc cognitive_face setup.py sample/*
test:
python setup.py test
|
Cognitive-Face-Python/Makefile/0
|
{
"file_path": "Cognitive-Face-Python/Makefile",
"repo_id": "Cognitive-Face-Python",
"token_count": 230
}
| 220 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: test_face_list.py
Description: Unittests for Face List section of the Cognitive Face API.
"""
import uuid
import unittest
import cognitive_face as CF
from . import util
class TestFaceList(unittest.TestCase):
"""Unittests for Face List section."""
def test_face(self):
"""Unittests for `face_list.add_face` and `face_list.delete_face`."""
image = '{}PersonGroup/Family1-Dad/Family1-Dad3.jpg'.format(
util.BASE_URL_IMAGE)
res = CF.face_list.add_face(image, util.DataStore.face_list_id)
print(res)
self.assertIsInstance(res, dict)
util.wait()
persisted_face_id = res['persistedFaceId']
res = CF.face_list.delete_face(util.DataStore.face_list_id,
persisted_face_id)
print(res)
self.assertIsInstance(res, dict)
util.wait()
def test_face_list(self):
"""Unittests for `face_list.create`, `face_list.update` and
`face_list.delete`.
"""
face_list_id = str(uuid.uuid1())
res = CF.face_list.create(face_list_id)
print(res)
self.assertIsInstance(res, dict)
util.wait()
res = CF.face_list.update(face_list_id, 'test')
print(res)
self.assertIsInstance(res, dict)
util.wait()
res = CF.face_list.delete(face_list_id)
print(res)
self.assertIsInstance(res, dict)
util.wait()
def test_get(self):
"""Unittest for `face_list.get`."""
res = CF.face_list.get(util.DataStore.face_list_id)
print(res)
self.assertIsInstance(res, dict)
util.wait()
def test_lists(self):
"""Unittest for `face_list.lists`."""
res = CF.face_list.lists()
print(res)
self.assertIsInstance(res, list)
util.wait()
|
Cognitive-Face-Python/cognitive_face/tests/test_face_list.py/0
|
{
"file_path": "Cognitive-Face-Python/cognitive_face/tests/test_face_list.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 903
}
| 221 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
File: view.py
Description: Base components for Python SDK sample.
"""
import time
import wx
import util
class MyPanel(wx.Panel):
"""Base Panel."""
def __init__(self, parent):
super(MyPanel, self).__init__(parent)
colour_window = wx.SystemSettings.GetColour(wx.SYS_COLOUR_WINDOW)
self.SetBackgroundColour(colour_window)
class MyStaticBitmap(MyPanel):
"""Base StaticBitmap."""
def __init__(self, parent, bitmap=wx.NullBitmap, size=util.MAX_IMAGE_SIZE):
super(MyStaticBitmap, self).__init__(parent)
self.bmp = bitmap
self.scale = 1.0
self.bitmap = wx.StaticBitmap(self, bitmap=bitmap)
self.size = size
self.sizer = wx.BoxSizer(wx.VERTICAL)
self.sizer.AddStretchSpacer()
self.sizer.Add(self.bitmap, 0, wx.EXPAND)
self.sizer.AddStretchSpacer()
self.SetMinSize((size, size))
self.SetSizer(self.sizer)
self.sizer.Layout()
def set_path(self, path):
"""Set the image path."""
img = util.rotate_image(path)
width = img.GetWidth()
img = util.scale_image(img, size=self.size)
new_width = img.GetWidth()
self.scale = 1.0 * new_width / width
self.bmp = img.ConvertToBitmap()
self.bitmap.SetBitmap(self.bmp)
self.sizer.Layout()
class MyGridStaticBitmap(wx.Panel):
"""Base Grid StaticBitmap."""
def __init__(self,
parent,
rows=1,
cols=0,
vgap=0,
hgap=0,
size=util.MAX_THUMBNAIL_SIZE):
super(MyGridStaticBitmap, self).__init__(parent)
self.sizer = wx.GridSizer(rows, cols, vgap, hgap)
self.SetSizer(self.sizer)
self.size = size
def set_paths(self, paths):
"""Set the paths for the images."""
self.sizer.Clear(True)
for path in paths:
bitmap = MyStaticBitmap(self, size=self.size)
bitmap.set_path(path)
self.sizer.Add(bitmap)
self.SetSizerAndFit(self.sizer)
self.sizer.Layout()
def set_faces(self, faces):
"""Set the faces."""
self.sizer.Clear(True)
for face in faces:
bitmap = MyStaticBitmap(self, bitmap=face.bmp, size=self.size)
self.sizer.Add(bitmap)
self.SetSizerAndFit(self.sizer)
self.sizer.Layout()
class WrapCaptionFaceList(wx.WrapSizer):
"""Wrap face list with caption under the face."""
def __init__(self, parent, confidence_faces, size=util.MAX_THUMBNAIL_SIZE):
super(WrapCaptionFaceList, self).__init__()
for face, confidence in confidence_faces:
vsizer = wx.BoxSizer(wx.VERTICAL)
bitmap = MyStaticBitmap(parent, face.bmp, size=size)
vsizer.Add(bitmap)
static_text = wx.StaticText(parent, label='%.2f' % confidence)
vsizer.Add(static_text)
self.Add(vsizer, 0, wx.ALIGN_LEFT | wx.EXPAND)
vsizer.Layout()
if len(confidence_faces) == 0:
static_text = wx.StaticText(parent, label='no one')
self.Add(static_text, 0, wx.ALIGN_LEFT | wx.EXPAND)
self.Layout()
class FindSimilarsResult(wx.Panel):
"""The view for Find Similar result."""
def __init__(self, parent):
super(FindSimilarsResult, self).__init__(parent)
self.sizer = wx.BoxSizer(wx.VERTICAL)
def set_data(self, faces, res_tot, size=util.MAX_THUMBNAIL_SIZE):
"""Set the data."""
self.sizer.Clear(True)
static_text_title = wx.StaticText(
self,
label='Find {} Similar Candidate Faces Results:'.format(
len(faces)))
self.sizer.Add(static_text_title, 0, wx.EXPAND)
for mode in ('matchPerson', 'matchFace'):
static_text_caption = wx.StaticText(
self, label='{} Mode:'.format(mode))
self.sizer.Add(static_text_caption, 0, wx.EXPAND)
for face_id, face in faces.items():
static_line = wx.StaticLine(self)
self.sizer.Add(static_line, 0, wx.EXPAND)
bitmap = MyStaticBitmap(self, face.bmp, size=size)
self.sizer.Add(bitmap, 0, wx.ALIGN_LEFT)
static_text = wx.StaticText(
self, label='Similar Faces Ranked by Similarity')
self.sizer.Add(static_text, 0, wx.ALIGN_LEFT)
confidence_face_list = WrapCaptionFaceList(
self, res_tot[mode][face_id])
self.sizer.Add(confidence_face_list, 0, wx.EXPAND)
self.SetSizerAndFit(self.sizer)
class WrapFaceList(wx.Panel):
"""Base wrap face list."""
def __init__(self, parent, faces, size=util.MAX_THUMBNAIL_SIZE):
super(WrapFaceList, self).__init__(parent)
self.sizer = wx.WrapSizer()
self.sizer.SetMinSize((util.MAX_IMAGE_SIZE, -1))
for face in faces:
bitmap = MyStaticBitmap(self, face.bmp, size=size)
self.sizer.Add(bitmap, 0, wx.ALIGN_LEFT | wx.EXPAND)
self.SetSizer(self.sizer)
self.Layout()
class CaptionWrapFaceList(wx.Panel):
"""Wrap face list with a caption."""
def __init__(self, parent):
super(CaptionWrapFaceList, self).__init__(parent)
self.sizer = wx.BoxSizer(wx.VERTICAL)
def set_data(self, caption_faces_list, size=util.MAX_THUMBNAIL_SIZE):
"""Set the data."""
self.sizer.Clear(True)
for caption, faces in caption_faces_list.items():
static_text = wx.StaticText(self, label=caption)
self.sizer.Add(static_text, 0, wx.ALIGN_LEFT)
wrap_face_list = WrapFaceList(self, faces, size)
self.sizer.Add(wrap_face_list, 0, wx.EXPAND)
self.SetSizerAndFit(self.sizer)
class GroupResult(wx.Panel):
"""The view for Group result."""
def __init__(self, parent):
super(GroupResult, self).__init__(parent)
self.sizer = wx.BoxSizer(wx.VERTICAL)
def set_data(self, faces, res, size=util.MAX_THUMBNAIL_SIZE):
"""Set the data."""
self.sizer.Clear(True)
for group in res['groups']:
static_text = wx.StaticText(self, label='Group:')
self.sizer.Add(static_text, 0, wx.EXPAND)
group_faces = [faces[face_id] for face_id in group]
wrap_face_list = WrapFaceList(self, group_faces, size)
self.sizer.Add(wrap_face_list, 0, wx.EXPAND)
if res.get('messyGroup'):
static_text = wx.StaticText(self, label='Group (Messy):')
self.sizer.Add(static_text, 0, wx.EXPAND)
group_faces = [faces[face_id] for face_id in res['messyGroup']]
wrap_face_list = WrapFaceList(self, group_faces, size)
self.sizer.Add(wrap_face_list, 0, wx.EXPAND)
self.SetSizerAndFit(self.sizer)
self.sizer.Layout()
class MyLog(wx.TextCtrl):
"""The window for each scenario."""
def __init__(self, parent):
style = wx.TE_MULTILINE | wx.TE_READONLY
super(MyLog, self).__init__(parent, style=style)
colour_menu = wx.SystemSettings.GetColour(wx.SYS_COLOUR_MENU)
self.SetBackgroundColour(colour_menu)
def log(self, msg):
"""Add log."""
log_time = time.strftime("%H:%M:%S", time.localtime())
msg = '[{}]: {}\n'.format(log_time, msg)
self.WriteText(msg)
class MyFaceList(wx.VListBox):
"""Face List."""
def __init__(self, parent, faces=[], **kwargs):
super(MyFaceList, self).__init__(parent, **kwargs)
self.SetItems(faces)
def OnMeasureItem(self, index):
"""OnMeasureItem for Layout."""
face = self.faces[index]
bmp_height = face.bmp.GetHeight() + 4
label_height = self.GetTextExtent(face.attr.gender)[1] * 6
return max(bmp_height, label_height)
def OnDrawItem(self, dc, rect, index):
"""OnDrawItem for Layout."""
face = self.faces[index]
dc.DrawBitmap(face.bmp, rect.x + 2,
((rect.height - face.bmp.GetHeight()) / 2) + rect.y)
textx = rect.x + 2 + face.bmp.GetWidth() + 2
label_rect = wx.Rect(textx, rect.y, rect.width - textx, rect.height)
label = util.LABEL_FACE.format(
face.attr.gender, face.attr.age, face.attr.hair,
face.attr.facial_hair, face.attr.makeup, face.attr.emotion,
face.attr.occlusion, face.attr.exposure, face.attr.head_pose,
face.attr.accessories)
dc.DrawLabel(label, label_rect, wx.ALIGN_LEFT | wx.ALIGN_TOP)
def SetItems(self, faces):
"""Set the items for the list."""
self.faces = faces
self.SetItemCount(len(self.faces))
self.Refresh()
|
Cognitive-Face-Python/sample/view/base.py/0
|
{
"file_path": "Cognitive-Face-Python/sample/view/base.py",
"repo_id": "Cognitive-Face-Python",
"token_count": 4278
}
| 222 |
export CUDA_VISIBLE_DEVICES=0
python t5_run_eval.py \
--model_name_or_path ./checkpoint/Com/MainExp_finetune_set1_seed1/checkpoint-50000 \
--subtask Com \
--validation_file test \
--ebatch_size 16 \
--set set1
|
ContextualSP/abstraction_probing/code/t5_code/Com_MainExp_test.sh/0
|
{
"file_path": "ContextualSP/abstraction_probing/code/t5_code/Com_MainExp_test.sh",
"repo_id": "ContextualSP",
"token_count": 84
}
| 223 |
description: Adapter MT-NLU Job on AMLK8s
target:
service: amlk8s
# run "amlt target list amlk8s" to list the names of available AMLK8s targets
name: itpeusp100cl
vc: resrchvc
environment:
image: python:3.6
registry: docker.io # any public registry can be specified here
setup:
- pip install -r requirements.txt
code:
# local directory of the code. this will be uploaded to the server.
# $CONFIG_DIR is expanded to the directory of this config file
local_dir: $CONFIG_DIR
jobs:
- name: mtnlu-mnli
sku: G1
command:
- sudo apt-get install git -y
- sudo git clone https://github.com/WowCZ/adapter-transformers.git
- cd adapter-transformers
- pip install .
- cd ..
- sh scripts/adapter_train.sh -tr mnli -te mnli_matched,mnli_mismatched -ls 1000 -ss 5000 > LOGS/mtnlu_mnli.log
- name: mtnlu-cola
sku: G1
command:
- sudo apt-get install git -y
- sudo git clone https://github.com/WowCZ/adapter-transformers.git
- cd adapter-transformers
- pip install .
- cd ..
- sh scripts/adapter_train.sh -tr cola -te cola -ls 100 -ss 500 > LOGS/mtnlu_cola.log
- name: mtnlu-qnli
sku: G1
command:
- sudo apt-get install git -y
- sudo git clone https://github.com/WowCZ/adapter-transformers.git
- cd adapter-transformers
- pip install .
- cd ..
- sh scripts/adapter_train.sh -tr qnli -te qnli -ls 500 -ss 1000 > LOGS/mtnlu_qnli.log
- name: mtnlu-qqp
sku: G1
command:
- sudo apt-get install git -y
- sudo git clone https://github.com/WowCZ/adapter-transformers.git
- cd adapter-transformers
- pip install .
- cd ..
- sh scripts/adapter_train.sh -tr qqp -te qqp -ls 1000 -ss 5000 > LOGS/mtnlu_qqp.log
- name: mtnlu-rte
sku: G1
command:
- sudo apt-get install git -y
- sudo git clone https://github.com/WowCZ/adapter-transformers.git
- cd adapter-transformers
- pip install .
- cd ..
- sh scripts/adapter_train.sh -tr rte -te rte -ls 100 -ss 100 > LOGS/mtnlu_rte.log
- name: mtnlu-sst
sku: G1
command:
- sudo apt-get install git -y
- sudo git clone https://github.com/WowCZ/adapter-transformers.git
- cd adapter-transformers
- pip install .
- cd ..
- sh scripts/adapter_train.sh -tr sst -te sst -ls 1000 -ss 2000 > LOGS/mtnlu_sst.log
- name: mtnlu-stsb
sku: G1
command:
- sudo apt-get install git -y
- sudo git clone https://github.com/WowCZ/adapter-transformers.git
- cd adapter-transformers
- pip install .
- cd ..
- sh scripts/adapter_train.sh -tr stsb -te stsb -ls 100 -ss 200 > LOGS/mtnlu_stsb.log
- name: mtnlu-wnli
sku: G1
command:
- sudo apt-get install git -y
- sudo git clone https://github.com/WowCZ/adapter-transformers.git
- cd adapter-transformers
- pip install .
- cd ..
- sh scripts/adapter_train.sh -tr wnli -te wnli -ls 50 -ss 100 > LOGS/mtnlu_wnli.log
- name: mtnlu-unified
sku: G1
command:
- sudo apt-get install git -y
- sudo git clone https://github.com/WowCZ/adapter-transformers.git
- cd adapter-transformers
- pip install .
- cd ..
- sh scripts/adapter_train.sh -tr mnli,cola,qnli,qqp,rte,sst,stsb,wnli -te mnli_matched,mnli_mismatched,cola,qnli,qqp,rte,sst,stsb,wnli -ls 1000 -ss 2000 > LOGS/mtnlu_unified.log
|
ContextualSP/adaptershare/adapter_train.yaml/0
|
{
"file_path": "ContextualSP/adaptershare/adapter_train.yaml",
"repo_id": "ContextualSP",
"token_count": 1269
}
| 224 |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
from data_utils import DataFormat
def dump_rows(rows, out_path, data_format):
"""
output files should have following format
:param rows:
:param out_path:
:return:
"""
with open(out_path, "w", encoding="utf-8") as out_f:
row0 = rows[0]
# data_format = detect_format(row0)
for row in rows:
# assert data_format == detect_format(row), row
if data_format == DataFormat.PremiseOnly:
for col in ["uid", "label", "premise"]:
if "\t" in str(row[col]):
import pdb
pdb.set_trace()
out_f.write("%s\t%s\t%s\n" % (row["uid"], row["label"], row["premise"]))
elif data_format == DataFormat.PremiseAndOneHypothesis:
for col in ["uid", "label", "premise", "hypothesis"]:
if "\t" in str(row[col]):
import pdb
pdb.set_trace()
out_f.write(
"%s\t%s\t%s\t%s\n"
% (row["uid"], row["label"], row["premise"], row["hypothesis"])
)
elif data_format == DataFormat.PremiseAndMultiHypothesis:
for col in ["uid", "label", "premise"]:
if "\t" in str(row[col]):
import pdb
pdb.set_trace()
hypothesis = row["hypothesis"]
for one_hypo in hypothesis:
if "\t" in str(one_hypo):
import pdb
pdb.set_trace()
hypothesis = "\t".join(hypothesis)
out_f.write(
"%s\t%s\t%s\t%s\t%s\n"
% (
row["uid"],
row["ruid"],
row["label"],
row["premise"],
hypothesis,
)
)
elif data_format == DataFormat.Seqence:
for col in ["uid", "label", "premise"]:
if "\t" in str(row[col]):
import pdb
pdb.set_trace()
out_f.write("%s\t%s\t%s\n" % (row["uid"], row["label"], row["premise"]))
elif data_format == DataFormat.ClozeChoice:
for col in ["uid", "label", "premise"]:
if "\t" in str(row[col]):
import pdb
pdb.set_trace()
hypothesis = row["hypothesis"]
for one_hypo in hypothesis:
if "\t" in str(one_hypo):
import pdb
pdb.set_trace()
hypothesis = "\t".join(hypothesis)
out_f.write(
"%s\t%s\t%s\t%s\t%s\t%s\n"
% (
row["uid"],
row["choice"],
row["answer"],
row["label"],
row["premise"],
hypothesis,
)
)
else:
raise ValueError(data_format)
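# --- Illustrative sketch (editor addition, not part of the original file) ---
# Hypothetical example of dumping two premise-only rows; the uid/label/premise
# values and the output path are made up purely for illustration.
def _example_dump_premise_only(out_path="example_premise_only.tsv"):
    rows = [
        {"uid": "0", "label": 1, "premise": "the movie was great"},
        {"uid": "1", "label": 0, "premise": "the plot made no sense"},
    ]
    dump_rows(rows, out_path, DataFormat.PremiseOnly)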
|
ContextualSP/adaptershare/experiments/common_utils.py/0
|
{
"file_path": "ContextualSP/adaptershare/experiments/common_utils.py",
"repo_id": "ContextualSP",
"token_count": 2070
}
| 225 |
# coding=utf-8
# Copyright (c) Microsoft. All rights reserved.
import torch
import torch.nn as nn
import math
from torch.nn.init import (
    uniform,
    normal,
    eye,
    xavier_uniform,
    xavier_normal,
    kaiming_uniform,
    kaiming_normal,
    orthogonal,
)
def linear(x):
return x
def swish(x):
    return x * torch.sigmoid(x)
def bertgelu(x):
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def gptgelu(x):
return (
0.5
* x
* (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
)
# default gelu
gelu = bertgelu
def activation(func_a):
"""Activation function wrapper"""
try:
f = eval("nn.{}".format(func_a))
except:
f = linear
return f
def init_wrapper(init="xavier_uniform"):
return eval(init)
|
ContextualSP/adaptershare/module/common.py/0
|
{
"file_path": "ContextualSP/adaptershare/module/common.py",
"repo_id": "ContextualSP",
"token_count": 377
}
| 226 |
# Copyright (c) Microsoft. All rights reserved.
from copy import deepcopy
import torch
import logging
import random
from torch.nn import Parameter
from functools import wraps
import torch.nn.functional as F
from data_utils.task_def import TaskType
from data_utils.task_def import EncoderModelType
from .loss import stable_kl
logger = logging.getLogger(__name__)
def generate_noise(embed, mask, epsilon=1e-5):
noise = embed.data.new(embed.size()).normal_(0, 1) * epsilon
noise.detach()
noise.requires_grad_()
return noise
class SmartPerturbation:
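    """SMART-style adversarial perturbation used as a smoothness regularizer.
    forward() adds small random noise to the input embeddings, refines the noise
    for K projected-gradient steps to (approximately) maximize the divergence
    between clean and perturbed predictions, and returns the resulting adversarial
    loss (computed with the task-specific loss from loss_map) together with the
    mean magnitudes of the embeddings and of the effective noise.
    """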
def __init__(
self,
epsilon=1e-6,
multi_gpu_on=False,
step_size=1e-3,
noise_var=1e-5,
norm_p="inf",
k=1,
fp16=False,
encoder_type=EncoderModelType.BERT,
loss_map=[],
norm_level=0,
):
super(SmartPerturbation, self).__init__()
self.epsilon = epsilon
# eta
self.step_size = step_size
self.multi_gpu_on = multi_gpu_on
self.fp16 = fp16
self.K = k
# sigma
self.noise_var = noise_var
self.norm_p = norm_p
self.encoder_type = encoder_type
self.loss_map = loss_map
self.norm_level = norm_level > 0
assert len(loss_map) > 0
def _norm_grad(self, grad, eff_grad=None, sentence_level=False):
if self.norm_p == "l2":
if sentence_level:
direction = grad / (
torch.norm(grad, dim=(-2, -1), keepdim=True) + self.epsilon
)
else:
direction = grad / (
torch.norm(grad, dim=-1, keepdim=True) + self.epsilon
)
elif self.norm_p == "l1":
direction = grad.sign()
else:
if sentence_level:
direction = grad / (
grad.abs().max((-2, -1), keepdim=True)[0] + self.epsilon
)
else:
direction = grad / (grad.abs().max(-1, keepdim=True)[0] + self.epsilon)
eff_direction = eff_grad / (
grad.abs().max(-1, keepdim=True)[0] + self.epsilon
)
return direction, eff_direction
def forward(
self,
model,
logits,
input_ids,
token_type_ids,
attention_mask,
premise_mask=None,
hyp_mask=None,
task_id=0,
task_type=TaskType.Classification,
pairwise=1,
):
# adv training
        assert task_type in set(
            [TaskType.Classification, TaskType.Ranking, TaskType.Regression]
        ), "Do not support {} yet".format(task_type)
vat_args = [
input_ids,
token_type_ids,
attention_mask,
premise_mask,
hyp_mask,
task_id,
1,
]
# init delta
embed = model(*vat_args)
noise = generate_noise(embed, attention_mask, epsilon=self.noise_var)
for step in range(0, self.K):
vat_args = [
input_ids,
token_type_ids,
attention_mask,
premise_mask,
hyp_mask,
task_id,
2,
embed + noise,
]
adv_logits = model(*vat_args)
if task_type == TaskType.Regression:
adv_loss = F.mse_loss(adv_logits, logits.detach(), reduction="sum")
else:
if task_type == TaskType.Ranking:
adv_logits = adv_logits.view(-1, pairwise)
adv_loss = stable_kl(adv_logits, logits.detach(), reduce=False)
(delta_grad,) = torch.autograd.grad(
adv_loss, noise, only_inputs=True, retain_graph=False
)
norm = delta_grad.norm()
if torch.isnan(norm) or torch.isinf(norm):
return 0
eff_delta_grad = delta_grad * self.step_size
delta_grad = noise + delta_grad * self.step_size
noise, eff_noise = self._norm_grad(
delta_grad, eff_grad=eff_delta_grad, sentence_level=self.norm_level
)
noise = noise.detach()
noise.requires_grad_()
vat_args = [
input_ids,
token_type_ids,
attention_mask,
premise_mask,
hyp_mask,
task_id,
2,
embed + noise,
]
adv_logits = model(*vat_args)
if task_type == TaskType.Ranking:
adv_logits = adv_logits.view(-1, pairwise)
adv_lc = self.loss_map[task_id]
adv_loss = adv_lc(logits, adv_logits, ignore_index=-1)
return adv_loss, embed.detach().abs().mean(), eff_noise.detach().abs().mean()
|
ContextualSP/adaptershare/mt_dnn/perturbation.py/0
|
{
"file_path": "ContextualSP/adaptershare/mt_dnn/perturbation.py",
"repo_id": "ContextualSP",
"token_count": 2597
}
| 227 |
################################
# Assumptions:
# 1. sql is correct
# 2. only table name has alias
# 3. only one intersect/union/except
#
# val: number(float)/string(str)/sql(dict)
# col_unit: (agg_id, col_id, isDistinct(bool))
# val_unit: (unit_op, col_unit1, col_unit2)
# table_unit: (table_type, col_unit/sql)
# cond_unit: (not_op, op_id, val_unit, val1, val2)
# condition: [cond_unit1, 'and'/'or', cond_unit2, ...]
# sql {
# 'select': (isDistinct(bool), [(agg_id, val_unit), (agg_id, val_unit), ...])
# 'from': {'table_units': [table_unit1, table_unit2, ...], 'conds': condition}
# 'where': condition
# 'groupBy': [col_unit1, col_unit2, ...]
# 'orderBy': ('asc'/'desc', [val_unit1, val_unit2, ...])
# 'having': condition
# 'limit': None/limit value
# 'intersect': None/sql
# 'except': None/sql
# 'union': None/sql
# }
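#
# Illustrative instance (hypothetical schema {'singer': ['id', 'name', 'age']}):
# get_sql(Schema({'singer': ['id', 'name', 'age']}), "SELECT name FROM singer WHERE age > 30")
# returns roughly:
# {
#   'select': (False, [(0, (0, (0, '__singer.name__', False), None))]),
#   'from': {'table_units': [('table_unit', '__singer__')], 'conds': []},
#   'where': [(False, 3, (0, (0, '__singer.age__', False), None), 30.0, None)],
#   'groupBy': [], 'orderBy': [], 'having': [], 'limit': None,
#   'intersect': None, 'except': None, 'union': None
# }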
################################
import json
import sqlite3
from typing import Dict, List
from nltk import word_tokenize
from utils.data_types import *
CLAUSE_KEYWORDS = ('select', 'from', 'where', 'group', 'order', 'limit', 'intersect', 'union', 'except')
JOIN_KEYWORDS = ('join', 'on', 'as')
WHERE_OPS = ('not', 'between', '=', '>', '<', '>=', '<=', '!=', 'in', 'like', 'is', 'exists')
UNIT_OPS = ('none', '-', '+', "*", '/')
AGG_OPS = ('none', 'max', 'min', 'count', 'sum', 'avg')
TABLE_TYPE = {
'sql': "sql",
'table_unit': "table_unit",
}
COND_OPS = ('and', 'or')
SQL_OPS = ('intersect', 'union', 'except')
ORDER_OPS = ('desc', 'asc')
class Schema:
"""
    Simple schema which maps table & column names to unique identifiers
"""
def __init__(self, schema):
self._schema = schema
self._idMap = self._map(self._schema)
@property
def schema(self):
return self._schema
@property
def idMap(self):
return self._idMap
def _map(self, schema):
idMap = {'*': "__all__"}
id = 1
for key, vals in schema.items():
for val in vals:
idMap[key.lower() + "." + val.lower()] = "__" + key.lower() + "." + val.lower() + "__"
id += 1
for key in schema:
idMap[key.lower()] = "__" + key.lower() + "__"
id += 1
return idMap
def get_schema(db):
"""
Get database's schema, which is a dict with table name as key
and list of column names as value
:param db: database path
:return: schema dict
"""
schema = {}
conn = sqlite3.connect(db)
cursor = conn.cursor()
# fetch table names
cursor.execute("SELECT name FROM sqlite_master WHERE type='table';")
tables = [str(table[0].lower()) for table in cursor.fetchall()]
# fetch table info
for table in tables:
cursor.execute("PRAGMA table_info({})".format(table))
schema[table] = [str(col[1].lower()) for col in cursor.fetchall()]
return schema
def get_schema_from_json(fpath):
with open(fpath) as f:
data = json.load(f)
schema = {}
for entry in data:
table = str(entry['table'].lower())
cols = [str(col['column_name'].lower()) for col in entry['col_data']]
schema[table] = cols
return schema
def tokenize(string):
string = str(string)
string = string.replace("\'", "\"") # ensures all string values wrapped by "" problem??
quote_idxs = [idx for idx, char in enumerate(string) if char == '"']
assert len(quote_idxs) % 2 == 0, "Unexpected quote"
# keep string value as token
vals = {}
for i in range(len(quote_idxs) - 1, -1, -2):
qidx1 = quote_idxs[i - 1]
qidx2 = quote_idxs[i]
val = string[qidx1: qidx2 + 1]
key = "__val_{}_{}__".format(qidx1, qidx2)
string = string[:qidx1] + key + string[qidx2 + 1:]
vals[key] = val
toks = [word.lower() for word in word_tokenize(string)]
# replace with string value token
for i in range(len(toks)):
if toks[i] in vals:
toks[i] = vals[toks[i]]
# find if there exists !=, >=, <=
eq_idxs = [idx for idx, tok in enumerate(toks) if tok == "="]
eq_idxs.reverse()
prefix = ('!', '>', '<')
for eq_idx in eq_idxs:
pre_tok = toks[eq_idx - 1]
if pre_tok in prefix:
toks = toks[:eq_idx - 1] + [pre_tok + "="] + toks[eq_idx + 1:]
return toks
def scan_alias(toks):
"""Scan the index of 'as' and build the map for all alias"""
as_idxs = [idx for idx, tok in enumerate(toks) if tok == 'as']
alias = {}
for idx in as_idxs:
alias[toks[idx + 1]] = toks[idx - 1]
return alias
def get_tables_with_alias(schema, toks):
tables = scan_alias(toks)
for key in schema:
assert key not in tables, "Alias {} has the same name in table".format(key)
tables[key] = key
return tables
def parse_col(toks, start_idx, tables_with_alias, schema, default_tables=None):
"""
:returns next idx, column id
"""
tok = toks[start_idx]
if tok == "*":
return start_idx + 1, schema.idMap[tok]
if '.' in tok: # if token is a composite
alias, col = tok.split('.')
key = tables_with_alias[alias] + "." + col
return start_idx + 1, schema.idMap[key]
assert default_tables is not None and len(default_tables) > 0, "Default tables should not be None or empty"
for alias in default_tables:
table = tables_with_alias[alias]
if tok in schema.schema[table]:
key = table + "." + tok
return start_idx + 1, schema.idMap[key]
assert False, "Error col: {}".format(tok)
def parse_col_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):
"""
:returns next idx, (agg_op id, col_id)
"""
idx = start_idx
len_ = len(toks)
isBlock = False
isDistinct = False
if toks[idx] == '(':
isBlock = True
idx += 1
if toks[idx] in AGG_OPS:
agg_id = AGG_OPS.index(toks[idx])
idx += 1
assert idx < len_ and toks[idx] == '('
idx += 1
if toks[idx] == "distinct":
idx += 1
isDistinct = True
idx, col_id = parse_col(toks, idx, tables_with_alias, schema, default_tables)
assert idx < len_ and toks[idx] == ')'
idx += 1
return idx, (agg_id, col_id, isDistinct)
if toks[idx] == "distinct":
idx += 1
isDistinct = True
agg_id = AGG_OPS.index("none")
idx, col_id = parse_col(toks, idx, tables_with_alias, schema, default_tables)
if isBlock:
assert toks[idx] == ')'
idx += 1 # skip ')'
return idx, (agg_id, col_id, isDistinct)
def parse_val_unit(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
isBlock = False
if toks[idx] == '(':
isBlock = True
idx += 1
col_unit1 = None
col_unit2 = None
unit_op = UNIT_OPS.index('none')
idx, col_unit1 = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
if idx < len_ and toks[idx] in UNIT_OPS:
unit_op = UNIT_OPS.index(toks[idx])
idx += 1
idx, col_unit2 = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
if isBlock:
assert toks[idx] == ')'
idx += 1 # skip ')'
return idx, (unit_op, col_unit1, col_unit2)
def parse_table_unit(toks, start_idx, tables_with_alias, schema):
"""
:returns next idx, table id, table name
"""
idx = start_idx
len_ = len(toks)
key = tables_with_alias[toks[idx]]
if idx + 1 < len_ and toks[idx + 1] == "as":
idx += 3
else:
idx += 1
return idx, schema.idMap[key], key
def parse_value(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
isBlock = False
if toks[idx] == '(':
isBlock = True
idx += 1
if toks[idx] == 'select':
idx, val = parse_sql(toks, idx, tables_with_alias, schema)
elif "\"" in toks[idx]: # token is a string value
val = toks[idx]
idx += 1
else:
try:
val = float(toks[idx])
idx += 1
except:
end_idx = idx
while end_idx < len_ and toks[end_idx] != ',' and toks[end_idx] != ')' \
and toks[end_idx] != 'and' and toks[end_idx] not in CLAUSE_KEYWORDS and toks[
end_idx] not in JOIN_KEYWORDS:
end_idx += 1
idx, val = parse_col_unit(toks[start_idx: end_idx], 0, tables_with_alias, schema, default_tables)
idx = end_idx
if isBlock:
assert toks[idx] == ')'
idx += 1
return idx, val
def parse_condition(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
conds = []
while idx < len_:
idx, val_unit = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
not_op = False
if toks[idx] == 'not':
not_op = True
idx += 1
assert idx < len_ and toks[idx] in WHERE_OPS, "Error condition: idx: {}, tok: {}".format(idx, toks[idx])
op_id = WHERE_OPS.index(toks[idx])
idx += 1
val1 = val2 = None
if op_id == WHERE_OPS.index('between'): # between..and... special case: dual values
idx, val1 = parse_value(toks, idx, tables_with_alias, schema, default_tables)
assert toks[idx] == 'and'
idx += 1
idx, val2 = parse_value(toks, idx, tables_with_alias, schema, default_tables)
else: # normal case: single value
idx, val1 = parse_value(toks, idx, tables_with_alias, schema, default_tables)
val2 = None
conds.append((not_op, op_id, val_unit, val1, val2))
if idx < len_ and (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (")", ";") or toks[idx] in JOIN_KEYWORDS):
break
if idx < len_ and toks[idx] in COND_OPS:
conds.append(toks[idx])
idx += 1 # skip and/or
return idx, conds
def parse_select(toks, start_idx, tables_with_alias, schema, default_tables=None):
idx = start_idx
len_ = len(toks)
assert toks[idx] == 'select', "'select' not found"
idx += 1
isDistinct = False
if idx < len_ and toks[idx] == 'distinct':
idx += 1
isDistinct = True
val_units = []
while idx < len_ and toks[idx] not in CLAUSE_KEYWORDS:
agg_id = AGG_OPS.index("none")
if toks[idx] in AGG_OPS:
agg_id = AGG_OPS.index(toks[idx])
idx += 1
idx, val_unit = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
val_units.append((agg_id, val_unit))
if idx < len_ and toks[idx] == ',':
idx += 1 # skip ','
return idx, (isDistinct, val_units)
def parse_from(toks, start_idx, tables_with_alias, schema):
"""
Assume in the from clause, all table units are combined with join
"""
assert 'from' in toks[start_idx:], "'from' not found"
len_ = len(toks)
idx = toks.index('from', start_idx) + 1
default_tables = []
table_units = []
conds = []
while idx < len_:
isBlock = False
if toks[idx] == '(':
isBlock = True
idx += 1
if toks[idx] == 'select':
idx, sql = parse_sql(toks, idx, tables_with_alias, schema)
table_units.append((TABLE_TYPE['sql'], sql))
else:
if idx < len_ and toks[idx] == 'join':
idx += 1 # skip join
idx, table_unit, table_name = parse_table_unit(toks, idx, tables_with_alias, schema)
table_units.append((TABLE_TYPE['table_unit'], table_unit))
default_tables.append(table_name)
if idx < len_ and toks[idx] == "on":
idx += 1 # skip on
idx, this_conds = parse_condition(toks, idx, tables_with_alias, schema, default_tables)
if len(conds) > 0:
conds.append('and')
conds.extend(this_conds)
if isBlock:
assert toks[idx] == ')'
idx += 1
if idx < len_ and (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (")", ";")):
break
return idx, table_units, conds, default_tables
def parse_where(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
if idx >= len_ or toks[idx] != 'where':
return idx, []
idx += 1
idx, conds = parse_condition(toks, idx, tables_with_alias, schema, default_tables)
return idx, conds
def parse_group_by(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
col_units = []
if idx >= len_ or toks[idx] != 'group':
return idx, col_units
idx += 1
assert toks[idx] == 'by'
idx += 1
while idx < len_ and not (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (")", ";")):
idx, col_unit = parse_col_unit(toks, idx, tables_with_alias, schema, default_tables)
col_units.append(col_unit)
if idx < len_ and toks[idx] == ',':
idx += 1 # skip ','
else:
break
return idx, col_units
def parse_order_by(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
val_units = []
order_type = 'asc' # default type is 'asc'
if idx >= len_ or toks[idx] != 'order':
return idx, val_units
idx += 1
assert toks[idx] == 'by'
idx += 1
while idx < len_ and not (toks[idx] in CLAUSE_KEYWORDS or toks[idx] in (")", ";")):
idx, val_unit = parse_val_unit(toks, idx, tables_with_alias, schema, default_tables)
val_units.append(val_unit)
if idx < len_ and toks[idx] in ORDER_OPS:
order_type = toks[idx]
idx += 1
if idx < len_ and toks[idx] == ',':
idx += 1 # skip ','
else:
break
return idx, (order_type, val_units)
def parse_having(toks, start_idx, tables_with_alias, schema, default_tables):
idx = start_idx
len_ = len(toks)
if idx >= len_ or toks[idx] != 'having':
return idx, []
idx += 1
idx, conds = parse_condition(toks, idx, tables_with_alias, schema, default_tables)
return idx, conds
def parse_limit(toks, start_idx):
idx = start_idx
len_ = len(toks)
if idx < len_ and toks[idx] == 'limit':
idx += 2
return idx, int(toks[idx - 1])
return idx, None
def parse_sql(toks, start_idx, tables_with_alias, schema):
isBlock = False # indicate whether this is a block of sql/sub-sql
len_ = len(toks)
idx = start_idx
sql = {}
if toks[idx] == '(':
isBlock = True
idx += 1
# parse from clause in order to get default tables
from_end_idx, table_units, conds, default_tables = parse_from(toks, start_idx, tables_with_alias, schema)
sql['from'] = {'table_units': table_units, 'conds': conds}
# select clause
_, select_col_units = parse_select(toks, idx, tables_with_alias, schema, default_tables)
idx = from_end_idx
sql['select'] = select_col_units
# where clause
idx, where_conds = parse_where(toks, idx, tables_with_alias, schema, default_tables)
sql['where'] = where_conds
# group by clause
idx, group_col_units = parse_group_by(toks, idx, tables_with_alias, schema, default_tables)
sql['groupBy'] = group_col_units
# having clause
idx, having_conds = parse_having(toks, idx, tables_with_alias, schema, default_tables)
sql['having'] = having_conds
# order by clause
idx, order_col_units = parse_order_by(toks, idx, tables_with_alias, schema, default_tables)
sql['orderBy'] = order_col_units
# limit clause
idx, limit_val = parse_limit(toks, idx)
sql['limit'] = limit_val
idx = skip_semicolon(toks, idx)
if isBlock:
assert toks[idx] == ')'
idx += 1 # skip ')'
idx = skip_semicolon(toks, idx)
# intersect/union/except clause
for op in SQL_OPS: # initialize IUE
sql[op] = None
if idx < len_ and toks[idx] in SQL_OPS:
sql_op = toks[idx]
idx += 1
idx, IUE_sql = parse_sql(toks, idx, tables_with_alias, schema)
sql[sql_op] = IUE_sql
return idx, sql
def skip_semicolon(toks, start_idx):
idx = start_idx
while idx < len(toks) and toks[idx] == ";":
idx += 1
return idx
def load_data(fpath):
with open(fpath) as f:
data = json.load(f)
return data
def get_sql(schema, query):
toks = tokenize(query)
tables_with_alias = get_tables_with_alias(schema.schema, toks)
_, sql = parse_sql(toks, 0, tables_with_alias, schema)
return sql
def parse_tokens_from_column_unit(column_unit: Dict, schema: SpiderSchema) -> List[SQLToken]:
agg_id, col_id, is_distinct = column_unit
column_tokens = []
if is_distinct:
column_tokens += [KeywordToken('DISTINCT')]
if agg_id > 0:
column_tokens += [KeywordToken(AGG_OPS[agg_id].upper())]
column_tokens += [KeywordToken('(')]
column_name = schema.get_column_full_name(col_id)
column_tokens += [ColumnToken(column_name, '')]
if agg_id > 0:
column_tokens += [KeywordToken(')')]
return column_tokens
def parse_tokens_from_value_unit(value_unit: Dict, schema: SpiderSchema) -> List[SQLToken]:
unit_op, col_unit1, col_unit2 = value_unit
value_tokens = []
if unit_op > 0:
value_tokens += [KeywordToken('(')]
value_tokens += parse_tokens_from_column_unit(col_unit1, schema)
if unit_op > 0:
value_tokens += [KeywordToken(UNIT_OPS[unit_op])]
value_tokens += parse_tokens_from_column_unit(col_unit2, schema)
value_tokens += [KeywordToken(')')]
return value_tokens
def parse_tokens_from_value(value: object, schema: SpiderSchema) -> List[SQLToken]:
if isinstance(value, str):
return [ValueToken(value, columns=None)]
if isinstance(value, float):
if int(value) == value:
return [ValueToken(int(value), columns=None)]
return [ValueToken(value, columns=None)]
if isinstance(value, dict):
return parse_tokens_from_sql(value, schema)
if isinstance(value, list) or isinstance(value, tuple):
return parse_tokens_from_column_unit(value, schema)
else:
raise NotImplementedError()
def resolve_column_reference(value_tokens: List[SQLToken], value_unit_tokens: List[SQLToken]):
column_names = [token.value for token in value_unit_tokens if token.token_type == SQLTokenType.column]
for value_token in value_tokens:
if not isinstance(value_token, ValueToken):
continue
if value_token.columns is None:
value_token.columns = column_names
def parse_tokens_from_cond_unit(cond_unit: Dict, schema: SpiderSchema) -> List[SQLToken]:
cond_unit_tokens = []
not_op, op_id, val_unit, val1, val2 = cond_unit
value_unit_tokens = parse_tokens_from_value_unit(val_unit, schema)
cond_unit_tokens += value_unit_tokens
if not_op:
cond_unit_tokens += [KeywordToken('NOT')]
cond_unit_tokens += [KeywordToken(WHERE_OPS[op_id].upper())]
if WHERE_OPS[op_id] == 'in':
cond_unit_tokens += [KeywordToken('(')]
value_tokens1 = parse_tokens_from_value(val1, schema)
resolve_column_reference(value_tokens1, value_unit_tokens)
cond_unit_tokens += value_tokens1
if val2 is not None:
assert op_id == 1
cond_unit_tokens += [KeywordToken('AND')]
value_tokens2 = parse_tokens_from_value(val2, schema)
resolve_column_reference(value_tokens2, value_unit_tokens)
cond_unit_tokens += value_tokens2
if WHERE_OPS[op_id] == 'in':
cond_unit_tokens += [KeywordToken(')')]
return cond_unit_tokens
def parse_tokens_from_condition(condition: Dict, schema: SpiderSchema) -> List[SQLToken]:
cond_tokens = []
for cond_unit in condition:
if cond_unit in ['and', 'or']:
cond_tokens += [KeywordToken(cond_unit.upper())]
continue
cond_tokens += parse_tokens_from_cond_unit(cond_unit, schema)
return cond_tokens
def parse_tokens_from_sql(sql_obj: Dict, schema: SpiderSchema) -> List[SQLToken]:
sql_tokens = []
if sql_obj['select'] is not None:
sel_obj = sql_obj['select']
sql_tokens += [KeywordToken('SELECT')]
if sel_obj[0] is True:
sql_tokens += [KeywordToken('DISTINCT')]
for i, (agg_id, val_unit) in enumerate(sel_obj[1]):
if i > 0:
sql_tokens += [KeywordToken(',')]
if agg_id > 0:
agg = AGG_OPS[agg_id].upper()
sql_tokens += [KeywordToken(agg)]
sql_tokens += [KeywordToken('(')]
sql_tokens += parse_tokens_from_value_unit(val_unit, schema)
if agg_id > 0:
sql_tokens += [KeywordToken(')')]
if sql_obj['from']:
sql_tokens += [KeywordToken('FROM')]
table_units, cond_units = sql_obj['from']['table_units'], sql_obj['from']['conds']
for i, table_unit in enumerate(table_units):
if i > 0:
sql_tokens += [KeywordToken(',')]
if table_unit[0] == 'table_unit':
sql_tokens += [TableToken(schema.table_names_original[table_unit[1]].lower())]
else:
assert table_unit[0] == 'sql'
sql_tokens += parse_tokens_from_sql(table_unit[1], schema)
if cond_units is not None and len(cond_units) > 0:
sql_tokens += [KeywordToken('ON')]
sql_tokens += parse_tokens_from_condition(cond_units, schema)
if sql_obj['groupBy'] and len(sql_obj['groupBy']) > 0:
sql_tokens += [KeywordToken('GROUP'), KeywordToken('BY')]
for i, col_unit in enumerate(sql_obj['groupBy']):
if i > 0:
sql_tokens += [KeywordToken(',')]
sql_tokens += parse_tokens_from_column_unit(col_unit, schema)
if sql_obj['where'] and len(sql_obj['where']) > 0:
sql_tokens += [KeywordToken('WHERE')]
sql_tokens += parse_tokens_from_condition(sql_obj['where'], schema)
if sql_obj['having'] and len(sql_obj['having']) > 0:
sql_tokens += [KeywordToken('HAVING')]
sql_tokens += parse_tokens_from_condition(sql_obj['having'], schema)
if sql_obj['orderBy'] and len(sql_obj['orderBy']) > 0:
sql_tokens += [KeywordToken('ORDER'), KeywordToken('BY')]
for i, value_unit in enumerate(sql_obj['orderBy'][1]):
if i > 0:
sql_tokens += [KeywordToken(',')]
sql_tokens += parse_tokens_from_value_unit(value_unit, schema)
sql_tokens += [KeywordToken(sql_obj['orderBy'][0].upper())]
if sql_obj['limit']:
sql_tokens += [KeywordToken('limit'.upper())]
sql_tokens += [ValueToken(sql_obj['limit'], columns=[])]
if sql_obj['intersect']:
sql_tokens += [KeywordToken('intersect'.upper())]
sql_tokens += parse_tokens_from_sql(sql_obj['intersect'], schema)
if sql_obj['except']:
sql_tokens += [KeywordToken('except'.upper())]
sql_tokens += parse_tokens_from_sql(sql_obj['except'], schema)
if sql_obj['union']:
sql_tokens += [KeywordToken('union'.upper())]
sql_tokens += parse_tokens_from_sql(sql_obj['union'], schema)
return sql_tokens
def parse_spider_sql(query: str, schema: SpiderSchema) -> SQLExpression:
sql = get_sql(schema, query)
sql_tokens = parse_tokens_from_sql(sql, schema)
return SQLExpression(tokens=sql_tokens)
|
ContextualSP/awakening_latent_grounding/utils/sql_parser.py/0
|
{
"file_path": "ContextualSP/awakening_latent_grounding/utils/sql_parser.py",
"repo_id": "ContextualSP",
"token_count": 11065
}
| 228 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
# Author: Qian Liu (SivilTaram)
# Original Repo: https://github.com/microsoft/ContextualSP
import torch.nn as nn
import torch.nn.functional as F
import torch
class AttentionUNet(torch.nn.Module):
"""
    UNet-style encoder-decoder (down-sampling followed by up-sampling) over
    multi-channel attention maps, used for global reasoning.
"""
def __init__(self, input_channels, class_number, **kwargs):
super(AttentionUNet, self).__init__()
down_channel = kwargs['down_channel']
down_channel_2 = down_channel * 2
up_channel_1 = down_channel_2 * 2
up_channel_2 = down_channel * 2
self.inc = InConv(input_channels, down_channel)
self.down1 = DownLayer(down_channel, down_channel_2)
self.down2 = DownLayer(down_channel_2, down_channel_2)
self.up1 = UpLayer(up_channel_1, up_channel_1 // 4)
self.up2 = UpLayer(up_channel_2, up_channel_2 // 4)
self.outc = OutConv(up_channel_2 // 4, class_number)
def forward(self, attention_channels):
"""
        Given a multi-channel attention map, return per-position logits over the output classes.
        :param attention_channels: tensor of shape batch_size x channel x width x height
        :return: tensor of shape batch_size x width x height x class_number
"""
# attention_channels as the shape of: batch_size x channel x width x height
x = attention_channels
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x = self.up1(x3, x2)
x = self.up2(x, x1)
output = self.outc(x)
# attn_map as the shape of: batch_size x width x height x class
output = output.permute(0, 2, 3, 1).contiguous()
return output
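# Minimal usage sketch for AttentionUNet (hypothetical sizes):
#   net = AttentionUNet(input_channels=8, class_number=3, down_channel=16)
#   attn = torch.rand(2, 8, 40, 40)   # batch_size x channel x width x height
#   logits = net(attn)                # -> shape (2, 40, 40, 3)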
class DoubleConv(nn.Module):
"""(conv => ReLU) * 2"""
def __init__(self, in_ch, out_ch):
super(DoubleConv, self).__init__()
self.double_conv = nn.Sequential(nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True))
def forward(self, x):
x = self.double_conv(x)
return x
class InConv(nn.Module):
def __init__(self, in_ch, out_ch):
super(InConv, self).__init__()
self.conv = DoubleConv(in_ch, out_ch)
def forward(self, x):
x = self.conv(x)
return x
class DownLayer(nn.Module):
def __init__(self, in_ch, out_ch):
super(DownLayer, self).__init__()
self.maxpool_conv = nn.Sequential(
nn.MaxPool2d(kernel_size=2),
DoubleConv(in_ch, out_ch)
)
def forward(self, x):
x = self.maxpool_conv(x)
return x
class UpLayer(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=True):
super(UpLayer, self).__init__()
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear',
align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_ch // 2, in_ch // 2, 2, stride=2)
self.conv = DoubleConv(in_ch, out_ch)
def forward(self, x1, x2):
x1 = self.up(x1)
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX // 2, diffY // 2, diffY -
diffY // 2))
x = torch.cat([x2, x1], dim=1)
x = self.conv(x)
return x
class OutConv(nn.Module):
def __init__(self, in_ch, out_ch):
super(OutConv, self).__init__()
self.conv = nn.Conv2d(in_ch, out_ch, 1)
def forward(self, x):
x = self.conv(x)
return x
|
ContextualSP/incomplete_utterance_rewriting/src/attn_unet.py/0
|
{
"file_path": "ContextualSP/incomplete_utterance_rewriting/src/attn_unet.py",
"repo_id": "ContextualSP",
"token_count": 1952
}
| 229 |
from typing import List, Tuple, Dict, Set, Optional
from copy import deepcopy
from .db_context import SparcDBContext
from .grammar import Grammar, Action, C, T
from .converter import SQLConverter
class SparcWorld:
"""
World representation for spider dataset.
"""
def __init__(self, db_context: SparcDBContext, sql_clause, sql_query):
"""
        :param db_context: database context (schema and knowledge graph) of the current example
        :param sql_clause: structural (parsed) SQL clause
        :param sql_query: plain SQL query for evaluation
"""
self.db_id = db_context.db_id
self.db_context = db_context
self.sql_clause = sql_clause
self.sql_query = sql_query
self.sql_converter = SQLConverter(db_context=self.db_context)
        # keep a list of entity names as they are given in sql queries
self.entities_indexer = {}
for i, entity in enumerate(self.db_context.knowledge_graph.entities):
parts = entity.split(':')
if parts[0] in ['table', 'string']:
self.entities_indexer[parts[1]] = i
else:
_, _, _, column_name = parts
                # TODO: here we assume the same column name always maps to the same text
self.entities_indexer[f'{column_name}'] = i
self.valid_actions: Dict[str, List[str]] = {}
self.valid_actions_flat: List[Action] = []
def get_action_sequence_and_all_actions(self) -> Tuple[List[str], List[Action], List[Action]]:
"""
        Translate the SQL clause given at initialization into its corresponding SemQL action sequence,
        and return the instantiated local and global grammars.
        :return: non-terminal names of the action sequence, the action sequence corresponding to the SQL clause, and all valid actions (sorted)
"""
# build global grammar and local grammar
grammar = Grammar(db_context=self.db_context)
global_grammar = grammar.global_grammar
local_grammar = grammar.local_grammar
all_actions = global_grammar + local_grammar
# the sorted actions must follow the same order and
# global grammar will be converted into tensor automatically in allennlp
self.valid_actions_flat = [action for action in all_actions]
# add every action into nonterminal key
for action in self.valid_actions_flat:
action_key = action.__class__.__name__
if action_key not in self.valid_actions:
self.valid_actions[action_key] = []
# record action
self.valid_actions[action_key].append(str(action))
if self.sql_clause is not None:
action_sequence = self.sql_converter.translate_to_intermediate(self.sql_clause)
# validate action sequence
else:
action_sequence = None
# fetch action_non_terminal
action_non_terminal = None
if action_sequence is not None:
action_non_terminal = [action.__class__.__name__ for action in action_sequence]
return action_non_terminal, action_sequence, all_actions
def get_oracle_relevance_score(self, oracle_entities: set):
"""
return 0/1 for each schema item if it should be in the graph,
given the used entities in the gold answer
"""
scores = [0 for _ in range(len(self.db_context.knowledge_graph.entities))]
for i, entity in enumerate(self.db_context.knowledge_graph.entities):
parts = entity.split(':')
if parts[0] == 'column':
name = parts[2] + '@' + parts[3]
else:
name = parts[-1]
if name in oracle_entities:
scores[i] = 1
return scores
def get_action_entity_mapping(self) -> Dict[str, int]:
"""
        Get the entity index of every local grammar rule (also known as a linked action).
        :return: mapping from action string to entity index (-1 for actions without a linked entity)
"""
mapping = {}
for action in self.valid_actions_flat:
# default is padding
mapping[str(action)] = -1
# lowercase for all entities
ins_id = action.ins_id
if isinstance(ins_id, str):
ins_id = ins_id.lower()
# only instance class should apply entity map
if type(action) not in [C, T] or ins_id not in self.entities_indexer:
continue
# record the entity id
mapping[str(action)] = self.entities_indexer[ins_id]
return mapping
|
ContextualSP/interactive_text_to_sql/src/context/world.py/0
|
{
"file_path": "ContextualSP/interactive_text_to_sql/src/context/world.py",
"repo_id": "ContextualSP",
"token_count": 1907
}
| 230 |
# LEMON
This repository contains the code and pre-trained models for our EMNLP 2022 Findings paper [LEMON: Language-Based Environment Manipulation via Execution-guided Pre-training](https://arxiv.org/pdf/2201.08081.pdf).
Data
-------
The data is in the [release](https://github.com/microsoft/ContextualSP/releases/tag/lemon_data). Please unzip it and put it in the lemon_data folder.
Pre-training
-------
Run the following command to preprocess the data:
```bash
bash preprocess_pretrain.bat
```
Then run the following command to pre-train the model:
```bash
bash pretrain.sh
```
Fine-tuning
-------
Run the following command to preprocess the data:
```bash
bash preprocess_finetune.bat
```
Then run the following command to fine-tune the model:
```bash
bash finetune.sh
```
|
ContextualSP/lemon/README.md/0
|
{
"file_path": "ContextualSP/lemon/README.md",
"repo_id": "ContextualSP",
"token_count": 240
}
| 231 |
import json
import logging
import math
import numbers
import os
import platform
import resource
import sys
from collections import MutableMapping
from contextlib import contextmanager
from IPython.core.display import display, HTML
from pyhocon import ConfigFactory
from pyhocon import ConfigMissingException
from pyhocon import ConfigTree
from pyhocon import HOCONConverter
from gtd.utils import NestedDict, Config
def in_ipython():
try:
__IPYTHON__
return True
except NameError:
return False
def print_with_fonts(tokens, sizes, colors, background=None):
def style(text, size=12, color='black'):
return '<span style="font-size: {}px; color: {};">{}</span>'.format(size, color, text)
styled = [style(token, size, color) for token, size, color in zip(tokens, sizes, colors)]
text = ' '.join(styled)
if background:
text = '<span style="background-color: {};">{}</span>'.format(background, text)
display(HTML(text))
def gb_used():
used = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if platform.system() != 'Darwin':
# on Linux, used is in terms of kilobytes
power = 2
else:
# on Mac, used is in terms of bytes
power = 3
return float(used) / math.pow(1024, power)
class Metadata(MutableMapping):
"""A wrapper around ConfigTree.
Supports a name_scope contextmanager.
"""
def __init__(self, config_tree=None):
if config_tree is None:
config_tree = ConfigTree()
self._config_tree = config_tree
self._namestack = []
@contextmanager
def name_scope(self, name):
self._namestack.append(name)
yield
self._namestack.pop()
def _full_key(self, key):
return '.'.join(self._namestack + [key])
def __getitem__(self, key):
try:
val = self._config_tree.get(self._full_key(key))
except ConfigMissingException:
raise KeyError(key)
if isinstance(val, ConfigTree):
return Metadata(val)
return val
def __setitem__(self, key, value):
"""Put a value (key is a dot-separated name)."""
self._config_tree.put(self._full_key(key), value)
def __delitem__(self, key):
raise NotImplementedError()
def __iter__(self):
return iter(self._config_tree)
def __len__(self):
return len(self._config_tree)
def __repr__(self):
return self.to_str()
def to_str(self):
return HOCONConverter.convert(self._config_tree, 'hocon')
def to_file(self, path):
with open(path, 'w') as f:
f.write(self.to_str())
@classmethod
def from_file(cls, path):
config_tree = ConfigFactory.parse_file(path)
return cls(config_tree)
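# Illustrative usage of Metadata (values are hypothetical):
#   m = Metadata()
#   with m.name_scope('model'):
#       m['hidden_dim'] = 128         # stored under the full key 'model.hidden_dim'
#   m['model.hidden_dim']             # -> 128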
class SyncedMetadata(Metadata):
"""A Metadata object which writes to file after every change."""
def __init__(self, path):
if os.path.exists(path):
m = Metadata.from_file(path)
else:
m = Metadata()
super(SyncedMetadata, self).__init__(m._config_tree)
self._path = path
def __setitem__(self, key, value):
super(SyncedMetadata, self).__setitem__(key, value)
self.to_file(self._path)
def print_list(l):
for item in l:
print(item)
def print_no_newline(s):
sys.stdout.write(s)
sys.stdout.flush()
def set_log_level(level):
"""Set the log-level of the root logger of the logging module.
Args:
level: can be an integer such as 30 (logging.WARN), or a string such as 'WARN'
"""
if isinstance(level, str):
level = logging._levelNames[level]
logger = logging.getLogger() # gets root logger
logger.setLevel(level)
|
ContextualSP/lemon/executor/gtd/log.py/0
|
{
"file_path": "ContextualSP/lemon/executor/gtd/log.py",
"repo_id": "ContextualSP",
"token_count": 1553
}
| 232 |
import copy
import numpy as np
import pytest
import tensorflow as tf
from math import exp
from numpy.testing import assert_array_almost_equal
from gtd.ml.model import TokenEmbedder, MeanSequenceEmbedder, ConcatSequenceEmbedder, CandidateScorer, LSTMSequenceEmbedder, \
SoftCopyScorer, Attention, BidiLSTMSequenceEmbedder
from gtd.ml.seq_batch import FeedSequenceBatch, SequenceBatch
from gtd.ml.utils import clean_session
from gtd.ml.vocab import SimpleVocab, SimpleEmbeddings
from gtd.tests.ml.test_framework import FeedableTester, clean_test_session
from gtd.utils import softmax
class VocabExample(SimpleVocab):
def __init__(self, tokens, unk):
if unk not in tokens:
raise ValueError('unk must be in tokens')
self.unk = unk
super(VocabExample, self).__init__(tokens)
def word2index(self, w):
try:
return self._word2index[w]
except KeyError:
return self._word2index[self.unk]
class TestTokenEmbedder(FeedableTester):
@pytest.fixture
def model(self):
array = np.array([
[1, 2, 3],
[2, 4, 6],
[3, 5, 7],
], dtype=np.float32)
vocab = SimpleVocab('a b c'.split())
embeddings = SimpleEmbeddings(array, vocab)
return TokenEmbedder(embeddings, 'token_embeds')
@pytest.fixture
def inputs(self):
return self.as_args_kwargs()
@pytest.fixture
def feed_dict(self):
return {}
@pytest.fixture
def output_tensors(self, model):
return [model.embeds]
@pytest.fixture
def outputs(self):
array = np.array([
[1, 2, 3],
[2, 4, 6],
[3, 5, 7],
], dtype=np.float32)
return [array]
class TestSequenceEmbedder(FeedableTester):
@pytest.fixture
def model(self):
token_embeds = tf.constant([
[0, 0, 0],
[1, 2, 3],
[2, 4, 6],
[3, 5, 7],
[9, 9, 9],
], dtype=tf.float32)
return MeanSequenceEmbedder(token_embeds)
@pytest.fixture
def inputs(self):
token_vocab = SimpleVocab(['<pad>'] + 'a b c d'.split())
sequences = [
['a', 'c'],
['b', 'c', 'c'],
['d', 'c', 'a'],
]
return self.as_args_kwargs(sequences, token_vocab)
@pytest.fixture
def feed_dict(self, model):
indices_tensor = model._sequence_batch.values
mask_tensor = model._sequence_batch.mask
pad = 0
indices = [
[1, 3, pad],
[2, 3, 3],
[4, 3, 1]
]
mask = [
[1, 1, 0],
[1, 1, 1],
[1, 1, 1],
]
return {indices_tensor: np.array(indices), mask_tensor: np.array(mask)}
@pytest.fixture
def output_tensors(self, model):
return [model.embeds]
@pytest.fixture
def outputs(self):
npa = lambda arr: np.array(arr, dtype=np.float32)
embeds = npa([
npa([4, 7, 10]) / 2,
npa([8, 14, 20]) / 3,
npa([13, 16, 19]) / 3,
])
return [embeds]
class TestConcatSequenceEmbedder(object):
def test(self):
token_vocab = SimpleVocab('a b c d'.split())
sequences = [
['a', 'b', 'c', 'd'],
['c', 'd'],
]
correct_embeds = np.array([
[1, 2, 0, 3, 4, 1, 5, 6, 0, 7, 8, 1],
[5, 6, 0, 7, 8, 1, 0, 0, 0, 0, 0, 0],
], dtype=np.float32)
with clean_session():
token_embeds = tf.constant([
[1, 2, 0],
[3, 4, 1],
[5, 6, 0],
[7, 8, 1],
], dtype=tf.float32)
model = ConcatSequenceEmbedder(token_embeds)
test_embeds = model.compute(model.embeds, sequences, token_vocab)
assert_array_almost_equal(correct_embeds, test_embeds, decimal=5)
class TestFixedLengthConcatEmbedder(object):
def test(self):
token_vocab = SimpleVocab('a b c d'.split())
sequences = [
['a', 'b', 'c', 'd'],
['c', 'd'],
]
correct_embeds = np.array([
[3, 4, 1, 5, 6, 0, 7, 8, 1],
[0, 0, 0, 5, 6, 0, 7, 8, 1]
], dtype=np.float32)
with clean_session():
token_embeds = tf.constant([
[1, 2, 0],
[3, 4, 1],
[5, 6, 0],
[7, 8, 1],
], dtype=tf.float32)
model = ConcatSequenceEmbedder(token_embeds, seq_length=3, align='right')
test_embeds = model.compute(model.embeds, sequences, token_vocab)
# check that static shape inference works
assert model.embeds.get_shape().as_list() == [None, 3 * 3]
assert_array_almost_equal(correct_embeds, test_embeds, decimal=5)
class TestCandidateScorer(FeedableTester):
@pytest.fixture
def query(self):
# a batch size of three. Each row is a query vector
return np.array([
[2., 2., 4.],
[1., 2., 0.],
[1., 2., 8.],
], dtype=np.float32)
@pytest.fixture
def embeddings(self):
array = np.array([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11],
[12, 13, 14],
[15, 16, 17],
], dtype=np.float32)
vocab = SimpleVocab(['<pad>', 'a', 'b', 'c', 'd', 'e'])
return SimpleEmbeddings(array, vocab)
@pytest.fixture
def projection_weights(self):
W = np.random.normal(size=[3, 3])
b = np.random.normal(size=[3])
return W, b
@pytest.fixture
def model(self, query, embeddings, projection_weights):
candidate_embedder = TokenEmbedder(embeddings, 'cand_embeds')
scorer = CandidateScorer(tf.constant(query, dtype=tf.float32), candidate_embedder.embeds)
scorer.projection_weights = projection_weights
return scorer
@pytest.fixture
def inputs(self, embeddings):
candidates = [
['a', 'c', 'd'],
['a', 'b', 'c', 'd', 'e'],
[]
]
vocab = embeddings.vocab
return self.as_args_kwargs(candidates, vocab)
@pytest.fixture
def feed_dict(self, model):
values = model._cand_batch.values
mask = model._cand_batch.mask
choice_indices = np.array([
[1, 3, 4, 0, 0],
[1, 2, 3, 4, 5],
[0, 0, 0, 0, 0],
], dtype=np.int32)
t, o = True, False
choice_mask = np.array([
[t, t, t, o, o],
[t, t, t, t, t],
[o, o, o, o, o],
])
return {
values: choice_indices,
mask: choice_mask,
}
@pytest.fixture
def output_tensors(self, model):
return [model.scores.values, model._probs.values]
@pytest.fixture
def outputs(self, query, embeddings, model, feed_dict, projection_weights):
# project the query tensor
W, b = projection_weights
query = query.dot(W) + b
embeds = embeddings.array
choice_embeds0 = embeds[[1, 3, 4]]
query0 = query[0]
logits0 = np.array(list(choice_embeds0.dot(query0)) + 2 * [float('-inf')])
choice_embeds1 = embeds[[1, 2, 3, 4, 5]]
query1 = query[1]
logits1 = choice_embeds1.dot(query1)
logits2 = np.array([1., 1., 1., 1., 1.]) * float('-inf')
logits = [logits0, logits1, logits2]
probs = [softmax(l) for l in logits]
logits = np.array(logits)
probs = np.array(probs)
return [logits, probs]
class TestSoftCopyScorer(object):
@pytest.fixture
def model(self):
attention_weights = tf.constant([
[0.1, 0.5, 10., 0., 0],
[0.1, 0.7, -10, 0., 1],
[8.0, 0.3, 0.0, 11, 2],
], dtype=tf.float32)
return SoftCopyScorer(attention_weights)
@pytest.fixture
def alignments(self):
return [
[[(0, 0.5), (2, 0.5)], [(2, 3.)], [(4, 10.), (0, 10.)]],
[[(0, 0.), (1, 1.), (2, 2.), (4, 4.)]],
[[(4, -1.), (3, -2.)]],
]
@pytest.fixture
def correct_scores(self):
return np.array([
[5.05, 30, 1],
[-15.3, 0, 0],
[-24, 0, 0],
], dtype=np.float32)
@pytest.mark.usefixtures('clean_test_session')
def test(self, model, alignments, correct_scores):
scores = model.compute(model.scores.values, alignments)
assert_array_almost_equal(correct_scores, scores)
assert len(scores.shape) == 2
@pytest.mark.usefixtures('clean_test_session')
def test_out_of_bounds(self, model, alignments, correct_scores):
bad_alignments = copy.deepcopy(alignments)
bad_alignments[0][0][0] = (5, -1) # one index beyond seq_length
with pytest.raises(ValueError):
scores = model.compute(model.scores.values, bad_alignments)
class TestLSTMSequenceEmbedder(object):
def test_lstm(self):
"""Test whether the mask works properly for LSTM embedder."""
token_vocab = SimpleVocab('a b c d'.split())
sequences = [
['a', 'b', 'c', 'd'],
['c', 'd'],
['a', 'b', 'c', 'd'],
]
sequences_alt = [
['a', 'b', 'c', 'd', 'a', 'b', 'd', 'c'],
['b', 'a', 'd'],
['c', 'd'],
]
with clean_session():
token_embeds = tf.constant([
[1, 2, 0],
[3, 4, 1],
[5, 6, 0],
[7, 8, 1],
], dtype=tf.float32)
model = LSTMSequenceEmbedder(token_embeds, seq_length=4, hidden_size=7)
test_embeds, test_hidden_states = model.compute(
[model.embeds, model.hidden_states.values],
sequences, token_vocab)
assert test_embeds.shape == (3, 7)
assert test_hidden_states.shape == (3, 4, 7)
# Padded spaces should have the same hidden states
assert_array_almost_equal(test_hidden_states[1,1,:], test_hidden_states[1,2,:], decimal=5)
assert_array_almost_equal(test_hidden_states[1,1,:], test_hidden_states[1,3,:], decimal=5)
# Try again but with different paddings
# Should get the same result for ['c', 'd']
big_model = LSTMSequenceEmbedder(token_embeds, seq_length=8, hidden_size=7)
big_model.weights = model.weights # match weights
test_embeds_alt, test_hidden_states_alt = big_model.compute(
[big_model.embeds, big_model.hidden_states.values],
sequences_alt, token_vocab)
assert test_embeds_alt.shape == (3, 7)
assert test_hidden_states_alt.shape == (3, 8, 7)
assert_array_almost_equal(test_embeds[1,:], test_embeds_alt[2,:], decimal=5)
assert_array_almost_equal(test_hidden_states[1,:2,:],
test_hidden_states_alt[2,:2,:], decimal=5)
class TestBidiLSTMSequenceEmbedder(object):
def test_lstm(self):
"""Test whether the mask works properly for bidi LSTM embedder."""
token_vocab = SimpleVocab('a b c d'.split())
sequences = [
['a', 'b', 'c', 'd'],
['c', 'd'],
['a', 'b', 'c', 'd'],
]
sequences_alt = [
['a', 'b', 'c', 'd', 'a', 'b', 'd', 'c'],
['b', 'a', 'd'],
['c', 'd'],
]
with clean_session():
token_embeds = tf.constant([
[1, 2, 0],
[3, 4, 1],
[5, 6, 0],
[7, 8, 1],
], dtype=tf.float32)
model = BidiLSTMSequenceEmbedder(token_embeds, seq_length=4, hidden_size=7)
test_embeds, test_hidden_states = model.compute(
[model.embeds, model.hidden_states.values],
sequences, token_vocab)
assert test_embeds.shape == (3, 14)
assert test_hidden_states.shape == (3, 4, 14)
assert_array_almost_equal(test_embeds[1,:7], test_hidden_states[1,1,:7], decimal=5)
assert_array_almost_equal(test_embeds[1,7:], test_hidden_states[1,0,7:], decimal=5)
# Padded spaces should have the same forward embeddings
assert_array_almost_equal(test_hidden_states[1,1,:7], test_hidden_states[1,2,:7], decimal=5)
assert_array_almost_equal(test_hidden_states[1,1,:7], test_hidden_states[1,3,:7], decimal=5)
# Padded spaces should have 0 backward embeddings
assert_array_almost_equal(np.zeros((7,)), test_hidden_states[1,2,7:], decimal=5)
assert_array_almost_equal(np.zeros((7,)), test_hidden_states[1,3,7:], decimal=5)
# Other spaces should not have 0 embeddings with very high probability
assert np.linalg.norm(test_hidden_states[1,0,:7]) > 1e-5
assert np.linalg.norm(test_hidden_states[1,1,:7]) > 1e-5
assert np.linalg.norm(test_hidden_states[1,0,7:]) > 1e-5
assert np.linalg.norm(test_hidden_states[1,1,7:]) > 1e-5
# Try again but with different paddings
# Should get the same result for ['c', 'd']
big_model = BidiLSTMSequenceEmbedder(token_embeds, seq_length=8, hidden_size=7)
big_model.weights = model.weights # match weights
test_embeds_alt, test_hidden_states_alt = big_model.compute(
[big_model.embeds, big_model.hidden_states.values],
sequences_alt, token_vocab)
assert test_embeds_alt.shape == (3, 14)
assert test_hidden_states_alt.shape == (3, 8, 14)
assert_array_almost_equal(test_embeds[1,:], test_embeds_alt[2,:], decimal=5)
assert_array_almost_equal(test_hidden_states[1,:2,:],
test_hidden_states_alt[2,:2,:], decimal=5)
class TestAttention(object):
@pytest.fixture
def memory_cells(self):
# (batch_size, num_cells, cell_dim)
values = tf.constant([ # (2, 2, 3)
[
[1., 2., 3.],
[1., 1., 1.]
],
[
[1., 1.5, 0.],
[-0.8, 1., -0.4]
]
], dtype=tf.float32)
mask = tf.constant([ # (2, 2)
[1, 0],
[1, 1],
], dtype=tf.float32)
return SequenceBatch(values, mask)
@pytest.fixture
def query(self):
# (batch_size, cell_dim)
return tf.constant([ # (2, 3)
[1., 2., -1.5],
[0., 0.3, 2.]
], dtype=tf.float32)
@pytest.fixture
def model(self, memory_cells, query):
return Attention(memory_cells, query)
@pytest.fixture
def correct_logits(self):
ninf = -float('inf')
return np.array([
[(1 + 4 + -4.5), ninf],
[(0 + 0.45 + 0), (0 + 0.3 + -0.8)]
], dtype=np.float32)
@pytest.fixture
def correct_probs(self):
normalizer = exp(0.45) + exp(-0.5)
return np.array([
[1.0, 0.0],
[exp(0.45) / normalizer, exp(-0.5) / normalizer]
], dtype=np.float32)
@pytest.fixture
def correct_retrieved(self, correct_probs):
a0 = correct_probs[1][0]
a1 = correct_probs[1][1]
weighted = a0 * np.array([1., 1.5, 0.]) + \
a1 * np.array([-0.8, 1., -0.4])
return np.array([
[1., 2., 3.],
weighted,
], dtype=np.float32)
@pytest.mark.usefixtures('clean_test_session')
def test(self, model, correct_logits, correct_probs, correct_retrieved):
sess = tf.get_default_session()
logits, probs, retrieved = sess.run([model.logits, model.probs, model.retrieved])
assert_array_almost_equal(correct_logits, logits)
assert_array_almost_equal(correct_probs, probs)
assert_array_almost_equal(correct_retrieved, retrieved)
|
ContextualSP/lemon/executor/gtd/tests/ml/test_model.py/0
|
{
"file_path": "ContextualSP/lemon/executor/gtd/tests/ml/test_model.py",
"repo_id": "ContextualSP",
"token_count": 8384
}
| 233 |
from abc import ABCMeta, abstractproperty, abstractmethod
from gtd.utils import cached_property
class Domain(object, metaclass=ABCMeta):
"""Encapsulate all domain-dependent information.
To add a new domain, create a subclass of domain (in a separate file)
and then add it to the get_domain method below.
"""
def __init__(self, config):
"""Initialize the Domain object.
Args:
config (gtd.util.Config): Top-level config.
"""
self.config = config
@abstractmethod
def load_datasets(self):
"""Load training and validation datasets according to the config.
Returns: a tuple (train, valid)
train (Dataset): Training examples
valid (Dataset): Validation examples (dev set)
final (Dataset): Final examples (test set)
"""
raise NotImplementedError
@cached_property
def path_checker(self):
"""Get a PathChecker for this domain.
Returns:
A callable that takes a ParsePath and returns a boolean
indicating whether the ParsePath is OK to be on the beam.
"""
prune_config = self.config.decoder.get('prune')
if not prune_config:
return lambda x: True
return self._get_path_checker(prune_config)
@abstractmethod
def _get_path_checker(self, prune_config):
"""Get a PathChecker for this domain according to the configuration.
Args:
prune_config (Config): dataset.prune section of the config.
Returns:
A callable that takes a ParsePath and returns a boolean
indicating whether the ParsePath is OK to be on the beam.
"""
raise NotImplementedError
@abstractproperty
def fixed_predicates(self):
"""Return the list of fixed Predicates.
Returns:
list(Predicate)
"""
raise NotImplementedError
@abstractproperty
def all_types(self):
"""Return the list of all possible type names.
Returns:
list(str)
"""
raise NotImplementedError
def get_domain(config):
"""Get the domain object according to the config.
Args:
config (gtd.util.Config): Top-level config
"""
domain_name = config.dataset.domain
if domain_name == 'tables':
from strongsup.tables.domain import TablesDomain
return TablesDomain(config)
elif domain_name == 'rlong':
from strongsup.rlong.domain import RLongDomain
return RLongDomain(config)
else:
raise ValueError('Domain {} not supported.'.format(domain_name))
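# Minimal sketch (hypothetical config): get_domain dispatches on config.dataset.domain,
# e.g. a Config with dataset.domain == 'rlong' yields an RLongDomain, after which
#   train, valid, final = domain.load_datasets()
# provides the three dataset splits described above.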
|
ContextualSP/lemon/executor/strongsup/domain.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/domain.py",
"repo_id": "ContextualSP",
"token_count": 1061
}
| 234 |
class Recipe(object):
"""Light-weight class that defines the configs to launch types of
jobs. These jobs are defined for all datasets given by the datasets
property.
Args:
        name (string): The name of the recipe
        config_mixins (list[string]): Names of the human-readable config mixins
        base (string): The base config this runs off of.
"""
def __init__(self, name, config_mixins, base="default-base"):
self._name = name
self._config_mixins = config_mixins
self._base = base
@property
def config_mixins(self):
return self._config_mixins
@property
def datasets(self):
return ["alchemy", "tangrams", "scene", "alchemy-multi-step", "tangrams-multi-step", "scene-multi-step"]
@property
def base(self):
return self._base
@property
def name(self):
return self._name
def __str__(self):
return 'Recipe({}: {} + {})'.format(
self.name, self.base, self.config_mixins)
__repr__ = __str__
class AlchemyRecipe(Recipe):
@property
def datasets(self):
return ["alchemy"]
class TangramsRecipe(Recipe):
@property
def datasets(self):
return ["tangrams"]
class SceneRecipe(Recipe):
@property
def datasets(self):
return ["scene"]
class Cookbook(object):
"""A collection of recipes"""
def __init__(self, recipes):
self._recipes = recipes
@property
def recipes(self):
return self._recipes
class RLongCookbook(Cookbook):
def __init__(self):
self._recipes = [
# Baseline
Recipe(name="default", config_mixins=[]),
# Alpha (q_RL)
Recipe(name="alpha=0", config_mixins=["alpha=0"]),
# Beta
Recipe(name="beta=0", config_mixins=["beta=0"]),
Recipe(name="beta=0.25", config_mixins=["beta=0.25"]),
#Recipe(name="beta=0.5", config_mixins=["beta=0.5"]),
#Recipe(name="beta=0.75", config_mixins=["beta=0.75"]),
# Beam search
Recipe(name="beam-32", config_mixins=["beam-search"]),
Recipe(name="beam-128", config_mixins=["beam-search", "train_beam_size=128"]),
# Particle Filtering
#Recipe(name="particle-filtering",
# config_mixins=["train_beam_size=256", "particle-filtering"]),
# Epsilon
Recipe(name="epsilon=0.05", config_mixins=[
"beam-search", "epsilon=0.05"]),
#Recipe(name="epsilon=0.08", config_mixins=[
# "beam-search", "epsilon=0.08"]),
#Recipe(name="epsilon=0.1", config_mixins=[
# "beam-search", "epsilon=0.1"]),
#Recipe(name="epsilon=0.12", config_mixins=[
# "beam-search", "epsilon=0.12"]),
Recipe(name="epsilon=0.25", config_mixins=[
"beam-search", "epsilon=0.25"]),
# REINFORCE
Recipe(name="reinforce+beam=001+noahead", config_mixins=[
"batched-reinforce-basic", "batched-reinforce-epsilon=0.2",
"train_beam_size=1"]),
Recipe(name="reinforce+beam=032+noahead", config_mixins=[
"batched-reinforce-basic", "batched-reinforce-epsilon=0.2"]),
#Recipe(name="reinforce+beam=128+noahead", config_mixins=[
# "batched-reinforce-basic", "batched-reinforce-epsilon=0.2",
# "train_beam_size=128"]),
#Recipe(name="reinforce+beam=001+lookahead", config_mixins=[
# "batched-reinforce-basic", "batched-reinforce-epsilon=0.2",
# "train_beam_size=1", "batched-reinforce-lookahead"]),
#Recipe(name="reinforce+beam=032+lookahead", config_mixins=[
# "batched-reinforce-basic", "batched-reinforce-epsilon=0.2",
# "batched-reinforce-lookahead"]),
#Recipe(name="reinforce+beam=128+lookahead", config_mixins=[
# "batched-reinforce-basic", "batched-reinforce-epsilon=0.2",
# "train_beam_size=128", "batched-reinforce-lookahead"]),
# REINFORCE + baseline
Recipe(name="reinforce+baseline=0.1", config_mixins=[
"batched-reinforce-basic", "batched-reinforce-epsilon=0.2",
"baseline=0.1"]),
#Recipe(name="reinforce+baseline=0.03", config_mixins=[
# "batched-reinforce-basic", "batched-reinforce-epsilon=0.2",
# "baseline=0.03"]),
Recipe(name="reinforce+baseline=0.01", config_mixins=[
"batched-reinforce-basic", "batched-reinforce-epsilon=0.2",
"baseline=0.01"]),
#Recipe(name="reinforce+baseline=0.003", config_mixins=[
# "batched-reinforce-basic", "batched-reinforce-epsilon=0.2",
# "baseline=0.003"]),
Recipe(name="reinforce+baseline=0.001", config_mixins=[
"batched-reinforce-basic", "batched-reinforce-epsilon=0.2",
"baseline=0.001"]),
Recipe(name="reinforce+baseline=0.0001", config_mixins=[
"batched-reinforce-basic", "batched-reinforce-epsilon=0.2",
"baseline=0.0001"]),
Recipe(name="reinforce+baseline=0.00001", config_mixins=[
"batched-reinforce-basic", "batched-reinforce-epsilon=0.2",
"baseline=0.00001"]),
# REINFORCE + logistic baseline
Recipe(name="reinforce+logistic-value-fxn", config_mixins=[
"batched-reinforce-basic", "batched-reinforce-epsilon=0.2",
"logistic_value_fxn"]),
# History (h), Stack (s), Independent Utterance (IU)
Recipe(name="stack", config_mixins=["only-use-stack-emb"]),
#Recipe(name="h+s", config_mixins=["stack-emb"]),
#Recipe(name="iu", config_mixins=["indep-utt-expl"]),
#Recipe(name="h+s+iu", config_mixins=["stack-emb", "indep-utt-expl"]),
# Multi-step training
Recipe(name="multi-step-train", config_mixins=["multi-step-train"]),
# Best
AlchemyRecipe(name="alchemy-best", config_mixins=[
"beta=0", "only-use-stack-emb"]),
TangramsRecipe(name="tangrams-best", config_mixins=["beta=0.25"]),
SceneRecipe(name="scene-best", config_mixins=[
"beta=0", "only-use-stack-emb"]),
]
def get_recipe_name(self, configs, base):
for recipe in self._recipes:
if sorted(recipe.config_mixins) == sorted(configs) and recipe.base == base:
return recipe.name
return None
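# Illustrative lookup (recipe names taken from the list above):
#   RLongCookbook().get_recipe_name(["alpha=0"], "default-base")  # -> "alpha=0"
#   get_recipe_name returns None when no recipe matches the given mixins and base.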
|
ContextualSP/lemon/executor/strongsup/results/recipe.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/results/recipe.py",
"repo_id": "ContextualSP",
"token_count": 3317
}
| 235 |
import os
import pytest
from strongsup.predicate import Predicate
from strongsup.value import check_denotation
from strongsup.tables.graph import TablesKnowledgeGraph
from strongsup.tables.executor import TablesPostfixExecutor
from strongsup.tables.structure import Date, NeqInfiniteSet, RangeInfiniteSet, GenericDateInfiniteSet
from strongsup.tables.value import StringValue, NumberValue, DateValue
DATA_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
class TestExecutorNt0(object):
@pytest.fixture()
def graph(self):
return TablesKnowledgeGraph(os.path.join(DATA_PATH, 'nt-0.graph'))
@pytest.fixture()
def executor(self, graph):
return TablesPostfixExecutor(graph)
def run(self, executor, lf, old_deno=None, expected_deno=None):
"""Assert the executed denotation and return it."""
if isinstance(lf, str):
lf = [Predicate(x) for x in lf.split()]
try:
d = executor.execute(lf, old_deno)
if expected_deno is not None:
assert list(d) == expected_deno
except:
            # See what is going wrong in more detail
executor.debug = True
d = executor.execute(lf, old_deno)
if expected_deno is not None:
assert list(d) == expected_deno
# Also test execute_predicate
current_deno = old_deno
for predicate in lf:
current_deno = executor.execute_predicate(predicate, current_deno)
assert list(d) == list(current_deno)
return d
def run_error(self, executor, lf, old_deno=None):
"""Assert that an exception is thrown."""
if isinstance(lf, str):
lf = [Predicate(x) for x in lf.split()]
with pytest.raises(Exception):
executor.execute(lf, old_deno)
def test_basic_join(self, executor):
e = executor
d = self.run(e, 'fb:cell.2001', None,
[{'fb:cell.2001'}])
self.run(e, 'fb:row.row.year', d,
[{'fb:row.r1'}])
d = self.run(e, 'fb:cell.2001 fb:row.row.year', None,
[{'fb:row.r1'}])
assert d.utterance_idx == 0
d = self.run(e, 'fb:cell.2001 <EOU> fb:row.row.year', None,
[{'fb:row.r1'}])
assert d.utterance_idx == 1
d = self.run(e, '<EOU> fb:cell.2001 fb:row.row.year <EOU>', None,
[{'fb:row.r1'}])
assert d.utterance_idx == 2
d = self.run(e, 'fb:cell.2001 fb:row.row.year !fb:row.row.league <EOU>', None,
[{'fb:cell.usl_a_league'}])
assert d.utterance_idx == 1
assert e.finalize(d) == [StringValue('USL A-League')]
def test_errors(self, executor):
e = executor
self.run_error(e, 'argmax')
d = self.run(e, 'fb:cell.2001', None,
[{'fb:cell.2001'}])
self.run_error(e, 'fb:row.row.league', d)
self.run_error(e, '!fb:row.row.year', d)
self.run_error(e, 'max', d)
d = self.run(e, 'fb:cell.2002', d,
[{'fb:cell.2001'}, {'fb:cell.2002'}])
self.run_error(e, 'and', d)
d = self.run(e, 'or !fb:cell.cell.number', d,
[{2001., 2002.}])
# Test finalization
d = self.run(e, 'N2003 or', d)
d = e.finalize(d)
assert set(d) == {NumberValue(2001.0), NumberValue(2002.0), NumberValue(2003.0)}
d = self.run(e, 'fb:cell.does_not_exist')
with pytest.raises(Exception):
e.finalize(d)
def test_infinite_set(self, executor):
e = executor
self.run(e, 'N8000 > fb:cell.cell.number', None,
[{'fb:cell.8_567', 'fb:cell.9_734', 'fb:cell.10_727'}])
d = self.run(e, 'fb:cell.2005 !=')
assert isinstance(d[0], NeqInfiniteSet)
d = self.run(e, 'fb:row.row.year', d)
assert d[0] == {'fb:row.r{}'.format(x) for x in range(1, 11) if x != 5}
d = self.run(e, 'N2005 != fb:cell.cell.number fb:row.row.year')
assert d[0] == {'fb:row.r{}'.format(x) for x in range(1, 11) if x != 5}
d = self.run(e, 'N2005 > N2009 <= and fb:cell.cell.number fb:row.row.year', None,
[{'fb:row.r6', 'fb:row.r7', 'fb:row.r8', 'fb:row.r9'}])
def test_operations(self, executor):
e = executor
# aggregates
d = self.run(e, 'fb:cell.usl_a_league fb:row.row.league '
'!fb:row.row.avg_attendance !fb:cell.cell.number')
self.run(e, 'sum', d, [{24928.0}])
self.run(e, 'avg', d, [{6232.0}])
self.run(e, 'min', d, [{5628.0}])
self.run(e, 'max', d, [{7169.0}])
self.run(e, 'count', d, [{4.0}])
self.run(e, 'fb:cell.usl_a_league fb:row.row.league '
'!fb:row.row.division count', None, [{1.0}])
d = self.run(e, 'fb:cell.usl_a_league fb:row.row.league '
'!fb:row.row.avg_attendance')
self.run_error(e, 'sum', d)
d = self.run(e, 'type-row !fb:row.row.year !fb:cell.cell.date')
self.run(e, 'min', d, [{Date(2001, -1, -1)}])
self.run(e, 'max', d, [{Date(2010, -1, -1)}])
self.run(e, 'count', d, [{10.0}])
# merge
d = self.run(e, 'fb:cell.usl_a_league fb:row.row.league '
'fb:cell.quarterfinals fb:row.row.playoffs')
self.run(e, 'and', d, [{'fb:row.r1', 'fb:row.r4'}])
self.run(e, 'or count', d, [{6.0}])
self.run(e, 'type-row N3 fb:row.row.index != and count', None, [{9.0}])
self.run(e, 'type-row !fb:row.row.avg_attendance !fb:cell.cell.number '
'N6000 > N8000 < and and count', None, [{4.0}])
# diff
self.run(e, 'N11 fb:cell.2001 fb:row.row.year '
'!fb:row.row.regular_season !fb:cell.cell.number diff',
None, [{7.0}])
self.run(e, 'fb:cell.2001 fb:cell.2004 or fb:row.row.year '
'!fb:row.row.regular_season !fb:cell.cell.number N3 diff',
None, [{1.0, 2.0}])
def test_superlative(self, executor):
e = executor
unary = self.run(e, 'fb:cell.did_not_qualify fb:row.row.playoffs')
assert len(unary[0]) == 3
self.run(e, 'x !fb:row.row.index argmin', unary,
[{'fb:row.r3'}])
self.run(e, 'x !fb:row.row.index argmax', unary,
[{'fb:row.r8'}])
self.run(e, 'x !fb:row.row.avg_attendance !fb:cell.cell.number argmin', unary,
[{'fb:row.r6'}])
self.run_error(e, 'x !fb:row.row.next argmax', unary)
# Another example
unary = self.run(e, 'type-row')
assert len(unary[0]) == 10
self.run(e, 'x fb:row.row.next !fb:row.row.index argmin', unary,
[{'fb:row.r2'}])
self.run(e, 'x !fb:row.row.regular_season !fb:cell.cell.number argmin', unary,
[{'fb:row.r4', 'fb:row.r9'}])
# Yet another one
unary = self.run(e, 'type-row !fb:row.row.league')
assert len(unary[0]) == 3
self.run(e, 'x fb:row.row.league count argmax', unary,
[{'fb:cell.usl_first_division'}])
def test_finalization(self, executor):
e = executor
f = executor.finalize
d = f(self.run(e, 'N2002 N2003 or'))
assert set(d) == {NumberValue(2002.0), NumberValue(2003.0)}
d = f(self.run(e, 'N2 fb:row.row.index !fb:row.row.year !fb:cell.cell.date'))
assert set(d) == {DateValue(2002, -1, -1)}
d = f(self.run(e, 'type-row !fb:row.row.league'))
assert set(d) == {
StringValue('USL A-League'),
StringValue('USL First Division'),
StringValue('USSF D-2 Pro League')}
with pytest.raises(Exception):
f(self.run(e, 'type-row'))
class TestDates(object):
@pytest.fixture()
def graph(self):
return TablesKnowledgeGraph(os.path.join(DATA_PATH, 'nt-4.graph'))
@pytest.fixture()
def executor(self, graph):
return TablesPostfixExecutor(graph)
def run(self, executor, lf, old_deno=None, expected_deno=None):
"""Assert the executed denotation and return it."""
if isinstance(lf, str):
lf = [Predicate(x) for x in lf.split()]
try:
d = executor.execute(lf, old_deno)
if expected_deno is not None:
assert list(d) == expected_deno
except:
# See what is going wrong in more details
executor.debug = True
d = executor.execute(lf, old_deno)
if expected_deno is not None:
assert list(d) == expected_deno
return d
def test_date_logic(self, executor):
e = executor
d = self.run(e, 'Dxx-10-xx')
assert isinstance(d[0], GenericDateInfiniteSet)
self.run(e, 'Dxx-10-xx fb:cell.cell.date fb:row.row.date', None,
[{'fb:row.r10', 'fb:row.r11', 'fb:row.r12', 'fb:row.r13'}])
self.run(e, 'D1987-xx-xx fb:cell.cell.date fb:row.row.date count', None,
[{21.0}])
self.run(e, 'D1987-xx-xx > fb:cell.cell.date fb:row.row.date count', None,
[{19.0}])
self.run(e, 'D1987-xx-xx >= fb:cell.cell.date fb:row.row.date count', None,
[{40.0}])
self.run(e, 'fb:cell.home fb:row.row.venue x !fb:row.row.date !fb:cell.cell.date argmax', None,
[{'fb:row.r39'}])
class TestEndToEnd(object):
def run(self, ex_id, formula, target):
formula = self.expand_shorthands(formula)
formula = [Predicate(x) for x in formula]
if not isinstance(target, list):
target = [target]
graph = TablesKnowledgeGraph(os.path.join(DATA_PATH, 'nt-{}.graph'.format(ex_id)))
executor = TablesPostfixExecutor(graph)
try:
d = executor.execute(formula)
d = executor.finalize(d)
assert check_denotation(d, target)
except:
executor.debug = True
d = executor.execute(formula)
d = executor.finalize(d)
assert check_denotation(d, target)
def expand_shorthands(self, formula):
formula = formula.split()
expanded = []
for x in formula:
if x.startswith('c.'):
expanded.append('fb:cell.' + x[2:])
elif x.startswith('r.'):
expanded.append('fb:row.row.' + x[2:])
elif x.startswith('!r.'):
expanded.append('!fb:row.row.' + x[3:])
elif x.startswith('p.'):
expanded.append('fb:cell.cell.' + x[2:])
elif x.startswith('!p.'):
expanded.append('!fb:cell.cell.' + x[3:])
else:
expanded.append(x)
return expanded
def test_end_to_end(self):
self.run(0,
'c.usl_a_league r.league x !r.index argmax !r.year !p.number',
NumberValue(2004))
self.run(1,
'c.1st r.position x !r.index argmax !r.venue',
StringValue('Bangkok, Thailand'))
self.run(2,
'c.crettyard r.team !r.next !r.team',
StringValue('Wolfe Tones'))
self.run(3,
'c.united_states_los_angeles r.city !r.passengers !p.number '
'c.canada_saskatoon r.city !r.passengers !p.number diff',
NumberValue(12467))
self.run(4,
'type-row x !r.date !p.date argmin !r.opponent',
StringValue('Derby County'))
self.run(7,
'c.lake_tuz c.lake_palas_tuzla or x r.name_in_english !r.depth !p.number argmax',
StringValue('Lake Palas Tuzla'))
self.run(8,
'c.full_house r.hand !r.4_credits',
NumberValue(32))
self.run(9,
'c.ardo_kreek != c.ardo_kreek r.player !r.position r.position !r.player and',
[StringValue('Siim Ennemuist'), StringValue('Andri Aganits')])
self.run(12,
'c.matsuyama r.city_town_village count c.imabari r.city_town_village count diff',
NumberValue(2))
self.run(14,
'c.south_korea_kor r.nation N2010 >= p.number r.olympics and !r.athlete',
StringValue('Kim Yu-Na'))
self.run(15,
'N1 p.number r.position !r.venue',
StringValue('New Delhi, India')),
# This example shows that using set as intermediate denotation is not sufficient
#self.run(16,
# 'c.vs_bc_lions c.at_bc_lions or r.opponent !r.score !p.number sum',
# NumberValue(58))
# This example shows that empty intermediate denotation might still be fine
#self.run(19,
# 'N4 > p.number r.score N4 > p.num2 r.score or count',
# NumberValue(3))
self.run(20,
'type-row !r.album x r.album c.null != r.peak_chart_positions_aus and count argmax',
StringValue('The Sound Of Trees'))
self.run(21,
'type-row x !r.in_service !p.number argmax !r.model',
StringValue('KM-45 Series'))
self.run(22,
'c.auckland r.port x !r.propulsion !p.num2 argmax !r.name',
StringValue('Manawanui i'))
self.run(23,
'type-row !r.nationality x r.nationality count argmin',
[StringValue('Morocco'), StringValue('France'), StringValue('Spain')])
self.run(24,
'c.turkey r.nation !r.next !r.nation',
StringValue('Sweden'))
self.run(25,
'N1800 >= p.number r.founded N1900 < p.number r.founded and count',
NumberValue(4))
self.run(25,
'N1800 >= N1900 < and p.number r.founded count',
NumberValue(4))
self.run(28,
'type-row !r.computer !p.part x p.part r.computer count argmax',
StringValue('Windows'))
# Another example showing that using set as intermediate denotation is not sufficient
#self.run(30,
# 'c.totals != r.tenure !r.years !p.number avg',
# NumberValue(4))
self.run(35,
'N24 p.number r.age !r.contestant c.reyna_royo != and',
StringValue('Marisela Moreno Montero'))
self.run(37,
'c.desktop_with_integrated_color_display r.case fb:part.enhanced_keyboard p.part r.notes and count',
NumberValue(4))
self.run(49,
'c.new_zealand r.victor D2010-xx-xx p.date r.date and count',
NumberValue(3))
self.run(60,
'D2010-05-xx p.date r.date count',
NumberValue(2))
self.run(60,
'D2010-05-01 >= D2010-06-01 < and p.date r.date count',
NumberValue(2))
self.run(60,
'D2010-05-01 >= p.date D2010-06-01 < p.date and r.date count',
NumberValue(2))
|
ContextualSP/lemon/executor/strongsup/tests/tables/test_executor.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/tests/tables/test_executor.py",
"repo_id": "ContextualSP",
"token_count": 7918
}
| 236 |
import os
import re
from codecs import open
import itertools
from strongsup.example import DelexicalizedContext
from strongsup.evaluation import Evaluation, BernoulliSequenceStat
from strongsup.value import check_denotation
from strongsup.utils import EOU
class Visualizer(object):
"""Subclass around a Decoder, which does exactly the same thing as Decoder, but also
prints out context and predictions.
Args:
decoder (Decoder)
filename (unicode string): filename of where to write the output. This
overwrites the file
group_name (str): group name (show up in log and tensorboard)
train (bool): If this is a train or a valid Visualizer
"""
def __init__(self, decoder, filename, group_name, train):
self._decoder = decoder
self._filename = filename
self._group_name = group_name
self._train = train
@property
def group_name(self):
return self._group_name
def log_silver_logical_forms(self, examples):
"""Logs the silver logical forms of the examples if they exist.
Args:
examples (list[Example]): the examples
"""
with open(self._filename, 'a+') as log:
for example in examples:
self._log_example_basic(example, log)
def _log_example_basic(self, example, log):
"""Logs the basic info for a single example."""
context = example.context
log.write('Utterances:\n{}\n'.format(context).encode('utf-8'))
if isinstance(context, DelexicalizedContext):
log.write('Orig Utterances:\n{}\n'.format(context.original_context).encode('utf-8'))
log.write('World: {}\n'.format(context.world))
log.write('Denotation: {}\n'.format(example.answer))
log.write('Gold Logical Form: {}\n'.format(example.logical_form))
if example.logical_form:
pattern = _logical_form_pattern(example.logical_form)
log.write('Gold Logical Form Pattern: {}\n'.format(pattern))
if example.context.silver_logical_form is not None:
log.write('Silver Logical Form: {}\n\n'.format(
example.context.silver_logical_form.decisions))
else:
log.write('Silver Logical Form: None\n\n')
def reset(self, step=None):
"""Reset the output file and print the header."""
with open(self._filename, 'w') as log:
log.write('\n################ {}-{} ################\n\n'.format(step, self._group_name))
def predictions(self, examples, verbose=False):
"""Gets predictions from decoder and prints out additional information
Args:
examples (list[Example]): a batch of Example
Returns:
list[Beam]: a batch of Beams
"""
with open(self._filename, 'a') as log:
contexts = [example.context for example in examples]
beams, intermediates = self._decoder.get_intermediate_beams(
examples, train=self._train, verbose=verbose)
evaluation = Evaluation()
for beam, intermeds, example in zip(beams, intermediates, examples):
self._log_beam(beam, intermeds, example, log, evaluation)
return [beam.get_terminated() for beam in beams], evaluation
def _log_beam(self, final_beam, intermediate_beams, example, log, evaluation,
top_n=10, log_all_beams=True):
"""Takes a single prediction and logs information in a file
Args:
final_beam (Beam)
intermediate_beams (list[Beam])
example (Example): the example
log (file): file to dump output to
evaluation (Evaluation): statistics collector
top_n (int): number of predictions to print out
log_all_beams (bool): whether to log all intermediate beams
"""
context = example.context
predictions = final_beam.get_terminated()
ranker = self._decoder.exploration_policy(self._train)._ranker
probs = self._decoder.get_probs(predictions)
evaluation.add('numCandidates_{}'.format(self._group_name), len(predictions))
log.write('World:\n')
context.world.dump_human_readable(log)
log.write('\n')
self._log_example_basic(example, log)
first_candidate = 0 if predictions else None
num_deno_corrects = 0
first_deno_correct = None
deno_hit_mass = 0.
for i, prediction in enumerate(predictions):
denotation = prediction.finalized_denotation
is_correct = check_denotation(example.answer, denotation)
if is_correct:
log.write('Deno correct at {}\n'.format(i))
num_deno_corrects += 1
if first_deno_correct is None:
first_deno_correct = i
deno_hit_mass += probs[i]
if is_correct or i < top_n:
log.write('Predicted Logical Form {}: {}\n'.format(i, prediction.decisions))
log.write('Predicted Denotation {}: {}\n'.format(i, denotation))
log.write('\n')
if not predictions:
log.write('No predictions\n')
else:
log.write('Candidates with correct denotation: {} / {}\n'.format(num_deno_corrects, len(predictions)))
log.write('First deno correct: {} / {}\n'.format(first_deno_correct, len(predictions)))
log.write('\n')
# Denotation Evaluation
deno_acc = (first_deno_correct == 0)
evaluation.add('denoAcc_{}'.format(self._group_name), deno_acc)
evaluation.add('denoHit_{}'.format(self._group_name), num_deno_corrects > 0)
evaluation.add('denoSpu_{}'.format(self._group_name), num_deno_corrects)
evaluation.add('denoHitMass_{}'.format(self._group_name), deno_hit_mass)
# Separate by number of utterances
num_utterances = len(context.utterances)
evaluation.add('denoAcc_{}_{}utts'.format(self._group_name, num_utterances), deno_acc)
evaluation.add('denoHit_{}_{}utts'.format(self._group_name, num_utterances), num_deno_corrects > 0)
evaluation.add('denoSpu_{}_{}utts'.format(self._group_name, num_utterances), num_deno_corrects)
evaluation.add('denoHitMass_{}_{}utts'.format(self._group_name, num_utterances), deno_hit_mass)
# Sequence Evaluation
first_seq_correct = None
if example.logical_form:
true_lf = _raw_lf(example.logical_form)
seq_acc = (len(predictions) > 0 and true_lf == _raw_lf(predictions[0].decisions))
evaluation.add('seqAcc_{}'.format(self._group_name), seq_acc)
evaluation.add('seqAcc_{}_{}utts'.format(self._group_name, num_utterances), seq_acc)
for i, prediction in enumerate(predictions):
if true_lf == _raw_lf(prediction.decisions):
first_seq_correct = i
log.write('Seq correct at {}: {}\n\n'.format(i, prediction.decisions))
seq_hit = True
seq_hit_mass = probs[i]
break
else: # No prediction has a matching LF
log.write('Seq correct not found.\n\n')
seq_hit = False
seq_hit_mass = 0.
evaluation.add('seqHit_{}'.format(self._group_name), seq_hit)
evaluation.add('seqHitMass_{}'.format(self._group_name), seq_hit_mass)
evaluation.add('spuriousMass_{}'.format(self._group_name), deno_hit_mass - seq_hit_mass)
evaluation.add('seqHit_{}_{}utts'.format(self._group_name, num_utterances), seq_hit)
evaluation.add('seqHitMass_{}_{}utts'.format(self._group_name, num_utterances), seq_hit_mass)
evaluation.add('spuriousMass_{}_{}utts'.format(self._group_name, num_utterances), deno_hit_mass - seq_hit_mass)
# Separate by LF pattern
pattern = _logical_form_pattern(example.logical_form)
evaluation.add('denoAcc_{}_{}'.format(self._group_name, pattern), deno_acc)
evaluation.add('denoHit_{}_{}'.format(self._group_name, pattern), num_deno_corrects > 0)
evaluation.add('denoSpu_{}_{}'.format(self._group_name, pattern), num_deno_corrects)
evaluation.add('seqAcc_{}_{}'.format(self._group_name, pattern), seq_acc)
evaluation.add('seqHit_{}_{}'.format(self._group_name, pattern), seq_hit)
evaluation.add('seqHitMass_{}_{}'.format(self._group_name, pattern), seq_hit_mass)
evaluation.add('spuriousMass_{}_{}'.format(self._group_name, pattern), deno_hit_mass - seq_hit_mass)
# Score breakdown: basic, attention, and soft_copy
# First, gather all paths of interest
paths_of_interest = [first_candidate, first_deno_correct, first_seq_correct]
uniqued_paths_of_interest = list(set(x for x in paths_of_interest if x is not None))
attentions, score_breakdowns = self._decoder.score_breakdown(
[predictions[i] for i in uniqued_paths_of_interest])
# Top candidate
if first_candidate is None:
log.write('[breakdown] Top candidate: NONE\n')
else:
log.write('[breakdown] Top candidate: {} / {}\n'.format(
first_candidate, len(predictions)))
self.log_score_breakdown(predictions[first_candidate],
attentions[uniqued_paths_of_interest.index(first_candidate)],
score_breakdowns[uniqued_paths_of_interest.index(first_candidate)], log)
log.write('\n')
# First deno correct
if first_deno_correct is None:
log.write('[breakdown] First deno correct: NONE\n')
elif first_deno_correct == first_candidate:
log.write('[breakdown] First deno correct: {} / {} (same as Top candidate)\n'.format(
first_deno_correct, len(predictions)))
else:
log.write('[breakdown] First deno correct: {} / {}\n'.format(
first_deno_correct, len(predictions)))
self.log_score_breakdown(predictions[first_deno_correct],
attentions[uniqued_paths_of_interest.index(first_deno_correct)],
score_breakdowns[uniqued_paths_of_interest.index(first_deno_correct)], log)
log.write('\n')
# First seq correct
if first_seq_correct is None:
log.write('[breakdown] First seq correct: NONE\n')
elif first_seq_correct == first_candidate:
log.write('[breakdown] First seq correct: {} / {} (same as Top candidate)\n'.format(
first_seq_correct, len(predictions)))
elif first_seq_correct == first_deno_correct:
log.write('[breakdown] First seq correct: {} / {} (same as First deno correct)\n'.format(
first_seq_correct, len(predictions)))
else:
log.write('[breakdown] First seq correct: {} / {}\n'.format(
first_seq_correct, len(predictions)))
self.log_score_breakdown(predictions[first_seq_correct],
attentions[uniqued_paths_of_interest.index(first_seq_correct)],
score_breakdowns[uniqued_paths_of_interest.index(first_seq_correct)], log)
log.write('\n')
# Print the Beams
if log_all_beams:
if not example.logical_form:
match_gold_prefix = lambda x: False
else:
match_gold_prefix = lambda x: true_lf[:len(x)] == x
for step, beam in enumerate(itertools.chain(intermediate_beams, [final_beam])):
log.write('Beam at step {}:\n'.format(step))
match_any = False
for path in beam:
match = match_gold_prefix(_raw_lf(path.decisions))
match_any = match_any or match
log.write('{match} {decisions} ({score}) -- {embed}\n'.format(
match='@' if match else ' ', decisions=path.decisions, score=ranker(path),
embed=path[-1].pretty_embed if len(path) > 0 else None
))
if example.logical_form:
evaluation.add('seqOra_{}_{}'.format(self._group_name, step), match_any)
log.write('\n')
def log_score_breakdown(self, path, attention, score_breakdown, log):
decisions = path.decisions
log.write('Logical form: {}\n'.format(' '.join(str(x) for x in decisions)))
for i, case in enumerate(path):
log.write(' Step {}: {} ___\n'.format(i, ' '.join(str(x) for x in decisions[:i])))
utterance = case.current_utterance
capped_utterance = utterance[:min(len(utterance), len(attention[i]))]
# Attention
log.write(' {}\n'.format(' '.join('{:>6}'.format(x.encode('utf8')[:6])
for x in capped_utterance)))
log.write(' {}\n'.format(' '.join('{:6.3f}'.format(x)
for x in attention[i][:len(capped_utterance)])))
attention_rank = sorted(list(range(len(capped_utterance))),
key=lambda j: -attention[i][j])
log.write(' {}\n'.format(' '.join('{:>6}'.format(
'*' if j in attention_rank[:3] else '')
for j in range(len(capped_utterance)))))
# Sort by total logit
choice_indices = sorted(list(range(len(case.choices))),
key=lambda j: -sum(score_breakdown[i][j]))
for j in choice_indices:
is_chosen = (case.choices[j] == decisions[i])
log.write(' {:>15} {} | {} | {:7.3f}\n'.format(
_abbrev_predicate(case.choices[j])[:15],
'@' if is_chosen else ' ',
' '.join('{:7.3f}'.format(x) for x in
score_breakdown[i][j]),
sum(score_breakdown[i][j])))
# Helper function
def _raw_lf(lf):
"""Return the logical form without EOU"""
return [x.name for x in lf if x.name != EOU]
def _logical_form_pattern(lf):
lf = ' '.join(_raw_lf(lf))
if re.match(r'^type-row count$', lf):
return 'row_count'
if re.match(r'^fb:cell\.\w+ fb:row\.row\.\w+ count$', lf):
return 'ent_count'
if re.match(r'^fb:cell\.\w+ fb:row\.row\.\w+ !fb:row\.row\.\w+$', lf):
return 'lookup'
if re.match(r'^fb:cell\.\w+ fb:row\.row\.\w+ !?fb:row\.row\.next !fb:row\.row\.\w+$', lf):
return 'next_prev'
if re.match(r'^type-row x !fb:row\.row\.index arg(min|max) !fb:row\.row\.\w+$', lf):
return 'first_last'
if re.match(r'^type-row !fb:row\.row\.index (min|max) fb:row\.row\.index !fb:row\.row\.\w+$', lf):
return 'first_last'
if re.match(r'^type-row !fb:row\.row\.\w+ x fb:row\.row\.\w+ count argmax$', lf):
return 'most_freq'
return 'unknown'
def _abbrev_predicate(x):
x = str(x)
if x.startswith('fb:row.row.'):
return 'r.' + x[11:]
if x.startswith('!fb:row.row.'):
return '!r.' + x[12:]
if x.startswith('fb:cell.cell.'):
return 'n.' + x[13:]
if x.startswith('!fb:cell.cell.'):
return '!n.' + x[14:]
if x.startswith('fb:cell.'):
return 'c.' + x[8:]
if x.startswith('fb:part.'):
return 'p.' + x[8:]
return x
|
ContextualSP/lemon/executor/strongsup/visualizer.py/0
|
{
"file_path": "ContextualSP/lemon/executor/strongsup/visualizer.py",
"repo_id": "ContextualSP",
"token_count": 7099
}
| 237 |
#!/bin/bash
set -euo pipefail
if [[ ! -f ARC-V1-Feb2018.zip ]]; then
echo Missing file ARC-V1-Feb2018.zip.
echo
echo Download it first: https://s3-us-west-2.amazonaws.com/ai2-website/data/ARC-V1-Feb2018.zip
exit 1
fi
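# Stream the Challenge test set out of the archive and pair each question id with the
# label of its first answer choice, writing the result to dummy-predictions.csv.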
unzip -p ARC-V1-Feb2018.zip ARC-V1-Feb2018-2/ARC-Challenge/ARC-Challenge-Test.jsonl | jq -r -c '[.id, .question.choices[0].label] | @csv' | tr -d '"' > dummy-predictions.csv
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/arc/data-challenge/build-dummy-predictions.sh/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/arc/data-challenge/build-dummy-predictions.sh",
"repo_id": "ContextualSP",
"token_count": 174
}
| 238 |
# eQASC
This directory has code and data for the eQASC evaluator, as described in the EMNLP 2020 paper [Learning to Explain: Datasets and Models for Identifying Valid Reasoning Chains in Multihop Question-Answering](https://www.semanticscholar.org/paper/Learning-to-Explain%3A-Datasets-and-Models-for-Valid-Jhamtani-Clark/ebaebfefec9d5c21a4559a1a038743bd437d2f01).
* [code/](code/) holds the evaluator
* [data/](data/) holds the labels used by the evaluator
## Example usage
To evaluate your prediction file (located at /tmp/my_predictions_test.jsonl) against the
test dataset, run this and look at the scores in the file /tmp/metrics.json:
```
cd code
docker build -t eqasc-evaluator .
docker run \
-e PYTHONPATH=. \
-e PYTHONUNBUFFERED=yes \
-v /tmp/my_predictions_test.jsonl:/predictions.jsonl:ro \
-v $PWD/../data/chainid_to_label_test.json:/labels.json:ro \
-v /tmp:/output:rw \
--entrypoint python \
eqasc-evaluator \
allennlp_reasoning_explainqa/evaluator/evaluator.py \
/predictions.jsonl \
/labels.json \
/output/metrics.json
```
To confirm that the evaluator is working on correct inputs, you can use [dummy
prediction files](data/). To do so, replace `/tmp/my_predictions_test.jsonl` above
with `$PWD/../data/dummy_predictions_test.jsonl`.
You'll find more details about the evaluator in the [code/](code/) directory.
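The predictions file is a JSONL file with one scored record per reasoning chain. A minimal sketch of producing such a file is below; the field names (`chain_id`, `score`) and the example ids are assumptions made for illustration — check the [dummy prediction files](data/) and the evaluator code for the authoritative schema.
```python
# Minimal sketch; field names and chain ids are illustrative assumptions, not a confirmed schema.
import json

# Map each reasoning-chain id to the model's confidence that the chain is a valid explanation.
scores = {"example_chain_id_1": 0.93, "example_chain_id_2": 0.12}

with open("/tmp/my_predictions_test.jsonl", "w") as f:
    for chain_id, score in scores.items():
        f.write(json.dumps({"chain_id": chain_id, "score": score}) + "\n")
```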
## Reference
Please cite the work like this:
```
@inproceedings{jhamtani2020grc,
title={Learning to Explain: Datasets and Models for Identifying Valid Reasoning Chains in Multihop Question-Answering},
author={Jhamtani, Harsh and Clark, Peter},
booktitle={Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
year={2020}
}
```
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/README.md/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/eqasc/README.md",
"repo_id": "ContextualSP",
"token_count": 603
}
| 239 |
from collections import defaultdict
from typing import Dict, List, Tuple
def sentences_from_sentences_file(sentences_filename: str) -> Dict[int, List[str]]:
all_sentences = dict() # type: Dict[Tuple[int, int], str]
with open(sentences_filename) as f:
for line in f:
process_id_str, sentence_number_str, text = line.strip().split('\t', 3)[:3]
process_id = int(process_id_str)
sentence_number = int(sentence_number_str)
all_sentences[(process_id, sentence_number)] = text
sentences_by_process = defaultdict(list) # type: Dict[int, List[str]]
for key, sentence in sorted(all_sentences.items()):
process_id, sentence_number = key
sentences_by_process[process_id].append(sentence)
return sentences_by_process
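# Usage sketch (file name is hypothetical): the input is a tab-separated file whose lines
# look like "<process_id>\t<sentence_number>\t<sentence text>". For example:
#
#   sentences_by_process = sentences_from_sentences_file("sentences.tsv")
#   sentences_by_process[514]  # -> sentences of process 514, ordered by sentence number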
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/process/sentence_file.py/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/propara/evaluator/process/sentence_file.py",
"repo_id": "ContextualSP",
"token_count": 316
}
| 240 |
#!/bin/bash
# This script will test the evaluator, build a docker image, and publish it as
# a Beaker image owned by the Leaderboard user. This is meant to be run by AI2
# after making changes to the QASC evaluator.
set -e
echo --------------------
echo Unit tests
echo --------------------
echo
set -x
python3 test_evaluator.py
set +x
echo
echo --------------------
echo Test docker image
echo --------------------
echo
set -x
./test.sh
set +x
echo
echo --------------------
echo Build local docker image
echo --------------------
echo
NAME="qasc-evaluator-$(date +"%Y%m%d-%H%M%S")"
set -x
docker build -t $NAME .
set +x
echo
echo --------------------
echo Publish Beaker image
echo --------------------
echo
# Beaker must be configured to run as the leaderboard user.
cat >>/tmp/beaker-leaderboard-config.yml <<EOF
agent_address: https://beaker.org
user_token: $(vault read -field=token secret/ai2/alexandria/beaker/dev)
EOF
set -x
export BEAKER_CONFIG_FILE=/tmp/beaker-leaderboard-config.yml
if [[ "$(beaker configure test | grep 'Authenticated as user:')" == 'Authenticated as user: "leaderboard" (us_s03ci03mnt6u)' ]]; then
echo 'beaker is correctly configured for user "leaderboard"'
else
echo 'beaker must be configured for user "leaderboard"'
exit 1
fi
beaker image create -n $NAME $NAME
set +x
rm /tmp/beaker-leaderboard-config.yml
|
ContextualSP/lemon/propara_evaluator/aristo-leaderboard/qasc/evaluator/publish_for_leaderboard.sh/0
|
{
"file_path": "ContextualSP/lemon/propara_evaluator/aristo-leaderboard/qasc/evaluator/publish_for_leaderboard.sh",
"repo_id": "ContextualSP",
"token_count": 432
}
| 241 |
<jupyter_start><jupyter_code>import json
import numpy as np
import os
def write(d, f):
json.dump(d, f)
f.write("\n")
# for f in os.listdir("/data_ext/v-xinyupi/PLoGAN/data/gan_corpus/ver_train_src.jsonl"):
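# Squeeze the per-conclusion corpus: consecutive records sharing the same "input" are
# merged into one record with parallel lists of conclusions and is_gold flags.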
current_input = None
is_gold = []
conclusions = []
last_input = None
with open("/data_ext/v-xinyupi/PLoGAN/data/gan_corpus/gen_valid_src.jsonl", "r") as fr:
with open("/data_ext/v-xinyupi/PLoGAN/data/gan_corpus_squeezed/gen_valid_src.jsonl", "w") as fw:
lines = fr.readlines()
cnt = 1
for l in lines:
dic = json.loads(l)
current_input = dic["input"]
ig = dic["is_gold"]
c = dic["output"]
if cnt == len(lines) or (current_input != last_input and last_input is not None): # we have reached a new chunk (or the end of the file)
new_dic = {
"input" : last_input,
"is_gold": is_gold,
"conclusions" : conclusions,
}
write(new_dic, fw)
is_gold.clear() # clear up
conclusions.clear()
conclusions.append(c)
is_gold.append(ig)
last_input = current_input
cnt += 1
# for f in os.listdir("/data_ext/v-xinyupi/PLoGAN/data/gan_corpus/ver_train_src.jsonl"):
current_input = None
is_gold = []
conclusions = []
last_input = None
with open("/data_ext/v-xinyupi/PLoGAN/data/gan_corpus_new/ver_train_es.jsonl", "r") as fr:
with open("/data_ext/v-xinyupi/PLoGAN/data/gan_corpus_squeezed/ver_train_es.jsonl", "w") as fw:
for l in fr.readlines():
dic = json.loads(l)
current_input = dic["input"]
ig = dic["is_gold"]
c = dic["conclusion"]
if current_input != last_input and last_input is not None: # we have reached a new chunk
new_dic = {
"input" : last_input,
"is_gold": is_gold,
"conclusions" : conclusions,
}
write(new_dic, fw)
is_gold.clear() # clear up
conclusions.clear()
conclusions.append(c)
is_gold.append(ig)
last_input = current_input
# file = "ver_train_src.jsonl"
# with open(f"../../data/gan_corpus_new/{file}", "r") as f:
# lines = f.readlines()
# with open(f"../../data/gan_corpus_new/{file}", "w") as f:
# for l in lines:
# dic = json.loads(l)
# new_dic = {"input": dic["input"], "conclusion": dic["output"], "is_gold": 1}
# write(new_dic, f)
with open("/data_ext/v-xinyupi/PLoGAN/data/Indicators/reverse.txt", "r") as f:
indicators = []
for l in f.readlines():
indicators.append(l.strip())
print(", ".join(indicators))<jupyter_output>but, yet, however, even if, although, though, even though, unless, otherwise, nonetheless, oppositely, on the contrary, contrarily, paradoxically, rather than, instead of, nevertheless, despite, in spite of, regardless of
|
ContextualSP/logigan/corpus_construction/elastic_search/merge.ipynb/0
|
{
"file_path": "ContextualSP/logigan/corpus_construction/elastic_search/merge.ipynb",
"repo_id": "ContextualSP",
"token_count": 1586
}
| 242 |