python_code (string, length 0–992k) | repo_name (string, length 8–46) | file_path (string, length 5–162)
---|---|---|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Replabel transforms for use with wav2letter's ASG criterion.
"""
def replabel_symbol(i):
"""
Replabel symbols used in wav2letter, currently just "1", "2", ...
Because these symbols collide with ordinary numeral tokens, this prevents training with numeral tokens; the scheme might change in the future
"""
return str(i)
def pack_replabels(tokens, dictionary, max_reps):
"""
Pack a token sequence so that repeated symbols are replaced by replabels
"""
if len(tokens) == 0 or max_reps <= 0:
return tokens
replabel_value_to_idx = [0] * (max_reps + 1)
for i in range(1, max_reps + 1):
replabel_value_to_idx[i] = dictionary.index(replabel_symbol(i))
result = []
prev_token = -1
num_reps = 0
for token in tokens:
if token == prev_token and num_reps < max_reps:
num_reps += 1
else:
if num_reps > 0:
result.append(replabel_value_to_idx[num_reps])
num_reps = 0
result.append(token)
prev_token = token
if num_reps > 0:
result.append(replabel_value_to_idx[num_reps])
return result
def unpack_replabels(tokens, dictionary, max_reps):
"""
Unpack a token sequence so that replabels are replaced by repeated symbols
"""
if len(tokens) == 0 or max_reps <= 0:
return tokens
replabel_idx_to_value = {}
for i in range(1, max_reps + 1):
replabel_idx_to_value[dictionary.index(replabel_symbol(i))] = i
result = []
prev_token = -1
for token in tokens:
try:
for _ in range(replabel_idx_to_value[token]):
result.append(prev_token)
prev_token = -1
except KeyError:
result.append(token)
prev_token = token
return result
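# Illustrative round-trip sketch for pack_replabels / unpack_replabels above
# (not from the original file). The stand-in "dictionary" below is an
# assumption: only the index() lookup used by the transforms is provided.
import types
_symbols = ["<pad>", "a", "b", "1", "2"]  # "1" / "2" act as the replabels
_dict = types.SimpleNamespace(index=_symbols.index)
_packed = pack_replabels([1, 1, 1, 2], _dict, max_reps=2)
# "a a a b" becomes "a <2 repeats> b", i.e. [1, 4, 2]
assert _packed == [1, 4, 2]
assert unpack_replabels(_packed, _dict, max_reps=2) == [1, 1, 1, 2]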
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/examples/speech_recognition/data/replabels.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .asr_dataset import AsrDataset
__all__ = [
'AsrDataset',
]
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/examples/speech_recognition/data/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This module contains a collection of classes that implement
collate functionality for various tasks.
Collaters should know what data to expect for each sample
and should pack / collate the samples into batches.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import torch
from fairseq.data import data_utils as fairseq_data_utils
class Seq2SeqCollater(object):
"""
Implements a collate function mainly for seq2seq tasks.
It expects each sample to contain features (src_tokens) and
targets.
This collater is also used for the aligned training task.
"""
def __init__(
self,
feature_index=0,
label_index=1,
pad_index=1,
eos_index=2,
move_eos_to_beginning=True,
):
self.feature_index = feature_index
self.label_index = label_index
self.pad_index = pad_index
self.eos_index = eos_index
self.move_eos_to_beginning = move_eos_to_beginning
def _collate_frames(self, frames):
"""Convert a list of 2d frames into a padded 3d tensor
Args:
frames (list): list of 2d frames of size L[i]*f_dim. Where L[i] is
length of i-th frame and f_dim is static dimension of features
Returns:
3d tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
len_max = max(frame.size(0) for frame in frames)
f_dim = frames[0].size(1)
res = frames[0].new(len(frames), len_max, f_dim).fill_(0.0)
for i, v in enumerate(frames):
res[i, : v.size(0)] = v
return res
def collate(self, samples):
"""
Utility function to collate samples into a batch for speech recognition.
"""
if len(samples) == 0:
return {}
# parse samples into torch tensors
parsed_samples = []
for s in samples:
# skip invalid samples
if s["data"][self.feature_index] is None:
continue
source = s["data"][self.feature_index]
if isinstance(source, (np.ndarray, np.generic)):
source = torch.from_numpy(source)
target = s["data"][self.label_index]
if isinstance(target, (np.ndarray, np.generic)):
target = torch.from_numpy(target).long()
parsed_sample = {"id": s["id"], "source": source, "target": target}
parsed_samples.append(parsed_sample)
samples = parsed_samples
id = torch.LongTensor([s["id"] for s in samples])
frames = self._collate_frames([s["source"] for s in samples])
# sort samples by descending number of frames
frames_lengths = torch.LongTensor([s["source"].size(0) for s in samples])
frames_lengths, sort_order = frames_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
frames = frames.index_select(0, sort_order)
target = None
target_lengths = None
prev_output_tokens = None
if samples[0].get("target", None) is not None:
ntokens = sum(len(s["target"]) for s in samples)
target = fairseq_data_utils.collate_tokens(
[s["target"] for s in samples],
self.pad_index,
self.eos_index,
left_pad=False,
move_eos_to_beginning=False,
)
target = target.index_select(0, sort_order)
target_lengths = torch.LongTensor(
[s["target"].size(0) for s in samples]
).index_select(0, sort_order)
prev_output_tokens = fairseq_data_utils.collate_tokens(
[s["target"] for s in samples],
self.pad_index,
self.eos_index,
left_pad=False,
move_eos_to_beginning=self.move_eos_to_beginning,
)
prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
else:
ntokens = sum(len(s["source"]) for s in samples)
batch = {
"id": id,
"ntokens": ntokens,
"net_input": {"src_tokens": frames, "src_lengths": frames_lengths},
"target": target,
"target_lengths": target_lengths,
"nsentences": len(samples),
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens
return batch
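# Illustrative usage sketch (an assumption, not part of the original file):
# how Seq2SeqCollater.collate might be driven with two toy utterances. The
# shapes and token ids are made up; targets end with eos (index 2), as
# fairseq's collate_tokens expects when moving eos to the beginning.
_collater = Seq2SeqCollater(feature_index=0, label_index=1, pad_index=1, eos_index=2)
_samples = [
    {"id": 0, "data": [torch.rand(7, 40), torch.LongTensor([5, 6, 2])]},  # 7 frames, 40-dim features
    {"id": 1, "data": [torch.rand(4, 40), torch.LongTensor([8, 2])]},  # 4 frames
]
_batch = _collater.collate(_samples)
# frames are zero-padded to the longest utterance and sorted by length
assert _batch["net_input"]["src_tokens"].shape == (2, 7, 40)
assert _batch["nsentences"] == 2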
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/examples/speech_recognition/data/collaters.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
def calc_mean_invstddev(feature):
if len(feature.size()) != 2:
raise ValueError("We expect the input feature to be a 2-D tensor")
mean = feature.mean(0)
var = feature.var(0)
# avoid division by ~zero
eps = 1e-8
if (var < eps).any():
return mean, 1.0 / (torch.sqrt(var) + eps)
return mean, 1.0 / torch.sqrt(var)
def apply_mv_norm(features):
mean, invstddev = calc_mean_invstddev(features)
res = (features - mean) * invstddev
return res
def lengths_to_encoder_padding_mask(lengths, batch_first=False):
"""
convert lengths (a 1-D Long/Int tensor) to a 2-D binary tensor
Args:
lengths: a (B, )-shaped tensor
Return:
encoder_padding_mask: a (max_lengths, B) binary mask (or (B, max_lengths)
if batch_first), where [t, b] = 0 for t < lengths[b] and 1 otherwise
max_lengths: maximum length of the B sequences
TODO:
kernelize this function if benchmarking shows this function is slow
"""
max_lengths = torch.max(lengths).item()
bsz = lengths.size(0)
# build a (1, T) position tensor [0, ..., T-1] on the right device, expand it
# to (B, T), and mark positions at or beyond each sequence's length as padding
positions = torch.arange(max_lengths).to(lengths.device).view(1, max_lengths)
encoder_padding_mask = positions.expand(bsz, -1) >= lengths.view(bsz, 1).expand(-1, max_lengths)
if not batch_first:
return encoder_padding_mask.t(), max_lengths
else:
return encoder_padding_mask, max_lengths
def encoder_padding_mask_to_lengths(
encoder_padding_mask, max_lengths, batch_size, device
):
"""
convert encoder_padding_mask (2-D binary tensor) to a 1-D tensor
Conventionally, encoder output contains an encoder_padding_mask, which is
a 2-D mask of shape (T, B) whose (t, b) element indicates whether
encoder_out[t, b] is a valid output (=0) or not (=1). Occasionally, we
need to convert this mask tensor to a 1-D tensor of shape (B, ), where
[b] denotes the valid length of the b-th sequence
Args:
encoder_padding_mask: a (T, B)-shaped binary tensor or None; if None,
all positions are considered valid
max_lengths: maximum length of all sequences; if encoder_padding_mask is
not None, max_lengths must equal encoder_padding_mask.size(0)
batch_size: batch size; if encoder_padding_mask is
not None, batch_size must equal encoder_padding_mask.size(1)
device: which device to put the result on
Return:
seq_lengths: a (B, )-shaped tensor whose b-th element is the number
of valid elements in the b-th sequence
"""
if encoder_padding_mask is None:
return torch.Tensor([max_lengths] * batch_size).to(torch.int32).to(device)
assert encoder_padding_mask.size(0) == max_lengths, "max_lengths does not match"
assert encoder_padding_mask.size(1) == batch_size, "batch_size does not match"
return max_lengths - torch.sum(encoder_padding_mask, dim=0)
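# Illustrative sketch (toy values, an assumption rather than project code):
# lengths_to_encoder_padding_mask above marks positions at or beyond each
# sequence's length as padding.
_lengths = torch.LongTensor([3, 1])
_mask, _max_len = lengths_to_encoder_padding_mask(_lengths, batch_first=True)
assert _max_len == 3
assert _mask.long().tolist() == [[0, 0, 0], [0, 1, 1]]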
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/examples/speech_recognition/data/data_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from fairseq.data import FairseqDataset
from . import data_utils
from .collaters import Seq2SeqCollater
class AsrDataset(FairseqDataset):
"""
A dataset representing speech and corresponding transcription.
Args:
aud_paths: (List[str]): A list of str with paths to audio files.
aud_durations_ms (List[int]): A list of int containing the durations of
audio files.
tgt (List[torch.LongTensor]): A list of LongTensors containing the indices
of target transcriptions.
tgt_dict (~fairseq.data.Dictionary): target vocabulary.
ids (List[str]): A list of utterance IDs.
speakers (List[str]): A list of speakers corresponding to utterances.
num_mel_bins (int): Number of triangular mel-frequency bins (default: 80)
frame_length (float): Frame length in milliseconds (default: 25.0)
frame_shift (float): Frame shift in milliseconds (default: 10.0)
"""
def __init__(
self, aud_paths, aud_durations_ms, tgt,
tgt_dict, ids, speakers,
num_mel_bins=80, frame_length=25.0, frame_shift=10.0
):
assert frame_length > 0
assert frame_shift > 0
assert all(x > frame_length for x in aud_durations_ms)
self.frame_sizes = [
int(1 + (d - frame_length) / frame_shift)
for d in aud_durations_ms
]
assert len(aud_paths) > 0
assert len(aud_paths) == len(aud_durations_ms)
assert len(aud_paths) == len(tgt)
assert len(aud_paths) == len(ids)
assert len(aud_paths) == len(speakers)
self.aud_paths = aud_paths
self.tgt_dict = tgt_dict
self.tgt = tgt
self.ids = ids
self.speakers = speakers
self.num_mel_bins = num_mel_bins
self.frame_length = frame_length
self.frame_shift = frame_shift
def __getitem__(self, index):
import torchaudio
import torchaudio.compliance.kaldi as kaldi
tgt_item = self.tgt[index] if self.tgt is not None else None
path = self.aud_paths[index]
if not os.path.exists(path):
raise FileNotFoundError("Audio file not found: {}".format(path))
sound, sample_rate = torchaudio.load_wav(path)
output = kaldi.fbank(
sound,
num_mel_bins=self.num_mel_bins,
frame_length=self.frame_length,
frame_shift=self.frame_shift
)
output_cmvn = data_utils.apply_mv_norm(output)
self.s2s_collater = Seq2SeqCollater(
0, 1, pad_index=self.tgt_dict.pad(),
eos_index=self.tgt_dict.eos(), move_eos_to_beginning=True
)
return {"id": index, "data": [output_cmvn.detach(), tgt_item]}
def __len__(self):
return len(self.aud_paths)
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
"""
return self.s2s_collater.collate(samples)
def num_tokens(self, index):
return self.frame_sizes[index]
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return (
self.frame_sizes[index],
len(self.tgt[index]) if self.tgt is not None else 0,
)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
return np.arange(len(self))
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/examples/speech_recognition/data/asr_dataset.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion("cross_entropy_acc")
class CrossEntropyWithAccCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
def compute_loss(self, model, net_output, target, reduction, log_probs):
# N, T -> N * T
target = target.view(-1)
lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
if not hasattr(lprobs, "batch_first"):
logging.warning(
"ERROR: we need to know whether "
"batch first for the net output; "
"you need to set batch_first attribute for the return value of "
"model.get_normalized_probs. Now, we assume this is true, but "
"in the future, we will raise exception instead. "
)
batch_first = getattr(lprobs, "batch_first", True)
if not batch_first:
lprobs = lprobs.transpose(0, 1)
# N, T, D -> N * T, D
lprobs = lprobs.view(-1, lprobs.size(-1))
loss = F.nll_loss(
lprobs, target, ignore_index=self.padding_idx, reduction=reduction
)
return lprobs, loss
def get_logging_output(self, sample, target, lprobs, loss):
target = target.view(-1)
mask = target != self.padding_idx
correct = torch.sum(
lprobs.argmax(1).masked_select(mask) == target.masked_select(mask)
)
total = torch.sum(mask)
sample_size = (
sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data), # * sample['ntokens'],
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"correct": utils.item(correct.data),
"total": utils.item(total.data),
"nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
}
return sample_size, logging_output
def forward(self, model, sample, reduction="sum", log_probs=True):
"""Computes the cross entropy with accuracy metric for the given sample.
This is similar to CrossEntropyCriterion in fairseq, but also
computes accuracy metrics as part of logging
Args:
logprobs (torch.Tensor) of shape (N, T, D), i.e.
batch size, time steps, dimensions
targets (torch.Tensor) of shape (N, T), i.e. batch size, time steps
Returns:
tuple: With three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
TODO:
* Currently this criterion only works with LSTMEncoderModels or
FairseqModels which have a decoder, or models which return a torch.Tensor
as net_output.
We need to make a change to support all FairseqEncoder models.
"""
net_output = model(**sample["net_input"])
target = model.get_targets(sample, net_output)
lprobs, loss = self.compute_loss(
model, net_output, target, reduction, log_probs
)
sample_size, logging_output = self.get_logging_output(
sample, target, lprobs, loss
)
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
correct_sum = sum(log.get("correct", 0) for log in logging_outputs)
total_sum = sum(log.get("total", 0) for log in logging_outputs)
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
nframes = sum(log.get("nframes", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2) if sample_size > 0 else 0.0,
# if args.sentence_avg, then sample_size is nsentences, then loss
# is per-sentence loss; else sample_size is ntokens, the loss
# becomes per-output token loss
"ntokens": ntokens,
"nsentences": nsentences,
"nframes": nframes,
"sample_size": sample_size,
"acc": correct_sum * 100.0 / total_sum if total_sum > 0 else 0.0,
"correct": correct_sum,
"total": total_sum,
# total is the number of valid tokens
}
if sample_size != ntokens:
agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
# loss: per-sentence loss (when sentence_avg is set)
# nll_loss: per output token loss
return agg_output
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/examples/speech_recognition/criterions/cross_entropy_acc.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import torch
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
from examples.speech_recognition.data.replabels import pack_replabels
from wav2letter.criterion import ASGLoss, CriterionScaleMode
@register_criterion("asg_loss")
class ASGCriterion(FairseqCriterion):
@staticmethod
def add_args(parser):
group = parser.add_argument_group("ASG Loss")
group.add_argument(
"--asg-transitions-init",
help="initial diagonal value of transition matrix",
type=float,
default=0.0,
)
group.add_argument(
"--max-replabel", help="maximum # of replabels", type=int, default=2
)
group.add_argument(
"--linseg-updates",
help="# of training updates to use LinSeg initialization",
type=int,
default=0,
)
group.add_argument(
"--hide-linseg-messages",
help="hide messages about LinSeg initialization",
action="store_true",
)
def __init__(self, args, task):
super().__init__(args, task)
self.tgt_dict = task.target_dictionary
self.eos = self.tgt_dict.eos()
self.silence = (
self.tgt_dict.index(args.silence_token)
if args.silence_token in self.tgt_dict
else None
)
self.max_replabel = args.max_replabel
num_labels = len(self.tgt_dict)
self.asg = ASGLoss(num_labels, scale_mode=CriterionScaleMode.TARGET_SZ_SQRT)
self.asg.trans = torch.nn.Parameter(
args.asg_transitions_init * torch.eye(num_labels), requires_grad=True
)
self.linseg_progress = torch.nn.Parameter(
torch.tensor([0], dtype=torch.int), requires_grad=False
)
self.linseg_maximum = args.linseg_updates
self.linseg_message_state = "none" if args.hide_linseg_messages else "start"
def linseg_step(self):
if not self.training:
return False
if self.linseg_progress.item() < self.linseg_maximum:
if self.linseg_message_state == "start":
print("| using LinSeg to initialize ASG")
self.linseg_message_state = "finish"
self.linseg_progress.add_(1)
return True
elif self.linseg_message_state == "finish":
print("| finished LinSeg initialization")
self.linseg_message_state = "none"
return False
def replace_eos_with_silence(self, tgt):
if tgt[-1] != self.eos:
return tgt
elif self.silence is None or (len(tgt) > 1 and tgt[-2] == self.silence):
return tgt[:-1]
else:
return tgt[:-1] + [self.silence]
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
emissions = net_output["encoder_out"].transpose(0, 1).contiguous()
B = emissions.size(0)
T = emissions.size(1)
device = emissions.device
target = torch.IntTensor(B, T)
target_size = torch.IntTensor(B)
using_linseg = self.linseg_step()
for b in range(B):
initial_target_size = sample["target_lengths"][b].item()
if initial_target_size == 0:
raise ValueError("target size cannot be zero")
tgt = sample["target"][b, :initial_target_size].tolist()
tgt = self.replace_eos_with_silence(tgt)
tgt = pack_replabels(tgt, self.tgt_dict, self.max_replabel)
tgt = tgt[:T]
if using_linseg:
tgt = [tgt[t * len(tgt) // T] for t in range(T)]
target[b][: len(tgt)] = torch.IntTensor(tgt)
target_size[b] = len(tgt)
loss = self.asg.forward(emissions, target.to(device), target_size.to(device))
if reduce:
loss = torch.sum(loss)
sample_size = (
sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"]
)
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / nsentences,
"ntokens": ntokens,
"nsentences": nsentences,
"sample_size": sample_size,
}
return agg_output
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/examples/speech_recognition/criterions/ASG_loss.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from itertools import groupby
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
from examples.speech_recognition.data.data_utils import encoder_padding_mask_to_lengths
from examples.speech_recognition.utils.wer_utils import Code, EditDistance, Token
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def arr_to_toks(arr):
toks = []
for a in arr:
toks.append(Token(str(a), 0.0, 0.0))
return toks
def compute_ctc_uer(logprobs, targets, input_lengths, target_lengths, blank_idx):
"""
Computes utterance error rate for CTC outputs
Args:
logprobs: (Torch.tensor) N, T1, D tensor of log probabilities out
of the encoder
targets: (Torch.tensor) N, T2 tensor of targets
input_lengths: (Torch.tensor) lengths of inputs for each sample
target_lengths: (Torch.tensor) lengths of targets for each sample
blank_idx: (integer) id of blank symbol in target dictionary
Returns:
batch_errors: (float) errors in the batch
batch_total: (float) total number of reference tokens in the batch
"""
batch_errors = 0.0
batch_total = 0.0
for b in range(logprobs.shape[0]):
predicted = logprobs[b][: input_lengths[b]].argmax(1).tolist()
target = targets[b][: target_lengths[b]].tolist()
# dedup predictions
predicted = [p[0] for p in groupby(predicted)]
# remove blanks
nonblanks = []
for p in predicted:
if p != blank_idx:
nonblanks.append(p)
predicted = nonblanks
# compute the alignment based on EditDistance
alignment = EditDistance(False).align(
arr_to_toks(predicted), arr_to_toks(target)
)
# compute the number of errors
# note that alignment.codes can also be used for computing
# deletion, insertion and substitution error breakdowns in the future
for a in alignment.codes:
if a != Code.match:
batch_errors += 1
batch_total += len(target)
return batch_errors, batch_total
@register_criterion("ctc_loss")
class CTCCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
self.blank_idx = task.target_dictionary.index("<ctc_blank>")
self.pad_idx = task.target_dictionary.pad()
self.task = task
@staticmethod
def add_args(parser):
parser.add_argument(
"--use-source-side-sample-size",
action="store_true",
default=False,
help=(
"when compute average loss, using number of source tokens "
+ "as denominator. "
+ "This argument will be no-op if sentence-avg is used."
),
)
def forward(self, model, sample, reduce=True, log_probs=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
net_output = model(**sample["net_input"])
lprobs = model.get_normalized_probs(net_output, log_probs=log_probs)
if not hasattr(lprobs, "batch_first"):
logging.warning(
"ERROR: we need to know whether "
"batch first for the encoder output; "
"you need to set batch_first attribute for the return value of "
"model.get_normalized_probs. Now, we assume this is true, but "
"in the future, we will raise exception instead. "
)
batch_first = getattr(lprobs, "batch_first", True)
if not batch_first:
max_seq_len = lprobs.size(0)
bsz = lprobs.size(1)
else:
max_seq_len = lprobs.size(1)
bsz = lprobs.size(0)
device = net_output["encoder_out"].device
input_lengths = encoder_padding_mask_to_lengths(
net_output["encoder_padding_mask"], max_seq_len, bsz, device
)
target_lengths = sample["target_lengths"]
targets = sample["target"]
if batch_first:
# N T D -> T N D (F.ctc_loss expects this)
lprobs = lprobs.transpose(0, 1)
pad_mask = sample["target"] != self.pad_idx
targets_flat = targets.masked_select(pad_mask)
loss = F.ctc_loss(
lprobs,
targets_flat,
input_lengths,
target_lengths,
blank=self.blank_idx,
reduction="sum",
zero_infinity=True,
)
lprobs = lprobs.transpose(0, 1) # T N D -> N T D
errors, total = compute_ctc_uer(
lprobs, targets, input_lengths, target_lengths, self.blank_idx
)
if self.args.sentence_avg:
sample_size = sample["target"].size(0)
else:
if self.args.use_source_side_sample_size:
sample_size = torch.sum(input_lengths).item()
else:
sample_size = sample["ntokens"]
logging_output = {
"loss": utils.item(loss.data) if reduce else loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample["target"].size(0),
"sample_size": sample_size,
"errors": errors,
"total": total,
"nframes": torch.sum(sample["net_input"]["src_lengths"]).item(),
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
nsentences = sum(log.get("nsentences", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
errors = sum(log.get("errors", 0) for log in logging_outputs)
total = sum(log.get("total", 0) for log in logging_outputs)
nframes = sum(log.get("nframes", 0) for log in logging_outputs)
agg_output = {
"loss": loss_sum / sample_size / math.log(2),
"ntokens": ntokens,
"nsentences": nsentences,
"nframes": nframes,
"sample_size": sample_size,
"acc": 100.0 - min(errors * 100.0 / total, 100.0),
}
if sample_size != ntokens:
agg_output["nll_loss"] = loss_sum / ntokens / math.log(2)
return agg_output
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/examples/speech_recognition/criterions/CTC_loss.py |
import importlib
import os
# ASG loss requires wav2letter
blacklist = set()
try:
import wav2letter
except ImportError:
blacklist.add("ASG_loss.py")
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_") and file not in blacklist:
criterion_name = file[: file.find(".py")]
importlib.import_module(
"examples.speech_recognition.criterions." + criterion_name
)
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/examples/speech_recognition/criterions/__init__.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import contextlib
import sys
from collections import Counter
from multiprocessing import Pool
from fairseq.data.encoders.gpt2_bpe import get_encoder
def main():
"""
Helper script to encode raw text with the GPT-2 BPE using multiple processes.
The encoder.json and vocab.bpe files can be obtained here:
- https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json
- https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--encoder-json",
help='path to encoder.json',
)
parser.add_argument(
"--vocab-bpe",
type=str,
help='path to vocab.bpe',
)
parser.add_argument(
"--inputs",
nargs="+",
default=['-'],
help="input files to filter/encode",
)
parser.add_argument(
"--outputs",
nargs="+",
default=['-'],
help="path to save encoded outputs",
)
parser.add_argument(
"--keep-empty",
action="store_true",
help="keep empty lines",
)
parser.add_argument("--workers", type=int, default=20)
args = parser.parse_args()
assert len(args.inputs) == len(args.outputs), \
"number of input and output paths should match"
with contextlib.ExitStack() as stack:
inputs = [
stack.enter_context(open(input, "r", encoding="utf-8"))
if input != "-" else sys.stdin
for input in args.inputs
]
outputs = [
stack.enter_context(open(output, "w", encoding="utf-8"))
if output != "-" else sys.stdout
for output in args.outputs
]
encoder = MultiprocessingEncoder(args)
pool = Pool(args.workers, initializer=encoder.initializer)
encoded_lines = pool.imap(encoder.encode_lines, zip(*inputs), 100)
stats = Counter()
for i, (filt, enc_lines) in enumerate(encoded_lines, start=1):
if filt == "PASS":
for enc_line, output_h in zip(enc_lines, outputs):
print(enc_line, file=output_h)
else:
stats["num_filtered_" + filt] += 1
if i % 10000 == 0:
print("processed {} lines".format(i), file=sys.stderr)
for k, v in stats.most_common():
print("[{}] filtered {} lines".format(k, v), file=sys.stderr)
class MultiprocessingEncoder(object):
def __init__(self, args):
self.args = args
def initializer(self):
global bpe
bpe = get_encoder(self.args.encoder_json, self.args.vocab_bpe)
def encode(self, line):
global bpe
ids = bpe.encode(line)
return list(map(str, ids))
def decode(self, tokens):
global bpe
return bpe.decode(tokens)
def encode_lines(self, lines):
"""
Encode a set of lines. All lines will be encoded together.
"""
enc_lines = []
for line in lines:
line = line.strip()
if len(line) == 0 and not self.args.keep_empty:
return ["EMPTY", None]
tokens = self.encode(line)
enc_lines.append(" ".join(tokens))
return ["PASS", enc_lines]
def decode_lines(self, lines):
dec_lines = []
for line in lines:
tokens = map(int, line.strip().split())
dec_lines.append(self.decode(tokens))
return ["PASS", dec_lines]
if __name__ == "__main__":
main()
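# Hypothetical command line for the script above (the file paths are
# placeholders, not from the source); all flags are defined in main():
#   python multiprocessing_bpe_encoder.py \
#       --encoder-json encoder.json --vocab-bpe vocab.bpe \
#       --inputs train.raw --outputs train.bpe --workers 20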
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/examples/roberta/multiprocessing_bpe_encoder.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import os
import re
class InputExample:
def __init__(self, paragraph, qa_list, label):
self.paragraph = paragraph
self.qa_list = qa_list
self.label = label
def get_examples(data_dir, set_type):
"""
Extract paragraph and question-answer list from each json file
"""
examples = []
levels = ["middle", "high"]
set_type_c = set_type.split('-')
if len(set_type_c) == 2:
levels = [set_type_c[1]]
set_type = set_type_c[0]
for level in levels:
cur_dir = os.path.join(data_dir, set_type, level)
for filename in os.listdir(cur_dir):
cur_path = os.path.join(cur_dir, filename)
with open(cur_path, 'r') as f:
cur_data = json.load(f)
answers = cur_data["answers"]
options = cur_data["options"]
questions = cur_data["questions"]
context = cur_data["article"].replace("\n", " ")
context = re.sub(r'\s+', ' ', context)
for i in range(len(answers)):
label = ord(answers[i]) - ord("A")
qa_list = []
question = questions[i]
for j in range(4):
option = options[i][j]
if "_" in question:
qa_cat = question.replace("_", option)
else:
qa_cat = " ".join([question, option])
qa_cat = re.sub(r'\s+', ' ', qa_cat)
qa_list.append(qa_cat)
examples.append(InputExample(context, qa_list, label))
return examples
def main():
"""
Helper script to extract paragraphs, questions, and answers from the RACE dataset.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--input-dir",
help='input directory for downloaded RACE dataset',
)
parser.add_argument(
"--output-dir",
help='output directory for extracted data',
)
args = parser.parse_args()
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir, exist_ok=True)
for set_type in ["train", "dev", "test-middle", "test-high"]:
examples = get_examples(args.input_dir, set_type)
qa_file_paths = [os.path.join(args.output_dir, set_type + ".input" + str(i + 1)) for i in range(4)]
qa_files = [open(qa_file_path, 'w') for qa_file_path in qa_file_paths]
outf_context_path = os.path.join(args.output_dir, set_type + ".input0")
outf_label_path = os.path.join(args.output_dir, set_type + ".label")
outf_context = open(outf_context_path, 'w')
outf_label = open(outf_label_path, 'w')
for example in examples:
outf_context.write(example.paragraph + '\n')
for i in range(4):
qa_files[i].write(example.qa_list[i] + '\n')
outf_label.write(str(example.label) + '\n')
for f in qa_files:
f.close()
outf_label.close()
outf_context.close()
if __name__ == '__main__':
main()
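# Hypothetical command line for the script above (directory names are
# placeholders, not from the source):
#   python preprocess_RACE.py --input-dir RACE/ --output-dir race-data/
# For each of the train, dev, test-middle and test-high splits this writes
# <set_type>.input0 (paragraphs), <set_type>.input1..input4 (question+option
# pairs) and <set_type>.label files.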
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/examples/roberta/preprocess_RACE.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import lru_cache
import json
def convert_sentence_to_json(sentence):
if '_' in sentence:
prefix, rest = sentence.split('_', 1)
query, rest = rest.split('_', 1)
query_index = len(prefix.rstrip().split(' '))
else:
query, query_index = None, None
prefix, rest = sentence.split('[', 1)
pronoun, rest = rest.split(']', 1)
pronoun_index = len(prefix.rstrip().split(' '))
sentence = sentence.replace('_', '').replace('[', '').replace(']', '')
return {
'idx': 0,
'text': sentence,
'target': {
'span1_index': query_index,
'span1_text': query,
'span2_index': pronoun_index,
'span2_text': pronoun,
},
}
def extended_noun_chunks(sentence):
noun_chunks = {(np.start, np.end) for np in sentence.noun_chunks}
np_start, cur_np = 0, 'NONE'
for i, token in enumerate(sentence):
np_type = token.pos_ if token.pos_ in {'NOUN', 'PROPN'} else 'NONE'
if np_type != cur_np:
if cur_np != 'NONE':
noun_chunks.add((np_start, i))
if np_type != 'NONE':
np_start = i
cur_np = np_type
if cur_np != 'NONE':
noun_chunks.add((np_start, len(sentence)))
return [sentence[s:e] for (s, e) in sorted(noun_chunks)]
def find_token(sentence, start_pos):
found_tok = None
for tok in sentence:
if tok.idx == start_pos:
found_tok = tok
break
return found_tok
def find_span(sentence, search_text, start=0):
search_text = search_text.lower()
for tok in sentence[start:]:
remainder = sentence[tok.i:].text.lower()
if remainder.startswith(search_text):
len_to_consume = len(search_text)
start_idx = tok.idx
for next_tok in sentence[tok.i:]:
end_idx = next_tok.idx + len(next_tok.text)
if end_idx - start_idx == len_to_consume:
span = sentence[tok.i:next_tok.i + 1]
return span
return None
@lru_cache(maxsize=1)
def get_detokenizer():
from sacremoses import MosesDetokenizer
detok = MosesDetokenizer(lang='en')
return detok
@lru_cache(maxsize=1)
def get_spacy_nlp():
import en_core_web_lg
nlp = en_core_web_lg.load()
return nlp
def jsonl_iterator(input_fname, positive_only=False, ngram_order=3, eval=False):
detok = get_detokenizer()
nlp = get_spacy_nlp()
with open(input_fname) as fin:
for line in fin:
sample = json.loads(line.strip())
if positive_only and 'label' in sample and not sample['label']:
# only consider examples where the query is correct
continue
target = sample['target']
# clean up the query
query = target['span1_text']
if query is not None:
if '\n' in query:
continue
if query.endswith('.') or query.endswith(','):
query = query[:-1]
# split tokens
tokens = sample['text'].split(' ')
def strip_pronoun(x):
return x.rstrip('.,"')
# find the pronoun
pronoun_idx = target['span2_index']
pronoun = strip_pronoun(target['span2_text'])
if strip_pronoun(tokens[pronoun_idx]) != pronoun:
# hack: sometimes the index is misaligned
if strip_pronoun(tokens[pronoun_idx + 1]) == pronoun:
pronoun_idx += 1
else:
raise Exception('Misaligned pronoun!')
assert strip_pronoun(tokens[pronoun_idx]) == pronoun
# split tokens before and after the pronoun
before = tokens[:pronoun_idx]
after = tokens[pronoun_idx + 1:]
# the GPT BPE attaches leading spaces to tokens, so we keep track
# of whether we need spaces before or after the pronoun
leading_space = ' ' if pronoun_idx > 0 else ''
trailing_space = ' ' if len(after) > 0 else ''
# detokenize
before = detok.detokenize(before, return_str=True)
pronoun = detok.detokenize([pronoun], return_str=True)
after = detok.detokenize(after, return_str=True)
# hack: when the pronoun ends in a period (or comma), move the
# punctuation to the "after" part
if pronoun.endswith('.') or pronoun.endswith(','):
after = pronoun[-1] + trailing_space + after
pronoun = pronoun[:-1]
# hack: when the "after" part begins with a comma or period, remove
# the trailing space
if after.startswith('.') or after.startswith(','):
trailing_space = ''
# parse sentence with spacy
sentence = nlp(before + leading_space + pronoun + trailing_space + after)
# find pronoun span
start = len(before + leading_space)
first_pronoun_tok = find_token(sentence, start_pos=start)
pronoun_span = find_span(sentence, pronoun, start=first_pronoun_tok.i)
assert pronoun_span.text == pronoun
if eval:
# convert to format where pronoun is surrounded by "[]" and
# query is surrounded by "_"
query_span = find_span(sentence, query)
query_with_ws = '_{}_{}'.format(
query_span.text,
(' ' if query_span.text_with_ws.endswith(' ') else '')
)
pronoun_with_ws = '[{}]{}'.format(
pronoun_span.text,
(' ' if pronoun_span.text_with_ws.endswith(' ') else '')
)
if query_span.start < pronoun_span.start:
first = (query_span, query_with_ws)
second = (pronoun_span, pronoun_with_ws)
else:
first = (pronoun_span, pronoun_with_ws)
second = (query_span, query_with_ws)
sentence = (
sentence[:first[0].start].text_with_ws
+ first[1]
+ sentence[first[0].end:second[0].start].text_with_ws
+ second[1]
+ sentence[second[0].end:].text
)
yield sentence, sample.get('label', None)
else:
yield sentence, pronoun_span, query, sample.get('label', None)
def winogrande_jsonl_iterator(input_fname, eval=False):
with open(input_fname) as fin:
for line in fin:
sample = json.loads(line.strip())
sentence, option1, option2 = sample['sentence'], sample['option1'],\
sample['option2']
pronoun_span = (sentence.index('_'), sentence.index('_') + 1)
if eval:
query, cand = option1, option2
else:
query = option1 if sample['answer'] == '1' else option2
cand = option2 if sample['answer'] == '1' else option1
yield sentence, pronoun_span, query, cand
def filter_noun_chunks(chunks, exclude_pronouns=False, exclude_query=None, exact_match=False):
if exclude_pronouns:
chunks = [
np for np in chunks if (
np.lemma_ != '-PRON-'
and not all(tok.pos_ == 'PRON' for tok in np)
)
]
if exclude_query is not None:
excl_txt = [exclude_query.lower()]
filtered_chunks = []
for chunk in chunks:
lower_chunk = chunk.text.lower()
found = False
for excl in excl_txt:
if (
(not exact_match and (lower_chunk in excl or excl in lower_chunk))
or lower_chunk == excl
):
found = True
break
if not found:
filtered_chunks.append(chunk)
chunks = filtered_chunks
return chunks
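# Illustrative sketch (made-up sentence, an assumption): the WSC markup that
# convert_sentence_to_json above expects wraps the query in underscores and
# the pronoun in square brackets.
_parsed = convert_sentence_to_json(
    "The trophy does not fit into _the suitcase_ because [it] is too big."
)
assert _parsed["text"] == "The trophy does not fit into the suitcase because it is too big."
assert _parsed["target"]["span1_text"] == "the suitcase"
assert _parsed["target"]["span2_text"] == "it"
assert _parsed["target"]["span2_index"] == 9  # token position of "it"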
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/examples/roberta/wsc/wsc_utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import encoders
from fairseq.criterions import FairseqCriterion, register_criterion
@register_criterion('wsc')
class WSCCriterion(FairseqCriterion):
def __init__(self, args, task):
super().__init__(args, task)
if self.args.save_predictions is not None:
self.prediction_h = open(self.args.save_predictions, 'w')
else:
self.prediction_h = None
self.bpe = encoders.build_bpe(args)
self.tokenizer = encoders.build_tokenizer(args)
def __del__(self):
if self.prediction_h is not None:
self.prediction_h.close()
@staticmethod
def add_args(parser):
"""Add criterion-specific arguments to the parser."""
parser.add_argument('--wsc-margin-alpha', type=float, metavar='A', default=1.0)
parser.add_argument('--wsc-margin-beta', type=float, metavar='B', default=0.0)
parser.add_argument('--wsc-cross-entropy', action='store_true',
help='use cross entropy formulation instead of margin loss')
parser.add_argument('--save-predictions', metavar='FILE',
help='file to save predictions to')
def get_masked_input(self, tokens, mask):
masked_tokens = tokens.clone()
masked_tokens[mask] = self.task.mask
return masked_tokens
def get_lprobs(self, model, tokens, mask):
logits, _ = model(src_tokens=self.get_masked_input(tokens, mask))
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
mask = mask.type_as(scores)
scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
return scores
def get_loss(self, query_lprobs, cand_lprobs):
if self.args.wsc_cross_entropy:
return F.cross_entropy(
torch.cat([query_lprobs, cand_lprobs]).unsqueeze(0),
query_lprobs.new([0]).long(),
)
else:
return (
- query_lprobs
+ self.args.wsc_margin_alpha * (
cand_lprobs - query_lprobs + self.args.wsc_margin_beta
).clamp(min=0)
).sum()
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
loss, nloss = 0., 0
ncorrect, nqueries = 0, 0
for i, label in enumerate(sample['labels']):
query_lprobs = self.get_lprobs(
model,
sample['query_tokens'][i].unsqueeze(0),
sample['query_masks'][i].unsqueeze(0),
)
cand_lprobs = self.get_lprobs(
model,
sample['candidate_tokens'][i],
sample['candidate_masks'][i],
)
pred = (query_lprobs >= cand_lprobs).all().item()
if label is not None:
label = 1 if label else 0
ncorrect += 1 if pred == label else 0
nqueries += 1
if label:
# only compute a loss for positive instances
nloss += 1
loss += self.get_loss(query_lprobs, cand_lprobs)
id = sample['id'][i].item()
if self.prediction_h is not None:
print('{}\t{}\t{}'.format(id, pred, label), file=self.prediction_h)
if nloss == 0:
loss = torch.tensor(0.0, requires_grad=True)
sample_size = nqueries if nqueries > 0 else 1
logging_output = {
'loss': utils.item(loss.data) if reduce else loss.data,
'ntokens': sample['ntokens'],
'nsentences': sample['nsentences'],
'sample_size': sample_size,
'ncorrect': ncorrect,
'nqueries': nqueries,
}
return loss, sample_size, logging_output
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
ntokens = sum(log.get('ntokens', 0) for log in logging_outputs)
nsentences = sum(log.get('nsentences', 0) for log in logging_outputs)
sample_size = sum(log.get('sample_size', 0) for log in logging_outputs)
agg_output = {
'loss': loss_sum / sample_size / math.log(2),
'ntokens': ntokens,
'nsentences': nsentences,
'sample_size': sample_size,
}
ncorrect = sum(log.get('ncorrect', 0) for log in logging_outputs)
nqueries = sum(log.get('nqueries', 0) for log in logging_outputs)
if nqueries > 0:
agg_output['accuracy'] = ncorrect / float(nqueries)
return agg_output
@register_criterion('winogrande')
class WinograndeCriterion(WSCCriterion):
def forward(self, model, sample, reduce=True):
# compute loss and accuracy
query_lprobs = self.get_lprobs(
model,
sample['query_tokens'],
sample['query_masks'],
)
cand_lprobs = self.get_lprobs(
model,
sample['candidate_tokens'],
sample['candidate_masks'],
)
pred = query_lprobs >= cand_lprobs
loss = self.get_loss(query_lprobs, cand_lprobs)
sample_size = sample['query_tokens'].size(0)
ncorrect = pred.sum().item()
logging_output = {
'loss': utils.item(loss.data) if reduce else loss.data,
'ntokens': sample['ntokens'],
'nsentences': sample['nsentences'],
'sample_size': sample_size,
'ncorrect': ncorrect,
'nqueries': sample_size,
}
return loss, sample_size, logging_output
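# Numeric sketch (toy numbers, an assumption): for one query/candidate pair,
# the margin branch of WSCCriterion.get_loss above reduces to
#   loss = -q + alpha * max(0, c - q + beta)
# for query score q and candidate score c. With the defaults declared in
# add_args (alpha=1.0, beta=0.0):
_q, _c = -1.2, -2.5  # average masked log-probs; higher (less negative) is better
_loss = -_q + 1.0 * max(0.0, _c - _q + 0.0)
assert abs(_loss - 1.2) < 1e-9  # the candidate already scores worse, so only -q remains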
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/examples/roberta/wsc/wsc_criterion.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import wsc_criterion # noqa
from . import wsc_task # noqa
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/examples/roberta/wsc/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import tempfile
import numpy as np
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.data import (
data_utils,
Dictionary,
encoders,
IdDataset,
ListDataset,
NestedDictionaryDataset,
NumSamplesDataset,
NumelDataset,
PadDataset,
SortDataset,
)
from fairseq.tasks import FairseqTask, register_task
from . import wsc_utils
@register_task('wsc')
class WSCTask(FairseqTask):
"""Task to finetune RoBERTa for Winograd Schemas."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', metavar='DIR',
help='path to data directory; we load <split>.jsonl')
parser.add_argument('--init-token', type=int, default=None,
help='add token at the beginning of each batch item')
def __init__(self, args, vocab):
super().__init__(args)
self.vocab = vocab
self.mask = vocab.add_symbol('<mask>')
self.bpe = encoders.build_bpe(args)
self.tokenizer = encoders.build_tokenizer(args)
# hack to handle GPT-2 BPE, which includes leading spaces
if args.bpe == 'gpt2':
self.leading_space = True
self.trailing_space = False
else:
self.leading_space = False
self.trailing_space = True
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol('<mask>')
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == 'wsc', 'Must set --criterion=wsc'
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))
print('| dictionary: {} types'.format(len(vocab)))
return cls(args, vocab)
def binarize(self, s: str, append_eos: bool = False):
if self.tokenizer is not None:
s = self.tokenizer.encode(s)
if self.bpe is not None:
s = self.bpe.encode(s)
tokens = self.vocab.encode_line(
s, append_eos=append_eos, add_if_not_exist=False,
).long()
if self.args.init_token is not None:
tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
return tokens
def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space):
toks = self.binarize(
prefix + leading_space + txt + trailing_space + suffix,
append_eos=True,
)
mask = torch.zeros_like(toks, dtype=torch.uint8)
mask_start = len(self.binarize(prefix))
mask_size = len(self.binarize(leading_space + txt))
mask[mask_start:mask_start + mask_size] = 1
return toks, mask
def load_dataset(self, split, epoch=0, combine=False, data_path=None, return_only=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if data_path is None:
data_path = os.path.join(self.args.data, split + '.jsonl')
if not os.path.exists(data_path):
raise FileNotFoundError('Cannot find data: {}'.format(data_path))
query_tokens = []
query_masks = []
query_lengths = []
candidate_tokens = []
candidate_masks = []
candidate_lengths = []
labels = []
for sentence, pronoun_span, query, label in wsc_utils.jsonl_iterator(data_path):
prefix = sentence[:pronoun_span.start].text
suffix = sentence[pronoun_span.end:].text_with_ws
# spaCy spans include trailing spaces, but we need to know about
# leading spaces for the GPT-2 BPE
leading_space = ' ' if sentence[:pronoun_span.start].text_with_ws.endswith(' ') else ''
trailing_space = ' ' if pronoun_span.text_with_ws.endswith(' ') else ''
# get noun phrases, excluding pronouns and anything overlapping with the query
cand_spans = wsc_utils.filter_noun_chunks(
wsc_utils.extended_noun_chunks(sentence),
exclude_pronouns=True,
exclude_query=query,
exact_match=False,
)
if query is not None:
query_toks, query_mask = self.binarize_with_mask(
query, prefix, suffix, leading_space, trailing_space
)
query_len = len(query_toks)
else:
query_toks, query_mask, query_len = None, None, 0
query_tokens.append(query_toks)
query_masks.append(query_mask)
query_lengths.append(query_len)
cand_toks, cand_masks = [], []
for cand_span in cand_spans:
toks, mask = self.binarize_with_mask(
cand_span.text, prefix, suffix, leading_space, trailing_space,
)
cand_toks.append(toks)
cand_masks.append(mask)
# collate candidates
cand_toks = data_utils.collate_tokens(cand_toks, pad_idx=self.vocab.pad())
cand_masks = data_utils.collate_tokens(cand_masks, pad_idx=0)
assert cand_toks.size() == cand_masks.size()
candidate_tokens.append(cand_toks)
candidate_masks.append(cand_masks)
candidate_lengths.append(cand_toks.size(1))
labels.append(label)
query_lengths = np.array(query_lengths)
query_tokens = ListDataset(query_tokens, query_lengths)
query_masks = ListDataset(query_masks, query_lengths)
candidate_lengths = np.array(candidate_lengths)
candidate_tokens = ListDataset(candidate_tokens, candidate_lengths)
candidate_masks = ListDataset(candidate_masks, candidate_lengths)
labels = ListDataset(labels, [1]*len(labels))
dataset = {
'id': IdDataset(),
'query_tokens': query_tokens,
'query_masks': query_masks,
'candidate_tokens': candidate_tokens,
'candidate_masks': candidate_masks,
'labels': labels,
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(query_tokens, reduce=True),
}
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[query_lengths],
)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(query_tokens))
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
if return_only:
return dataset
self.datasets[split] = dataset
return self.datasets[split]
def build_dataset_for_inference(self, sample_json):
with tempfile.NamedTemporaryFile(buffering=0) as h:
h.write((json.dumps(sample_json) + '\n').encode('utf-8'))
dataset = self.load_dataset(
'disambiguate_pronoun',
data_path=h.name,
return_only=True,
)
return dataset
def disambiguate_pronoun(self, model, sentence, use_cuda=False):
sample_json = wsc_utils.convert_sentence_to_json(sentence)
dataset = self.build_dataset_for_inference(sample_json)
sample = dataset.collater([dataset[0]])
if use_cuda:
sample = utils.move_to_cuda(sample)
def get_masked_input(tokens, mask):
masked_tokens = tokens.clone()
masked_tokens[mask.bool()] = self.mask
return masked_tokens
def get_lprobs(tokens, mask):
logits, _ = model(src_tokens=get_masked_input(tokens, mask))
lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float)
scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1)
mask = mask.type_as(scores)
scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1)
return scores
cand_lprobs = get_lprobs(
sample['candidate_tokens'][0],
sample['candidate_masks'][0],
)
if sample['query_tokens'][0] is not None:
query_lprobs = get_lprobs(
sample['query_tokens'][0].unsqueeze(0),
sample['query_masks'][0].unsqueeze(0),
)
return (query_lprobs >= cand_lprobs).all().item() == 1
else:
best_idx = cand_lprobs.argmax().item()
full_cand = sample['candidate_tokens'][0][best_idx]
mask = sample['candidate_masks'][0][best_idx]
toks = full_cand[mask.bool()]
return self.bpe.decode(self.source_dictionary.string(toks)).strip()
@property
def source_dictionary(self):
return self.vocab
@property
def target_dictionary(self):
return self.vocab
@register_task('winogrande')
class WinograndeTask(WSCTask):
"""
Task for WinoGrande dataset. Efficient implementation for Winograd schema
tasks with exactly two candidates, one of which is correct.
"""
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == 'winogrande', 'Must set --criterion=winogrande'
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))
print('| dictionary: {} types'.format(len(vocab)))
return cls(args, vocab)
def load_dataset(self, split, epoch=0, combine=False, data_path=None, return_only=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
if data_path is None:
data_path = os.path.join(self.args.data, split + '.jsonl')
if not os.path.exists(data_path):
raise FileNotFoundError('Cannot find data: {}'.format(data_path))
query_tokens = []
query_masks = []
query_lengths = []
candidate_tokens = []
candidate_masks = []
candidate_lengths = []
itr = wsc_utils.winogrande_jsonl_iterator(data_path, eval=(split == 'test'))
for sample in itr:
sentence, pronoun_span, query, cand_text = sample
prefix = sentence[:pronoun_span[0]].rstrip()
suffix = sentence[pronoun_span[1]:]
leading_space = ' ' if sentence[:pronoun_span[0]].endswith(' ') else ''
trailing_space = ''
if query is not None:
query_toks, query_mask = self.binarize_with_mask(
query, prefix, suffix, leading_space, trailing_space,
)
query_len = len(query_toks)
else:
query_toks, query_mask, query_len = None, None, 0
query_tokens.append(query_toks)
query_masks.append(query_mask)
query_lengths.append(query_len)
cand_toks, cand_mask = self.binarize_with_mask(
cand_text, prefix, suffix, leading_space, trailing_space,
)
candidate_tokens.append(cand_toks)
candidate_masks.append(cand_mask)
candidate_lengths.append(cand_toks.size(0))
query_lengths = np.array(query_lengths)
def get_pad_dataset_fn(tokens, length, pad_idx):
return PadDataset(
ListDataset(tokens, length),
pad_idx=pad_idx,
left_pad=False,
)
query_tokens = get_pad_dataset_fn(query_tokens, query_lengths, self.vocab.pad())
query_masks = get_pad_dataset_fn(query_masks, query_lengths, 0)
candidate_lengths = np.array(candidate_lengths)
candidate_tokens = get_pad_dataset_fn(candidate_tokens, candidate_lengths, self.vocab.pad())
candidate_masks = get_pad_dataset_fn(candidate_masks, candidate_lengths, 0)
dataset = {
'id': IdDataset(),
'query_tokens': query_tokens,
'query_masks': query_masks,
'candidate_tokens': candidate_tokens,
'candidate_masks': candidate_masks,
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(query_tokens, reduce=True),
}
nested_dataset = NestedDictionaryDataset(
dataset,
sizes=[query_lengths],
)
with data_utils.numpy_seed(self.args.seed):
shuffle = np.random.permutation(len(query_tokens))
dataset = SortDataset(
nested_dataset,
# shuffle
sort_order=[shuffle],
)
if return_only:
return dataset
self.datasets[split] = dataset
return self.datasets[split]
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/examples/roberta/wsc/wsc_task.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import commonsense_qa_task # noqa
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/examples/roberta/commonsense_qa/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import os
import numpy as np
import torch
from fairseq.data import (
data_utils,
Dictionary,
encoders,
IdDataset,
ListDataset,
NestedDictionaryDataset,
NumSamplesDataset,
NumelDataset,
RawLabelDataset,
RightPadDataset,
SortDataset,
)
from fairseq.tasks import FairseqTask, register_task
@register_task('commonsense_qa')
class CommonsenseQATask(FairseqTask):
"""Task to finetune RoBERTa for Commonsense QA."""
@staticmethod
def add_args(parser):
"""Add task-specific arguments to the parser."""
parser.add_argument('data', metavar='DIR',
help='path to data directory; we load <split>.jsonl')
parser.add_argument('--init-token', type=int, default=None,
help='add token at the beginning of each batch item')
parser.add_argument('--num-classes', type=int, default=5)
def __init__(self, args, vocab):
super().__init__(args)
self.vocab = vocab
self.mask = vocab.add_symbol('<mask>')
self.bpe = encoders.build_bpe(args)
@classmethod
def load_dictionary(cls, filename):
"""Load the dictionary from the filename
Args:
filename (str): the filename
"""
dictionary = Dictionary.load(filename)
dictionary.add_symbol('<mask>')
return dictionary
@classmethod
def setup_task(cls, args, **kwargs):
assert args.criterion == 'sentence_ranking', 'Must set --criterion=sentence_ranking'
# load data and label dictionaries
vocab = cls.load_dictionary(os.path.join(args.data, 'dict.txt'))
print('| dictionary: {} types'.format(len(vocab)))
return cls(args, vocab)
def load_dataset(self, split, epoch=0, combine=False, data_path=None, return_only=False, **kwargs):
"""Load a given dataset split.
Args:
split (str): name of the split (e.g., train, valid, test)
"""
def binarize(s, append_bos=False):
if self.bpe is not None:
s = self.bpe.encode(s)
tokens = self.vocab.encode_line(
s, append_eos=True, add_if_not_exist=False,
).long()
if append_bos and self.args.init_token is not None:
tokens = torch.cat([tokens.new([self.args.init_token]), tokens])
return tokens
if data_path is None:
data_path = os.path.join(self.args.data, split + '.jsonl')
if not os.path.exists(data_path):
raise FileNotFoundError('Cannot find data: {}'.format(data_path))
src_tokens = [[] for i in range(self.args.num_classes)]
src_lengths = [[] for i in range(self.args.num_classes)]
labels = []
with open(data_path) as h:
for line in h:
example = json.loads(line.strip())
if 'answerKey' in example:
label = ord(example['answerKey']) - ord('A')
labels.append(label)
question = example['question']['stem']
assert len(example['question']['choices']) == self.args.num_classes
# format: `<s> Q: Where would I not want a fox? </s> A: hen house </s>`
question = 'Q: ' + question
question_toks = binarize(question, append_bos=True)
for i, choice in enumerate(example['question']['choices']):
src = 'A: ' + choice['text']
src_bin = torch.cat([question_toks, binarize(src)])
src_tokens[i].append(src_bin)
src_lengths[i].append(len(src_bin))
assert all(len(src_tokens[0]) == len(src_tokens[i]) for i in range(self.args.num_classes))
assert len(src_tokens[0]) == len(src_lengths[0])
assert len(labels) == 0 or len(labels) == len(src_tokens[0])
for i in range(self.args.num_classes):
src_lengths[i] = np.array(src_lengths[i])
src_tokens[i] = ListDataset(src_tokens[i], src_lengths[i])
src_lengths[i] = ListDataset(src_lengths[i])
dataset = {
'id': IdDataset(),
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(src_tokens[0], reduce=True),
}
for i in range(self.args.num_classes):
dataset.update({
'net_input{}'.format(i + 1): {
'src_tokens': RightPadDataset(
src_tokens[i],
pad_idx=self.source_dictionary.pad(),
),
'src_lengths': src_lengths[i],
}
})
if len(labels) > 0:
dataset.update({'target': RawLabelDataset(labels)})
dataset = NestedDictionaryDataset(
dataset,
sizes=[np.maximum.reduce([src_token.sizes for src_token in src_tokens])],
)
with data_utils.numpy_seed(self.args.seed):
dataset = SortDataset(
dataset,
# shuffle
sort_order=[np.random.permutation(len(dataset))],
)
print('| Loaded {} with {} samples'.format(split, len(dataset)))
self.datasets[split] = dataset
return self.datasets[split]
def build_model(self, args):
from fairseq import models
model = models.build_model(args, self)
model.register_classification_head(
'sentence_classification_head',
num_classes=1,
)
return model
@property
def source_dictionary(self):
return self.vocab
@property
def target_dictionary(self):
return self.vocab
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/examples/roberta/commonsense_qa/commonsense_qa_task.py |
#!/usr/bin/env python
"""Helper script to compare two argparse.Namespace objects."""
from argparse import Namespace # noqa
def main():
ns1 = eval(input('Namespace 1: '))
ns2 = eval(input('Namespace 2: '))
def keys(ns):
ks = set()
for k in dir(ns):
if not k.startswith('_'):
ks.add(k)
return ks
k1 = keys(ns1)
k2 = keys(ns2)
def print_keys(ks, ns1, ns2=None):
for k in ks:
if ns2 is None:
print('{}\t{}'.format(k, getattr(ns1, k, None)))
else:
print('{}\t{}\t{}'.format(k, getattr(ns1, k, None), getattr(ns2, k, None)))
print('Keys unique to namespace 1:')
print_keys(k1 - k2, ns1)
print()
print('Keys unique to namespace 2:')
print_keys(k2 - k1, ns2)
print()
print('Overlapping keys with different values:')
ks = [k for k in k1 & k2 if getattr(ns1, k, 'None') != getattr(ns2, k, 'None')]
print_keys(ks, ns1, ns2)
print()
if __name__ == '__main__':
main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/scripts/compare_namespaces.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Split a large file into a train and valid set while respecting document
boundaries. Documents should be separated by a single empty line.
"""
import argparse
import random
import sys
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('sample_output', help='train output file')
parser.add_argument('remainder_output', help='valid output file')
parser.add_argument('-k', type=int, help="remainder size")
parser.add_argument('--lines', action='store_true',
help='split lines instead of docs')
args = parser.parse_args()
assert args.k is not None
sample = []
remainder = []
num_docs = [0]
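    # Reservoir sampling of size k: keep the first k docs, then replace a random
    # kept doc with probability k/(i+1); whatever is displaced goes to the remainder.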
def update_sample(doc):
if len(sample) < args.k:
sample.append(doc.copy())
else:
i = num_docs[0]
j = random.randrange(i + 1)
if j < args.k:
remainder.append(sample[j])
sample[j] = doc.copy()
else:
remainder.append(doc.copy())
num_docs[0] += 1
doc.clear()
with open(args.input, 'r', encoding='utf-8') as h:
doc = []
for i, line in enumerate(h):
if line.strip() == "": # empty line indicates new document
update_sample(doc)
else:
doc.append(line)
if args.lines:
update_sample(doc)
if i % 1000000 == 0:
print(i, file=sys.stderr, end="", flush=True)
elif i % 100000 == 0:
print(".", file=sys.stderr, end="", flush=True)
if len(doc) > 0:
update_sample(doc)
print(file=sys.stderr, flush=True)
assert len(sample) == args.k
with open(args.sample_output, 'w', encoding='utf-8') as out:
first = True
for doc in sample:
if not first and not args.lines:
out.write("\n")
first = False
for line in doc:
out.write(line)
with open(args.remainder_output, 'w', encoding='utf-8') as out:
first = True
for doc in remainder:
if not first and not args.lines:
out.write("\n")
first = False
for line in doc:
out.write(line)
if __name__ == '__main__':
main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/scripts/split_train_valid_docs.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Helper script to pre-compute embeddings for a wav2letter++ dataset
"""
import argparse
import glob
import os
from shutil import copy
import h5py
import soundfile as sf
import numpy as np
import torch
from torch import nn
import tqdm
from fairseq.models.wav2vec import Wav2VecModel
def read_audio(fname):
""" Load an audio file and return PCM along with the sample rate """
wav, sr = sf.read(fname)
assert sr == 16e3
return wav, 16e3
class PretrainedWav2VecModel(nn.Module):
def __init__(self, fname):
super().__init__()
checkpoint = torch.load(fname)
self.args = checkpoint["args"]
model = Wav2VecModel.build_model(self.args, None)
model.load_state_dict(checkpoint["model"])
model.eval()
self.model = model
def forward(self, x):
with torch.no_grad():
z = self.model.feature_extractor(x)
if isinstance(z, tuple):
z = z[0]
c = self.model.feature_aggregator(z)
return z, c
class EmbeddingWriterConfig(argparse.ArgumentParser):
def __init__(self):
super().__init__("Pre-compute embeddings for wav2letter++ datasets")
kwargs = {"action": "store", "type": str, "required": True}
self.add_argument("--input", "-i",
help="Input Directory", **kwargs)
self.add_argument("--output", "-o",
help="Output Directory", **kwargs)
self.add_argument("--model",
help="Path to model checkpoint", **kwargs)
self.add_argument("--split",
help="Dataset Splits", nargs='+', **kwargs)
self.add_argument("--ext", default="wav", required=False,
help="Audio file extension")
self.add_argument("--no-copy-labels", action="store_true",
help="Do not copy label files. Useful for large datasets, use --targetdir in wav2letter then.")
self.add_argument("--use-feat", action="store_true",
help="Use the feature vector ('z') instead of context vector ('c') for features")
self.add_argument("--gpu",
help="GPU to use", default=0, type=int)
class Prediction():
""" Lightweight wrapper around a fairspeech embedding model """
def __init__(self, fname, gpu=0):
self.gpu = gpu
self.model = PretrainedWav2VecModel(fname).cuda(gpu)
def __call__(self, x):
x = torch.from_numpy(x).float().cuda(self.gpu)
with torch.no_grad():
z, c = self.model(x.unsqueeze(0))
return z.squeeze(0).cpu().numpy(), c.squeeze(0).cpu().numpy()
class H5Writer():
""" Write features as hdf5 file in wav2letter++ compatible format """
def __init__(self, fname):
self.fname = fname
os.makedirs(os.path.dirname(self.fname), exist_ok=True)
def write(self, data):
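        # wav2letter++ expects a flat, time-major feature array under "features" plus an
        # "info" triple of [frame rate, num frames, feature dim].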
channel, T = data.shape
with h5py.File(self.fname, "w") as out_ds:
data = data.T.flatten()
out_ds["features"] = data
out_ds["info"] = np.array([16e3 // 160, T, channel])
class EmbeddingDatasetWriter(object):
""" Given a model and a wav2letter++ dataset, pre-compute and store embeddings
Args:
input_root, str :
Path to the wav2letter++ dataset
output_root, str :
Desired output directory. Will be created if non-existent
split, str :
Dataset split
"""
def __init__(self, input_root, output_root, split,
model_fname,
extension="wav",
gpu=0,
verbose=False,
use_feat=False,
):
assert os.path.exists(model_fname)
self.model_fname = model_fname
self.model = Prediction(self.model_fname, gpu)
self.input_root = input_root
self.output_root = output_root
self.split = split
self.verbose = verbose
self.extension = extension
self.use_feat = use_feat
assert os.path.exists(self.input_path), \
"Input path '{}' does not exist".format(self.input_path)
def _progress(self, iterable, **kwargs):
if self.verbose:
return tqdm.tqdm(iterable, **kwargs)
return iterable
def require_output_path(self, fname=None):
path = self.get_output_path(fname)
os.makedirs(path, exist_ok=True)
@property
def input_path(self):
return self.get_input_path()
@property
def output_path(self):
return self.get_output_path()
def get_input_path(self, fname=None):
if fname is None:
return os.path.join(self.input_root, self.split)
return os.path.join(self.get_input_path(), fname)
def get_output_path(self, fname=None):
if fname is None:
return os.path.join(self.output_root, self.split)
return os.path.join(self.get_output_path(), fname)
def copy_labels(self):
self.require_output_path()
labels = list(filter(lambda x: self.extension not in x, glob.glob(self.get_input_path("*"))))
for fname in tqdm.tqdm(labels):
copy(fname, self.output_path)
@property
def input_fnames(self):
return sorted(glob.glob(self.get_input_path("*.{}".format(self.extension))))
def __len__(self):
return len(self.input_fnames)
def write_features(self):
paths = self.input_fnames
fnames_context = map(lambda x: os.path.join(self.output_path, x.replace("." + self.extension, ".h5context")), \
map(os.path.basename, paths))
for name, target_fname in self._progress(zip(paths, fnames_context), total=len(self)):
wav, sr = read_audio(name)
z, c = self.model(wav)
feat = z if self.use_feat else c
writer = H5Writer(target_fname)
writer.write(feat)
def __repr__(self):
return "EmbeddingDatasetWriter ({n_files} files)\n\tinput:\t{input_root}\n\toutput:\t{output_root}\n\tsplit:\t{split})".format(
n_files=len(self), **self.__dict__)
if __name__ == "__main__":
args = EmbeddingWriterConfig().parse_args()
for split in args.split:
writer = EmbeddingDatasetWriter(
input_root=args.input,
output_root=args.output,
split=split,
model_fname=args.model,
gpu=args.gpu,
extension=args.ext,
use_feat=args.use_feat,
)
print(writer)
writer.require_output_path()
print("Writing Features...")
writer.write_features()
print("Done.")
if not args.no_copy_labels:
print("Copying label data...")
writer.copy_labels()
print("Done.")
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/scripts/wav2vec_featurize.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Use this script in order to build symmetric alignments for your translation
dataset.
This script depends on fast_align and mosesdecoder tools. You will need to
build those before running the script.
fast_align:
github: http://github.com/clab/fast_align
instructions: follow the instructions in README.md
mosesdecoder:
github: http://github.com/moses-smt/mosesdecoder
instructions: http://www.statmt.org/moses/?n=Development.GetStarted
The script produces the following files under --output_dir:
text.joined - concatenation of lines from the source_file and the
target_file.
align.forward - forward pass of fast_align.
align.backward - backward pass of fast_align.
aligned.sym_heuristic - symmetrized alignment.
"""
import argparse
import os
from itertools import zip_longest
def main():
    parser = argparse.ArgumentParser(description='symmetric alignment builder')
# fmt: off
parser.add_argument('--fast_align_dir',
help='path to fast_align build directory')
parser.add_argument('--mosesdecoder_dir',
help='path to mosesdecoder root directory')
parser.add_argument('--sym_heuristic',
help='heuristic to use for symmetrization',
default='grow-diag-final-and')
parser.add_argument('--source_file',
help='path to a file with sentences '
'in the source language')
parser.add_argument('--target_file',
help='path to a file with sentences '
'in the target language')
parser.add_argument('--output_dir',
help='output directory')
# fmt: on
args = parser.parse_args()
fast_align_bin = os.path.join(args.fast_align_dir, 'fast_align')
symal_bin = os.path.join(args.mosesdecoder_dir, 'bin', 'symal')
sym_fast_align_bin = os.path.join(
args.mosesdecoder_dir, 'scripts', 'ems',
'support', 'symmetrize-fast-align.perl')
# create joined file
joined_file = os.path.join(args.output_dir, 'text.joined')
with open(args.source_file, 'r', encoding='utf-8') as src, open(args.target_file, 'r', encoding='utf-8') as tgt:
with open(joined_file, 'w', encoding='utf-8') as joined:
for s, t in zip_longest(src, tgt):
print('{} ||| {}'.format(s.strip(), t.strip()), file=joined)
# run forward alignment
fwd_align_file = os.path.join(args.output_dir, 'align.forward')
fwd_fast_align_cmd = '{FASTALIGN} -i {JOINED} -d -o -v > {FWD}'.format(
FASTALIGN=fast_align_bin,
JOINED=joined_file,
FWD=fwd_align_file)
assert os.system(fwd_fast_align_cmd) == 0
# run backward alignment
bwd_align_file = os.path.join(args.output_dir, 'align.backward')
bwd_fast_align_cmd = '{FASTALIGN} -i {JOINED} -d -o -v -r > {BWD}'.format(
FASTALIGN=fast_align_bin,
JOINED=joined_file,
BWD=bwd_align_file)
assert os.system(bwd_fast_align_cmd) == 0
# run symmetrization
sym_out_file = os.path.join(args.output_dir, 'aligned')
sym_cmd = '{SYMFASTALIGN} {FWD} {BWD} {SRC} {TGT} {OUT} {HEURISTIC} {SYMAL}'.format(
SYMFASTALIGN=sym_fast_align_bin,
FWD=fwd_align_file,
BWD=bwd_align_file,
SRC=args.source_file,
TGT=args.target_file,
OUT=sym_out_file,
HEURISTIC=args.sym_heuristic,
SYMAL=symal_bin
)
assert os.system(sym_cmd) == 0
if __name__ == '__main__':
main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/scripts/build_sym_alignment.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import sentencepiece as spm
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model", required=True,
help="sentencepiece model to use for decoding")
parser.add_argument("--input", required=True, help="input file to decode")
parser.add_argument("--input_format", choices=["piece", "id"], default="piece")
args = parser.parse_args()
sp = spm.SentencePieceProcessor()
sp.Load(args.model)
if args.input_format == "piece":
def decode(l):
return "".join(sp.DecodePieces(l))
elif args.input_format == "id":
def decode(l):
return "".join(sp.DecodeIds(l))
else:
raise NotImplementedError
def tok2int(tok):
# remap reference-side <unk> (represented as <<unk>>) to 0
return int(tok) if tok != "<<unk>>" else 0
with open(args.input, "r", encoding="utf-8") as h:
for line in h:
print(decode(list(map(tok2int, line.rstrip().split()))))
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/scripts/spm_decode.py |
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/scripts/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import re
import shutil
import sys
pt_regexp = re.compile(r'checkpoint(\d+|_\d+_\d+|_[a-z]+)\.pt')
pt_regexp_epoch_based = re.compile(r'checkpoint(\d+)\.pt')
pt_regexp_update_based = re.compile(r'checkpoint_\d+_(\d+)\.pt')
def parse_checkpoints(files):
entries = []
for f in files:
m = pt_regexp_epoch_based.fullmatch(f)
if m is not None:
entries.append((int(m.group(1)), m.group(0)))
else:
m = pt_regexp_update_based.fullmatch(f)
if m is not None:
entries.append((int(m.group(1)), m.group(0)))
return entries
def last_n_checkpoints(files, n):
entries = parse_checkpoints(files)
return [x[1] for x in sorted(entries, reverse=True)[:n]]
def every_n_checkpoints(files, n):
entries = parse_checkpoints(files)
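    # keep every n-th checkpoint, counting backwards from the most recent one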
return [x[1] for x in sorted(sorted(entries)[::-n])]
def main():
parser = argparse.ArgumentParser(
description=(
'Recursively delete checkpoint files from `root_dir`, '
'but preserve checkpoint_best.pt and checkpoint_last.pt'
)
)
parser.add_argument('root_dirs', nargs='*')
parser.add_argument('--save-last', type=int, default=0, help='number of last checkpoints to save')
parser.add_argument('--save-every', type=int, default=0, help='interval of checkpoints to save')
parser.add_argument('--preserve-test', action='store_true',
help='preserve checkpoints in dirs that start with test_ prefix (default: delete them)')
parser.add_argument('--delete-best', action='store_true', help='delete checkpoint_best.pt')
parser.add_argument('--delete-last', action='store_true', help='delete checkpoint_last.pt')
parser.add_argument('--no-dereference', action='store_true', help='don\'t dereference symlinks')
args = parser.parse_args()
files_to_desymlink = []
files_to_preserve = []
files_to_delete = []
for root_dir in args.root_dirs:
for root, _subdirs, files in os.walk(root_dir):
if args.save_last > 0:
to_save = last_n_checkpoints(files, args.save_last)
else:
to_save = []
if args.save_every > 0:
to_save += every_n_checkpoints(files, args.save_every)
for file in files:
if not pt_regexp.fullmatch(file):
continue
full_path = os.path.join(root, file)
if (
(
not os.path.basename(root).startswith('test_')
or args.preserve_test
)
and (
(file == 'checkpoint_last.pt' and not args.delete_last)
or (file == 'checkpoint_best.pt' and not args.delete_best)
or file in to_save
)
):
if os.path.islink(full_path) and not args.no_dereference:
files_to_desymlink.append(full_path)
else:
files_to_preserve.append(full_path)
else:
files_to_delete.append(full_path)
if len(files_to_desymlink) == 0 and len(files_to_delete) == 0:
print('Nothing to do.')
sys.exit(0)
files_to_desymlink = sorted(files_to_desymlink)
files_to_preserve = sorted(files_to_preserve)
files_to_delete = sorted(files_to_delete)
print('Operations to perform (in order):')
if len(files_to_desymlink) > 0:
for file in files_to_desymlink:
print(' - preserve (and dereference symlink): ' + file)
if len(files_to_preserve) > 0:
for file in files_to_preserve:
print(' - preserve: ' + file)
if len(files_to_delete) > 0:
for file in files_to_delete:
print(' - delete: ' + file)
while True:
resp = input('Continue? (Y/N): ')
if resp.strip().lower() == 'y':
break
elif resp.strip().lower() == 'n':
sys.exit(0)
print('Executing...')
if len(files_to_desymlink) > 0:
for file in files_to_desymlink:
realpath = os.path.realpath(file)
print('rm ' + file)
os.remove(file)
print('cp {} {}'.format(realpath, file))
shutil.copyfile(realpath, file)
if len(files_to_delete) > 0:
for file in files_to_delete:
print('rm ' + file)
os.remove(file)
if __name__ == '__main__':
main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/scripts/rm_pt.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Count the number of documents and average number of lines and tokens per
document in a large file. Documents should be separated by a single empty line.
"""
import argparse
import gzip
import sys
import numpy as np
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('--gzip', action='store_true')
args = parser.parse_args()
def gopen():
if args.gzip:
return gzip.open(args.input, 'r')
else:
return open(args.input, 'r', encoding='utf-8')
num_lines = []
num_toks = []
with gopen() as h:
num_docs = 1
num_lines_in_doc = 0
num_toks_in_doc = 0
for i, line in enumerate(h):
if len(line.strip()) == 0: # empty line indicates new document
num_docs += 1
num_lines.append(num_lines_in_doc)
num_toks.append(num_toks_in_doc)
num_lines_in_doc = 0
num_toks_in_doc = 0
else:
num_lines_in_doc += 1
num_toks_in_doc += len(line.rstrip().split())
if i % 1000000 == 0:
print(i, file=sys.stderr, end="", flush=True)
elif i % 100000 == 0:
print(".", file=sys.stderr, end="", flush=True)
print(file=sys.stderr, flush=True)
print("found {} docs".format(num_docs))
print("average num lines per doc: {}".format(np.mean(num_lines)))
print("average num toks per doc: {}".format(np.mean(num_toks)))
if __name__ == '__main__':
main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/scripts/count_docs.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import contextlib
import sys
import sentencepiece as spm
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--model", required=True,
help="sentencepiece model to use for encoding")
parser.add_argument("--inputs", nargs="+", default=['-'],
help="input files to filter/encode")
parser.add_argument("--outputs", nargs="+", default=['-'],
help="path to save encoded outputs")
parser.add_argument("--output_format", choices=["piece", "id"], default="piece")
parser.add_argument("--min-len", type=int, metavar="N",
help="filter sentence pairs with fewer than N tokens")
parser.add_argument("--max-len", type=int, metavar="N",
help="filter sentence pairs with more than N tokens")
args = parser.parse_args()
assert len(args.inputs) == len(args.outputs), \
"number of input and output paths should match"
sp = spm.SentencePieceProcessor()
sp.Load(args.model)
if args.output_format == "piece":
def encode(l):
return sp.EncodeAsPieces(l)
elif args.output_format == "id":
def encode(l):
return list(map(str, sp.EncodeAsIds(l)))
else:
raise NotImplementedError
if args.min_len is not None or args.max_len is not None:
def valid(line):
return (
(args.min_len is None or len(line) >= args.min_len)
and (args.max_len is None or len(line) <= args.max_len)
)
else:
def valid(lines):
return True
with contextlib.ExitStack() as stack:
inputs = [
stack.enter_context(open(input, "r", encoding="utf-8")) \
if input != "-" else sys.stdin
for input in args.inputs
]
outputs = [
stack.enter_context(open(output, "w", encoding="utf-8")) \
if output != "-" else sys.stdout
for output in args.outputs
]
stats = {
"num_empty": 0,
"num_filtered": 0,
}
def encode_line(line):
line = line.strip()
if len(line) > 0:
line = encode(line)
if valid(line):
return line
else:
stats["num_filtered"] += 1
else:
stats["num_empty"] += 1
return None
for i, lines in enumerate(zip(*inputs), start=1):
enc_lines = list(map(encode_line, lines))
if not any(enc_line is None for enc_line in enc_lines):
for enc_line, output_h in zip(enc_lines, outputs):
print(" ".join(enc_line), file=output_h)
if i % 10000 == 0:
print("processed {} lines".format(i), file=sys.stderr)
print("skipped {} empty lines".format(stats["num_empty"]), file=sys.stderr)
print("filtered {} lines".format(stats["num_filtered"]), file=sys.stderr)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/scripts/spm_encode.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Split a large file into shards while respecting document boundaries. Documents
should be separated by a single empty line.
"""
import argparse
import contextlib
def main():
parser = argparse.ArgumentParser()
parser.add_argument('input')
parser.add_argument('--num-shards', type=int)
args = parser.parse_args()
assert args.num_shards is not None and args.num_shards > 1
with open(args.input, 'r', encoding='utf-8') as h:
with contextlib.ExitStack() as stack:
outputs = [
stack.enter_context(open(args.input + ".shard" + str(i), "w", encoding="utf-8"))
for i in range(args.num_shards)
]
doc = []
first_doc = [True]*args.num_shards
def output_doc(i):
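                # flush the buffered doc to shard i, separating docs within a shard by a blank line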
if not first_doc[i]:
outputs[i].write("\n")
first_doc[i] = False
for line in doc:
outputs[i].write(line)
doc.clear()
num_docs = 0
for line in h:
if line.strip() == "": # empty line indicates new document
output_doc(num_docs % args.num_shards)
num_docs += 1
else:
doc.append(line)
output_doc(num_docs % args.num_shards)
if __name__ == '__main__':
main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/scripts/shard_docs.py |
#!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import sentencepiece as spm
if __name__ == "__main__":
spm.SentencePieceTrainer.Train(" ".join(sys.argv[1:]))
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/scripts/spm_train.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import collections
import torch
import os
import re
def average_checkpoints(inputs):
"""Loads checkpoints from inputs and returns a model with averaged weights.
Args:
inputs: An iterable of string paths of checkpoints to load from.
Returns:
A dict of string keys mapping to various values. The 'model' key
from the returned dict should correspond to an OrderedDict mapping
string parameter names to torch Tensors.
"""
params_dict = collections.OrderedDict()
params_keys = None
new_state = None
num_models = len(inputs)
for f in inputs:
state = torch.load(
f,
map_location=(
lambda s, _: torch.serialization.default_restore_location(s, 'cpu')
),
)
# Copies over the settings from the first checkpoint
if new_state is None:
new_state = state
model_params = state['model']
model_params_keys = list(model_params.keys())
if params_keys is None:
params_keys = model_params_keys
elif params_keys != model_params_keys:
raise KeyError(
'For checkpoint {}, expected list of params: {}, '
'but found: {}'.format(f, params_keys, model_params_keys)
)
for k in params_keys:
p = model_params[k]
if isinstance(p, torch.HalfTensor):
p = p.float()
if k not in params_dict:
params_dict[k] = p.clone()
# NOTE: clone() is needed in case of p is a shared parameter
else:
params_dict[k] += p
averaged_params = collections.OrderedDict()
for k, v in params_dict.items():
averaged_params[k] = v
averaged_params[k].div_(num_models)
new_state['model'] = averaged_params
return new_state
def last_n_checkpoints(paths, n, update_based, upper_bound=None):
assert len(paths) == 1
path = paths[0]
if update_based:
pt_regexp = re.compile(r'checkpoint_\d+_(\d+)\.pt')
else:
pt_regexp = re.compile(r'checkpoint(\d+)\.pt')
files = os.listdir(path)
entries = []
for f in files:
m = pt_regexp.fullmatch(f)
if m is not None:
sort_key = int(m.group(1))
if upper_bound is None or sort_key <= upper_bound:
entries.append((sort_key, m.group(0)))
if len(entries) < n:
        raise Exception('Found {} checkpoint files but need at least {}'.format(len(entries), n))
return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]]
def main():
parser = argparse.ArgumentParser(
description='Tool to average the params of input checkpoints to '
'produce a new checkpoint',
)
# fmt: off
parser.add_argument('--inputs', required=True, nargs='+',
help='Input checkpoint file paths.')
parser.add_argument('--output', required=True, metavar='FILE',
help='Write the new checkpoint containing the averaged weights to this path.')
num_group = parser.add_mutually_exclusive_group()
num_group.add_argument('--num-epoch-checkpoints', type=int,
help='if set, will try to find checkpoints with names checkpoint_xx.pt in the path specified by input, '
'and average last this many of them.')
num_group.add_argument('--num-update-checkpoints', type=int,
help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by input, '
'and average last this many of them.')
parser.add_argument('--checkpoint-upper-bound', type=int,
help='when using --num-epoch-checkpoints, this will set an upper bound on which checkpoint to use, '
'e.g., with --num-epoch-checkpoints=10 --checkpoint-upper-bound=50, checkpoints 41-50 would be averaged.')
# fmt: on
args = parser.parse_args()
print(args)
num = None
is_update_based = False
if args.num_update_checkpoints is not None:
num = args.num_update_checkpoints
is_update_based = True
elif args.num_epoch_checkpoints is not None:
num = args.num_epoch_checkpoints
assert args.checkpoint_upper_bound is None or args.num_epoch_checkpoints is not None, \
'--checkpoint-upper-bound requires --num-epoch-checkpoints'
assert args.num_epoch_checkpoints is None or args.num_update_checkpoints is None, \
'Cannot combine --num-epoch-checkpoints and --num-update-checkpoints'
if num is not None:
args.inputs = last_n_checkpoints(
args.inputs, num, is_update_based, upper_bound=args.checkpoint_upper_bound,
)
print('averaging checkpoints: ', args.inputs)
new_state = average_checkpoints(args.inputs)
torch.save(new_state, args.output)
print('Finished writing averaged checkpoint to {}.'.format(args.output))
if __name__ == '__main__':
main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/scripts/average_checkpoints.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Data pre-processing: build train and valid manifests of audio files for wav2vec training.
"""
import argparse
import glob
import os
import soundfile
import random
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument('root', metavar='DIR', help='root directory containing flac files to index')
parser.add_argument('--valid-percent', default=0.01, type=float, metavar='D',
help='percentage of data to use as validation set (between 0 and 1)')
parser.add_argument('--dest', default='.', type=str, metavar='DIR', help='output directory')
parser.add_argument('--ext', default='flac', type=str, metavar='EXT', help='extension to look for')
parser.add_argument('--seed', default=42, type=int, metavar='N', help='random seed')
parser.add_argument('--path-must-contain', default=None, type=str, metavar='FRAG',
help='if set, path must contain this substring for a file to be included in the manifest')
return parser
def main(args):
assert args.valid_percent >= 0 and args.valid_percent <= 1.
dir_path = os.path.realpath(args.root)
search_path = os.path.join(dir_path, '**/*.' + args.ext)
rand = random.Random(args.seed)
with open(os.path.join(args.dest, 'train.tsv'), 'w') as train_f, open(
os.path.join(args.dest, 'valid.tsv'), 'w') as valid_f:
print(dir_path, file=train_f)
print(dir_path, file=valid_f)
for fname in glob.iglob(search_path, recursive=True):
file_path = os.path.realpath(fname)
if args.path_must_contain and args.path_must_contain not in file_path:
continue
frames = soundfile.info(fname).frames
dest = train_f if rand.random() > args.valid_percent else valid_f
print('{}\t{}'.format(os.path.relpath(file_path, dir_path), frames), file=dest)
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
main(args)
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/scripts/wav2vec_manifest.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from fairseq.data import data_utils, Dictionary, indexed_dataset
def get_parser():
parser = argparse.ArgumentParser(
description='writes text from binarized file to stdout')
# fmt: off
parser.add_argument('--dataset-impl', help='dataset implementation',
choices=indexed_dataset.get_available_dataset_impl())
parser.add_argument('--dict', metavar='FP', help='dictionary containing known words', default=None)
parser.add_argument('--input', metavar='FP', required=True, help='binarized file to read')
# fmt: on
return parser
def main():
parser = get_parser()
args = parser.parse_args()
dictionary = Dictionary.load(args.dict) if args.dict is not None else None
dataset = data_utils.load_indexed_dataset(
args.input,
dictionary,
dataset_impl=args.dataset_impl,
default='lazy',
)
for tensor_line in dataset:
if dictionary is None:
line = ' '.join([str(int(x)) for x in tensor_line])
else:
line = dictionary.string(tensor_line)
print(line)
if __name__ == '__main__':
main()
| EXA-1-master | exa/models/unilm-master/infoxlm/fairseq/scripts/read_binarized.py |
import setuptools
setuptools.setup(
name="infoxlm",
version="0.0.1",
author="Zewen",
author_email="[email protected]",
description="infoxlm",
url="https://github.com/CZWin32768/XLM-Align",
packages=setuptools.find_packages(),
install_requires=[],
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
)
) | EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/setup.py |
import infoxlm
from fairseq_cli.train import cli_main
if __name__ == "__main__":
cli_main() | EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/train.py |
import infoxlm.tasks
import infoxlm.models
import infoxlm.criterions | EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/__init__.py |
import torch
from fairseq import utils
@torch.no_grad()
def concat_all_gather(tensor):
"""
Performs all_gather operation on the provided tensors.
*** Warning ***: torch.distributed.all_gather has no gradient.
"""
if torch.cuda.device_count() > 1:
return varsize_tensor_all_gather(tensor)
else:
output = tensor
return output
@torch.no_grad()
def tensor_all_gather(tensor):
tensors_gather = [torch.ones_like(tensor)
for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(tensors_gather, tensor, async_op=False)
output = torch.cat(tensors_gather, dim=0)
return output
@torch.no_grad()
def varsize_tensor_all_gather(tensor):
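  # Ranks may hold different numbers of rows: gather the per-rank sizes first, pad every
  # local tensor to the global max, all-gather, then slice away the padding per rank.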
  # cuda_device = f'cuda:{torch.distributed.get_rank()}'
cuda_device = 'cuda'
if tensor is None:
size_tens = torch.tensor([0], dtype=torch.int64, device=cuda_device)
else:
size_tens = torch.tensor([tensor.shape[0]], dtype=torch.int64, device=cuda_device)
# print("size_tens", flush=True)
# print(size_tens, flush=True)
size_tens = tensor_all_gather(size_tens).cpu()
max_size = size_tens.max()
padded = torch.empty(max_size, *tensor.shape[1:],
dtype=tensor.dtype,
device=cuda_device)
if tensor is not None:
padded[:tensor.shape[0]] = tensor
# print("padded:", flush=True)
# print(padded, flush=True)
ag = tensor_all_gather(padded)
# print("ag:", flush=True)
# print(ag, flush=True)
slices = []
for i, sz in enumerate(size_tens):
start_idx = i * max_size
end_idx = start_idx + sz.item()
if end_idx > start_idx:
slices.append(ag[start_idx:end_idx])
ret = torch.cat(slices, dim=0)
return ret.to(tensor)
def _get_logging_loss(loss, reduce=True):
if loss is None: return 0
return utils.item(loss.data) if reduce else loss.data
def construct_idx_tensor_from_list(idx_list2d, lens, pad_idx, device=None):
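  # pad each index list with pad_idx up to the batch max length and return a LongTensor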
max_len = max(lens)
padded_list = [list_i + [pad_idx] * (max_len - lens[i]) for i, list_i in enumerate(idx_list2d)]
tensor = torch.LongTensor(padded_list)
if device is not None:
tensor = tensor.to(device=device)
return tensor
def move_to_device(sample, device):
def _move_to_device(tensor):
return tensor.to(device=device)
return utils.apply_to_sample(_move_to_device, sample)
| EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/utils.py |
import os
from fairseq.tasks import register_task, FairseqTask
from fairseq.data.dictionary import Dictionary
from infoxlm.data import mlm_utils
from infoxlm.data.dict_dataset import DictDataset
from infoxlm.tasks.mlm import Mlm
@register_task("tlm")
class Tlm(Mlm):
@staticmethod
def add_args(parser):
Mlm.add_args(parser)
parser.add_argument('--tlm_data', type=str, default="")
def train_step(self, sample, model, criterion, optimizer, ignore_grad=False):
model.train()
agg_loss, agg_sample_size, agg_logging_output = 0., 0., {}
# tlm step
loss, sample_size, logging_output = criterion(model, sample["tlm"])
if ignore_grad: loss *= 0
tlm_loss = loss
optimizer.backward(tlm_loss)
agg_loss += tlm_loss.detach().item()
agg_sample_size += sample_size
agg_logging_output.update(logging_output)
# mlm_step
loss, sample_size, logging_output = criterion(model, sample["mlm"])
if ignore_grad: loss *= 0
optimizer.backward(loss)
agg_loss += loss.detach().item()
agg_sample_size += sample_size
for key, value in logging_output.items():
agg_logging_output[key] += value
return agg_loss, agg_sample_size, agg_logging_output
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
print("| Loading dataset at epoch %d" % epoch, flush=True)
args = self.args
sid = 0
dataset_path = os.path.join(args.data, "train.%d" % sid)
mlm_dataset = mlm_utils.get_mlm_dataset(
args, dataset_path, self.dictionary, self.mask_idx, self.mww, combine=False)
dataset_path = os.path.join(args.tlm_data, "train.%d" % sid)
tlm_dataset = mlm_utils.get_mlm_dataset(
args, dataset_path, self.dictionary, self.mask_idx, self.mww, combine=False)
dataset = DictDataset({
"tlm": tlm_dataset,
"mlm": mlm_dataset,
})
self.datasets[split] = dataset
| EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/tasks/tlm.py |
import os
from functools import lru_cache
import numpy as np
import torch
from fairseq import utils
from fairseq.data.data_utils import process_bpe_symbol
from fairseq.data.dictionary import Dictionary
from fairseq.tasks import FairseqTask, register_task
from infoxlm.data import mlm_utils
from infoxlm.data.dict_dataset import DictDataset
from infoxlm.data.xlm_align import get_xlm_align_dataset_with_mask
def extract_wa_from_pi_xi(pi, xi):
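  # keep only mutually-best pairs: the row-wise argmax of pi intersected with the column-wise argmax of xi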
m, n = pi.size()
forward = torch.eye(n)[pi.argmax(dim=1)]
backward = torch.eye(m)[xi.argmax(dim=0)]
inter = forward * backward.transpose(0, 1)
ret = []
for i in range(m):
for j in range(n):
if inter[i, j].item() > 0:
ret.append((i, j))
return ret
def _sinkhorn_iter(S, num_iter=2):
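  # clamp non-positive similarities, then alternately normalize columns and rows so the
  # matrix approaches a doubly-stochastic (soft alignment) matrix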
assert S.dim() == 2
S[S <= 0] = 1e-6
pi = S
xi = pi
for i in range(num_iter):
pi_sum_over_i = pi.sum(dim=0, keepdim=True)
xi = pi / pi_sum_over_i
xi_sum_over_j = xi.sum(dim=1, keepdim=True)
pi = xi / xi_sum_over_j
return pi, xi
@register_task('xlm_align')
class XlmAlignTask(FairseqTask):
@staticmethod
def add_args(parser):
# MLM args
mlm_utils.add_mlm_args(parser)
parser.add_argument('data', help='colon separated path to data directories list, '
'will be iterated upon during epochs in round-robin manner')
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of total tokens over all segments per sample')
# apply prepend bos + tokenblock
parser.add_argument('--apply_ptb', default=False, action='store_true')
# TLM args
parser.add_argument('--tlm_data', type=str, default="")
# Word Alignment Self-Labeling
parser.add_argument('--wa_layer', type=int, default=8, help="the layer to obtain word alignment")
parser.add_argument('--wa_max_count', type=int, default=2, help="max_count for itermax")
parser.add_argument('--align_enable_step', default=-1, type=int)
parser.add_argument('--feed_inner_states', default=False, action='store_true')
parser.add_argument('--sinkhorn_iter', type=int, default=2, help="num of sinkhorn iterations")
@classmethod
def setup_task(cls, args, **kwargs):
dictionary = Dictionary.load(os.path.join(args.data, 'dict.txt'))
print('| Dictionary: {} types'.format(len(dictionary)), flush=True)
return cls(args, dictionary)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.mask_idx = self.dictionary.add_symbol('<mask>')
self.seed = args.seed
self.mww = self._get_whole_word_mask()
self.sa_model = None
self._enable_align = False
def prepare_train(self, model, criterion):
print("| Prepare train ...", flush=True)
self.model = model
model.train()
def _get_whole_word_mask(self):
# create masked input and targets
if self.args.mask_whole_words:
print("| Get whole word mask ...")
return mlm_utils.get_whole_word_mask(self.args, self.dictionary)
return None
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
print("| Loading dataset at epoch %d" % epoch, flush=True)
args = self.args
sid = 0
dataset_path = os.path.join(args.data, "train.%d" % sid)
mlm_dataset = mlm_utils.get_mlm_dataset(
args, dataset_path, self.dictionary, self.mask_idx, self.mww, combine=False)
dataset_path = os.path.join(args.tlm_data, "train.%d" % sid)
sa_dataset = get_xlm_align_dataset_with_mask(args, dataset_path, self.dictionary, self.mask_idx, combine=False)
dataset = DictDataset({
"mlm": mlm_dataset,
"sa": sa_dataset
})
# NOTE Set dataset epoch as sid for different random state
# of each shard, because when local indices are the same, the
# random states are the same.
dataset.set_epoch(sid)
self.datasets[split] = dataset
def iter_max(self, sim_matrix):
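    # itermax-style extraction: start from the intersection of row/column argmaxes, then
    # repeatedly add mutual-argmax pairs restricted to still-unaligned rows/columns,
    # for at most max_count rounds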
sim_matrix = sim_matrix.cpu().detach().numpy()
max_count = self.args.wa_max_count
alpha_ratio = 0.9
m, n = sim_matrix.shape
forward = np.eye(n)[sim_matrix.argmax(axis=1)] # m x n
backward = np.eye(m)[sim_matrix.argmax(axis=0)] # n x m
inter = forward * backward.transpose()
# if min(m, n) <= 2:
# return inter
if min(m, n) > 2:
new_inter = np.zeros((m, n))
count = 1
while count < max_count:
mask_x = 1.0 - np.tile(inter.sum(1)[:, np.newaxis], (1, n)).clip(0.0, 1.0)
mask_y = 1.0 - np.tile(inter.sum(0)[np.newaxis, :], (m, 1)).clip(0.0, 1.0)
mask = ((alpha_ratio * mask_x) + (alpha_ratio * mask_y)).clip(0.0, 1.0)
mask_zeros = 1.0 - ((1.0 - mask_x) * (1.0 - mask_y))
if mask_x.sum() < 1.0 or mask_y.sum() < 1.0:
mask *= 0.0
mask_zeros *= 0.0
new_sim = sim_matrix * mask
fwd = np.eye(n)[new_sim.argmax(axis=1)] * mask_zeros
bac = np.eye(m)[new_sim.argmax(axis=0)].transpose() * mask_zeros
new_inter = fwd * bac
if np.array_equal(inter + new_inter, inter):
break
inter = inter + new_inter
count += 1
ret = []
for i in range(m):
for j in range(n):
if inter[i, j] > 0:
ret.append((i, j))
return inter, ret
def get_gold_or_silver_wa(self, sample, batch_sim, src_fr, src_to, trg_fr, trg_to):
gold_wa = []
for i, sim in enumerate(batch_sim):
sim_wo_offset = sim[src_fr[i]: src_to[i], trg_fr[i]: trg_to[i]]
if src_to[i] - src_fr[i] <= 0 or trg_to[i] - trg_fr[i] <= 0:
print("[W] src or trg len=0")
gold_wa.append([])
continue
pi, xi = _sinkhorn_iter(sim_wo_offset, self.args.sinkhorn_iter)
gold_wa_i_wo_offset = self._extract_wa_from_pi_xi(pi, xi)
gold_wa_i = []
for src_idx, trg_idx in gold_wa_i_wo_offset:
gold_wa_i.append((src_idx + src_fr[i], trg_idx + trg_fr[i]))
gold_wa.append(gold_wa_i)
return gold_wa
def get_aligned_tokens(self, sample, model, use_csls=False, return_inner_states=False):
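    # encode the concatenated sentence pair, take hidden states from wa_layer, and use
    # token-to-token dot-product similarities (within the src/trg offsets) to extract alignments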
_, inner_states = model(**sample['net_input'],
features_only=True, return_all_hiddens=True)
# rep: batch, hidden, length
rep = inner_states["inner_states"][self.args.wa_layer]
src_fr, src_to, trg_fr, trg_to = sample["offsets"]
# rep: batch, length, hidden
rep = rep.transpose(0, 1)
if use_csls: raise NotImplementedError
batch_sim = torch.bmm(rep, rep.transpose(1,2))
wa = self.get_gold_or_silver_wa(sample, batch_sim, src_fr, src_to, trg_fr, trg_to)
if return_inner_states: return wa, inner_states
else: return wa
def _extract_wa_from_pi_xi(self, pi, xi):
# return extract_wa_from_pi_xi(pi, xi)
_, wa = self.iter_max(pi)
return wa
def _set_enable_align(self, num_updates):
if num_updates < self.args.align_enable_step: self._enable_align = False
else: self._enable_align = True
def update_step(self, num_updates):
self._set_enable_align(num_updates)
def train_step(self, sample, model, criterion, optimizer, ignore_grad=False):
if self.sa_model is None:
self.sa_model = model
agg_loss, agg_sample_size, agg_logging_output = 0., 0., {}
if self._enable_align:
self.sa_model.eval()
if self.args.feed_inner_states:
with torch.no_grad():
aligned_tokens, inner_states = self.get_aligned_tokens(sample["sa"], self.sa_model, return_inner_states=True)
model.train()
loss, sample_size, logging_output = criterion(
model, sample["sa"], reduce=True, aligned_tokens=aligned_tokens, inner_states=inner_states)
else:
with torch.no_grad():
aligned_tokens = self.get_aligned_tokens(sample["sa"], self.sa_model)
model.train()
loss, sample_size, logging_output = criterion(
model, sample["sa"], reduce=True, aligned_tokens=aligned_tokens)
if ignore_grad: loss *= 0
optimizer.backward(loss)
else:
model.train()
loss, sample_size, logging_output = criterion(model, sample["sa"], tlm=True)
if ignore_grad: loss *= 0
optimizer.backward(loss)
agg_loss += loss.detach().item()
agg_sample_size += sample_size
agg_logging_output.update(logging_output)
loss, sample_size, logging_output = criterion(model, sample["mlm"], mlm=True)
if ignore_grad: loss *= 0
optimizer.backward(loss)
agg_loss += loss.detach().item()
agg_sample_size += sample_size
agg_logging_output.update(logging_output)
return agg_loss, agg_sample_size, agg_logging_output
| EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/tasks/xlm_align.py |
import os
from fairseq.tasks import register_task, FairseqTask
from fairseq.data.dictionary import Dictionary
from infoxlm.data import mlm_utils
@register_task("mlm")
class Mlm(FairseqTask):
@staticmethod
def add_args(parser):
mlm_utils.add_mlm_args(parser)
parser.add_argument('data', help='colon separated path to data directories list, '
'will be iterated upon during epochs in round-robin manner')
parser.add_argument('--tokens-per-sample', default=512, type=int,
help='max number of total tokens over all segments per sample')
# apply prepend bos + tokenblock
parser.add_argument('--apply_ptb', default=False, action='store_true')
@classmethod
def setup_task(cls, args, **kwargs):
dictionary = Dictionary.load(os.path.join(args.data, 'dict.txt'))
print('| Dictionary: {} types'.format(len(dictionary)), flush=True)
return cls(args, dictionary)
def __init__(self, args, dictionary):
super().__init__(args)
self.dictionary = dictionary
self.mask_idx = self.dictionary.add_symbol('<mask>')
self.seed = args.seed
self.mww = self._get_whole_word_mask()
def _get_whole_word_mask(self):
# create masked input and targets
if self.args.mask_whole_words:
print("| Get whole work mask ...")
return mlm_utils.get_whole_word_mask(self.args, self.dictionary)
return None
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
print("| Loading dataset at epoch %d" % epoch, flush=True)
args = self.args
sid = 0
dataset_path = os.path.join(args.data, "train.%d" % sid)
dataset = mlm_utils.get_mlm_dataset(
args, dataset_path, self.dictionary, self.mask_idx, self.mww, combine=False)
self.datasets[split] = dataset
@property
def source_dictionary(self):
return self.dictionary
@property
def target_dictionary(self):
return self.dictionary
| EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/tasks/mlm.py |
import argparse
import importlib
import os
from fairseq.tasks import TASK_REGISTRY
# automatically import any Python files in the tasks/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
task_name = file[:file.find('.py')]
importlib.import_module('infoxlm.tasks.' + task_name)
# expose `task_parser` for sphinx
if task_name in TASK_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_task = parser.add_argument_group('Task name')
# fmt: off
group_task.add_argument('--task', metavar=task_name,
help='Enable this task with: ``--task=' + task_name + '``')
# fmt: on
group_args = parser.add_argument_group('Additional command-line arguments')
TASK_REGISTRY[task_name].add_args(group_args)
globals()[task_name + '_parser'] = parser | EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/tasks/__init__.py |
import os
import torch
from functools import lru_cache
from fairseq.tasks import register_task, FairseqTask
from fairseq.data.dictionary import Dictionary
from fairseq.data import FairseqDataset
from fairseq import utils
from infoxlm.data import mlm_utils
from infoxlm.data.dict_dataset import DictDataset
from infoxlm.data.xlco_dataset import get_xlco_dataset
from infoxlm.tasks.mlm import Mlm
def _prepare_sample(sample, cuda=True, fp16=True):
if sample is None or len(sample) == 0:
return None
if cuda:
sample = utils.move_to_cuda(sample)
def apply_half(t):
if t.dtype is torch.float32:
return t.half()
return t
if fp16:
sample = utils.apply_to_sample(apply_half, sample)
return sample
@register_task("infoxlm")
class InfoXLM(Mlm):
@staticmethod
def add_args(parser):
Mlm.add_args(parser)
parser.add_argument('--tlm_data', type=str, default="")
parser.add_argument('--xlco_data', type=str, default="")
# e.g. constant,0.999
# e.g. linear,0,700000,0.999,1.0
parser.add_argument('--xlco_momentum', default="constant,0.999", type=str)
parser.add_argument('--xlco_enable_step', default=-1, type=int)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
    # NOTE workaround for model building
# Actually, self.langs represents the keys of proj heads
self.model_langs = ["share_lang"]
self.xlco_lambda = self.args.xlco_lambda
# parse xlco_momentum
cxlm_args = self.args.xlco_momentum.split(",")
# self.constant_xlco_momentum = True
self.cxlm_scheduler = "constant"
self.constant_momentum_refresh_interval = -1
if cxlm_args[0] == "constant":
self._xlco_momentum = float(cxlm_args[1])
print("Momentum args: consant momentum: %.4f" % (self._xlco_momentum), flush=True)
elif cxlm_args[0] == "linear":
# self.constant_xlco_momentum = False
self.cxlm_scheduler = "linear"
self._mom_schedule_begin, self._mom_schedule_end, self._xlco_momentum_min, self._xlco_momentum_max = map(float, cxlm_args[1:])
print("Momentum args: linear self._mom_schedule_begin: %.4f, self._mom_schedule_end: %.4f, self._xlco_momentum_min: %.4f, self._xlco_momentum_max: %.4f " % (self._mom_schedule_begin, self._mom_schedule_end, self._xlco_momentum_min, self._xlco_momentum_max), flush=True)
assert self._mom_schedule_end >= self._mom_schedule_begin
elif cxlm_args[0] == "constant_with_refresh":
self._xlco_momentum = float(cxlm_args[1])
self.constant_momentum_refresh_interval = int(cxlm_args[2])
print("Momentum args: consant momentum: %.4f, refresh interval: %d" % (self._xlco_momentum, self.constant_momentum_refresh_interval), flush=True)
elif cxlm_args[0] == "exponential":
# example exponential,0.51,0.0,0.9995
self.cxlm_scheduler = "exponential"
self._xlco_momentum_alpha, self._xlco_momentum_min, self._xlco_momentum_max = map(float, cxlm_args[1:])
print("Momentum args: exponential self._xlco_momentum_alpha: %.4f, self._xlco_momentum_min: %.4f, self._xlco_momentum_max: %.4f " % (self._xlco_momentum_alpha, self._xlco_momentum_min, self._xlco_momentum_max), flush=True)
else:
raise NotImplementedError
self._cur_momentum = self.get_xlco_momentum(0)
print("Test get_xlco_momentum ...")
for i in range(10):
num_updates = i * 100000
print("num_updates: %d get_xlco_momentum:%f" % (i, self.get_xlco_momentum(num_updates)))
def get_xlco_momentum(self, num_updates):
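    # momentum coefficient for updating the slow (momentum) encoder copy; supports
    # constant, constant-with-refresh, linear and exponential schedules over num_updates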
if self.cxlm_scheduler == "constant":
if self.constant_momentum_refresh_interval == -1:
return self._xlco_momentum
else:
if num_updates % self.constant_momentum_refresh_interval == 0:
return 0.0
else:
return self._xlco_momentum
elif self.cxlm_scheduler == "linear":
if num_updates <= self._mom_schedule_begin:
return self._xlco_momentum_min
elif num_updates >= self._mom_schedule_end:
return self._xlco_momentum_max
else:
return (num_updates - self._mom_schedule_begin) * (self._xlco_momentum_max - self._xlco_momentum_min) / (self._mom_schedule_end - self._mom_schedule_begin) + self._xlco_momentum_min
elif self.cxlm_scheduler == "exponential":
if num_updates <= 0: return self._xlco_momentum_min
mom = 1.0 - num_updates ** (-self._xlco_momentum_alpha)
mom = max(mom, self._xlco_momentum_min)
mom = min(mom, self._xlco_momentum_max)
return mom
else:
raise ValueError
def prepare_train(self, model, criterion):
print("| Prepare train ...", flush=True)
# DEBUG
# print("Test get_xlco_momentum ...")
# for i in range(10):
# num_updates = i * 100000
# print("num_updates: %d get_xlco_momentum:%f" % (i, self.get_xlco_momentum(num_updates)))
self.model = model
model.train()
if not model.is_queue_ready():
self.fill_queue(criterion)
assert model.is_queue_ready()
def fill_queue(self, criterion):
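    # run the criterion under no_grad over xlco batches until the contrastive queue
    # (xlco_queue_size entries) is filled with encoded representations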
print("| Filling language queue ... ")
fill_opt_cnt = 0
dummy_batch = None
epoch_itr = self.get_batch_iterator(
dataset=self.load_xlco_dataset(self.args.train_subset),
max_tokens=self.args.max_tokens,
max_sentences=self.args.max_sentences,
max_positions=utils.resolve_max_positions(
self.max_positions(), self.model.max_positions()
),
ignore_invalid_inputs=True,
required_batch_size_multiple=self.args.required_batch_size_multiple,
seed=self.args.seed,
num_shards=self.args.distributed_world_size,
shard_id=self.args.distributed_rank,
num_workers=0,
epoch=0,)
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=self.args.fix_batches_to_gpus,
shuffle=False,)
# DEBUG
# NOTE add a ref to prevent deletion
# self._fill_queue_itr = itr
ddp_size = 1 if not hasattr(self.args, "distributed_world_size") else self.args.distributed_world_size
tot_fill_opt = criterion.xlco_queue_size // self.args.max_sentences // ddp_size + 100
# print("| %d filling opt in total." % tot_fill_opt, flush=True)
for _ in range(tot_fill_opt):
sample = next(itr)
if dummy_batch is None: dummy_batch = sample
sample = _prepare_sample(sample)
if sample is None:
sample = _prepare_sample(dummy_batch)
print("| [W] a dummy batch used", flush=True)
with torch.no_grad():
criterion(self.model, sample)
if fill_opt_cnt % 100 == 0:
print("| Filling queue, fill_opt_cnt: %d" % fill_opt_cnt, flush=True)
fill_opt_cnt += 1
print("| %d filling opt in total." % fill_opt_cnt, flush=True)
assert self.model.is_queue_ready()
print("| queue.mean(): %f, queue.var(): %f" % (self.model.queue.mean().item(), self.model.queue.var().item()))
del itr
del epoch_itr
def update_step(self, num_updates):
if num_updates < self.args.xlco_enable_step:
self.xlco_lambda = 0.0
self._cur_momentum = 0.0
if num_updates + 5 >= self.args.xlco_enable_step:
self.model.update_slow_weight(0.0)
else:
self.xlco_lambda = self.args.xlco_lambda
self._cur_momentum = self.get_xlco_momentum(num_updates)
self.model.update_slow_weight(self._cur_momentum)
# pass
def train_step(self, sample, model, criterion, optimizer, ignore_grad=False):
model.train()
agg_loss, agg_sample_size, agg_logging_output = 0., 0., {}
# cxlm_step
loss, sample_size, logging_output = criterion(model, sample["xlco"])
if loss is None:
raise ValueError
if ignore_grad: loss *= 0
cxlm_loss = loss
optimizer.backward(cxlm_loss)
if loss is not None:
agg_loss += cxlm_loss.detach().item()
agg_sample_size += sample_size
agg_logging_output.update(logging_output)
# tlm step
loss, sample_size, logging_output = criterion(model, sample["tlm"], mlm=True)
if ignore_grad: loss *= 0
tlm_loss = loss
optimizer.backward(tlm_loss)
agg_loss += tlm_loss.detach().item()
agg_sample_size += sample_size
agg_logging_output.update(logging_output)
# mlm_step
loss, sample_size, logging_output = criterion(model, sample["mlm"], mlm=True)
if ignore_grad: loss *= 0
optimizer.backward(loss)
agg_loss += loss.detach().item()
agg_sample_size += sample_size
# agg_logging_output.update(logging_output)
for key, value in logging_output.items():
agg_logging_output[key] += value
# print("DEBUG2: %s" % str(agg_logging_output))
agg_logging_output["momentum"] = self._cur_momentum
return agg_loss, agg_sample_size, agg_logging_output
def load_dataset(self, split, epoch=0, combine=False, **kwargs):
print("| Loading dataset at epoch %d" % epoch, flush=True)
args = self.args
sid = 0
dataset_path = os.path.join(args.data, "train.%d" % sid)
mlm_dataset = mlm_utils.get_mlm_dataset(
args, dataset_path, self.dictionary, self.mask_idx, self.mww, combine=False)
dataset_path = os.path.join(args.tlm_data, "train.%d" % sid)
tlm_dataset = mlm_utils.get_mlm_dataset(
args, dataset_path, self.dictionary, self.mask_idx, self.mww, combine=False)
dataset_path = os.path.join(args.xlco_data, "train.%d" % sid)
xlco_dataset = get_xlco_dataset(
args, dataset_path, self.dictionary, self.mask_idx, combine=False)
dataset = DictDataset({
"tlm": tlm_dataset,
"mlm": mlm_dataset,
"xlco": xlco_dataset
})
    # NOTE Use the shard id as the dataset epoch so that every shard gets a
    # different random state; otherwise shards whose local indices coincide
    # would draw identical random states.
dataset.set_epoch(sid)
self.datasets[split] = dataset
def load_xlco_dataset(self, split, epoch=0, combine=False, **kwargs):
args = self.args
dataset_path = os.path.join(args.xlco_data, "train.0")
xlco_dataset = get_xlco_dataset(
args, dataset_path, self.dictionary, self.mask_idx)
return xlco_dataset
| EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/tasks/infoxlm.py |
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import checkpoint_utils
from fairseq import utils
from fairseq.models import (
BaseFairseqModel,
register_model,
register_model_architecture,
)
from fairseq.models.roberta import (
RobertaModel,
RobertaEncoder,
roberta_base_architecture,
roberta_large_architecture,
)
@register_model("reload_roberta")
class ReloadRoberta(RobertaModel):
@staticmethod
def add_args(parser):
RobertaModel.add_args(parser)
parser.add_argument('--roberta-model-path', type=str, default="")
@classmethod
def build_model(cls, args, task):
reload_roberta_base(args)
if not hasattr(args, 'max_positions'):
args.max_positions = args.tokens_per_sample
encoder = RobertaEncoder(args, task.source_dictionary)
model = cls(args, encoder)
if args.roberta_model_path != "":
state = checkpoint_utils.load_checkpoint_to_cpu(args.roberta_model_path)
model.load_state_dict(state["model"], strict=True, args=args)
print(model.__class__)
return model
@classmethod
def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt', data_name_or_path='.', bpe='sentencepiece', **kwargs):
raise NotImplementedError
  # NOTE Workaround for the `size` method of dataset classes: examples are
  # already filtered during preprocessing, so there is no need to filter
  # them again here.
def max_positions(self):
"""Maximum length supported by the model."""
return None
@register_model_architecture("reload_roberta", "reload_roberta_base")
def reload_roberta_base(args):
roberta_base_architecture(args)
@register_model_architecture("reload_roberta", "reload_roberta_large")
def reload_roberta_large(args):
roberta_large_architecture(args)
| EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/models/roberta.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import checkpoint_utils
from fairseq import utils
from fairseq.models import (
BaseFairseqModel,
register_model,
register_model_architecture,
)
from fairseq.models.roberta import (
RobertaModel,
roberta_base_architecture,
roberta_large_architecture
)
from fairseq.modules import LayerNorm
from infoxlm.models.roberta import ReloadRoberta, reload_roberta_base, RobertaEncoder
@register_model("xlm_align")
class XlmAlignModel(ReloadRoberta):
@staticmethod
def add_args(parser):
ReloadRoberta.add_args(parser)
parser.add_argument('--no_linear_proj', default=False, action='store_true')
def __init__(self, args, encoder):
super().__init__(args, encoder)
if args.no_linear_proj:
self.q_linear = self.k_linear = lambda x: x
else:
self.q_linear = nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim,)
self.k_linear = nn.Linear(args.encoder_embed_dim, args.encoder_embed_dim,)
@classmethod
def build_model(cls, args, task):
reload_roberta_base(args)
if not hasattr(args, 'max_positions'):
args.max_positions = args.tokens_per_sample
encoder = RobertaEncoder(args, task.source_dictionary)
model = cls(args, encoder)
if args.roberta_model_path != "":
state = checkpoint_utils.load_checkpoint_to_cpu(args.roberta_model_path)
model.load_state_dict(state["model"], strict=False, args=args)
print(model.__class__)
return model
@register_model_architecture("xlm_align", "xlm_align_base")
def xlm_align_base(args):
roberta_base_architecture(args)
@register_model_architecture("xlm_align", "xlm_align_large")
def xlm_align_large(args):
roberta_large_architecture(args)
| EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/models/xlm_align.py |
import argparse
import importlib
import os
from fairseq.models import MODEL_REGISTRY, ARCH_MODEL_INV_REGISTRY
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if not file.startswith('_') and not file.startswith('.') and (file.endswith('.py') or os.path.isdir(path)):
model_name = file[:file.find('.py')] if file.endswith('.py') else file
module = importlib.import_module('infoxlm.models.' + model_name)
# extra `model_parser` for sphinx
if model_name in MODEL_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_archs = parser.add_argument_group('Named architectures')
group_archs.add_argument('--arch', choices=ARCH_MODEL_INV_REGISTRY[model_name])
group_args = parser.add_argument_group('Additional command-line arguments')
MODEL_REGISTRY[model_name].add_args(group_args)
globals()[model_name + '_parser'] = parser
| EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/models/__init__.py |
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq import checkpoint_utils
from fairseq import utils
from fairseq.models import (
BaseFairseqModel,
register_model,
register_model_architecture,
)
from fairseq.models.roberta import (
RobertaModel,
roberta_base_architecture,
roberta_large_architecture
)
from infoxlm.utils import concat_all_gather
def build_projection_dict(langs, dim, activation_fn, fp16=False):
proj_dict = {}
cnt = 0
for lang in langs:
proj_dict[lang] = cnt
cnt += 1
proj_matrix_slow = torch.randn(cnt, dim, dim)
proj_matrix_slow.normal_(mean=0.0, std=0.02)
proj_matrix_slow = nn.Parameter(proj_matrix_slow, requires_grad=False)
proj_matrix_fast = nn.Parameter(proj_matrix_slow.data.clone(), requires_grad=True)
return proj_dict, proj_matrix_fast, proj_matrix_slow
@register_model("infoxlm")
class InfoXlmModel(BaseFairseqModel):
def __init__(self, model_fast, model_slow, queue, proj=None):
super().__init__()
self.model_slow:nn.Module = model_slow
self.model_fast:nn.Module = model_fast
self.use_proj = False
self.share_proj = True
self.queue_size = queue.size(0)
self.register_buffer("queue", queue)
self.register_buffer("enqueue_cnt", torch.zeros(1, dtype=torch.long))
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
if proj is not None:
self.use_proj = True
self.proj_dict, proj_matrix_fast, proj_matrix_slow = proj
# if "share_lang" in self.proj_dict: self.share_proj = True
assert "share_lang" in self.proj_dict
self.register_parameter("proj_matrix_fast", proj_matrix_fast)
self.register_parameter("proj_matrix_slow", proj_matrix_slow)
for param in self.model_slow.parameters():
param.requires_grad = False
@staticmethod
def add_args(parser):
parser.add_argument('--roberta-model-path', type=str, default="")
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--max-positions', type=int,
help='number of positional embeddings to learn')
parser.add_argument('--activation-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN')
parser.add_argument('--use_proj', default=False, action='store_true')
def is_queue_ready(self):
return int(self.enqueue_cnt) >= self.queue_size
@torch.no_grad()
def update_queue(self, k):
k = concat_all_gather(k)
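    # Keys are gathered across data-parallel workers so every replica
    # enqueues the same global batch, then written into a fixed-size
    # circular buffer: when the write would run past the end of the queue,
    # it wraps around to the beginning.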
batch_size = k.size(0)
ptr = int(self.queue_ptr)
# assert self.queue_size % batch_size == 0
if ptr + batch_size <= self.queue_size:
self.queue[ptr:ptr+batch_size, :] = k
ptr = (ptr + batch_size) % self.queue_size
else:
left_len = self.queue_size - ptr
self.queue[ptr:, :] = k[:left_len, :]
      ptr = batch_size - left_len
self.queue[:ptr, :] = k[left_len:, :]
self.queue_ptr[0] = ptr
self.enqueue_cnt += batch_size
@classmethod
def build_model(cls, args, task):
model_fast = RobertaModel.build_model(args, task)
model_slow = RobertaModel.build_model(args, task)
if args.roberta_model_path != "":
state = checkpoint_utils.load_checkpoint_to_cpu(args.roberta_model_path)
model_fast.load_state_dict(state["model"], strict=True, args=args)
model_slow.load_state_dict(state["model"], strict=True, args=args)
else:
model_slow.load_state_dict(model_fast.state_dict(), strict=True, args=args)
proj = None
if args.use_proj:
      # NOTE always use the shared projection ("share_lang")
langs = ["share_lang"]
proj = build_projection_dict(langs, args.encoder_embed_dim, args.activation_fn, args.fp16)
if "xlco_queue_size" in args:
xlco_queue_size = args.xlco_queue_size
else: xlco_queue_size = 1
print("xlco_queue_size is set as %d" % xlco_queue_size, flush=True)
queue = torch.randn(xlco_queue_size, args.encoder_embed_dim)
return cls(model_fast, model_slow, queue, proj=proj)
@classmethod
def from_pretrained(cls, model_name_or_path, checkpoint_file='model.pt',
data_name_or_path='.', bpe='sentencepiece', **kwargs):
raise NotImplementedError
def forward(self, src_tokens, use_model_fast=True, **kwargs):
forward_model = self.model_fast if use_model_fast else self.model_slow
return forward_model(src_tokens, **kwargs)
def forward_proj(self, rep, lang, use_model_fast=True, **kwargs):
proj_matrix = self.proj_matrix_fast if use_model_fast else self.proj_matrix_slow
if self.share_proj: lang = "share_lang"
if isinstance(lang, str):
return torch.mm(rep, proj_matrix[self.proj_dict[lang],:,:])
else:
proj_indices = [self.proj_dict[l] for l in lang]
batch_rep = rep.unsqueeze(1)
return torch.bmm(batch_rep, proj_matrix[proj_indices,:,:])[:,0,:]
def output_layer(self, features, use_model_fast=True, **kwargs):
forward_model = self.model_fast if use_model_fast else self.model_slow
return forward_model.decoder.output_layer(features, **kwargs)
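  # Momentum (EMA) update of the slow encoder, in the spirit of MoCo:
  #   p_slow <- momentum * p_slow + (1 - momentum) * p_fast
  # applied without gradients; the projection matrix is updated the same way.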
@torch.no_grad()
def update_slow_weight(self, momentum):
for p1, p2 in zip(self.model_fast.parameters(), self.model_slow.parameters()):
assert p2.requires_grad == False
new_p2_data = p2.data * momentum + p1.data * (1. - momentum)
p2.data.copy_(new_p2_data)
if self.use_proj:
p1 = self.proj_matrix_fast.data
p2 = self.proj_matrix_slow.data
assert p2.requires_grad == False
new_p2_data = p2.data * momentum + p1.data * (1. - momentum)
p2.data.copy_(new_p2_data)
@register_model_architecture("infoxlm", "infoxlm_base")
def infoxlm_base(args):
roberta_base_architecture(args)
@register_model_architecture("infoxlm", "infoxlm_large")
def infoxlm_large(args):
roberta_large_architecture(args)
| EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/models/infoxlm.py |
import torch
from fairseq.data import FairseqDataset
class TLMDataset(FairseqDataset):
def __init__(self, src_dataset, tgt_dataset, bos, eos):
assert len(src_dataset) == len(tgt_dataset)
self.src_dataset = src_dataset
self.tgt_dataset = tgt_dataset
self.bos = bos
self.eos = eos
self._sizes = src_dataset.sizes + tgt_dataset.sizes
def __len__(self):
return len(self.src_dataset)
@property
def sizes(self):
return self._sizes
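  # Each item packs one translation pair into a single sequence,
  #   <bos> source tokens <eos> target tokens <eos>
  # which is the usual input layout for translation language modeling (TLM).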
def __getitem__(self, index):
src_item = self.src_dataset[index]
tgt_item = self.tgt_dataset[index]
return torch.cat([
src_item.new([self.bos]), src_item, src_item.new([self.eos]),
tgt_item, tgt_item.new([self.eos]),
])
| EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/data/tlm_dataset.py |
import torch
from fairseq.data import BaseWrapperDataset
from fairseq.data import (data_utils,
TokenBlockDataset, PrependTokenDataset, PadDataset, TruncateDataset,
NumelDataset, NumSamplesDataset, NestedDictionaryDataset,
MaskTokensDataset, AppendTokenDataset, )
from infoxlm.data.mlm_utils import get_mlm_dataset, get_prepended_token_block_dataset
def get_mlm_dataset_with_offset(args, dataset_path, vocab, mask_idx,mask_whole_words=None, combine=False):
ptb_dataset = get_prepended_token_block_dataset(
args, dataset_path, vocab, combine=combine)
src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
ptb_dataset,
vocab=vocab,
pad_idx=vocab.pad(),
mask_idx=mask_idx,
seed=args.seed,
mask_prob=args.mask_prob,
mask_whole_words=mask_whole_words,
)
dataset = NestedDictionaryDataset(
{
'net_input': {
'src_tokens': PadDataset(
src_dataset,
pad_idx=vocab.pad(),
left_pad=False,
),
'src_lengths': NumelDataset(src_dataset, reduce=False),
},
'target': PadDataset(
tgt_dataset,
pad_idx=vocab.pad(),
left_pad=False,
),
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(src_dataset, reduce=True),
'offsets': OffsetDataset(ptb_dataset, vocab),
},
sizes=[src_dataset.sizes],
)
return dataset
class OffsetDataset(BaseWrapperDataset):
def __init__(self, ptb_dataset, vocab):
super().__init__(ptb_dataset)
self.vocab = vocab
def get_check_ptb_offsets(self, ptb_item):
# parse ptb_item
eos_idx = self.vocab.eos()
bos_idx = self.vocab.bos()
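    # The parser expects an item of the form "<s> src </s> tgt </s>", so the
    # two </s> positions delimit the source span [1, first_eos) and the
    # target span [first_eos + 1, second_eos). If the trailing </s> was
    # truncated away, fall back to the end of the item.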
_nonzero = (ptb_item == eos_idx).nonzero()
if len(_nonzero) != 2:
# raise ValueError
      # NOTE workaround: the second </s> is missing (e.g. truncated away)
_nonzero_0 = _nonzero[0].item()
_nonzero_1 = len(ptb_item)
else:
_nonzero_0 = _nonzero[0].item()
_nonzero_1 = _nonzero[1].item()
assert ptb_item[0].item() == bos_idx, (ptb_item[0].item(), bos_idx)
    src_fr = 1
    src_to = _nonzero_0
    trg_fr = src_to + 1
    trg_to = _nonzero_1
# print("ptb_item:")
# print(ptb_item)
# print("offsets:")
# print("%d %d %d %d" % (src_fr, src_to, trg_fr, trg_to))
# print("4 items: %d %d %d %d" % tuple(ptb_item[i].item() for i in [src_fr, src_to, trg_fr, trg_to]))
if src_to - src_fr <= 0 or trg_to - trg_fr <= 0:
print("[W] ptb_item=%s offsets=%d,%d,%d,%d" % (
str(ptb_item), src_fr, src_to, trg_fr, trg_to,
))
# raise ValueError
return src_fr, src_to, trg_fr, trg_to
def __getitem__(self, index):
ptb_item = self.dataset[index]
return self.get_check_ptb_offsets(ptb_item)
def collater(self, samples):
src_fr = [s[0] for s in samples]
src_to = [s[1] for s in samples]
trg_fr = [s[2] for s in samples]
trg_to = [s[3] for s in samples]
return src_fr, src_to, trg_fr, trg_to | EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/data/offset_dataset.py |
import torch
from fairseq.data import (data_utils,
TokenBlockDataset, PrependTokenDataset, PadDataset, TruncateDataset,
NumelDataset, NumSamplesDataset, NestedDictionaryDataset,
MaskTokensDataset, AppendTokenDataset, )
from fairseq.data.encoders.utils import get_whole_word_mask
def get_mlm_dataset(args, dataset_path, vocab, mask_idx, mask_whole_words=None, combine=False):
ptb_dataset = get_prepended_token_block_dataset(
args, dataset_path, vocab, combine=combine)
src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
ptb_dataset,
vocab=vocab,
pad_idx=vocab.pad(),
mask_idx=mask_idx,
seed=args.seed,
mask_prob=args.mask_prob,
mask_whole_words=mask_whole_words,
)
dataset = NestedDictionaryDataset(
{
'net_input': {
'src_tokens': PadDataset(
src_dataset,
pad_idx=vocab.pad(),
left_pad=False,
),
'src_lengths': NumelDataset(src_dataset, reduce=False),
},
'target': PadDataset(
tgt_dataset,
pad_idx=vocab.pad(),
left_pad=False,
),
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(src_dataset, reduce=True),
# 'lang_id': RawLabelDataset([lang_id] * src_dataset.sizes.shape[0]),
},
sizes=[src_dataset.sizes],
)
return dataset
def add_mlm_args(parser):
parser.add_argument('--mask-whole-words', default=False, action='store_true',
help='mask whole words; you may also want to set --bpe')
parser.add_argument('--mask-prob', default=0.15, type=float,
help='probability of replacing a token with mask')
parser.add_argument('--leave-unmasked-prob', default=0.1, type=float,
help='probability that a masked token is unmasked')
parser.add_argument('--random-token-prob', default=0.1, type=float,
help='probability of replacing a token with a random token')
parser.add_argument('--sample-break-mode', default='complete',
choices=['none', 'complete', 'complete_doc', 'eos'],
help='If omitted or "none", fills each sample with tokens-per-sample '
'tokens. If set to "complete", splits samples only at the end '
'of sentence, but may include multiple sentences per sample. '
'"complete_doc" is similar but respects doc boundaries. '
'If set to "eos", includes only one sentence per sample.')
def get_preprocessed_ptb_dataset(args, dataset_path, vocab, combine=False):
dataset = data_utils.load_indexed_dataset(
dataset_path, vocab, args.dataset_impl, combine=combine, )
if dataset is None:
raise FileNotFoundError('Dataset not found: ({})'.format(dataset_path))
return dataset
def get_prepended_token_block_dataset(args, dataset_path, vocab, combine=False):
dataset = data_utils.load_indexed_dataset(
dataset_path, vocab, args.dataset_impl, combine=combine, )
if dataset is None:
raise FileNotFoundError('Dataset not found: ({})'.format(dataset_path))
if not args.apply_ptb:
print("| [I] ptb not applied.", flush=True)
return dataset
dataset = TruncateDataset(dataset, args.tokens_per_sample - 1)
dataset = TokenBlockDataset(
dataset,
dataset.sizes,
args.tokens_per_sample - 1, # one less for <s>
pad=vocab.pad(),
eos=vocab.eos(),
break_mode=args.sample_break_mode,
)
print('| loaded {} blocks from: {}'.format(len(dataset), dataset_path), flush=True)
dataset = PrependTokenDataset(dataset, vocab.bos())
return dataset
| EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/data/mlm_utils.py |
import torch
from fairseq.data import (data_utils,
TokenBlockDataset, PrependTokenDataset, PadDataset, TruncateDataset,
NumelDataset, NumSamplesDataset, NestedDictionaryDataset,
MaskTokensDataset, AppendTokenDataset, )
from fairseq.data.encoders.utils import get_whole_word_mask
from infoxlm.data.mlm_utils import get_prepended_token_block_dataset
from infoxlm.data.offset_dataset import OffsetDataset
def get_xlm_align_dataset_with_mask(args, dataset_path, vocab, mask_idx, combine=False):
ptb_dataset = get_prepended_token_block_dataset(
args, dataset_path, vocab, combine=combine)
src_dataset, tgt_dataset = MaskTokensDataset.apply_mask(
ptb_dataset,
vocab=vocab,
pad_idx=vocab.pad(),
mask_idx=mask_idx,
seed=args.seed,
mask_prob=args.mask_prob,
)
dataset = NestedDictionaryDataset({
'net_input': {
'src_tokens': PadDataset(
ptb_dataset,
pad_idx=vocab.pad(),
left_pad=False,
),
'src_lengths': NumelDataset(ptb_dataset, reduce=False),
},
'nsentences': NumSamplesDataset(),
'ntokens': NumelDataset(ptb_dataset, reduce=True),
'offsets': OffsetDataset(ptb_dataset, vocab),
'net_input_tlm': {
'src_tokens': PadDataset(
src_dataset,
pad_idx=vocab.pad(),
left_pad=False,
),
'src_lengths': NumelDataset(src_dataset, reduce=False),
},
'target': PadDataset(
tgt_dataset,
pad_idx=vocab.pad(),
left_pad=False,
),
}, sizes=[ptb_dataset.sizes])
return dataset | EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/data/xlm_align.py |
import numpy as np
import os
import torch
from threading import Thread
from fairseq.data import data_utils, FairseqDataset, FairseqIterableDataset
class DictIterDataset(FairseqIterableDataset):
def __init__(self, defn, sizes=None):
self.defn = defn
for v in self.defn.values():
if not isinstance(v, (FairseqIterableDataset, )):
raise ValueError('Expected Dataset but found: {}'.format(v.__class__))
def set_epoch(self, epoch):
for ds in self.defn.values():
ds.set_epoch(epoch)
def __iter__(self):
iters = {key:iter(self.defn[key]) for key in self.defn}
while True:
try:
yield {key:next(iters[key]) for key in iters}
except StopIteration:
break
def __len__(self):
return min(len(v) for v in self.defn.values())
def collater(self, samples):
if len(samples) == 0:
return {}
sample = {}
for k, ds in self.defn.items():
sample[k] = ds.collater([s[k] for s in samples])
return sample
class DictDataset(FairseqDataset):
def __init__(self, defn, sizes=None):
self.defn = defn
for v in self.defn.values():
if not isinstance(v, (FairseqDataset, )):
raise ValueError('Expected Dataset but found: {}'.format(v.__class__))
def set_epoch(self, epoch):
for ds in self.defn.values():
ds.set_epoch(epoch)
def __getitem__(self, index):
ret = {key:self.defn[key][index] for key in self.defn}
return ret
def __len__(self):
return min(len(v) for v in self.defn.values())
def collater(self, samples):
if len(samples) == 0:
return {}
sample = {}
for k, ds in self.defn.items():
sample[k] = ds.collater([s[k] for s in samples])
# DEBUG
# print(sample)
return sample
| EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/data/dict_dataset.py |
| EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/data/__init__.py |
import numpy as np
import torch
from fairseq.data import data_utils, FairseqDataset, MaskTokensDataset, TruncateDataset, BaseWrapperDataset
from infoxlm.data.dict_dataset import DictDataset
def get_xlco_dataset(args, dataset_path, vocab, mask_idx, combine=False):
dataset = data_utils.load_indexed_dataset(
dataset_path, vocab, args.dataset_impl, combine=combine)
dataset, _ = MaskTokensDataset.apply_mask(
dataset,
vocab=vocab,
pad_idx=vocab.pad(),
mask_idx=mask_idx,
seed=args.seed,
mask_prob=args.mask_prob,
mask_whole_words=None,
)
dataset = XlcoDataset(dataset, vocab)
return dataset
class XlcoDataset(FairseqDataset):
def __init__(self, dataset, vocab, remove_bos_of_item2=True, seed=1):
    # dataset: consecutive lines form translation pairs (line i, line i + 1)
    # with i even; each item consumes two such pairs (4 lines).
self.dataset = dataset
self.vocab = vocab
self.remove_bos_of_item2 = remove_bos_of_item2
self.seed = seed
self.epoch = 0
def set_epoch(self, epoch):
self.epoch = epoch
if hasattr(self.dataset, 'set_epoch'):
self.dataset.set_epoch(epoch)
def __len__(self):
return len(self.dataset) // 4
# NOTE mix-up contrast
def __getitem__(self, index):
src_item1 = self.dataset[index*4]
tgt_item1 = self.dataset[index*4+1]
src_item2 = self.dataset[index*4+2]
tgt_item2 = self.dataset[index*4+3]
with data_utils.numpy_seed(self.seed, self.epoch, index):
mode = np.random.randint(8)
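      # "mode" is read as three independent bits: bit 0 swaps the two source
      # segments, bit 1 swaps the two target segments, and bit 2 swaps the
      # roles of source and target after concatenation, producing the
      # mixed-up pairs used for contrast.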
if mode & 1: src_item1, src_item2 = src_item2, src_item1
if mode & 2: tgt_item1, tgt_item2 = tgt_item2, tgt_item1
bos = self.vocab.bos()
if self.remove_bos_of_item2 and src_item2[0] == bos:
src_item2 = src_item2[1:]
if self.remove_bos_of_item2 and tgt_item2[0] == bos:
tgt_item2 = tgt_item2[1:]
src_item = torch.cat([src_item1, src_item2])
tgt_item = torch.cat([tgt_item1, tgt_item2])
if mode & 4: src_item, tgt_item = tgt_item, src_item
return {
'id': index,
'source': src_item,
'target': tgt_item,
}
def collater(self, samples):
if len(samples) == 0:
return {}
pad_idx = self.vocab.pad()
eos_idx = self.vocab.eos()
def merge(key, left_pad, move_eos_to_beginning=False):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx, eos_idx, left_pad, move_eos_to_beginning,
)
id = torch.LongTensor([s['id'] for s in samples])
src_tokens = merge('source', left_pad=False)
src_lengths = torch.LongTensor([s['source'].numel() for s in samples])
tgt_tokens = merge('target', left_pad=False)
tgt_lengths = torch.LongTensor([s['target'].numel() for s in samples])
n_src_tokens = sum(len(s['source']) for s in samples)
n_tgt_tokens = sum(len(s['target']) for s in samples)
batch = {
'id': id,
'nsentences': len(samples),
'ntokens': n_src_tokens + n_tgt_tokens,
'src_net_input': {
'src_tokens': src_tokens,
'src_lengths': src_lengths,
},
# NOTE the Roberta forward function takes src_tokens as input
'tgt_net_input': {
'src_tokens': tgt_tokens,
'src_lengths': tgt_lengths,
},
}
return batch | EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/data/xlco_dataset.py |
import collections
import logging
import math
import torch
import numpy as np
from torch import nn
from torch.nn import functional as F
from torch import distributed
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.data.data_utils import process_bpe_symbol
from infoxlm.utils import _get_logging_loss, construct_idx_tensor_from_list
@register_criterion('dwa_mlm_tlm')
class DwaMlmTlm(FairseqCriterion):
IGNORE_INDEX = 1000000
def __init__(self, args, task):
super().__init__(args, task)
self.padding_idx = self.task.dictionary.pad_index
@staticmethod
def add_args(parser):
parser.add_argument('--no_tlm_loss', default=False, action='store_true')
def forward_mlm(self, model, sample, reduce=True, dep_rep_size=3):
masked_tokens = sample['target'].ne(self.padding_idx)
sample_size = masked_tokens.int().sum().item()
# (Rare case) When all tokens are masked, the model results in empty
# tensor and gives CUDA error.
if sample_size == 0:
masked_tokens = None
# logger.warning(str(sample["net_input"]["src_tokens"]))
# logger.warning("index - " + str(sample["net_input"]["src_tokens"].max()))
# logger.warning("len - " + str(sample["net_input"]["src_lengths"].max()))
features, _ = model(**sample['net_input'], use_model_fast=True, features_only=True)
logits = model.output_layer(features, masked_tokens=masked_tokens, use_model_fast=True)
targets = model.get_targets(sample, [logits])
if sample_size != 0:
targets = targets[masked_tokens]
# loss could be FloatTensor caused by deprecated functional method
loss = F.nll_loss(
F.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
),
targets.view(-1),
reduction='sum',
ignore_index=self.padding_idx,
).half()
logging_loss = utils.item(loss.data) if reduce else loss.data
logging_output = {
'mlm_loss': logging_loss,
'mlm_ntokens': sample['ntokens'],
'mlm_nsentences': sample['nsentences'],
'mlm_sample_size': sample_size,
}
    # NOTE Workaround: every parameter has to participate in the forward
    # pass, otherwise DDP complains about unused parameters.
hidden_sz = features.size(-1)
if hasattr(model, "qa_layer"):
dep_rep = features.new(hidden_sz * dep_rep_size).fill_(0)
dep_rep = model.qa_layer(dep_rep)
loss += dep_rep.mean() * 0.0
if hasattr(model, "q_linear"):
dep_rep = features.new(hidden_sz).fill_(0)
dep_rep1 = model.q_linear(dep_rep).mean()
dep_rep2 = model.k_linear(dep_rep).mean()
loss += dep_rep1 * 0.0 + dep_rep2 * 0.0
if hasattr(model, "predictor"):
dep_rep = features.new(hidden_sz).fill_(0)
dep_rep = model.predictor(dep_rep)
loss += dep_rep.mean() * 0.0
return loss, sample_size, logging_output
def forward_tlm(self, model, sample, reduce=True, dep_rep_size=3, net_input_key="net_input_tlm"):
masked_tokens = sample['target'].ne(self.padding_idx)
sample_size = masked_tokens.int().sum().item()
# (Rare case) When all tokens are masked, the model results in empty
# tensor and gives CUDA error.
if sample_size == 0:
masked_tokens = None
# logger.warning(str(sample["net_input"]["src_tokens"]))
# logger.warning("index - " + str(sample["net_input"]["src_tokens"].max()))
# logger.warning("len - " + str(sample["net_input"]["src_lengths"].max()))
features, _ = model(**sample[net_input_key], use_model_fast=True, features_only=True)
logits = model.output_layer(features, masked_tokens=masked_tokens, use_model_fast=True)
targets = model.get_targets(sample, [logits])
if sample_size != 0:
targets = targets[masked_tokens]
# loss could be FloatTensor caused by deprecated functional method
loss = F.nll_loss(
F.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
),
targets.view(-1),
reduction='sum',
ignore_index=self.padding_idx,
).half()
logging_loss = utils.item(loss.data) if reduce else loss.data
logging_output = {
'tlm_loss': logging_loss,
'tlm_ntokens': sample['ntokens'],
'tlm_nsentences': sample['nsentences'],
'tlm_sample_size': sample_size,
}
    # NOTE Workaround: every parameter has to participate in the forward
    # pass, otherwise DDP complains about unused parameters.
hidden_sz = features.size(-1)
if hasattr(model, "qa_layer"):
dep_rep = features.new(hidden_sz * dep_rep_size).fill_(0)
dep_rep = model.qa_layer(dep_rep)
loss += dep_rep.mean() * 0.0
if hasattr(model, "q_linear"):
dep_rep = features.new(hidden_sz).fill_(0)
dep_rep1 = model.q_linear(dep_rep).mean()
dep_rep2 = model.k_linear(dep_rep).mean()
loss += dep_rep1 * 0.0 + dep_rep2 * 0.0
if hasattr(model, "predictor"):
dep_rep = features.new(hidden_sz).fill_(0)
dep_rep = model.predictor(dep_rep)
loss += dep_rep.mean() * 0.0
return loss, sample_size, logging_output
def forward(self, model, sample, reduce=True, aligned_tokens=None, mlm=False, tlm=False):
if mlm:
return self.forward_mlm(model, sample, reduce, dep_rep_size=2)
elif tlm:
return self.forward_tlm(model, sample, reduce, dep_rep_size=2, net_input_key="net_input_tlm")
else:
return self.forward_denoise_word_alignment(model, sample, reduce, aligned_tokens, use_tlm_loss=(not self.args.no_tlm_loss))
def forward_masked_lm(self, features, tlm_targets, model):
masked_tokens = tlm_targets.ne(self.padding_idx)
sample_size = masked_tokens.int().sum().item()
if sample_size == 0: masked_tokens = None
logits = model.output_layer(features, masked_tokens=masked_tokens)
targets = tlm_targets
if sample_size != 0: targets = targets[masked_tokens]
loss = F.nll_loss(
F.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
),
targets.view(-1),
reduction='sum',
ignore_index=self.padding_idx,
).half()
logging_output = {
'tlm_loss': _get_logging_loss(loss),
'tlm_sample_size': sample_size,
}
return loss, sample_size, logging_output
def _positions2masked_features(self, positions, features, hidden_sz):
# bsz, max_num_spans
    # NOTE padding positions are filled with IGNORE_INDEX; replace them with
    # 0 so the gather indices stay in range
positions4gather = positions.clone().detach()
positions4gather[positions==DwaMlmTlm.IGNORE_INDEX] = 0
# bsz, max_num_spans -> bsz, max_num_spans, hidden
positions4gather = positions4gather.unsqueeze(-1).expand(-1, -1, hidden_sz)
masked_features = features.gather(dim=1, index=positions4gather)
return masked_features
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
# loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
reduced_log = collections.defaultdict(float)
# TODO sa EM & F1
reduced_keys = ["sa_loss", 'sa_EM', 'sa_EM_tot', 'sa_nsentences', 'sa_ntokens', 'sa_sample_size', "tlm_loss", "tlm_sample_size", "mlm_ntokens", "mlm_nsentences", "mlm_sample_size", "mlm_loss"]
for log in logging_outputs:
for key in reduced_keys:
reduced_log[key] += log.get(key, 0)
eps = 1e-7
sa_sample_size = reduced_log["sa_sample_size"]
sa_loss = reduced_log["sa_loss"] / (sa_sample_size + eps) / math.log(2)
tlm_sample_size = reduced_log["tlm_sample_size"]
tlm_loss = reduced_log["tlm_loss"] / (tlm_sample_size + eps) / math.log(2)
mlm_sample_size = reduced_log["mlm_sample_size"]
mlm_loss = reduced_log["mlm_loss"] / (mlm_sample_size + eps) / math.log(2)
sample_size = sa_sample_size + tlm_sample_size + mlm_sample_size
loss = (reduced_log["sa_loss"] + reduced_log["tlm_loss"] + reduced_log["mlm_loss"]) / (sample_size + eps) / math.log(2)
    # Workaround: avoid division by zero when no alignment targets were seen
if reduced_log["sa_EM_tot"] < 1: reduced_log["sa_EM_tot"] = 1
agg_output = {
'loss': loss,
'ntokens': reduced_log["sa_ntokens"] + reduced_log["mlm_ntokens"],
'nsentences': reduced_log["sa_nsentences"] + reduced_log["mlm_nsentences"],
'dwa_loss': sa_loss,
'dwa_sample_size': sa_sample_size,
'dwa_EM': 0 if reduced_log["sa_EM_tot"] == 0 else 100 * reduced_log["sa_EM"] / reduced_log["sa_EM_tot"],
'mlm_loss': mlm_loss,
'mlm_sample_size': mlm_sample_size,
'tlm_loss': tlm_loss,
'tlm_sample_size': tlm_sample_size,
'sample_size': sample_size,
}
# DEBUG
# for k, v in agg_output.items():
# print("%s: %.2f" % (k, v), end=" | ")
# print("")
return agg_output
def construct_tensor_from_list(self, idx_list2d, lens, pad_idx, device=None):
max_len = max(lens)
padded_list = [list_i + [pad_idx] * (max_len - lens[i]) for i, list_i in enumerate(idx_list2d)]
tensor = torch.LongTensor(padded_list)
if device is not None:
tensor = tensor.to(device=device)
return tensor
def prepare_positions(self, sample, aligned_tokens, device=None):
masked_tokens = sample['target'].ne(self.padding_idx)
bsz = masked_tokens.size(0)
src_fr, src_to, trg_fr, trg_to = sample["offsets"]
# NOTE aligned_tokens should be extracted from the jointly encoded representations
align_dicts = []
for tokens_i in aligned_tokens:
dict_i = {}
for src, trg in tokens_i:
dict_i[src] = trg
dict_i[trg] = src
align_dicts.append(dict_i)
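    # Split the gold alignments by direction: "fwd" entries have the masked
    # token in the source segment and its aligned token in the target
    # segment, "bwd" entries are the opposite. The per-sentence lists are
    # later padded with IGNORE_INDEX so they can be batched.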
positions_fwd = [[] for i in range(bsz)]
positions_bwd = [[] for i in range(bsz)]
masked_positions_fwd = [[] for i in range(bsz)]
masked_positions_bwd = [[] for i in range(bsz)]
pos_cnt_fwd = [0] * bsz
pos_cnt_bwd = [0] * bsz
for ij in masked_tokens.nonzero():
i = ij[0].item()
masked_j = ij[1].item()
if masked_j not in align_dicts[i]: continue
aligned_j = align_dicts[i][masked_j]
if src_fr[i] <= masked_j < src_to[i] and trg_fr[i] <= aligned_j < trg_to[i]:
masked_positions_fwd[i].append(masked_j)
positions_fwd[i].append(aligned_j)
pos_cnt_fwd[i] += 1
elif src_fr[i] <= aligned_j < src_to[i] and trg_fr[i] <= masked_j < trg_to[i]:
masked_positions_bwd[i].append(masked_j)
positions_bwd[i].append(aligned_j)
pos_cnt_bwd[i] += 1
else:
print("[W] Value Error of alignments!!!")
continue
positions_fwd = self.construct_tensor_from_list(positions_fwd, pos_cnt_fwd, DwaMlmTlm.IGNORE_INDEX, device=device)
positions_bwd = self.construct_tensor_from_list(positions_bwd, pos_cnt_bwd, DwaMlmTlm.IGNORE_INDEX, device=device)
masked_positions_fwd = self.construct_tensor_from_list(masked_positions_fwd, pos_cnt_fwd, DwaMlmTlm.IGNORE_INDEX, device=device)
masked_positions_bwd = self.construct_tensor_from_list(masked_positions_bwd, pos_cnt_bwd, DwaMlmTlm.IGNORE_INDEX, device=device)
return positions_fwd, positions_bwd, masked_positions_fwd, masked_positions_bwd
def forward_denoise_word_alignment(self, model, sample, reduce=True, aligned_tokens=None, use_tlm_loss=True):
src_fr, src_to, trg_fr, trg_to = sample["offsets"]
features, _ = model(**sample["net_input_tlm"], features_only=True)
device = features.device
positions_fwd, positions_bwd, masked_positions_fwd, masked_positions_bwd = \
self.prepare_positions(sample, aligned_tokens, device=device)
if use_tlm_loss:
tlm_loss, tlm_sample_size, tlm_logging_output = self.forward_masked_lm(
features, sample["target"], model)
fwd_loss, fwd_em_cnt, fwd_tot = self.get_token_align_loss(model, features, positions_fwd, masked_positions_fwd, trg_fr, trg_to)
bwd_loss, bwd_em_cnt, bwd_tot = self.get_token_align_loss(model, features, positions_bwd, masked_positions_bwd, src_fr, src_to)
loss = fwd_loss + bwd_loss
em_cnt = fwd_em_cnt + bwd_em_cnt
tot = fwd_tot + bwd_tot
em = 0 if tot == 0 else 100.0 * em_cnt / tot
sample_size = tot
logging_output = {
'sa_loss': _get_logging_loss(loss),
'sa_EM': em_cnt,
'sa_EM_tot': tot,
'sa_nsentences': sample["nsentences"],
'sa_ntokens': sample["ntokens"],
'sa_sample_size': sample_size,
}
if use_tlm_loss:
loss += tlm_loss
sample_size += tlm_sample_size
logging_output.update(tlm_logging_output)
else:
hidden_sz = features.size(-1)
dep_rep = features.new(hidden_sz).fill_(0)
dep_rep = model.output_layer(dep_rep, features_only=True)
loss += dep_rep.mean() * 0.0
if hasattr(model, "forward_proj"):
hidden_sz = features.size(-1)
dep_rep = features.new(hidden_sz).fill_(0)
dep_rep = model.forward_proj(dep_rep[None, :], "en", use_model_fast=True)
loss += dep_rep.mean() * 0.0
return loss, sample_size, logging_output
def get_token_align_loss(self, model, features, positions, masked_positions, fr, to):
if len(positions.view(-1)) <= 0:
dep_rep = features[0, 0, :]
loss = dep_rep.mean() * 0.0
em_cnt = tot = 0
return loss, em_cnt, tot
bsz, seq_len, hidden_sz = features.size()
# _, max_num_spans = positions.size()
device = features.device
# get attention mask
fr_tensor = torch.LongTensor(fr).to(device=device)
to_tensor = torch.LongTensor(to).to(device=device)
# bsz, seq_len
attention_mask = (torch.arange(seq_len)[None, :].to(device=device) >= fr_tensor[:, None]) & (torch.arange(seq_len)[None, :].to(device=device) < to_tensor[:, None])
# bsz, 1, seq_len
attention_mask = attention_mask[:, None, :]
attention_mask = (1.0-attention_mask.half()) * -1e4
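    # Additive attention mask: positions outside [fr, to) receive a large
    # negative bias, so each masked token can only point to candidates
    # inside the opposite-language segment when predicting the position of
    # its aligned token.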
# print(attention_mask)
# masked_features: bsz, max_num_spans, hidden
masked_features = self._positions2masked_features(masked_positions, features, hidden_sz)
q_features = model.q_linear(masked_features)
# bsz, len, hidden
k_features = model.k_linear(features)
# bsz, max_num_spans, len
attention_scores = torch.matmul(q_features, k_features.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(hidden_sz)
attention_scores = attention_scores + attention_mask
logits = attention_scores
loss_fct = nn.CrossEntropyLoss(ignore_index=DwaMlmTlm.IGNORE_INDEX, reduction='sum')
loss = loss_fct(logits.view(-1, logits.size(-1)), positions.view(-1))
# calc EM & F1
def _get_em_mask(logits, targets):
logits = logits.view(-1, logits.size(-1))
targets = targets.view(-1)
prediction = logits.argmax(dim=-1)
return targets == prediction, (targets != DwaMlmTlm.IGNORE_INDEX).sum().item()
em_mask, tot = _get_em_mask(logits, positions)
em_cnt = em_mask.sum().item()
return loss, em_cnt, tot
| EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/criterions/xlm_align.py |
import os
import importlib
# automatically import any Python files in the criterions/ directory
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith('.py') and not file.startswith('_'):
module = file[:file.find('.py')]
importlib.import_module('infoxlm.criterions.' + module) | EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/criterions/__init__.py |
import collections
import logging
import math
import torch
from torch import nn
from torch.nn import functional as F
from torch import distributed
from fairseq import utils
from fairseq.criterions import FairseqCriterion, register_criterion
logger = logging.getLogger(__name__)
@register_criterion('xlco')
class XlCoCriterion(FairseqCriterion):
@staticmethod
def add_args(parser):
parser.add_argument('--xlco_queue_size', default=256, type=int)
parser.add_argument('--xlco_softmax_tau', default=0.25, type=float)
parser.add_argument('--xlco_layer', default=8, type=int)
parser.add_argument('--xlco_lambda', default=1.0, type=float)
def __init__(self, args, task):
super().__init__(args, task)
self.criterion = nn.CrossEntropyLoss(reduction='sum')
self.xlco_queue_size = args.xlco_queue_size
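    # contrastive_loss below is an InfoNCE-style objective: the positive
    # logit is the dot product between each query q and its key k, the
    # negatives are dot products against the detached queue, and all logits
    # are divided by xlco_softmax_tau before cross-entropy with the positive
    # class at index 0.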
def contrastive_loss(self, q, k, queue):
queue = queue.clone().detach()
N, C = q.size()
assert k.size() == (N,C), (N, C, k.size())
logits_pos = torch.bmm(q.view(N, 1, C), k.view(N, C, 1)).view(N, 1)
logits_neg = torch.mm(q, queue.transpose(0, 1))
logits = torch.cat([logits_pos, logits_neg], dim=1) / self.args.xlco_softmax_tau
labels = torch.zeros(N).cuda().long()
loss = self.criterion(logits, labels)
cxlm_ncorrect = utils.item((logits.argmax(dim=1) == labels).sum())
return loss, cxlm_ncorrect
def _get_logging_loss(self, loss, reduce=True):
if loss is None: return 0
return utils.item(loss.data) if reduce else loss.data
def forward_xlco(self, model, sample, reduce=True):
cxlm_head_key = "share_lang"
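    # The slow (momentum) encoder produces key representations without
    # gradients from hidden layer args.xlco_layer, while the fast encoder
    # produces the queries; the contrastive term is only computed once the
    # key queue has been filled.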
with torch.no_grad():
_, inner_states = model(**sample['tgt_net_input'], use_model_fast=False, features_only=True, return_all_hiddens=True)
slow_features = inner_states["inner_states"][self.args.xlco_layer]
slow_features = slow_features[0, :, :].clone().detach()
if self.args.use_proj:
slow_rep = model.forward_proj(
slow_features, cxlm_head_key, use_model_fast=False)
else: slow_rep = slow_features
if model.is_queue_ready():
fast_features, inner_states = model(**sample['src_net_input'],
use_model_fast=True, features_only=True, return_all_hiddens=True)
fast_features = inner_states["inner_states"][-1][0, :, :]
fast_features8 = inner_states["inner_states"][self.args.xlco_layer][0, :, :]
if self.args.use_proj:
fast_rep = model.forward_proj(
fast_features8, cxlm_head_key, use_model_fast=True)
else: fast_rep = fast_features8
cxlm_loss, cxlm_ncorrect = self.contrastive_loss(fast_rep, slow_rep, model.queue)
cxlm_loss *= self.task.xlco_lambda
loss = cxlm_loss
    # NOTE Workaround: every parameter has to participate in the forward
    # pass, otherwise DDP complains about unused parameters.
dep_logits = model.output_layer(fast_features, features_only=True)
loss += dep_logits.mean() * 0.0
if hasattr(model, "q_linear"):
hidden_sz = fast_features.size(-1)
dep_rep = fast_features.new(hidden_sz).fill_(0)
dep_rep1 = model.q_linear(dep_rep).mean()
dep_rep2 = model.k_linear(dep_rep).mean()
loss += dep_rep1 * 0.0 + dep_rep2 * 0.0
cxlm_logging_loss = self._get_logging_loss(cxlm_loss, reduce)
else:
loss = None
cxlm_logging_loss = 0
cxlm_ncorrect = 0
if model.training:
rank = self.args.distributed_rank
model.update_queue(slow_rep)
sample_size = sample["nsentences"]
logging_output = {
'cxlm_loss': cxlm_logging_loss,
'cxlm_nsentences': sample["nsentences"],
'cxlm_ntokens': sample["ntokens"],
'cxlm_sample_size': sample_size,
'cxlm_ncorrect': cxlm_ncorrect,
}
return loss, sample_size, logging_output
def forward_mlm(self, model, sample, reduce=True):
masked_tokens = sample['target'].ne(self.padding_idx)
sample_size = masked_tokens.int().sum().item()
# (Rare case) When all tokens are masked, the model results in empty
# tensor and gives CUDA error.
if sample_size == 0:
masked_tokens = None
features, _ = model(**sample['net_input'], use_model_fast=True, features_only=True)
logits = model.output_layer(features, masked_tokens=masked_tokens, use_model_fast=True)
targets = model.get_targets(sample, [logits])
if sample_size != 0:
targets = targets[masked_tokens]
# loss could be FloatTensor caused by deprecated functional method
loss = F.nll_loss(
F.log_softmax(
logits.view(-1, logits.size(-1)),
dim=-1,
dtype=torch.float32,
),
targets.view(-1),
reduction='sum',
ignore_index=self.padding_idx,
).half()
logging_loss = utils.item(loss.data) if reduce else loss.data
logging_output = {
'mlm_loss': logging_loss,
'mlm_ntokens': sample['ntokens'],
'mlm_nsentences': sample['nsentences'],
'mlm_sample_size': sample_size,
}
    # NOTE Workaround: every parameter has to participate in the forward
    # pass, otherwise DDP complains about unused parameters.
if self.args.use_proj:
dep_rep = model.forward_proj(features[:, 0, :], "en", use_model_fast=True)
loss += dep_rep.mean() * 0.0
if hasattr(model, "q_linear"):
hidden_sz = features.size(-1)
dep_rep = features.new(hidden_sz).fill_(0)
dep_rep1 = model.q_linear(dep_rep).mean()
dep_rep2 = model.k_linear(dep_rep).mean()
loss += dep_rep1 * 0.0 + dep_rep2 * 0.0
return loss, sample_size, logging_output
def forward(self, model, sample, reduce=True, mlm=False):
if mlm:
return self.forward_mlm(model, sample, reduce=reduce)
else:
return self.forward_xlco(model, sample, reduce=reduce)
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
# loss_sum = sum(log.get('loss', 0) for log in logging_outputs)
reduced_log = collections.defaultdict(float)
reduced_keys = ["cxlm_loss", "mlm_loss", "cxlm_ntokens",
"cxlm_nsentences", "mlm_ntokens", "mlm_nsentences", "cxlm_sample_size",
"mlm_sample_size", "cxlm_ncorrect", "momentum"]
for log in logging_outputs:
for key in reduced_keys:
reduced_log[key] += log.get(key, 0)
loss_sum_cxlm = reduced_log["cxlm_loss"]
loss_sum_mlm = reduced_log["mlm_loss"]
loss_sum = loss_sum_cxlm + loss_sum_mlm
cxlm_ntokens = reduced_log["cxlm_ntokens"]
cxlm_nsentences = reduced_log["cxlm_nsentences"]
mlm_ntokens = reduced_log["mlm_ntokens"]
mlm_nsentences = reduced_log["mlm_nsentences"]
cxlm_sample_size = reduced_log["cxlm_sample_size"]
mlm_sample_size = reduced_log["mlm_sample_size"]
sample_size = cxlm_sample_size + mlm_sample_size
ncorrect = reduced_log["cxlm_ncorrect"]
eps = 1e-7
agg_output = {
'loss': loss_sum / (sample_size + eps) / math.log(2),
'ntokens': cxlm_ntokens + mlm_ntokens,
'nsentences': cxlm_nsentences + mlm_nsentences,
'xlco_loss': loss_sum_cxlm / (cxlm_sample_size + eps) / math.log(2),
'mlm_loss': loss_sum_mlm / (mlm_sample_size + eps) / math.log(2),
'xlco_accuracy': 100.0 * ncorrect / (cxlm_nsentences + eps),
'momentum': reduced_log["momentum"] / len(logging_outputs),
'xlco_ntokens': cxlm_ntokens,
'xlco_nsentences': cxlm_nsentences,
'mlm_ntokens': mlm_ntokens,
'mlm_nsentences': mlm_nsentences,
'xlco_sample_size': cxlm_sample_size,
'mlm_sample_size': mlm_sample_size,
'sample_size': sample_size,
}
# DEBUG
# for k, v in agg_output.items():
# print("%s: %f" % (k, v), end=" | ")
# print("")
return agg_output | EXA-1-master | exa/models/unilm-master/infoxlm/src-infoxlm/infoxlm/criterions/xlco.py |
import deltalm
from fairseq_cli.preprocess import cli_main
if __name__ == "__main__":
cli_main() | EXA-1-master | exa/models/unilm-master/deltalm/preprocess.py |
import deltalm
from fairseq_cli.generate import cli_main
if __name__ == "__main__":
cli_main() | EXA-1-master | exa/models/unilm-master/deltalm/generate.py |
import deltalm
from fairseq_cli.interactive import cli_main
if __name__ == "__main__":
cli_main() | EXA-1-master | exa/models/unilm-master/deltalm/interactive.py |
import deltalm
from fairseq_cli.train import cli_main
if __name__ == "__main__":
cli_main() | EXA-1-master | exa/models/unilm-master/deltalm/train.py |
import deltalm.models | EXA-1-master | exa/models/unilm-master/deltalm/deltalm/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, Dict, List, Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
from fairseq import checkpoint_utils
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
TransformerModel,
TransformerDecoderBase,
TransformerEncoderBase,
)
from fairseq.models.transformer.transformer_config import (
TransformerConfig,
DEFAULT_MAX_SOURCE_POSITIONS,
DEFAULT_MAX_TARGET_POSITIONS,
DEFAULT_MIN_PARAMS_TO_WRAP,
)
from fairseq.modules.transformer_layer import (
TransformerDecoderLayerBase
)
from fairseq.modules.multihead_attention import MultiheadAttention
from fairseq.modules import LayerNorm
# checkpoint_wrapper is used by build_decoder_layer when
# --checkpoint-activations is enabled
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.fairseq_dropout import FairseqDropout
from fairseq.modules.quant_noise import quant_noise
from fairseq import utils
from fairseq.file_io import PathManager
import logging
logger = logging.getLogger(__name__)
def upgrade_state_dict_for_deltalm(
state_dict: Dict[str, Any], pretrained_deltalm_checkpoint: str, is_encoder=True,
) -> Dict[str, Any]:
if not os.path.exists(pretrained_deltalm_checkpoint):
raise IOError("Model file not found: {}".format(pretrained_deltalm_checkpoint))
with open(pretrained_deltalm_checkpoint, "rb") as f:
state = torch.load(f, map_location=torch.device("cpu"))
deltalm_state_dict = state["weights"]
new_deltalm_state_dict = {}
for key in deltalm_state_dict.keys():
if is_encoder:
if key.startswith('encoder.') or key.startswith('src_embedding.'):
new_key = key.replace('encoder.', '')
new_key = new_key.replace('src_embedding.', '')
new_deltalm_state_dict[new_key] = deltalm_state_dict[key]
else:
if key.startswith('decoder.') or key.startswith('tgt_embedding.'):
new_key = key.replace('decoder.', '')
new_key = new_key.replace('tgt_embedding.', '')
new_deltalm_state_dict[new_key] = deltalm_state_dict[key]
deltalm_state_dict = new_deltalm_state_dict
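    # Rename the pretrained DeltaLM parameters to this module's names: the
    # first decoder FFN ("ffn_1") maps to fc3/fc4, the second FFN drops its
    # "ffn_2"/"ffn." prefix, and "emb_layer_norm" becomes
    # "layernorm_embedding". Embedding and position tables are truncated or
    # partially copied when their sizes differ.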
for key in deltalm_state_dict.keys():
map_key = key
map_key = map_key.replace('.ffn_1.fc1', '.fc3')
map_key = map_key.replace('.ffn_1.fc2', '.fc4')
map_key = map_key.replace('.ffn_2', '')
map_key = map_key.replace('.ffn.', '.')
map_key = map_key.replace('emb_layer_norm', 'layernorm_embedding')
assert map_key in state_dict, map_key
if 'embed_positions' in key or 'embed_tokens' in key:
left_size = state_dict[map_key].size(0)
right_size = deltalm_state_dict[key].size(0)
if left_size <= right_size:
state_dict[map_key] = deltalm_state_dict[key][:left_size]
else:
state_dict[map_key][:right_size] = deltalm_state_dict[key]
else:
state_dict[map_key] = deltalm_state_dict[key]
return state_dict
@register_model("deltalm")
class DeltaLMModel(TransformerModel):
@staticmethod
def add_args(parser):
"""Add model-specific arguments to the parser."""
TransformerModel.add_args(parser)
parser.add_argument(
"--pretrained-deltalm-checkpoint",
type=str,
metavar="STR",
)
@classmethod
def build_encoder(cls, args, tgt_dict, embed_tokens):
return DeltaLMEncoder(TransformerConfig.from_namespace(args), tgt_dict, embed_tokens)
@classmethod
def build_decoder(cls, args, tgt_dict, embed_tokens):
return DeltaLMDecoder(TransformerConfig.from_namespace(args), tgt_dict, embed_tokens)
class DeltaLMEncoder(TransformerEncoderBase):
def __init__(self, args, dictionary, embed_tokens):
super().__init__(args, dictionary, embed_tokens)
if getattr(args, "pretrained_deltalm_checkpoint", "") != "":
deltalm_loaded_state_dict = upgrade_state_dict_for_deltalm(
state_dict=self.state_dict(),
pretrained_deltalm_checkpoint=args.pretrained_deltalm_checkpoint,
is_encoder=True,
)
self.load_state_dict(deltalm_loaded_state_dict, strict=True)
logger.info("Load DeltaLM's encoder from {0}".format(args.pretrained_deltalm_checkpoint))
class DeltaLMDecoder(TransformerDecoderBase):
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
super().__init__(args, dictionary, embed_tokens, no_encoder_attn)
if getattr(args, "pretrained_deltalm_checkpoint", "") != "":
deltalm_loaded_state_dict = upgrade_state_dict_for_deltalm(
state_dict=self.state_dict(),
pretrained_deltalm_checkpoint=args.pretrained_deltalm_checkpoint,
is_encoder=False,
)
self.load_state_dict(deltalm_loaded_state_dict, strict=True)
logger.info("Load DeltaLM's decoder from {0}".format(args.pretrained_deltalm_checkpoint))
def build_decoder_layer(self, args, no_encoder_attn=False):
layer = DeltaLMDecoderLayer(args, no_encoder_attn)
if getattr(args, "checkpoint_activations", False):
layer = checkpoint_wrapper(layer)
return layer
class DeltaLMDecoderLayer(TransformerDecoderLayerBase):
def __init__(
self, args, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False
):
super(TransformerDecoderLayerBase, self).__init__()
self.embed_dim = args.decoder_embed_dim
self.dropout_module = FairseqDropout(
args.dropout, module_name=self.__class__.__name__
)
self.quant_noise = getattr(args, "quant_noise_pq", 0)
self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8)
self.cross_self_attention = getattr(args, "cross_self_attention", False)
self.self_attn = self.build_self_attention(
self.embed_dim,
args,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
)
self.activation_fn = utils.get_activation_fn(
activation=str(args.activation_fn)
if getattr(args, "activation_fn", None) is not None
else "relu"
)
activation_dropout_p = getattr(args, "activation_dropout", 0) or 0
if activation_dropout_p == 0:
# for backwards compatibility with models that use args.relu_dropout
activation_dropout_p = getattr(args, "relu_dropout", 0) or 0
self.activation_dropout_module = FairseqDropout(
float(activation_dropout_p), module_name=self.__class__.__name__
)
self.normalize_before = args.decoder_normalize_before
# use layerNorm rather than FusedLayerNorm for exporting.
        # char_inputs can be used to determine this.
# TODO remove this once we update apex with the fix
export = getattr(args, "char_inputs", False)
self.self_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
if no_encoder_attn:
self.encoder_attn = None
self.encoder_attn_layer_norm = None
else:
self.encoder_attn = self.build_encoder_attention(self.embed_dim, args)
self.encoder_attn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.fc1 = self.build_fc1(
self.embed_dim,
args.decoder_ffn_embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.fc2 = self.build_fc2(
args.decoder_ffn_embed_dim,
self.embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.fc3 = self.build_fc1(
self.embed_dim,
args.decoder_ffn_embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.fc4 = self.build_fc2(
args.decoder_ffn_embed_dim,
self.embed_dim,
self.quant_noise,
self.quant_noise_block_size,
)
self.ffn_layer_norm = LayerNorm(self.embed_dim, export=export)
self.final_layer_norm = LayerNorm(self.embed_dim, export=export)
self.need_attn = True
self.onnx_trace = False
def forward(
self,
x,
encoder_out: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
prev_self_attn_state: Optional[List[torch.Tensor]] = None,
prev_attn_state: Optional[List[torch.Tensor]] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
src_lang_id = None,
tgt_lang_id = None
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor, optional): binary
ByteTensor of shape `(batch, src_len)` where padding
elements are indicated by ``1``.
need_attn (bool, optional): return attention weights
need_head_weights (bool, optional): return attention weights
for each head (default: return average over heads).
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
if need_head_weights:
need_attn = True
###############################################
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if prev_self_attn_state is not None:
prev_key, prev_value = prev_self_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_self_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
assert incremental_state is not None
self.self_attn._set_input_buffer(incremental_state, saved_state)
_self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
if self.cross_self_attention and not (
incremental_state is not None
and _self_attn_input_buffer is not None
and "prev_key" in _self_attn_input_buffer
):
if self_attn_mask is not None:
assert encoder_out is not None
self_attn_mask = torch.cat(
(x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1
)
if self_attn_padding_mask is not None:
if encoder_padding_mask is None:
assert encoder_out is not None
encoder_padding_mask = self_attn_padding_mask.new_zeros(
encoder_out.size(1), encoder_out.size(0)
)
self_attn_padding_mask = torch.cat(
(encoder_padding_mask, self_attn_padding_mask), dim=1
)
assert encoder_out is not None
y = torch.cat((encoder_out, x), dim=0)
else:
y = x
x, attn = self.self_attn(
query=x,
key=y,
value=y,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
###############################################
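        # Extra FFN sub-layer (fc3/fc4) applied between self-attention and
        # cross-attention. Together with the standard fc1/fc2 block at the
        # end, this forms DeltaLM's interleaved decoder layer; the checkpoint
        # key mapping above loads the pretrained "ffn_1" weights into
        # fc3/fc4.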
residual = x
if self.normalize_before:
x = self.ffn_layer_norm(x)
x = self.activation_fn(self.fc3(x))
x = self.activation_dropout_module(x)
x = self.fc4(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.ffn_layer_norm(x)
###############################################
if self.encoder_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
if prev_attn_state is not None:
prev_key, prev_value = prev_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_attn_state[2]
assert incremental_state is not None
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=need_attn or (not self.training and self.need_attn),
need_head_weights=need_head_weights,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
###############################################
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
if self.onnx_trace and incremental_state is not None:
saved_state = self.self_attn._get_input_buffer(incremental_state)
assert saved_state is not None
if self_attn_padding_mask is not None:
self_attn_state = [
saved_state["prev_key"],
saved_state["prev_value"],
saved_state["prev_key_padding_mask"],
]
else:
self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]]
return x, attn, self_attn_state
return x, attn, None
@register_model_architecture(
"deltalm", "deltalm_base"
)
def base_architecture(args):
args.encoder_embed_dim = 768
args.encoder_ffn_embed_dim = 3072
args.encoder_layers = 12
args.encoder_attention_heads = 12
args.encoder_normalize_before = False
args.encoder_learned_pos = True
args.decoder_embed_dim = 768
args.decoder_ffn_embed_dim = 3072
args.decoder_layers = 6
args.decoder_attention_heads = 12
args.decoder_normalize_before = False
args.decoder_learned_pos = True
args.activation_fn = "gelu"
args.no_scale_embedding = True
args.layernorm_embedding = True
args.max_positions = 512
@register_model_architecture(
"deltalm", "deltalm_large"
)
def large_architecture(args):
base_architecture(args)
args.encoder_embed_dim = 1024
args.encoder_ffn_embed_dim = 4096
args.encoder_layers = 24
args.encoder_attention_heads = 16
args.encoder_normalize_before = False
args.decoder_embed_dim = 1024
args.decoder_ffn_embed_dim = 4096
args.decoder_layers = 12
args.decoder_attention_heads = 16
args.decoder_normalize_before = False
args.layernorm_embedding = False | EXA-1-master | exa/models/unilm-master/deltalm/deltalm/models/deltalm.py |
import argparse
import importlib
import os
from fairseq.models import MODEL_REGISTRY, ARCH_MODEL_INV_REGISTRY
# automatically import any Python files in the models/ directory
models_dir = os.path.dirname(__file__)
for file in os.listdir(models_dir):
path = os.path.join(models_dir, file)
if not file.startswith('_') and not file.startswith('.') and (file.endswith('.py') or os.path.isdir(path)):
model_name = file[:file.find('.py')] if file.endswith('.py') else file
module = importlib.import_module('deltalm.models.' + model_name)
# extra `model_parser` for sphinx
if model_name in MODEL_REGISTRY:
parser = argparse.ArgumentParser(add_help=False)
group_archs = parser.add_argument_group('Named architectures')
group_archs.add_argument('--arch', choices=ARCH_MODEL_INV_REGISTRY[model_name])
group_args = parser.add_argument_group('Additional command-line arguments')
MODEL_REGISTRY[model_name].add_args(group_args)
globals()[model_name + '_parser'] = parser
| EXA-1-master | exa/models/unilm-master/deltalm/deltalm/models/__init__.py |
#!/usr/bin/env python3
from setuptools import find_packages, setup
setup(
name="markuplmft",
version="0.1",
author="MarkupLM Team",
packages=find_packages(),
python_requires=">=3.7",
extras_require={"dev": ["flake8", "isort", "black"]},
) | EXA-1-master | exa/models/unilm-master/markuplm/setup.py |
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import random
import glob
import timeit
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler)
from torch.utils.data.distributed import DistributedSampler
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
get_linear_schedule_with_warmup,
)
from markuplmft.models.markuplm import MarkupLMConfig, MarkupLMTokenizer, MarkupLMTokenizerFast, MarkupLMForQuestionAnswering
from utils import StrucDataset
from utils import (read_squad_examples, convert_examples_to_features, RawResult, write_predictions)
from utils_evaluate import EvalOpts, main as evaluate_on_squad
logger = logging.getLogger(__name__)
def set_seed(args):
r"""
    Fix the random seeds for reproducibility.
"""
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def to_list(tensor):
return tensor.detach().cpu().tolist()
def train(args, train_dataset, model, tokenizer):
r"""
Train the model
"""
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
else:
tb_writer = None
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=int(args.warmup_ratio * t_total),
num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for epoch in train_iterator:
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2],
'xpath_tags_seq': batch[3],
'xpath_subs_seq': batch[4],
'start_positions': batch[5],
'end_positions': batch[6],
}
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
                    if args.local_rank == -1 and args.evaluate_during_training:
                        # evaluate() also needs the model's max_depth to rebuild the eval features
                        eval_model = model.module if hasattr(model, 'module') else model
                        results = evaluate(args, model, tokenizer,
                                           max_depth=eval_model.config.max_depth,
                                           prefix=str(global_step))
for key, value in results.items():
tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
# Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
if 0 < args.max_steps < global_step:
epoch_iterator.close()
break
if 0 < args.max_steps < global_step:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
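# Hedged helper functions (illustrative only, never called by this script) restating how
# train() above derives the optimizer-update count and the effective batch size it logs.
def _example_total_optimization_steps(batches_per_epoch, grad_accum_steps, num_epochs, max_steps=-1):
    # mirrors the t_total computation at the top of train()
    if max_steps > 0:
        return max_steps
    return batches_per_epoch // grad_accum_steps * num_epochs


def _example_effective_batch_size(per_gpu_batch_size, n_gpu, grad_accum_steps, world_size=1):
    # mirrors the "Total train batch size" value logged in train()
    return per_gpu_batch_size * max(1, n_gpu) * grad_accum_steps * world_size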
def evaluate(args, model, tokenizer, max_depth, prefix=""):
r"""
Evaluate the model
"""
dataset, examples, features = load_and_cache_examples(args, tokenizer, max_depth=max_depth, evaluate=True,
output_examples=True)
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size,
                                 num_workers=args.dataloader_workers)
# multi-gpu evaluate
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
all_results = []
start_time = timeit.default_timer()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2],
'xpath_tags_seq': batch[4],
'xpath_subs_seq': batch[5],
}
feature_indices = batch[3]
outputs = model(**inputs)
for i, feature_index in enumerate(feature_indices):
eval_feature = features[feature_index.item()]
unique_id = int(eval_feature.unique_id)
result = RawResult(unique_id=unique_id,
start_logits=to_list(outputs[0][i]),
end_logits=to_list(outputs[1][i]))
all_results.append(result)
eval_time = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", eval_time, eval_time / len(dataset))
# Compute predictions
output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix))
output_tag_prediction_file = os.path.join(args.output_dir, "tag_predictions_{}.json".format(prefix))
output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix))
output_result_file = os.path.join(args.output_dir, "qas_eval_results_{}.json".format(prefix))
output_file = os.path.join(args.output_dir, "eval_matrix_results_{}".format(prefix))
write_predictions(examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case,
output_prediction_file, output_tag_prediction_file, output_nbest_file, args.verbose_logging,
tokenizer)
# Evaluate
evaluate_options = EvalOpts(data_file=args.predict_file,
root_dir=args.root_dir,
pred_file=output_prediction_file,
tag_pred_file=output_tag_prediction_file,
result_file=output_result_file,
out_file=output_file)
results = evaluate_on_squad(evaluate_options)
return results
def load_and_cache_examples(args, tokenizer, max_depth=50, evaluate=False, output_examples=False):
r"""
Load and process the raw data.
"""
if args.local_rank not in [-1, 0] and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset,
# and the others will use the cache
# Load data features from cache or dataset file
input_file = args.predict_file if evaluate else args.train_file
cached_features_file = os.path.join(os.path.dirname(input_file), 'cached', 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train',
"markuplm",
str(args.max_seq_length),
str(max_depth)
))
if not os.path.exists(os.path.dirname(cached_features_file)):
os.makedirs(os.path.dirname(cached_features_file))
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
if output_examples:
examples, tag_list = read_squad_examples(input_file=input_file,
root_dir=args.root_dir,
is_training=not evaluate,
tokenizer=tokenizer,
simplify=True,
max_depth=max_depth
)
else:
examples = None
else:
logger.info("Creating features from dataset file at %s", input_file)
examples, _ = read_squad_examples(input_file=input_file,
root_dir=args.root_dir,
is_training=not evaluate,
tokenizer=tokenizer,
simplify=False,
max_depth=max_depth)
features = convert_examples_to_features(examples=examples,
tokenizer=tokenizer,
max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride,
max_query_length=args.max_query_length,
is_training=not evaluate,
cls_token=tokenizer.cls_token,
sep_token=tokenizer.sep_token,
pad_token=tokenizer.pad_token_id,
sequence_a_segment_id=0,
sequence_b_segment_id=0,
max_depth=max_depth)
if args.local_rank in [-1, 0] and args.save_features:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset,
# and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_xpath_tags_seq = torch.tensor([f.xpath_tags_seq for f in features], dtype=torch.long)
all_xpath_subs_seq = torch.tensor([f.xpath_subs_seq for f in features], dtype=torch.long)
if evaluate:
all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = StrucDataset(all_input_ids, all_input_mask, all_segment_ids, all_feature_index,
all_xpath_tags_seq, all_xpath_subs_seq, )
else:
all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
dataset = StrucDataset(all_input_ids, all_input_mask, all_segment_ids,
all_xpath_tags_seq, all_xpath_subs_seq,
all_start_positions, all_end_positions, )
if output_examples:
dataset = (dataset, examples, features)
return dataset
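# Note on the tensor ordering used above (matching how train() and evaluate() index `batch`):
#   training  : (input_ids, input_mask, segment_ids, xpath_tags_seq, xpath_subs_seq,
#                start_positions, end_positions)                      -> batch[0] .. batch[6]
#   evaluation: (input_ids, input_mask, segment_ids, feature_index,
#                xpath_tags_seq, xpath_subs_seq)                      -> batch[0] .. batch[5]
# A minimal usage sketch under these assumptions (illustrative only, not called by the script):
def _example_iterate_eval_batches(args, tokenizer, max_depth):
    dataset, examples, features = load_and_cache_examples(
        args, tokenizer, max_depth=max_depth, evaluate=True, output_examples=True)
    loader = DataLoader(dataset, sampler=SequentialSampler(dataset), batch_size=8)
    for batch in loader:
        input_ids, attention_mask, feature_index = batch[0], batch[1], batch[3]
        break
    return examples, features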
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--train_file", default=None, type=str, required=True,
help="json for training. E.g., train-v1.1.json")
parser.add_argument("--predict_file", default=None, type=str, required=True,
help="json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
parser.add_argument("--root_dir", default=None, type=str, required=True,
help="the root directory of the raw WebSRC dataset, which contains the HTML files.")
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pretrained model or model identifier from huggingface.co/models")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model checkpoints and predictions will be written.")
# Other parameters
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default=None, type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--max_seq_length", default=384, type=int,
help="The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--doc_stride", default=128, type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.")
parser.add_argument("--max_query_length", default=64, type=int,
help="The maximum number of tokens for the question. Questions longer than this will "
"be truncated to this length.")
parser.add_argument("--max_answer_length", default=30, type=int,
help="The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another.")
parser.add_argument("--verbose_logging", action='store_true',
help="If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Run evaluation during training at each logging step.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending "
"with step number")
parser.add_argument('--eval_from_checkpoint', type=int, default=0,
help="Only evaluate the checkpoints with prefix larger than or equal to it, beside the final "
"checkpoint with no prefix")
parser.add_argument('--eval_to_checkpoint', type=int, default=None,
help="Only evaluate the checkpoints with prefix smaller than it, beside the final checkpoint "
"with no prefix")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument("--learning_rate", default=1e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_ratio", default=0.0, type=float,
help="RT.")
parser.add_argument("--n_best_size", default=20, type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output "
"file.")
parser.add_argument('--logging_steps', type=int, default=10,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=3000,
help="Save checkpoint every X updates steps.")
parser.add_argument("--no_cuda", action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--save_features', type=bool, default=True,
                        help="whether or not to save the processed features, default is True "
                             "(note: argparse's type=bool treats any non-empty string as True)")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(
args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir))
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count() if not args.no_cuda else 0
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
# Make sure only the first process in distributed training will download model & vocab
config = MarkupLMConfig.from_pretrained(args.model_name_or_path)
logger.info("=====Config for model=====")
logger.info(str(config))
max_depth = config.max_depth
tokenizer = MarkupLMTokenizer.from_pretrained(args.model_name_or_path)
model = MarkupLMForQuestionAnswering.from_pretrained(args.model_name_or_path, config=config)
if args.local_rank == 0:
torch.distributed.barrier()
# Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is
# set. Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running
# `--fp16_opt_level="O2"` will remove the need for this code, but it is still valid.
if args.fp16:
try:
import apex
apex.amp.register_half_function(torch, 'einsum')
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, tokenizer, max_depth=max_depth, evaluate=False,
output_examples=False)
tokenizer.save_pretrained(args.output_dir)
model.to(args.device)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Save the trained model and the tokenizer
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
# Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs
logger.info("Evaluate the following checkpoints: %s", checkpoints)
config = MarkupLMConfig.from_pretrained(args.output_dir)
tokenizer = MarkupLMTokenizer.from_pretrained(args.output_dir)
for checkpoint in checkpoints:
# Reload the model
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
try:
int(global_step)
except ValueError:
global_step = ""
if global_step and int(global_step) < args.eval_from_checkpoint:
continue
if global_step and args.eval_to_checkpoint is not None and int(global_step) >= args.eval_to_checkpoint:
continue
model = MarkupLMForQuestionAnswering.from_pretrained(checkpoint, config=config)
model.to(args.device)
# Evaluate
result = evaluate(args, model, tokenizer, max_depth=max_depth, prefix=global_step)
result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items())
results.update(result)
logger.info("Results: {}".format(results))
return results
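# Hedged example invocation (paths and the model identifier are placeholders; only flags defined
# in the parser above are used):
#   python run.py --train_file websrc1.0_train_.json --predict_file websrc1.0_dev_.json \
#       --root_dir /path/to/WebSRC --model_name_or_path microsoft/markuplm-base \
#       --output_dir ./out --do_train --do_eval --per_gpu_train_batch_size 8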
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/markuplm/examples/fine_tuning/run_websrc/run.py |
import csv
import json
import argparse
import os.path as osp
import os
from operator import itemgetter
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--root_dir", default=None, type=str, required=True,
help="The root directory of the raw WebSRC dataset; The output SQuAD-style json file will also"
"be placed here.")
parser.add_argument("--version", default=None, type=str, required=True,
help="The version of the generating dataset, which will also be the name of the json file.")
parser.add_argument("--suffix", default="", type=str,
help="Other suffix to distinguish different dataset.")
return parser.parse_args()
def convert_csv_to_dict(args):
dir_list = os.walk(args.root_dir)
print('Start Converting')
data, websites, qas, answers = [], [], [], []
last_domain = None
for d, _, fs in dir_list:
for f in fs:
if f != 'dataset.csv':
continue
print('Now converting', d + '/' + f)
raw_data = list(csv.DictReader(open(osp.join(d, f))))
curr_domain = d.split('/')[-2]
if last_domain != curr_domain and last_domain is not None:
domain = {'domain': last_domain, 'websites': websites}
data.append(domain)
websites = []
last_domain = curr_domain
raw_data.sort(key=itemgetter('id'))
last = raw_data[0]
for i in range(len(raw_data)):
current = raw_data[i]
if i != 0:
qa = {'question': last['question'],
'id' : last['id'],
'answers' : answers} # , 'type': last['type']}
qas.append(qa)
answers = []
if last['id'][:-5] != current['id'][:-5]:
website = {'qas': qas, 'page_id': last['id'][2:-5]}
websites.append(website)
qas = []
answer = {'text' : current['answer'],
'element_id' : int(current['element_id']),
'answer_start': int(current['answer_start'])}
answers.append(answer)
last = current
if len(answers) > 0:
qa = {'question': last['question'],
'id' : last['id'],
'answers' : answers} # , 'type' : last['type']}
qas.append(qa)
answers = []
if len(qas) > 0:
website = {'qas': qas, 'page_id': last['id'][2:-5]}
websites.append(website)
qas = []
domain = {'domain': last_domain, 'websites': websites}
data.append(domain)
dataset = {'version': args.version, 'data': data}
print('Converting Finished\n')
return dataset
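# Illustrative sketch of the nested structure produced by convert_csv_to_dict(); all field
# values below are made up for illustration, only the keys mirror the code above.
def _example_output_record():
    return {
        "version": "websrc1.0",
        "data": [{
            "domain": "auto",
            "websites": [{
                "page_id": "0100000001",
                "qas": [{
                    "id": "au010000000100000",
                    "question": "What is the price of the car?",
                    "answers": [{"text": "$23,000", "element_id": 42, "answer_start": 0}],
                }],
            }],
        }],
    }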
def dataset_split(args, dataset):
def count(last, curr):
if last is None:
return False
if last != curr:
return False
return True
split = json.load(open(osp.join(args.root_dir, 'dataset_split.json')))
data = dataset['data']
count_website = set()
for domain in data:
for website in domain['websites']:
count_website.add(domain['domain'][0:2] + website['page_id'][0:2])
print('The number of total websites is', len(count_website))
train_list = []
dev_list, test_list = split['dev'], split['test']
for website in count_website:
if website not in dev_list and website not in test_list:
train_list.append(website)
print('The train websites list is', train_list)
print('The test websites list is', test_list)
print('The dev websites list is', dev_list)
train_data, test_data, dev_data = [], [], []
cnt = 0
for domain in data:
train_websites, test_websites, dev_websites = [], [], []
last = None
for website in domain['websites']:
if not count(last, website['page_id'][0:2]):
last = website['page_id'][0:2]
cnt += 1
name = domain['domain'][0:2] + website['page_id'][0:2]
if name in test_list:
test_websites.append(website)
continue
if name in dev_list:
dev_websites.append(website)
continue
if len(train_list) != 0 and name not in train_list:
continue
train_websites.append(website)
if len(train_websites) != 0:
train_data.append({'domain': domain['domain'], 'websites': train_websites})
if len(test_websites) != 0:
test_data.append({'domain': domain['domain'], 'websites': test_websites})
if len(dev_websites) != 0:
dev_data.append({'domain': domain['domain'], 'websites': dev_websites})
print('The number of processed websites is', cnt)
train_dataset = {'version': dataset['version'], 'data': train_data}
with open(osp.join(args.root_dir, dataset['version'] + '_train_' + args.suffix + '.json'), 'w') as f:
f.write(json.dumps(train_dataset))
test_dataset = {'version': dataset['version'], 'data': test_data}
with open(osp.join(args.root_dir, dataset['version'] + '_test_' + args.suffix + '.json'), 'w') as f:
f.write(json.dumps(test_dataset))
dev_dataset = {'version': dataset['version'], 'data': dev_data}
with open(osp.join(args.root_dir, dataset['version'] + '_dev_' + args.suffix + '.json'), 'w') as f:
f.write(json.dumps(dev_dataset))
return
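# Illustrative sketch (values assumed) of the dataset_split.json consumed above: two lists of
# 4-character website codes, each being the 2-char domain prefix plus the 2-char website prefix
# of the page id.
def _example_dataset_split():
    return {"dev": ["au08", "bo01"], "test": ["au09", "mo02"]}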
if __name__ == '__main__':
args = parse_args()
dataset = convert_csv_to_dict(args)
dataset_split(args, dataset)
| EXA-1-master | exa/models/unilm-master/markuplm/examples/fine_tuning/run_websrc/dataset_generation.py |
from __future__ import absolute_import, division, print_function
import json
import logging
import math
import collections
from io import open
from os import path as osp
from tqdm import tqdm
import bs4
from bs4 import BeautifulSoup as bs
from transformers.models.bert.tokenization_bert import BasicTokenizer, whitespace_tokenize
from torch.utils.data import Dataset
from lxml import etree
from markuplmft.data.tag_utils import tags_dict
logger = logging.getLogger(__name__)
class StrucDataset(Dataset):
"""Dataset wrapping tensors.
Each sample will be retrieved by indexing tensors along the first dimension.
Arguments:
*tensors (*torch.Tensor): tensors that have the same size of the first dimension.
"""
def __init__(self, *tensors):
tensors = tuple(tensor for tensor in tensors)
assert all(len(tensors[0]) == len(tensor) for tensor in tensors)
self.tensors = tensors
def __getitem__(self, index):
output = [tensor[index] for tensor in self.tensors]
return tuple(item for item in output)
def __len__(self):
return len(self.tensors[0])
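# Hedged usage sketch for StrucDataset (never called; toy shapes chosen only for illustration).
def _example_struc_dataset_usage():
    import torch
    input_ids = torch.zeros(4, 8, dtype=torch.long)
    input_mask = torch.ones(4, 8, dtype=torch.long)
    segment_ids = torch.zeros(4, 8, dtype=torch.long)
    dataset = StrucDataset(input_ids, input_mask, segment_ids)
    assert len(dataset) == 4
    first_ids, first_mask, first_seg = dataset[0]
    return first_ids, first_mask, first_seg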
class SRCExample(object):
r"""
The Containers for SRC Examples.
Arguments:
doc_tokens (list[str]): the original tokens of the HTML file before dividing into sub-tokens.
qas_id (str): the id of the corresponding question.
tag_num (int): the total tag number in the corresponding HTML file, including the additional 'yes' and 'no'.
question_text (str): the text of the corresponding question.
orig_answer_text (str): the answer text provided by the dataset.
all_doc_tokens (list[str]): the sub-tokens of the corresponding HTML file.
start_position (int): the position where the answer starts in the all_doc_tokens.
end_position (int): the position where the answer ends in the all_doc_tokens; NOTE that the answer tokens
include the token at end_position.
tok_to_orig_index (list[int]): the mapping from sub-tokens (all_doc_tokens) to origin tokens (doc_tokens).
orig_to_tok_index (list[int]): the mapping from origin tokens (doc_tokens) to sub-tokens (all_doc_tokens).
tok_to_tags_index (list[int]): the mapping from sub-tokens (all_doc_tokens) to the id of the deepest tag it
belongs to.
"""
    # the difference between T-PLM and H-PLM is just that H-PLM additionally inserts <xx> and </xx>
    # into the original tokens and the further-tokenized tokens
def __init__(self,
doc_tokens,
qas_id,
tag_num, # <xx> ?? </xx> is counted as one tag
question_text=None,
html_code=None,
orig_answer_text=None,
start_position=None, # in all_doc_tokens
end_position=None, # in all_doc_tokens
tok_to_orig_index=None,
orig_to_tok_index=None,
all_doc_tokens=None,
tok_to_tags_index=None,
xpath_tag_map=None,
xpath_subs_map=None,
):
self.doc_tokens = doc_tokens
self.qas_id = qas_id
self.tag_num = tag_num
self.question_text = question_text
self.html_code = html_code
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.tok_to_orig_index = tok_to_orig_index
self.orig_to_tok_index = orig_to_tok_index
self.all_doc_tokens = all_doc_tokens
self.tok_to_tags_index = tok_to_tags_index
self.xpath_tag_map = xpath_tag_map
self.xpath_subs_map = xpath_subs_map
def __str__(self):
return self.__repr__()
def __repr__(self):
"""
s = ""
s += "qas_id: %s" % self.qas_id
s += ", question_text: %s" % (
self.question_text)
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % self.start_position
if self.end_position:
s += ", end_position: %d" % self.end_position
"""
s = "[INFO]\n"
s += f"qas_id ({type(self.qas_id)}): {self.qas_id}\n"
s += f"tag_num ({type(self.tag_num)}): {self.tag_num}\n"
s += f"question_text ({type(self.question_text)}): {self.question_text}\n"
s += f"html_code ({type(self.html_code)}): {self.html_code}\n"
s += f"orig_answer_text ({type(self.orig_answer_text)}): {self.orig_answer_text}\n"
s += f"start_position ({type(self.start_position)}): {self.start_position}\n"
s += f"end_position ({type(self.end_position)}): {self.end_position}\n"
s += f"tok_to_orig_index ({type(self.tok_to_orig_index)}): {self.tok_to_orig_index}\n"
s += f"orig_to_tok_index ({type(self.orig_to_tok_index)}): {self.orig_to_tok_index}\n"
s += f"all_doc_tokens ({type(self.all_doc_tokens)}): {self.all_doc_tokens}\n"
s += f"tok_to_tags_index ({type(self.tok_to_tags_index)}): {self.tok_to_tags_index}\n"
s += f"xpath_tag_map ({type(self.xpath_tag_map)}): {self.xpath_tag_map}\n"
s += f"xpath_subs_map ({type(self.xpath_subs_map)}): {self.xpath_subs_map}\n"
s += f"tree_id_map ({type(self.tree_id_map)}): {self.tree_id_map}\n"
return s
class InputFeatures(object):
r"""
The Container for the Features of Input Doc Spans.
Arguments:
unique_id (int): the unique id of the input doc span.
example_index (int): the index of the corresponding SRC Example of the input doc span.
page_id (str): the id of the corresponding web page of the question.
doc_span_index (int): the index of the doc span among all the doc spans which corresponding to the same SRC
Example.
tokens (list[str]): the sub-tokens of the input sequence, including cls token, sep tokens, and the sub-tokens
of the question and HTML file.
token_to_orig_map (dict[int, int]): the mapping from the HTML file's sub-tokens in the sequence tokens (tokens)
to the origin tokens (all_tokens in the corresponding SRC Example).
token_is_max_context (dict[int, bool]): whether the current doc span contains the max pre- and post-context for
each HTML file's sub-tokens.
input_ids (list[int]): the ids of the sub-tokens in the input sequence (tokens).
input_mask (list[int]): use 0/1 to distinguish the input sequence from paddings.
segment_ids (list[int]): use 0/1 to distinguish the question and the HTML files.
paragraph_len (int): the length of the HTML file's sub-tokens.
start_position (int): the position where the answer starts in the input sequence (0 if the answer is not fully
in the input sequence).
end_position (int): the position where the answer ends in the input sequence; NOTE that the answer tokens
include the token at end_position (0 if the answer is not fully in the input sequence).
token_to_tag_index (list[int]): the mapping from sub-tokens of the input sequence to the id of the deepest tag
it belongs to.
is_impossible (bool): whether the answer is fully in the doc span.
"""
def __init__(self,
unique_id,
example_index,
page_id,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
paragraph_len,
start_position=None,
end_position=None,
token_to_tag_index=None,
is_impossible=None,
xpath_tags_seq=None,
xpath_subs_seq=None
):
self.unique_id = unique_id
self.example_index = example_index
self.page_id = page_id
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.paragraph_len = paragraph_len
self.start_position = start_position
self.end_position = end_position
self.token_to_tag_index = token_to_tag_index
self.is_impossible = is_impossible
self.xpath_tags_seq = xpath_tags_seq
self.xpath_subs_seq = xpath_subs_seq
def html_escape(html):
r"""
replace the special expressions in the html file for specific punctuation.
"""
html = html.replace('"', '"')
html = html.replace('&', '&')
html = html.replace('<', '<')
html = html.replace('>', '>')
html = html.replace(' ', ' ')
return html
def get_xpath4tokens(html_fn: str, unique_tids: set):
xpath_map = {}
tree = etree.parse(html_fn, etree.HTMLParser())
nodes = tree.xpath('//*')
for node in nodes:
tid = node.attrib.get("tid")
if int(tid) in unique_tids:
xpath_map[int(tid)] = tree.getpath(node)
xpath_map[len(nodes)] = "/html"
xpath_map[len(nodes) + 1] = "/html"
return xpath_map
def get_xpath_and_treeid4tokens(html_code, unique_tids, max_depth):
unknown_tag_id = len(tags_dict)
pad_tag_id = unknown_tag_id + 1
max_width = 1000
width_pad_id = 1001
pad_x_tag_seq = [pad_tag_id] * max_depth
pad_x_subs_seq = [width_pad_id] * max_depth
def xpath_soup(element):
xpath_tags = []
xpath_subscripts = []
tree_index = []
child = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
siblings = parent.find_all(child.name, recursive=False)
para_siblings = parent.find_all(True, recursive=False)
xpath_tags.append(child.name)
xpath_subscripts.append(
0 if 1 == len(siblings) else next(i for i, s in enumerate(siblings, 1) if s is child))
tree_index.append(next(i for i, s in enumerate(para_siblings, 0) if s is child))
child = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
tree_index.reverse()
return xpath_tags, xpath_subscripts, tree_index
xpath_tag_map = {}
xpath_subs_map = {}
for tid in unique_tids:
element = html_code.find(attrs={'tid': tid})
if element is None:
xpath_tags = pad_x_tag_seq
xpath_subscripts = pad_x_subs_seq
xpath_tag_map[tid] = xpath_tags
xpath_subs_map[tid] = xpath_subscripts
continue
xpath_tags, xpath_subscripts, tree_index = xpath_soup(element)
assert len(xpath_tags) == len(xpath_subscripts)
assert len(xpath_tags) == len(tree_index)
if len(xpath_tags) > max_depth:
xpath_tags = xpath_tags[-max_depth:]
xpath_subscripts = xpath_subscripts[-max_depth:]
xpath_tags = [tags_dict.get(name, unknown_tag_id) for name in xpath_tags]
xpath_subscripts = [min(i, max_width) for i in xpath_subscripts]
        # pad both sequences up to max_depth
xpath_tags += [pad_tag_id] * (max_depth - len(xpath_tags))
xpath_subscripts += [width_pad_id] * (max_depth - len(xpath_subscripts))
xpath_tag_map[tid] = xpath_tags
xpath_subs_map[tid] = xpath_subscripts
return xpath_tag_map, xpath_subs_map
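# Hedged sketch of the xpath decomposition above: for a node reachable as /html/body/div[2]/span,
# xpath_soup() yields tags ['html', 'body', 'div', 'span'] and subscripts [0, 0, 2, 0]
# (0 when a tag has no same-name siblings), which are then mapped to ids and padded to max_depth
# exactly as the helper below restates. `tags_dict` is the tag-name-to-id map imported above.
def _example_pad_xpath(xpath_tags, xpath_subscripts, max_depth=50):
    unknown_tag_id = len(tags_dict)
    pad_tag_id = unknown_tag_id + 1
    max_width, width_pad_id = 1000, 1001
    tags = [tags_dict.get(name, unknown_tag_id) for name in xpath_tags[-max_depth:]]
    subs = [min(i, max_width) for i in xpath_subscripts[-max_depth:]]
    tags += [pad_tag_id] * (max_depth - len(tags))
    subs += [width_pad_id] * (max_depth - len(subs))
    return tags, subs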
def read_squad_examples(input_file, root_dir, is_training, tokenizer, simplify=False, max_depth=50):
r"""
pre-process the data in json format into SRC Examples.
    Arguments:
        input_file (str): the input data file in json format.
        root_dir (str): the root directory of the raw WebSRC dataset, which contains the HTML files.
        is_training (bool): True if processing the training set, else False.
        tokenizer (Tokenizer): the tokenizer for the PLM in use.
        simplify (bool): when set to True, the returned Examples will only contain document tokens, the id of the
                         question-answers, and the total tag number in the corresponding html files.
        max_depth (int): the maximum xpath depth kept when building the xpath tag/subscript sequences.
    Returns:
        list[SRCExample]: the resulting SRC Examples, containing all the information needed for the feature
                          generation process, except when the argument simplify is set to True;
        set[str]: all the tag names that appeared in the processed dataset, e.g. <div>, <img/>, </p>, etc..
"""
with open(input_file, "r", encoding='utf-8') as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
def html_to_text_list(h):
tag_num, text_list = 0, []
for element in h.descendants:
if (type(element) == bs4.element.NavigableString) and (element.strip()):
text_list.append(element.strip())
if type(element) == bs4.element.Tag:
tag_num += 1
return text_list, tag_num + 2 # + 2 because we treat the additional 'yes' and 'no' as two special tags.
def e_id_to_t_id(e_id, html):
t_id = 0
for element in html.descendants:
if type(element) == bs4.element.NavigableString and element.strip():
t_id += 1
if type(element) == bs4.element.Tag:
if int(element.attrs['tid']) == e_id:
break
return t_id
def calc_num_from_raw_text_list(t_id, l):
n_char = 0
for i in range(t_id):
n_char += len(l[i]) + 1
return n_char
def word_tag_offset(html):
cnt, w_t, t_w, tags, tags_tids = 0, [], [], [], []
for element in html.descendants:
if type(element) == bs4.element.Tag:
content = ' '.join(list(element.strings)).split()
t_w.append({'start': cnt, 'len': len(content)})
tags.append('<' + element.name + '>')
tags_tids.append(element['tid'])
elif type(element) == bs4.element.NavigableString and element.strip():
text = element.split()
tid = element.parent['tid']
ind = tags_tids.index(tid)
for _ in text:
w_t.append(ind)
cnt += 1
assert cnt == len(w_t)
w_t.append(len(t_w))
w_t.append(len(t_w) + 1)
return w_t
def subtoken_tag_offset(html, s_tok):
w_t = word_tag_offset(html)
s_t = []
unique_tids = set()
for i in range(len(s_tok)):
s_t.append(w_t[s_tok[i]])
unique_tids.add(w_t[s_tok[i]])
return s_t, unique_tids
examples = []
all_tag_list = set()
total_num = sum([len(entry["websites"]) for entry in input_data])
with tqdm(total=total_num, desc="Converting websites to examples") as t:
for entry in input_data:
domain = entry["domain"]
for website in entry["websites"]:
# Generate Doc Tokens
page_id = website["page_id"]
curr_dir = osp.join(root_dir, domain, page_id[0:2], 'processed_data')
html_fn = osp.join(curr_dir, page_id + '.html')
html_file = open(html_fn).read()
html_code = bs(html_file, "html.parser")
                raw_text_list, tag_num = html_to_text_list(html_code)  # list of text strings and the tag count
doc_tokens = []
char_to_word_offset = []
page_text = ' '.join(raw_text_list)
prev_is_whitespace = True
for c in page_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
doc_tokens.append('no')
char_to_word_offset.append(len(doc_tokens) - 1)
doc_tokens.append('yes')
char_to_word_offset.append(len(doc_tokens) - 1)
tag_list = []
assert len(doc_tokens) == char_to_word_offset[-1] + 1, (len(doc_tokens), char_to_word_offset[-1])
if simplify:
for qa in website["qas"]:
qas_id = qa["id"]
example = SRCExample(doc_tokens=doc_tokens, qas_id=qas_id, tag_num=tag_num)
examples.append(example)
t.update(1)
else:
# Tokenize all doc tokens
# tokenize sth like < / >
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
if token in tag_list:
sub_tokens = [token]
else:
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
# Generate extra information for features
tok_to_tags_index, unique_tids = subtoken_tag_offset(html_code, tok_to_orig_index)
xpath_tag_map, xpath_subs_map = get_xpath_and_treeid4tokens(html_code,
unique_tids,
max_depth=max_depth)
assert tok_to_tags_index[-1] == tag_num - 1, (tok_to_tags_index[-1], tag_num - 1)
# Process each qas, which is mainly calculate the answer position
for qa in website["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
if is_training:
if len(qa["answers"]) != 1:
raise ValueError(
"For training, each question should have exactly 1 answer.")
answer = qa["answers"][0]
orig_answer_text = answer["text"]
if answer["element_id"] == -1:
num_char = len(char_to_word_offset) - 2
else:
num_char = calc_num_from_raw_text_list(e_id_to_t_id(answer["element_id"], html_code),
raw_text_list)
answer_offset = num_char + answer["answer_start"]
answer_length = len(orig_answer_text) if answer["element_id"] != -1 else 1
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join([w for w in doc_tokens[start_position:(end_position + 1)]
if (w[0] != '<' or w[-1] != '>')])
cleaned_answer_text = " ".join(whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer of question %s: '%s' vs. '%s'",
qa['id'], actual_text, cleaned_answer_text)
continue
example = SRCExample(
doc_tokens=doc_tokens,
qas_id=qas_id,
tag_num=tag_num,
question_text=question_text,
html_code=html_code,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
tok_to_orig_index=tok_to_orig_index,
orig_to_tok_index=orig_to_tok_index,
all_doc_tokens=all_doc_tokens,
tok_to_tags_index=tok_to_tags_index,
xpath_tag_map=xpath_tag_map,
xpath_subs_map=xpath_subs_map,
)
examples.append(example)
t.update(1)
return examples, all_tag_list
def convert_examples_to_features(examples, tokenizer, max_seq_length, doc_stride, max_query_length, is_training,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=0, pad_token_segment_id=0,
mask_padding_with_zero=True, max_depth=50):
r"""
Converting the SRC Examples further into the features for all the input doc spans.
Arguments:
examples (list[SRCExample]): the list of SRC Examples to process.
tokenizer (Tokenizer): the tokenizer for PLM in use.
max_seq_length (int): the max length of the total sub-token sequence, including the question, cls token, sep
tokens, and documents; if the length of the input is bigger than max_seq_length, the input
will be cut into several doc spans.
doc_stride (int): the stride length when the input is cut into several doc spans.
max_query_length (int): the max length of the sub-token sequence of the questions; the question will be truncate
if it is longer than max_query_length.
is_training (bool): True if processing the training set, else False.
cls_token (str): the cls token in use, default is '[CLS]'.
sep_token (str): the sep token in use, default is '[SEP]'.
pad_token (int): the id of the padding token in use when the total sub-token length is smaller that
max_seq_length, default is 0 which corresponding to the '[PAD]' token.
sequence_a_segment_id: the segment id for the first sequence (the question), default is 0.
sequence_b_segment_id: the segment id for the second sequence (the html file), default is 1.
cls_token_segment_id: the segment id for the cls token, default is 0.
pad_token_segment_id: the segment id for the padding tokens, default is 0.
mask_padding_with_zero: determine the pattern of the returned input mask; 0 for padding tokens and 1 for others
when True, and vice versa.
Returns:
list[InputFeatures]: the resulting input features for all the input doc spans
"""
    pad_x_tag_seq = [216] * max_depth    # hard-coded xpath tag padding id (cf. pad_tag_id in get_xpath_and_treeid4tokens)
    pad_x_subs_seq = [1001] * max_depth  # hard-coded xpath subscript padding id (width_pad_id)
unique_id = 1000000000
features = []
for (example_index, example) in enumerate(tqdm(examples, desc="Converting examples to features")):
xpath_tag_map = example.xpath_tag_map
xpath_subs_map = example.xpath_subs_map
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_start_position = None
tok_end_position = None
if is_training:
tok_start_position = example.orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = example.orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(example.all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
example.all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(example.all_doc_tokens):
length = len(example.all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(example.all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
token_to_tag_index = []
# CLS token at the beginning
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
token_to_tag_index.append(example.tag_num)
# Query
tokens += query_tokens
segment_ids += [sequence_a_segment_id] * len(query_tokens)
token_to_tag_index += [example.tag_num] * len(query_tokens)
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
token_to_tag_index.append(example.tag_num)
# Paragraph
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = example.tok_to_orig_index[split_token_index]
token_to_tag_index.append(example.tok_to_tags_index[split_token_index])
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(example.all_doc_tokens[split_token_index])
segment_ids.append(sequence_b_segment_id)
paragraph_len = doc_span.length
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_b_segment_id)
token_to_tag_index.append(example.tag_num)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(pad_token)
input_mask.append(0 if mask_padding_with_zero else 1)
segment_ids.append(pad_token_segment_id)
token_to_tag_index.append(example.tag_num)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(token_to_tag_index) == max_seq_length
span_is_impossible = False
start_position = None
end_position = None
if is_training:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
span_is_impossible = True
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
xpath_tags_seq = [xpath_tag_map.get(tid, pad_x_tag_seq) for tid in token_to_tag_index] # ok
xpath_subs_seq = [xpath_subs_map.get(tid, pad_x_subs_seq) for tid in token_to_tag_index] # ok
# we need to get extended_attention_mask
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
page_id=example.qas_id[:-5],
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
paragraph_len=paragraph_len,
start_position=start_position,
end_position=end_position,
token_to_tag_index=token_to_tag_index,
is_impossible=span_is_impossible,
xpath_tags_seq=xpath_tags_seq,
xpath_subs_seq=xpath_subs_seq,
))
unique_id += 1
return features
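# Hedged sketch of the sliding-window split performed above: spans of at most
# `max_tokens_for_doc` sub-tokens start every `doc_stride` tokens until the document is covered.
# E.g. 700 sub-tokens with max_tokens_for_doc=317 and doc_stride=128 give spans starting at
# 0, 128, 256 and 384.
def _example_doc_spans(num_subtokens, max_tokens_for_doc, doc_stride):
    spans, start = [], 0
    while start < num_subtokens:
        length = min(num_subtokens - start, max_tokens_for_doc)
        spans.append((start, length))
        if start + length == num_subtokens:
            break
        start += min(length, doc_stride)
    return spans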
# ---------- copied ! --------------
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join([w for w in doc_tokens[new_start:(new_end + 1)]
if w[0] != '<' or w[-1] != '>'])
if text_span == tok_answer_text:
return new_start, new_end
return input_start, input_end
# ---------- copied ! --------------
def _check_is_max_context(doc_spans, cur_span_index, position):
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
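# Hedged worked example of the "max context" score computed above: for a token at position p
# inside a span [start, start + length - 1], score = min(left_context, right_context) + 0.01 * length.
# With spans (0, 317) and (128, 317), a token at position 200 scores
#   span 0: min(200, 116) + 3.17 = 119.17    span 1: min(72, 244) + 3.17 = 75.17
# so span 0 is the one that keeps the token with maximum context.
def _example_max_context_score(span_start, span_length, position):
    span_end = span_start + span_length - 1
    return min(position - span_start, span_end - position) + 0.01 * span_length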
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case,
output_prediction_file, output_tag_prediction_file,
output_nbest_file, verbose_logging, tokenizer):
r"""
Compute and write down the final results, including the n best results.
Arguments:
all_examples (list[SRCExample]): all the SRC Example of the dataset; note that we only need it to provide the
mapping from example index to the question-answers id.
all_features (list[InputFeatures]): all the features for the input doc spans.
all_results (list[RawResult]): all the results from the models.
n_best_size (int): the number of the n best buffer and the final n best result saved.
max_answer_length (int): constrain the model to predict the answer no longer than it.
do_lower_case (bool): whether the model distinguish upper and lower case of the letters.
output_prediction_file (str): the file which the best answer text predictions will be written to.
output_tag_prediction_file (str): the file which the best answer tag predictions will be written to.
output_nbest_file (str): the file which the n best answer predictions including text, tag, and probabilities
will be written to.
verbose_logging (bool): if true, all of the warnings related to data processing will be printed.
"""
logger.info("Writing predictions to: %s" % output_prediction_file)
logger.info("Writing nbest to: %s" % output_nbest_file)
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit", "tag_ids"])
all_predictions = collections.OrderedDict()
all_tag_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
            # enumerate all combinations of the n-best start and end indexes
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
tag_ids = set(feature.token_to_tag_index[start_index: end_index + 1])
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index],
tag_ids=list(tag_ids)))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit", "tag_ids"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = _get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit,
tag_ids=pred.tag_ids))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0, tag_ids=[-1]))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
output["tag_ids"] = entry.tag_ids
nbest_json.append(output)
assert len(nbest_json) >= 1
best = nbest_json[0]["text"].split()
best = ' '.join([w for w in best
if (w[0] != '<' or w[-1] != '>')
and w != "<end-of-node>"
and w != tokenizer.sep_token
and w != tokenizer.cls_token])
all_predictions[example.qas_id] = best
all_tag_predictions[example.qas_id] = nbest_json[0]["tag_ids"]
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
with open(output_tag_prediction_file, 'w') as writer:
writer.write(json.dumps(all_tag_predictions, indent=4) + '\n')
return
def _get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return ns_text, ns_to_s_map
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logger.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
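# Quick sketch: with logits for four tokens, the indexes of the two highest scores are
# kept, ordered from best to worst.
#
#     _get_best_indexes([0.1, 2.3, -1.0, 0.7], n_best_size=2)  # -> [1, 3]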
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
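# Quick numeric sketch (arbitrary scores): the scores are shifted by their maximum for
# numerical stability before exponentiation, which leaves the usual softmax result.
#
#     _compute_softmax([2.0, 1.0])  # -> [0.731..., 0.268...]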
| EXA-1-master | exa/models/unilm-master/markuplm/examples/fine_tuning/run_websrc/utils.py |
import argparse
import collections
import json
import os
import re
import string
import sys
from copy import deepcopy
from bs4 import BeautifulSoup
class EvalOpts:
r"""
The options which the matrix evaluation process needs.
Arguments:
data_file (str): the SQuAD-style json file of the dataset in evaluation.
root_dir (str): the root directory of the raw WebSRC dataset, which contains the HTML files.
pred_file (str): the prediction file which contain the best predicted answer text of each question from the
model.
tag_pred_file (str): the prediction file which contain the best predicted answer tag id of each question from
the model.
result_file (str): the file to write down the matrix evaluation results of each question.
out_file (str): the file to write down the final matrix evaluation results of the whole dataset.
"""
def __init__(self, data_file, root_dir, pred_file, tag_pred_file, result_file='', out_file=""):
self.data_file = data_file
self.root_dir = root_dir
self.pred_file = pred_file
self.tag_pred_file = tag_pred_file
self.result_file = result_file
self.out_file = out_file
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('data_file', metavar='data.json', help='Input data JSON file.')
parser.add_argument('root_dir', metavar='./data', help='The root directory of the raw WebSRC dataset')
parser.add_argument('pred_file', metavar='pred.json', help='Model predictions.')
parser.add_argument('tag_pred_file', metavar='tag_pred.json', help='Model predictions.')
parser.add_argument('--result-file', '-r', metavar='qas_eval.json')
parser.add_argument('--out-file', '-o', metavar='eval.json',
help='Write accuracy metrics to file (default is stdout).')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def make_pages_list(dataset):
r"""
    Record all the pages which appear in the dataset and return the list.
"""
pages_list = []
last_page = None
for domain in dataset:
for w in domain['websites']:
for qa in w['qas']:
if last_page != qa['id'][:4]:
last_page = qa['id'][:4]
pages_list.append(last_page)
return pages_list
def make_qid_to_has_ans(dataset):
r"""
    Pick all the questions which have an answer in the dataset and return the list.
"""
qid_to_has_ans = {}
for domain in dataset:
for w in domain['websites']:
for qa in w['qas']:
qid_to_has_ans[qa['id']] = bool(qa['answers'])
return qid_to_has_ans
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
regex = re.compile(r'\b(a|an|the)\b', re.UNICODE)
return re.sub(regex, ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
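# Quick sketch of the normalization pipeline (lowercase -> strip punctuation ->
# drop articles -> collapse whitespace):
#
#     normalize_answer(" The  Quick, Brown Fox! ")  # -> "quick brown fox"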
def get_tokens(s):
r"""
Get the word list in the input.
"""
if not s:
return []
return normalize_answer(s).split()
def compute_exact(a_gold, a_pred):
r"""
Calculate the exact match.
"""
if normalize_answer(a_gold) == normalize_answer(a_pred):
return 1
return 0
def compute_f1(a_gold, a_pred):
r"""
Calculate the f1 score.
"""
gold_toks = get_tokens(a_gold)
pred_toks = get_tokens(a_pred)
common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
num_same = sum(common.values())
if len(gold_toks) == 0 or len(pred_toks) == 0:
# If either is no-answer, then F1 is 1 if they agree, 0 otherwise
return int(gold_toks == pred_toks)
if num_same == 0:
return 0
precision = 1.0 * num_same / len(pred_toks)
recall = 1.0 * num_same / len(gold_toks)
f1 = (2 * precision * recall) / (precision + recall)
return f1
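# Worked example with made-up strings: after normalization drops the articles, 2 of the
# 3 predicted tokens overlap the 2 gold tokens, so precision = 2/3, recall = 1, F1 = 0.8.
#
#     compute_f1("the brown fox", "a brown fox jumped")  # -> 0.8 (up to float rounding)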
def compute_pos(f, t_gold, addition, t_pred):
r"""
Calculate the POS score.
Arguments:
f (str): the html file on which the question is based.
t_gold (int): the gold answer tag id provided by the dataset (the value correspond to the key element_id).
addition (int): the addition information used for yes/no question provided by the dataset (the value
corresponding to the key answer_start).
t_pred (list[int]): the tag ids of the tags corresponding the each word in the predicted answer.
Returns:
float: the POS score.
"""
    with open(f) as fp:
        h = BeautifulSoup(fp, "lxml")
p_gold, e_gold = set(), h.find(tid=t_gold)
if e_gold is None:
if len(t_pred) != 1:
return 0
else:
t = t_pred[0]
e_pred, e_prev = h.find(tid=t), h.find(tid=t-1)
if (e_pred is not None) or (addition == 1 and e_prev is not None) or\
(addition == 0 and e_prev is None):
return 0
else:
return 1
else:
p_gold.add(e_gold['tid'])
for e in e_gold.parents:
if int(e['tid']) < 2:
break
p_gold.add(e['tid'])
p = None
for t in t_pred:
p_pred, e_pred = set(), h.find(tid=t)
if e_pred is not None:
p_pred.add(e_pred['tid'])
if e_pred.name != 'html':
for e in e_pred.parents:
if int(e['tid']) < 2:
break
p_pred.add(e['tid'])
else:
p_pred.add(str(t))
if p is None:
p = p_pred
else:
                p = p & p_pred  # the common ancestor tags shared by the predicted nodes, excluding html & body
return len(p_gold & p) / len(p_gold | p)
def get_raw_scores(dataset, preds, tag_preds, root_dir):
r"""
Calculate all the three matrix (exact match, f1, POS) for each question.
Arguments:
dataset (dict): the dataset in use.
preds (dict): the answer text prediction for each question in the dataset.
tag_preds (dict): the answer tags prediction for each question in the dataset.
root_dir (str): the base directory for the html files.
Returns:
tuple(dict, dict, dict): exact match, f1, pos scores for each question.
"""
exact_scores = {}
f1_scores = {}
pos_scores = {}
for websites in dataset:
for w in websites['websites']:
f = os.path.join(root_dir, websites['domain'], w['page_id'][0:2], 'processed_data',
w['page_id'] + '.html')
for qa in w['qas']:
qid = qa['id']
gold_answers = [a['text'] for a in qa['answers']
if normalize_answer(a['text'])]
gold_tag_answers = [a['element_id'] for a in qa['answers']]
additional_tag_information = [a['answer_start'] for a in qa['answers']]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = ['']
if qid not in preds:
print('Missing prediction for %s' % qid)
continue
a_pred, t_pred = preds[qid], tag_preds[qid]
# Take max over all gold answers
exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
pos_scores[qid] = max(compute_pos(f, t, a, t_pred)
for t, a in zip(gold_tag_answers, additional_tag_information))
return exact_scores, f1_scores, pos_scores
def make_eval_dict(exact_scores, f1_scores, pos_scores, qid_list=None):
r"""
Make the dictionary to show the evaluation results.
"""
if qid_list is None:
total = len(exact_scores)
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores.values()) / total),
('f1', 100.0 * sum(f1_scores.values()) / total),
('pos', 100.0 * sum(pos_scores.values()) / total),
('total', total),
])
else:
total = len(qid_list)
if total == 0:
return collections.OrderedDict([
('exact', 0),
('f1', 0),
('pos', 0),
('total', 0),
])
return collections.OrderedDict([
('exact', 100.0 * sum(exact_scores[k] for k in qid_list) / total),
('f1', 100.0 * sum(f1_scores[k] for k in qid_list) / total),
('pos', 100.0 * sum(pos_scores[k] for k in qid_list) / total),
('total', total),
])
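# Tiny sketch with two hypothetical question ids: the dict reports percentages plus the count.
#
#     make_eval_dict({"q1": 1, "q2": 0}, {"q1": 1.0, "q2": 0.5}, {"q1": 1.0, "q2": 0.75})
#     # -> OrderedDict([('exact', 50.0), ('f1', 75.0), ('pos', 87.5), ('total', 2)])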
def merge_eval(main_eval, new_eval, prefix):
for k in new_eval:
main_eval['%s_%s' % (prefix, k)] = new_eval[k]
def main(opts):
with open(opts.data_file) as f:
dataset_json = json.load(f)
dataset = dataset_json['data']
if isinstance(opts.pred_file, str):
with open(opts.pred_file) as f:
preds = json.load(f)
else:
preds = opts.pred_file
if isinstance(opts.tag_pred_file, str):
with open(opts.tag_pred_file) as f:
tag_preds = json.load(f)
else:
tag_preds = opts.tag_pred_file
qid_to_has_ans = make_qid_to_has_ans(dataset)
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact, f1, pos = get_raw_scores(dataset, preds, tag_preds, opts.root_dir)
out_eval = make_eval_dict(exact, f1, pos)
if has_ans_qids:
has_ans_eval = make_eval_dict(exact, f1, pos, qid_list=has_ans_qids)
merge_eval(out_eval, has_ans_eval, 'HasAns')
if no_ans_qids:
no_ans_eval = make_eval_dict(exact, f1, pos, qid_list=no_ans_qids)
merge_eval(out_eval, no_ans_eval, 'NoAns')
print(json.dumps(out_eval, indent=2))
pages_list, write_eval = make_pages_list(dataset), deepcopy(out_eval)
for p in pages_list:
pages_ans_qids = [k for k, _ in qid_to_has_ans.items() if p in k]
page_eval = make_eval_dict(exact, f1, pos, qid_list=pages_ans_qids)
merge_eval(write_eval, page_eval, p)
if opts.result_file:
with open(opts.result_file, 'w') as f:
w = {}
for k, v in qid_to_has_ans.items():
w[k] = {'exact': exact[k], 'f1': f1[k], 'pos': pos[k]}
json.dump(w, f)
if opts.out_file:
with open(opts.out_file, 'w') as f:
json.dump(write_eval, f)
return out_eval
if __name__ == '__main__':
    main(parse_args())
| EXA-1-master | exa/models/unilm-master/markuplm/examples/fine_tuning/run_websrc/utils_evaluate.py |
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import random
import glob
import timeit
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler)
from torch.utils.data.distributed import DistributedSampler
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
get_linear_schedule_with_warmup,
)
from markuplmft.models.markuplm import MarkupLMConfig, MarkupLMTokenizer, MarkupLMTokenizerFast, MarkupLMForQuestionAnswering
from utils import StrucDataset
from utils import (read_squad_examples, convert_examples_to_features, RawResult, write_predictions)
from utils_evaluate import EvalOpts, main as evaluate_on_squad
logger = logging.getLogger(__name__)
if __name__ == '__main__':
mp = "../../../../../results/markuplm-base"
op = "./moli"
config = MarkupLMConfig.from_pretrained(mp)
logger.info("=====Config for model=====")
logger.info(str(config))
max_depth = config.max_depth
tokenizer = MarkupLMTokenizer.from_pretrained(mp)
model = MarkupLMForQuestionAnswering.from_pretrained(mp, config=config)
tokenizer.save_pretrained(op) | EXA-1-master | exa/models/unilm-master/markuplm/examples/fine_tuning/run_websrc/draft.py |
from __future__ import absolute_import, division, print_function
import argparse
import logging
import os
import random
import glob
import numpy as np
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler)
from torch.utils.data.distributed import DistributedSampler
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
get_linear_schedule_with_warmup,
)
from markuplmft.models.markuplm import MarkupLMConfig, MarkupLMTokenizer, MarkupLMForTokenClassification
from utils import get_swde_features, SwdeDataset
from eval_utils import page_level_constraint
import constants
import torch
import copy
logger = logging.getLogger(__name__)
def set_seed(args):
r"""
Fix the random seed for reproduction.
"""
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def to_list(tensor):
return tensor.detach().cpu().tolist()
def train(args, train_dataset, model, tokenizer, sub_output_dir):
r"""
Train the model
"""
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
else:
tb_writer = None
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset,
sampler=train_sampler,
batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=args.learning_rate,
eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps=int(args.warmup_ratio * t_total),
num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for epoch in train_iterator:
if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler):
train_dataloader.sampler.set_epoch(epoch)
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2],
'xpath_tags_seq': batch[3],
'xpath_subs_seq': batch[4],
'labels': batch[5],
}
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
# Log metrics
if args.local_rank == -1 and args.evaluate_during_training:
                        raise ValueError("`evaluate_during_training` should not be used when fine-tuning on SWDE!")
# results = evaluate(args, model, tokenizer, prefix=str(global_step))
# for key, value in results.items():
# tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss) / args.logging_steps, global_step)
logging_loss = tr_loss
if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
# Save model checkpoint
output_dir = os.path.join(sub_output_dir, 'checkpoint-{}'.format(global_step))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model
# Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
if 0 < args.max_steps < global_step:
epoch_iterator.close()
break
if 0 < args.max_steps < global_step:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def eval_on_one_website(args, model, website, sub_output_dir, prefix=""):
dataset, info = get_dataset_and_info_for_websites([website], evaluate=True)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
# In our setting, we should not apply DDP
eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset)
eval_dataloader = DataLoader(dataset,
sampler=eval_sampler,
batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
all_logits = []
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'token_type_ids': batch[2],
'xpath_tags_seq': batch[3],
'xpath_subs_seq': batch[4],
}
outputs = model(**inputs)
logits = outputs["logits"] # which is (bs,seq_len,node_type)
all_logits.append(logits.detach().cpu())
all_probs = torch.softmax(torch.cat(all_logits, dim=0), dim=2) # (all_samples, seq_len, node_type)
assert len(all_probs) == len(info)
all_res = {}
for sub_prob, sub_info in zip(all_probs, info):
html_path, involved_first_tokens_pos, \
involved_first_tokens_xpaths, involved_first_tokens_types, \
involved_first_tokens_text = sub_info
if html_path not in all_res:
all_res[html_path] = {}
for pos, xpath, type,text in zip(involved_first_tokens_pos, involved_first_tokens_xpaths,
involved_first_tokens_types, involved_first_tokens_text):
pred = sub_prob[pos] # (node_type_size)
if xpath not in all_res[html_path]:
all_res[html_path][xpath] = {}
all_res[html_path][xpath]["pred"] = pred
all_res[html_path][xpath]["truth"] = type
all_res[html_path][xpath]["text"] = text
else:
all_res[html_path][xpath]["pred"] += pred
assert all_res[html_path][xpath]["truth"] == type
assert all_res[html_path][xpath]["text"] == text
    # all_res is now fully built; next, write out the per-node predictions
lines = []
for html_path in all_res:
for xpath in all_res[html_path]:
final_probs = all_res[html_path][xpath]["pred"] / torch.sum(all_res[html_path][xpath]["pred"])
pred_id = torch.argmax(final_probs).item()
pred_type = constants.ATTRIBUTES_PLUS_NONE[args.vertical][pred_id]
final_probs = final_probs.numpy().tolist()
s = "\t".join([
html_path,
xpath,
all_res[html_path][xpath]["text"],
all_res[html_path][xpath]["truth"],
pred_type,
",".join([str(score) for score in final_probs]),
])
lines.append(s)
res = page_level_constraint(args.vertical, website, lines, sub_output_dir)
return res # (precision, recall, f1)
def evaluate(args, model, test_websites, sub_output_dir, prefix=""):
r"""
Evaluate the model
"""
all_eval_res = {}
all_precision = []
all_recall = []
all_f1 = []
for website in test_websites:
res_on_one_website = eval_on_one_website(args, model, website, sub_output_dir, prefix)
all_precision.append(res_on_one_website[0])
all_recall.append(res_on_one_website[1])
all_f1.append(res_on_one_website[2])
return {"precision": sum(all_precision) / len(all_precision),
"recall": sum(all_recall) / len(all_recall),
"f1": sum(all_f1) / len(all_f1),
}
def load_and_cache_one_website(args, tokenizer, website):
cached_features_file = os.path.join(
args.root_dir,
"cached",
args.vertical,
website,
f"cached_markuplm_{str(args.max_seq_length)}_pages{args.n_pages}_prevnodes{args.prev_nodes_into_account}"
)
if not os.path.exists(os.path.dirname(cached_features_file)):
os.makedirs(os.path.dirname(cached_features_file))
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info(
f"Creating features for {args.vertical}-{website}-pages{args.n_pages}_prevnodes{args.prev_nodes_into_account}")
features = get_swde_features(root_dir=args.root_dir,
vertical=args.vertical,
website=website,
tokenizer=tokenizer,
doc_stride=args.doc_stride,
max_length=args.max_seq_length,
prev_nodes=args.prev_nodes_into_account,
n_pages=args.n_pages)
if args.local_rank in [-1, 0] and args.save_features:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
return features
def load_and_cache_examples(args, tokenizer, websites):
r"""
Load and process the raw data.
"""
# if args.local_rank not in [-1, 0]:
# torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset,
# and the others will use the cache
feature_dicts = {}
for website in websites:
features_per_website = load_and_cache_one_website(args, tokenizer, website)
feature_dicts[website] = features_per_website
return feature_dicts
def get_dataset_and_info_for_websites(websites, evaluate=False):
"""
Args:
websites: a list of websites
Returns:
        a (dataset, info) tuple: `info` is None during training, and a list of per-feature
        metadata (html path, first-token positions, xpaths, types, texts) during evaluation
"""
all_features = []
for website in websites:
features_per_website = global_feature_dicts[website]
all_features += features_per_website
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in all_features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in all_features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in all_features], dtype=torch.long)
all_xpath_tags_seq = torch.tensor([f.xpath_tags_seq for f in all_features], dtype=torch.long)
all_xpath_subs_seq = torch.tensor([f.xpath_subs_seq for f in all_features], dtype=torch.long)
if not evaluate:
all_labels = torch.tensor([f.labels for f in all_features], dtype=torch.long)
dataset = SwdeDataset(all_input_ids=all_input_ids,
all_attention_mask=all_attention_mask,
all_token_type_ids=all_token_type_ids,
all_xpath_tags_seq=all_xpath_tags_seq,
all_xpath_subs_seq=all_xpath_subs_seq,
all_labels=all_labels)
info = None
else:
# in evaluation, we do not add labels
dataset = SwdeDataset(all_input_ids=all_input_ids,
all_attention_mask=all_attention_mask,
all_token_type_ids=all_token_type_ids,
all_xpath_tags_seq=all_xpath_tags_seq,
all_xpath_subs_seq=all_xpath_subs_seq)
info = [(f.html_path,
f.involved_first_tokens_pos,
f.involved_first_tokens_xpaths,
f.involved_first_tokens_types,
f.involved_first_tokens_text) for f in all_features]
return dataset, info
def do_something(train_websites, test_websites, args, config, tokenizer):
# before each run, we reset the seed
set_seed(args)
model = MarkupLMForTokenClassification.from_pretrained(args.model_name_or_path, config=config)
model.resize_token_embeddings(len(tokenizer))
sub_output_dir = os.path.join(args.output_dir,
args.vertical,
f"seed-{args.n_seed}_pages-{args.n_pages}",
"-".join(train_websites))
# if args.local_rank == 0:
# torch.distributed.barrier()
# Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is
# set. Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running
# `--fp16_opt_level="O2"` will remove the need for this code, but it is still valid.
if args.fp16:
try:
import apex
apex.amp.register_half_function(torch, 'einsum')
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
# Training
if args.do_train:
train_dataset, _ = get_dataset_and_info_for_websites(train_websites)
tokenizer.save_pretrained(sub_output_dir)
model.to(args.device)
global_step, tr_loss = train(args, train_dataset, model, tokenizer, sub_output_dir)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Save the trained model and the tokenizer
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(sub_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(sub_output_dir)
logger.info("Saving model checkpoint to %s", sub_output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, 'module') else model
model_to_save.save_pretrained(sub_output_dir)
tokenizer.save_pretrained(sub_output_dir)
torch.save(args, os.path.join(sub_output_dir, 'training_args.bin'))
# Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
checkpoints = [sub_output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c) for c in sorted(glob.glob(sub_output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce model loading logs
logger.info("Evaluate the following checkpoints: %s", checkpoints)
config = MarkupLMConfig.from_pretrained(sub_output_dir)
tokenizer = MarkupLMTokenizer.from_pretrained(sub_output_dir)
for checkpoint in checkpoints:
# Reload the model
global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
try:
int(global_step)
except ValueError:
global_step = ""
if global_step and int(global_step) < args.eval_from_checkpoint:
continue
if global_step and args.eval_to_checkpoint is not None and int(global_step) >= args.eval_to_checkpoint:
continue
model = MarkupLMForTokenClassification.from_pretrained(checkpoint, config=config)
model.to(args.device)
# Evaluate
result = evaluate(args, model, test_websites, sub_output_dir, prefix=global_step)
result = dict((k + ('_{}'.format(global_step) if global_step else ''), v) for k, v in result.items())
results.update(result)
logger.info("Results: {}".format(results))
return results
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--root_dir", default=None, type=str, required=True,
                        help="the root directory of the pre-processed SWDE dataset, "
                             "which contains files like `book-abebooks-2000.pickle`")
parser.add_argument("--vertical", default="book", type=str,
                        help="Which vertical to train and test on. "
                             "Multiple verticals in one run are not supported yet.")
parser.add_argument("--n_seed", default=2, type=int,
help="number of seed pages")
parser.add_argument("--n_pages", default=2000, type=int,
help="number of pages in each website, set a small number for debugging")
parser.add_argument("--prev_nodes_into_account", default=4, type=int,
                        help="how many previous nodes before a variable node to take into account; "
                             "a larger value means more context")
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pretrained model or model identifier from huggingface.co/models")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model checkpoints and predictions will be written.")
# Other parameters
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default=None, type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=384, type=int,
help="The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--doc_stride", default=128, type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.")
parser.add_argument("--verbose_logging", action='store_true',
                        help="If true, all of the warnings related to data processing will be printed. "
                             "A number of warnings are expected during normal preprocessing.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Run evaluation during training at each logging step.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name "
                             "and ending with a step number")
parser.add_argument('--eval_from_checkpoint', type=int, default=0,
help="Only evaluate the checkpoints with prefix larger than or equal to it, beside the final "
"checkpoint with no prefix")
parser.add_argument('--eval_to_checkpoint', type=int, default=None,
help="Only evaluate the checkpoints with prefix smaller than it, beside the final checkpoint "
"with no prefix")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument("--learning_rate", default=1e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_ratio", default=0.0, type=float,
help="Linear warmup ratio over all steps")
parser.add_argument('--logging_steps', type=int, default=10,
help="Log every X updates steps.")
parser.add_argument('--save_steps', type=int, default=3000,
help="Save checkpoint every X updates steps.")
parser.add_argument("--no_cuda", action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--save_features', default=True,
                        type=lambda s: str(s).lower() not in ('false', '0', 'no'),
                        help="whether or not to save the processed features; default is True "
                             "(pass 'False', '0' or 'no' to disable)")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--server_ip', type=str, default='', help="Can be used for distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="Can be used for distant debugging.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(
args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir))
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count() if not args.no_cuda else 0
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
# Make sure only the first process in distributed training will download model & vocab
config = MarkupLMConfig.from_pretrained(args.model_name_or_path)
config_dict = config.to_dict()
config_dict.update({"node_type_size": len(constants.ATTRIBUTES_PLUS_NONE[args.vertical])})
config = MarkupLMConfig.from_dict(config_dict)
tokenizer = MarkupLMTokenizer.from_pretrained(args.model_name_or_path)
# first we load the features
feature_dicts = load_and_cache_examples(args=args,
tokenizer=tokenizer,
websites=constants.VERTICAL_WEBSITES[args.vertical],
)
global global_feature_dicts
global_feature_dicts = feature_dicts
all_precision = []
all_recall = []
all_f1 = []
for i in range(10):
wid_start = i
wid_end = i + args.n_seed
train_websites = []
test_websites = []
for wid in range(wid_start, wid_end):
wwid = wid % 10
train_websites.append(constants.VERTICAL_WEBSITES[args.vertical][wwid])
for website in constants.VERTICAL_WEBSITES[args.vertical]:
if website not in train_websites:
test_websites.append(website)
ori_config = copy.deepcopy(config)
ori_tokenizer = copy.deepcopy(tokenizer)
eval_res = do_something(train_websites, test_websites, args, config, tokenizer)
all_precision.append(eval_res["precision"])
all_recall.append(eval_res["recall"])
all_f1.append(eval_res["f1"])
config = ori_config
tokenizer = ori_tokenizer
p = sum(all_precision) / len(all_precision)
r = sum(all_recall) / len(all_recall)
f = sum(all_f1) / len(all_f1)
logger.info("=================FINAL RESULTS=================")
logger.info(f"Precision : {p}")
logger.info(f"Recall : {r}")
logger.info(f"F1 : {f}")
res_file = os.path.join(args.output_dir, f"{args.vertical}-all-10-runs-score.txt")
with open(res_file, "w") as fio:
fio.write(f"Precision : {p}\nRecall : {r}\nF1 : {f}\n")
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/markuplm/examples/fine_tuning/run_swde/run.py |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Extracting XPaths of the values of all fields for SWDE dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import pickle
import random
import re
import sys
import unicodedata
from absl import app
from absl import flags
import lxml
from lxml import etree
from lxml.html.clean import Cleaner
from tqdm import tqdm
import constants
import multiprocessing as mp
FLAGS = flags.FLAGS
random.seed(42)
flags.DEFINE_integer("n_pages", 2000, "The maximum number of pages to read.")
flags.DEFINE_string(
"input_groundtruth_path", "",
"The root path to parent folder of all ground truth files.")
flags.DEFINE_string("input_pickle_path", "",
"The root path to pickle file of swde html content.")
flags.DEFINE_string(
"output_data_path", "",
"The path of the output file containing both the input sequences and "
"output sequences of the sequence tagging version of swde dataset.")
def clean_spaces(text):
r"""Clean extra spaces in a string.
Example:
input: " asd qwe " --> output: "asd qwe"
input: " asd\t qwe " --> output: "asd qwe"
Args:
text: the input string with potentially extra spaces.
Returns:
a string containing only the necessary spaces.
"""
return " ".join(re.split(r"\s+", text.strip()))
def clean_format_str(text):
"""Cleans unicode control symbols, non-ascii chars, and extra blanks."""
text = "".join(ch for ch in text if unicodedata.category(ch)[0] != "C")
text = "".join([c if ord(c) < 128 else "" for c in text])
text = clean_spaces(text)
return text
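# Quick sketch: control characters (here a zero-width space) and non-ASCII characters are
# dropped, then extra blanks are collapsed.
#
#   clean_format_str("Caf\u00e9\u200b  price:  $4.99")  # -> "Caf price: $4.99"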
def non_ascii_equal(website, field, value, node_text):
"""Compares value and node_text by their non-ascii texts.
Website/field are used for handling special cases.
Args:
website: the website that the value belongs to, used for dealing with
special cases.
field: the field that the value belongs to, used for dealing with special
cases.
value: the value string that we want to compare.
node_text: the clean text of the node that we want to compare.
Returns:
a boolean variable indicating if the value and node_text are equal.
"""
value = clean_format_str(value)
node_text = clean_format_str(node_text)
# A special case in the ALLMOVIE website's MPAA_RATING,
# the truth values are not complete but only the first character.
# For example, truth value in the file:"P", which should be "PG13" in htmls.
# Note that the length of the truth should be less than 5.
if website == "allmovie" and field == "mpaa_rating" and len(node_text) <= 5:
return node_text.strip().startswith(value.strip())
# A special case in the AMCTV website, DIRECTOR field.
# The name are not complete in the truth values.
# E.g. truth value in files, "Roy Hill" and real value: "Geogre Roy Hill".
if website == "amctv" and field == "director":
return node_text.strip().endswith(value.strip())
return value.strip() == node_text.strip()
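# Two illustrative calls (the website/field names only route the special cases; the
# values are made up):
#
#   non_ascii_equal("allmovie", "mpaa_rating", "P", "PG13")  # -> True (prefix rule)
#   non_ascii_equal("abebooks", "title", "A  People's  History",
#                   "A People's History")                    # -> True (spaces collapsed)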
def match_value_node(node, node_text, current_xpath_data, overall_xpath_dict,
text_part_flag, groundtruth_value, matched_xpaths, website,
field, dom_tree, current_page_nodes_in_order, is_truth_value_list):
"""Matches the ground truth value with a specific node in the domtree.
In the function, the current_xpath_data, overall_xpath_dict, matched_xpaths
will be updated.
Args:
is_truth_value_list: [], indicate which node is the truth-value
current_page_nodes_in_order: [(text, xpath)] seq
node: the node on the domtree that we are going to match.
node_text: the text inside this node.
current_xpath_data: the dictionary of the xpaths of the current domtree.
overall_xpath_dict: the dictionary of the xpaths of the current website.
text_part_flag: to match the "text" or the "tail" part of the node.
groundtruth_value: the value of our interest to match.
matched_xpaths: the existing matched xpaths list for this value on domtree.
website: the website where the value is from.
field: the field where the value is from.
dom_tree: the current domtree object, used for getting paths.
"""
assert text_part_flag in ["node_text", "node_tail_text"]
# Dealing with the cases with multiple <br>s in the node text,
# where we need to split and create new tags of matched_xpaths.
# For example, "<div><span>asd<br/>qwe</span></div>"
len_brs = len(node_text.split("--BRRB--")) # The number of the <br>s.
for index, etext in enumerate(node_text.split("--BRRB--")):
if text_part_flag == "node_text":
xpath = dom_tree.getpath(node)
elif text_part_flag == "node_tail_text":
xpath = dom_tree.getpath(node) + "/tail"
if len_brs >= 2:
xpath += "/br[%d]" % (index + 1) # E.g. /div/span/br[1]
clean_etext = clean_spaces(etext)
# Update the dictionary.
current_xpath_data[xpath] = clean_etext
overall_xpath_dict[xpath].add(clean_etext)
current_page_nodes_in_order.append((clean_etext, xpath))
# Exactly match the text.
if non_ascii_equal(website, field, groundtruth_value, clean_etext):
matched_xpaths.append(xpath)
is_truth_value_list.append(len(current_page_nodes_in_order) - 1)
    # Three things are updated here:
    #  - if the current node text equals the truth value, the current xpath is appended to
    #    matched_xpaths (and its position in current_page_nodes_in_order is recorded in
    #    is_truth_value_list);
    #  - current_xpath_data[xpath] = clean_etext records the text for this xpath on the current page;
    #  - overall_xpath_dict[xpath].add(clean_etext) accumulates, as a set, the texts seen for this
    #    xpath across the whole website.
def get_value_xpaths(dom_tree,
truth_value,
overall_xpath_dict,
website="",
field=""):
"""Gets a list of xpaths that contain a text truth_value in DOMTree objects.
Args:
dom_tree: the DOMTree object of a specific HTML page.
truth_value: a certain groundtruth value.
overall_xpath_dict: a dict maintaining all xpaths data of a website.
website: the website name.
field: the field name.
Returns:
xpaths: a list of xpaths containing the truth_value exactly as inner texts.
    current_xpath_data: the xpaths and corresponding values in this DOMTree.
    current_page_nodes_in_order: a list of (text, xpath) pairs in document order.
    is_truth_value_list: indices into current_page_nodes_in_order whose text matches the truth value.
"""
  if not truth_value:
    # Some values are empty strings that are not in the DOMTree; return empty
    # structures with the same shape as the normal return value.
    return [], dict(), [], []
xpaths = [] # The resulting list of xpaths to be returned.
current_xpath_data = dict() # The resulting dictionary to save all page data.
current_page_nodes_in_order = []
is_truth_value_list = []
# Some values contains HTML tags and special strings like " "
# So we need to escape the HTML by parsing and then extract the inner text.
value_dom = lxml.html.fromstring(truth_value)
value = " ".join(etree.XPath("//text()")(value_dom))
value = clean_spaces(value)
# Iterate all the nodes in the given DOMTree.
for e in dom_tree.iter():
# The value can only be matched in the text of the node or the tail.
if e.text:
match_value_node(
e,
e.text,
current_xpath_data,
overall_xpath_dict,
text_part_flag="node_text",
groundtruth_value=value,
matched_xpaths=xpaths,
website=website,
field=field,
dom_tree=dom_tree,
current_page_nodes_in_order=current_page_nodes_in_order,
is_truth_value_list=is_truth_value_list
)
if e.tail:
match_value_node(
e,
e.tail,
current_xpath_data,
overall_xpath_dict,
text_part_flag="node_tail_text",
groundtruth_value=value,
matched_xpaths=xpaths,
website=website,
field=field,
dom_tree=dom_tree,
current_page_nodes_in_order=current_page_nodes_in_order,
is_truth_value_list=is_truth_value_list
)
return xpaths, current_xpath_data, current_page_nodes_in_order, is_truth_value_list
def get_dom_tree(html, website):
"""Parses a HTML string to a DOMTree.
We preprocess the html string and use lxml lib to get a tree structure object.
Args:
html: the string of the HTML document.
website: the website name for dealing with special cases.
Returns:
A parsed DOMTree object using lxml library.
"""
cleaner = Cleaner()
cleaner.javascript = True
cleaner.style = True
cleaner.page_structure = False
html = html.replace("\0", "") # Delete NULL bytes.
# Replace the <br> tags with a special token for post-processing the xpaths.
html = html.replace("<br>", "--BRRB--")
html = html.replace("<br/>", "--BRRB--")
html = html.replace("<br />", "--BRRB--")
html = html.replace("<BR>", "--BRRB--")
html = html.replace("<BR/>", "--BRRB--")
html = html.replace("<BR />", "--BRRB--")
# A special case in this website, where the values are inside the comments.
if website == "careerbuilder":
html = html.replace("<!--<tr>", "<tr>")
    html = html.replace("<!-- <tr>", "<tr>")
    html = html.replace("<!--  <tr>", "<tr>")
    html = html.replace("<!--   <tr>", "<tr>")
html = html.replace("</tr>-->", "</tr>")
html = clean_format_str(html)
x = lxml.html.fromstring(html)
etree_root = cleaner.clean_html(x)
dom_tree = etree.ElementTree(etree_root)
return dom_tree
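# A minimal sketch of what get_dom_tree produces (the toy HTML and website name are made up):
#
#   dom = get_dom_tree("<html><body><div>Price<br/>$4.99</div></body></html>", website="toy")
#   for node in dom.iter():
#       print(dom.getpath(node), node.text)
#   # The <div> shows up as '/html/body/div' with text 'Price--BRRB--$4.99', i.e. the <br/>
#   # has been rewritten to the --BRRB-- marker that is split on later when building xpaths.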
def load_html_and_groundtruth(vertical_to_load, website_to_load):
"""
  Loads and returns the HTML string and ground truth data for one website as a dictionary.
"""
  # Example arguments: vertical_to_load="book", website_to_load="abebooks".
all_data_dict = collections.defaultdict(dict)
vertical_to_websites_map = constants.VERTICAL_WEBSITES
gt_path = FLAGS.input_groundtruth_path
"""
  First, build the ground truth dict.
"""
for v in vertical_to_websites_map:
if v != vertical_to_load: continue
for truthfile in os.listdir(os.path.join(gt_path, v)):
# For example, a groundtruth file name can be "auto-yahoo-price.txt".
vertical, website, field = truthfile.replace(".txt", "").split("-")
# like book , amazon , isbn_13
if website != website_to_load:
continue
with open(os.path.join(gt_path, v, truthfile), "r") as gfo:
lines = gfo.readlines()
for line in lines[2:]:
# Each line should contains more than 3 elements splitted by \t
# which are: index, number of values, value1, value2, etc.
item = line.strip().split("\t")
index = item[0] # like 0123
num_values = int(item[1]) # Can be 0 (when item[2] is "<NULL>").
all_data_dict[index]["field-" + field] = dict(values=item[2:2 + num_values])
# {"0123":
# {"field-engine":
# {"values":["engine A","engine B"]},
# "field-price":
# }
# }
"""
this is an example for book-abebooks-0000.htm
<-- all_data_dict["0000"] -->
{
'field-publication_date': {'values': ['2008']},
'field-author': {'values': ['Howard Zinn', 'Paul Buhle', 'Mike Konopacki']},
'field-title': {'values': ["A People's History of American Empire"]},
'field-publisher': {'values': ['Metropolitan Books']},
'field-isbn_13': {'values': ['9780805087444']}
}
"""
print("Reading the pickle of SWDE original dataset.....", file=sys.stderr)
with open(FLAGS.input_pickle_path, "rb") as gfo:
swde_html_data = pickle.load(gfo)
# {"vertical":'book',"website":'book-amazon(2000)',"path:'book/book-amazon(2000)/0000.htm',"html_str":xx} here
for page in tqdm(swde_html_data, desc="Loading HTML data"):
vertical = page["vertical"]
website = page["website"]
website = website[website.find("-") + 1:website.find("(")]
if vertical != vertical_to_load or website != website_to_load:
continue
path = page["path"] # For example, auto/auto-aol(2000)/0000.htm
html_str = page["html_str"]
_, _, index = path.split("/") # website be like auto-aol(2000)
index = index.replace(".htm", "")
all_data_dict[index]["html_str"] = html_str
all_data_dict[index]["path"] = path
"""
this is an example for book-abebooks-0000.htm
<-- all_data_dict["0000"] -->
{
'field-publication_date': {'values': ['2008']},
'field-author': {'values': ['Howard Zinn', 'Paul Buhle', 'Mike Konopacki']},
'field-title': {'values': ["A People's History of American Empire"]},
'field-publisher': {'values': ['Metropolitan Books']},
'field-isbn_13': {'values': ['9780805087444']},
'path': 'book/book-abebooks(2000)/0000.htm',
'html_str': omitted,
}
"""
# all_data_dict here has all the pages
# however, only those in swde.pickle has the newly-appended 'path' and 'html_str'
return all_data_dict
def get_field_xpaths(all_data_dict,
vertical_to_process,
website_to_process,
n_pages,
max_variable_nodes_per_website):
"""Gets xpaths data for each page in the data dictionary.
Args:
all_data_dict: the dictionary saving both the html content and the truth.
vertical_to_process: the vertical that we are working on;
website_to_process: the website that we are working on.
n_pages: we will work on the first n_pages number of the all pages.
max_variable_nodes_per_website: top N frequent variable nodes as the final set.
"""
# Saving the xpath info of the whole website,
# - Key is a xpath.
# - Value is a set of text appeared before inside the node.
overall_xpath_dict = collections.defaultdict(set)
# Update page data with groundtruth xpaths and the overall xpath-value dict.
for index in tqdm(all_data_dict, desc="Processing %s" % website_to_process, total=n_pages):
if int(index) >= n_pages:
continue
# We add dom-tree attributes for the first n_pages
page_data = all_data_dict[index]
html = page_data["html_str"]
dom_tree = get_dom_tree(html, website=website_to_process)
page_data["dom_tree"] = dom_tree
# Match values of each field for the current page.
for field in page_data:
if not field.startswith("field-"):
continue
# Saving the xpaths of the values for each field.
page_data[field]["groundtruth_xpaths"] = set()
page_data[field]["is_truth_value_list"] = set()
for value in page_data[field]["values"]:
xpaths, current_xpath_data, current_page_nodes_in_order, is_truth_value_list = \
get_value_xpaths(dom_tree,
value,
overall_xpath_dict,
website_to_process,
field[6:])
# Assert each truth value can be founded in >=1 nodes.
assert len(xpaths) >= 1, \
"%s;\t%s;\t%s;\t%s; is not found" % (website_to_process, field, index, value)
# Update the page-level xpath information.
page_data[field]["groundtruth_xpaths"].update(xpaths)
page_data[field]["is_truth_value_list"].update(is_truth_value_list)
# now for each page_data
# an example
# page_data["field-author"] =
# {
# 'values': ['Dave Kemper', 'Patrick Sebranek', 'Verne Meyer'],
# 'groundtruth_xpaths':
# {'/html/body/div[2]/div[2]/div[2]/div[1]/h3/a[3]',
# '/html/body/div[2]/div[2]/div[2]/div[1]/h3/a[2]',
# '/html/body/div[2]/div[2]/div[2]/div[1]/h3/a[1]',
# '/html/body/div[2]/div[2]/div[3]/div[3]/p/a'}
# }
page_data["xpath_data"] = current_xpath_data #
page_data["doc_strings"] = current_page_nodes_in_order # [(text, xpath)*N]
# page_data["reversed_doc_strings_ids"] = {v[0]: i for i, v in enumerate(current_page_nodes_in_order)}
# page_data["doc_strings"] is the basis of our transformers-based method!!!
# Define the fixed-text nodes and variable nodes.
fixed_nodes = set()
variable_nodes = set()
# Sort all xpaths seen on this website
# by the number of distinct texts under each xpath, in descending order.
node_variability = sorted(
[(xpath, len(text_set)) for xpath, text_set in overall_xpath_dict.items()],
key=lambda x: x[1],
reverse=True
)
for xpath, variability in node_variability:
# variability = the number of distinct texts observed under this xpath
if variability > 5 and len(variable_nodes) < max_variable_nodes_per_website:
variable_nodes.add(xpath)
else:
fixed_nodes.add(xpath)
print("Vertical: %s; Website: %s; fixed_nodes: %d; variable_nodes: %d" %
(
vertical_to_process, website_to_process, len(fixed_nodes), len(variable_nodes)
)
)
assure_value_variable(all_data_dict, variable_nodes, fixed_nodes, n_pages)
all_data_dict["fixed_nodes"] = list(fixed_nodes)
all_data_dict["variable_nodes"] = list(variable_nodes)
# To summarize, at this point:
# fixed_nodes contains the xpaths whose text never changes
# variable_nodes contains the xpaths whose text varies across pages
# and we guarantee that every truth value falls inside a variable node
# now page_data has the `doc_strings` attributes
# and each field has the `is_truth_value_list` attributes
# all_data_dict has the following attributes
# "0000" ~ "1999" is the infomation for each page
# "fixed_nodes" are the xpaths for nodes that cannot have truth-value
# "variable_nodes" are the xpaths for nodes that might have truth-value
return
def assure_value_variable(all_data_dict, variable_nodes, fixed_nodes, n_pages):
"""Makes sure all values are in the variable nodes by updating sets.
Args:
all_data_dict: the dictionary saving all data with groundtruth.
variable_nodes: the current set of variable nodes.
fixed_nodes: the current set of fixed nodes.
n_pages: we only process the first n_pages pages of each website.
"""
for index in all_data_dict:
if not index.isdigit() or int(index) >= n_pages:
# the key must be a page index, to exclude the "fixed/variable_nodes" entries.
# n_pages limits processing to only the first pages of the website.
continue
for field in all_data_dict[index]:
if not field.startswith("field-"):
continue
xpaths = all_data_dict[index][field]["groundtruth_xpaths"]
if not xpaths:  # There are zero values for this field on this page.
continue
flag = False
for xpath in xpaths:
if flag: # The value's xpath is in the variable_nodes.
break
flag = xpath in variable_nodes
variable_nodes.update(xpaths) # Add new xpaths if they are not in.
fixed_nodes.difference_update(xpaths)
def generate_nodes_seq_and_write_to_file(compressed_args):
"""Extracts all the xpaths and labels the nodes for all the pages."""
vertical, website = compressed_args
all_data_dict = load_html_and_groundtruth(vertical, website)
get_field_xpaths(
all_data_dict,
vertical_to_process=vertical,
website_to_process=website,
n_pages=2000,
max_variable_nodes_per_website=300
)
"""
keys to the following example --->
example = all_data_dict["0000"]
dict_keys([
'field-publication_date',
'field-author',
'field-title',
'field-publisher',
'field-isbn_13',
'html_str',
'path',
'dom_tree',
'xpath_data'
])
"""
variable_nodes = all_data_dict["variable_nodes"]
cleaned_features_for_this_website = {}
for index in all_data_dict:
if not index.isdigit():
# Skip the cases when index is actually the "fixed/variable_nodes" keys.
continue
if int(index) >= FLAGS.n_pages:
break
page_data = all_data_dict[index]
assert "xpath_data" in page_data
doc_strings = page_data["doc_strings"]
new_doc_strings = []
field_info = {}
for field in page_data:
if not field.startswith("field-"):
continue
for doc_string_id in page_data[field]["is_truth_value_list"]:
field_info[doc_string_id] = field[6:]
for id, doc_string in enumerate(doc_strings):
text, xpath = doc_string
is_variable = xpath in variable_nodes
if not is_variable:
new_doc_strings.append((text, xpath, "fixed-node"))
else:
# for variable nodes, we need to assign them labels
gt_field = field_info.get(id, "none")
new_doc_strings.append((text, xpath, gt_field))
cleaned_features_for_this_website[index] = new_doc_strings
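# Descriptive note (the values below are illustrative, not taken from the dataset):
# new_doc_strings is a list of (text, xpath, label) triples, e.g.
#   ("Metropolitan Books", "/html/body/div[2]/span", "publisher")
#   ("Browse books", "/html/body/div[1]/a", "fixed-node")
# where the label is a field name (without the "field-" prefix), "none", or "fixed-node".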
output_file_path = os.path.join(FLAGS.output_data_path, f"{vertical}-{website}-{FLAGS.n_pages}.pickle")
print(f"Writing the processed first {FLAGS.n_pages} pages of {vertical}-{website} into {output_file_path}")
with open(output_file_path, "wb") as f:
pickle.dump(cleaned_features_for_this_website, f)
def main(_):
if not os.path.exists(FLAGS.output_data_path):
os.makedirs(FLAGS.output_data_path)
args_list = []
vertical_to_websites_map = constants.VERTICAL_WEBSITES
verticals = vertical_to_websites_map.keys()
for vertical in verticals:
websites = vertical_to_websites_map[vertical]
for website in websites:
args_list.append((vertical, website))
num_cores = int(mp.cpu_count()/2)
with mp.Pool(num_cores) as pool, tqdm(total=len(args_list), desc="Processing swde-data") as t:
for res in pool.imap_unordered(generate_nodes_seq_and_write_to_file, args_list):
t.update()
if __name__ == "__main__":
app.run(main)
| EXA-1-master | exa/models/unilm-master/markuplm/examples/fine_tuning/run_swde/prepare_data.py |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# DONE READ!
#
r"""To pack all the swde html page files into a single pickle file.
This script generates a single file that packs up the content of all the html pages.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import pickle
import sys
from absl import app
from absl import flags
import tqdm
import constants
FLAGS = flags.FLAGS
# Flags related to input data.
flags.DEFINE_string("input_swde_path", "",
"The root path to swde html page files.")
flags.DEFINE_string("output_pack_path", "",
"The file path to save the packed data.")
flags.DEFINE_integer("first_n_pages", -1,
"The cut-off number to shorten the number of pages.")
def pack_swde_data(swde_path, pack_path, cut_off):
"""Packs the swde dataset to a single file.
Args:
swde_path: The path to SWDE dataset pages (http://shortn/_g22KuARPAi).
pack_path: The path to save packed SWDE dataset file.
cut_off: To shorten the list for testing.
Returns:
None
"""
# Get all website names for each vertical.
# The SWDE dataset fold is structured as follows:
# - swde/ # The root folder.
# - swde/auto/ # A certain vertical.
# - swde/auto/auto-aol(2000)/ # A certain website.
# - swde/auto/auto-aol(2000)/0000.htm # A page.
# Get all vertical names.
vertical_to_websites_map = constants.VERTICAL_WEBSITES
"""
for `auto`, that is --->
[
"msn", "aol", "kbb", "cars", "yahoo", "autoweb", "autobytel",
"automotive", "carquotes", "motortrend"
]
"""
# The data dict initialized with the path of each html file of SWDE.
swde_data = list()
print("Start loading data...")
for v in vertical_to_websites_map:
for w in os.listdir(os.path.join(swde_path, v)):
page_count = 0
filenames = os.listdir(os.path.join(swde_path, v, w))
filenames.sort()
for filename in filenames:
print(os.path.join(swde_path, v, w, filename))
page = dict(vertical=v, website=w, path=os.path.join(v, w, filename))
# path is something like `book/book-amazon(2000)/0000.htm`
swde_data.append(page)
page_count += 1
if cut_off > 0 and page_count == cut_off:
break
# Load the html data.
with tqdm.tqdm(total=len(swde_data), file=sys.stdout) as progressbar:
for page in swde_data:
with open(os.path.join(swde_path, page["path"])) as webpage:
page["html_str"] = webpage.read()
progressbar.set_description("processed")
progressbar.update(1)
# now, the swde_data is a list
# for each page in it
# we have it as
# {"vertical":'book',"website":'book-amazon(2000)',"path:'book/book-amazon(2000)/0000.htm',"html_str":xx}
# and finally these info are dumped into the swde.pickle file
# Save the html_str data.
with open(pack_path, "wb") as gfo:
pickle.dump(swde_data, gfo)
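# A hedged usage sketch (the paths below are hypothetical, not part of the repo):
#   python pack_data.py \
#     --input_swde_path=/data/swde \
#     --output_pack_path=/data/swde.pickle \
#     --first_n_pages=-1
# first_n_pages=-1 keeps every page; a positive value truncates each website for quick tests.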
def main(_):
pack_swde_data(
swde_path=FLAGS.input_swde_path,
pack_path=FLAGS.output_pack_path,
cut_off=FLAGS.first_n_pages)
if __name__ == "__main__":
app.run(main)
| EXA-1-master | exa/models/unilm-master/markuplm/examples/fine_tuning/run_swde/pack_data.py |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""All the constant varaibales that can be reused in many files.
"""
VERTICAL_WEBSITES = {
"auto": [
"msn", "aol", "kbb", "cars", "yahoo", "autoweb", "autobytel",
"automotive", "carquotes", "motortrend"
],
"book": [
"abebooks", "amazon", "barnesandnoble", "bookdepository",
"booksamillion", "borders", "buy", "christianbook", "deepdiscount",
"waterstones"
],
"camera": [
"amazon", "beachaudio", "buy", "compsource", "ecost", "jr", "newegg",
"onsale", "pcnation", "thenerds"
],
"job": [
"careerbuilder", "dice", "hotjobs", "job", "jobcircle", "jobtarget",
"monster", "nettemps", "rightitjobs", "techcentric"
],
"movie": [
"allmovie", "amctv", "boxofficemojo", "hollywood", "iheartmovies",
"imdb", "metacritic", "msn", "rottentomatoes", "yahoo"
],
"nbaplayer": [
"espn", "fanhouse", "foxsports", "msnca", "nba", "si", "slam",
"usatoday", "wiki", "yahoo"
],
"restaurant": [
"fodors", "frommers", "gayot", "opentable", "pickarestaurant",
"restaurantica", "tripadvisor", "urbanspoon", "usdiners", "zagat"
],
"university": [
"collegeboard", "collegenavigator", "collegeprowler", "collegetoolkit",
"ecampustours", "embark", "matchcollege", "princetonreview",
"studentaid", "usnews"
]
}
ATTRIBUTES = {
"auto": ["model", "price", "engine", "fuel_economy"],
"book": ["title", "author", "isbn_13", "publisher", "publication_date"],
"camera": ["model", "price", "manufacturer"],
"job": ["title", "company", "location", "date_posted"],
"movie": ["title", "director", "genre", "mpaa_rating"],
"nbaplayer": ["name", "team", "height", "weight"],
"restaurant": ["name", "address", "phone", "cuisine"],
"university": ["name", "phone", "website", "type"]
}
ATTRIBUTES_PAD = {
"auto": ["model", "price", "engine", "fuel_economy", "<PAD>"],
"book": ["title", "author", "isbn_13", "publisher", "publication_date"],
"camera": ["model", "price", "manufacturer", "<PAD>", "<PAD>"],
"job": ["title", "company", "location", "date_posted", "<PAD>"],
"movie": ["title", "director", "genre", "mpaa_rating", "<PAD>"],
"nbaplayer": ["name", "team", "height", "weight", "<PAD>"],
"restaurant": ["name", "address", "phone", "cuisine", "<PAD>"],
"university": ["name", "phone", "website", "type", "<PAD>"]
}
ATTRIBUTES_PLUS_NONE = {
"auto": ["engine", "fuel_economy", "model", "none", "price"],
"book": [
"author", "isbn_13", "none", "publication_date", "publisher", "title"
],
"camera": ["manufacturer", "model", "none", "price"],
"job": ["company", "date_posted", "location", "none", "title"],
"movie": ["director", "genre", "mpaa_rating", "none", "title"],
"nbaplayer": ["height", "name", "none", "team", "weight"],
"restaurant": ["address", "cuisine", "name", "none", "phone"],
"university": ["name", "none", "phone", "type", "website"]
}
| EXA-1-master | exa/models/unilm-master/markuplm/examples/fine_tuning/run_swde/constants.py |
import tqdm
from torch.utils.data import Dataset
from markuplmft.data.tag_utils import tags_dict
import pickle
import os
import constants
class SwdeFeature(object):
def __init__(self,
html_path,
input_ids,
token_type_ids,
attention_mask,
xpath_tags_seq,
xpath_subs_seq,
labels,
involved_first_tokens_pos,
involved_first_tokens_xpaths,
involved_first_tokens_types,
involved_first_tokens_text,
):
"""
html_path: indicate which page the feature belongs to
input_ids: RT
token_type_ids: RT
attention_mask: RT
xpath_tags_seq: RT
xpath_subs_seq: RT
labels: RT
involved_first_tokens_pos: a list indicating the positions of the first tokens in this feature
involved_first_tokens_xpaths: the xpaths of the first tokens, used to build the xpath dict
involved_first_tokens_types: the types of the first tokens
involved_first_tokens_text: the texts of the first tokens
Note that the `involved_xxx` fields are not fixed-length arrays, so they should not be fed into the model.
They are only used for evaluation.
"""
self.html_path = html_path
self.input_ids = input_ids
self.token_type_ids = token_type_ids
self.attention_mask = attention_mask
self.xpath_tags_seq = xpath_tags_seq
self.xpath_subs_seq = xpath_subs_seq
self.labels = labels
self.involved_first_tokens_pos = involved_first_tokens_pos
self.involved_first_tokens_xpaths = involved_first_tokens_xpaths
self.involved_first_tokens_types = involved_first_tokens_types
self.involved_first_tokens_text = involved_first_tokens_text
class SwdeDataset(Dataset):
def __init__(self,
all_input_ids,
all_attention_mask,
all_token_type_ids,
all_xpath_tags_seq,
all_xpath_subs_seq,
all_labels=None,
):
'''
print(type(all_input_ids))
print(type(all_attention_mask))
print(type(all_token_type_ids))
print(type(all_xpath_tags_seq))
print(type(all_xpath_subs_seq))
print(type(all_labels))
raise ValueError
'''
self.tensors = [all_input_ids, all_attention_mask, all_token_type_ids,
all_xpath_tags_seq, all_xpath_subs_seq]
if not all_labels is None:
self.tensors.append(all_labels)
def __len__(self):
return len(self.tensors[0])
def __getitem__(self, index):
return tuple(tensor[index] for tensor in self.tensors)
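# A hedged usage sketch (the tensor variables are assumed to be pre-built torch.LongTensors):
#   from torch.utils.data import DataLoader
#   dataset = SwdeDataset(all_input_ids, all_attention_mask, all_token_type_ids,
#                         all_xpath_tags_seq, all_xpath_subs_seq, all_labels)
#   loader = DataLoader(dataset, batch_size=8, shuffle=True)
#   for batch in loader:
#       input_ids, attention_mask, token_type_ids, xpath_tags_seq, xpath_subs_seq, labels = batch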
def process_xpath(xpath: str):
if xpath.endswith("/tail"):
xpath = xpath[:-5]
xpath_tags_seq, xpath_subs_seq = [], []
units = xpath.split("/")
for unit in units:
if not unit:
continue
if '[' not in unit:
xpath_tags_seq.append(tags_dict.get(unit, 215))
xpath_subs_seq.append(0)
else:
xx = unit.split('[')
name = xx[0]
id = int(xx[1][:-1])
xpath_tags_seq.append(tags_dict.get(name, 215))
xpath_subs_seq.append(min(id, 1000))
assert len(xpath_subs_seq) == len(xpath_tags_seq)
if len(xpath_tags_seq) > 50:
xpath_tags_seq = xpath_tags_seq[-50:]
xpath_subs_seq = xpath_subs_seq[-50:]
xpath_tags_seq = xpath_tags_seq + [216] * (50 - len(xpath_tags_seq))
xpath_subs_seq = xpath_subs_seq + [1001] * (50 - len(xpath_subs_seq))
return xpath_tags_seq, xpath_subs_seq
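# Illustrative example (not from the original code): for the xpath
# "/html/body/div[2]/h3/a[1]", process_xpath returns
#   xpath_tags_seq = [tags_dict["html"], tags_dict["body"], tags_dict["div"], tags_dict["h3"], tags_dict["a"]] + [216] * 45
#   xpath_subs_seq = [0, 0, 2, 0, 1] + [1001] * 45
# i.e. unknown tags map to 215, subscripts are capped at 1000, and both sequences are
# truncated/padded to length 50 with 216 (tags) and 1001 (subscripts).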
def get_swde_features(root_dir, vertical, website, tokenizer,
doc_stride, max_length, prev_nodes, n_pages):
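# Function overview (descriptive comment): for each page we keep every variable node
# plus up to `prev_nodes` preceding non-empty nodes as context, tokenize them into one
# long sequence, and then cut that sequence into overlapping windows of `max_length`
# tokens (see the sliding-window loop below); each window becomes one SwdeFeature.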
real_max_token_num = max_length - 2 # for cls and sep
padded_xpath_tags_seq = [216] * 50
padded_xpath_subs_seq = [1001] * 50
filename = os.path.join(root_dir, f"{vertical}-{website}-{n_pages}.pickle")
with open(filename, "rb") as f:
raw_data = pickle.load(f)
features = []
for index in tqdm.tqdm(raw_data, desc=f"Processing {vertical}-{website}-{n_pages} features ..."):
html_path = f"{vertical}-{website}-{index}.htm"
needed_docstrings_id_set = set()
for i in range(len(raw_data[index])):
doc_string_type = raw_data[index][i][2]
if doc_string_type == "fixed-node":
continue
# we also take up to `prev_nodes` preceding non-empty nodes into account as context
needed_docstrings_id_set.add(i)
used_prev = 0
prev_id = i - 1
while prev_id >= 0 and used_prev < prev_nodes:
if raw_data[index][prev_id][0].strip():
needed_docstrings_id_set.add(prev_id)
used_prev += 1
prev_id -= 1
needed_docstrings_id_list = sorted(list(needed_docstrings_id_set))
all_token_ids_seq = []
all_xpath_tags_seq = []
all_xpath_subs_seq = []
token_to_ori_map_seq = []
all_labels_seq = []
first_token_pos = []
first_token_xpaths = []
first_token_type = []
first_token_text = []
for i, needed_id in enumerate(needed_docstrings_id_list):
text = raw_data[index][needed_id][0]
xpath = raw_data[index][needed_id][1]
type = raw_data[index][needed_id][2]
token_ids = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(text))
xpath_tags_seq, xpath_subs_seq = process_xpath(xpath)
all_token_ids_seq += token_ids
all_xpath_tags_seq += [xpath_tags_seq] * len(token_ids)
all_xpath_subs_seq += [xpath_subs_seq] * len(token_ids)
token_to_ori_map_seq += [i] * len(token_ids)
if type == "fixed-node":
all_labels_seq += [-100] * len(token_ids)
else:
# we always use the first token to predict
first_token_pos.append(len(all_labels_seq))
first_token_type.append(type)
first_token_xpaths.append(xpath)
first_token_text.append(text)
all_labels_seq += [constants.ATTRIBUTES_PLUS_NONE[vertical].index(type)] * len(token_ids)
assert len(all_token_ids_seq) == len(all_xpath_tags_seq)
assert len(all_token_ids_seq) == len(all_xpath_subs_seq)
assert len(all_token_ids_seq) == len(all_labels_seq)
# we have all the pos of variable nodes in all_token_ids_seq
# now we need to assign them into each feature
start_pos = 0
flag = False
curr_first_token_index = 0
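# Sliding-window chunking: each window covers `real_max_token_num` tokens plus [CLS]/[SEP];
# consecutive windows overlap by `doc_stride` tokens (start_pos = end_pos - doc_stride),
# and the loop stops as soon as a window contains no labeled first token.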
while True:
# the involved span is [start_pos, end_pos)
token_type_ids = [0] * max_length  # token type ids are always all zeros here
end_pos = start_pos + real_max_token_num
# add start_pos ~ end_pos as a feature
splited_token_ids_seq = [tokenizer.cls_token_id] + all_token_ids_seq[start_pos:end_pos] + [
tokenizer.sep_token_id]
splited_xpath_tags_seq = [padded_xpath_tags_seq] + all_xpath_tags_seq[start_pos:end_pos] + [
padded_xpath_tags_seq]
splited_xpath_subs_seq = [padded_xpath_subs_seq] + all_xpath_subs_seq[start_pos:end_pos] + [
padded_xpath_subs_seq]
splited_labels_seq = [-100] + all_labels_seq[start_pos:end_pos] + [-100]
# locate first-tokens in them
involved_first_tokens_pos = []
involved_first_tokens_xpaths = []
involved_first_tokens_types = []
involved_first_tokens_text = []
while curr_first_token_index < len(first_token_pos) \
and end_pos > first_token_pos[curr_first_token_index] >= start_pos:
involved_first_tokens_pos.append(
first_token_pos[curr_first_token_index] - start_pos + 1) # +1 for [cls]
involved_first_tokens_xpaths.append(first_token_xpaths[curr_first_token_index])
involved_first_tokens_types.append(first_token_type[curr_first_token_index])
involved_first_tokens_text.append(first_token_text[curr_first_token_index])
curr_first_token_index += 1
# we abort this feature if no useful node in it
if len(involved_first_tokens_pos) == 0:
break
if end_pos >= len(all_token_ids_seq):
flag = True
# which means we need to pad in this feature
current_len = len(splited_token_ids_seq)
splited_token_ids_seq += [tokenizer.pad_token_id] * (max_length - current_len)
splited_xpath_tags_seq += [padded_xpath_tags_seq] * (max_length - current_len)
splited_xpath_subs_seq += [padded_xpath_subs_seq] * (max_length - current_len)
splited_labels_seq += [-100] * (max_length - current_len)
attention_mask = [1] * current_len + [0] * (max_length - current_len)
else:
# no need to pad, the splited seq is exactly with the length `max_length`
assert len(splited_token_ids_seq) == max_length
attention_mask = [1] * max_length
features.append(
SwdeFeature(
html_path=html_path,
input_ids=splited_token_ids_seq,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
xpath_tags_seq=splited_xpath_tags_seq,
xpath_subs_seq=splited_xpath_subs_seq,
labels=splited_labels_seq,
involved_first_tokens_pos=involved_first_tokens_pos,
involved_first_tokens_xpaths=involved_first_tokens_xpaths,
involved_first_tokens_types=involved_first_tokens_types,
involved_first_tokens_text=involved_first_tokens_text,
)
)
start_pos = end_pos - doc_stride
if flag:
break
return features
| EXA-1-master | exa/models/unilm-master/markuplm/examples/fine_tuning/run_swde/utils.py |
import os
import sys
import constants
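# Descriptive note (inferred from the indexing in the functions below): each prediction
# line is tab-separated as
#   html_path \t xpath \t node_text \t ground_truth_field \t predicted_field \t raw_scores
# where raw_scores is a comma-separated list over constants.ATTRIBUTES_PLUS_NONE[vertical].
# The evaluation pipeline is page_level_constraint -> site_level_voting -> page_hits_level_metric.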
def page_hits_level_metric(
vertical,
target_website,
sub_output_dir,
prev_voted_lines
):
"""Evaluates the hit level prediction result with precision/recall/f1."""
all_precisions = []
all_recall = []
all_f1 = []
lines = prev_voted_lines
evaluation_dict = dict()
for line in lines:
items = line.split("\t")
assert len(items) >= 5, items
html_path = items[0]
text = items[2]
truth = items[3] # gt for this node
pred = items[4] # pred-value for this node
if truth not in evaluation_dict and truth != "none":
evaluation_dict[truth] = dict()
if pred not in evaluation_dict and pred != "none":
evaluation_dict[pred] = dict()
if truth != "none":
if html_path not in evaluation_dict[truth]:
evaluation_dict[truth][html_path] = {"truth": set(), "pred": set()}
evaluation_dict[truth][html_path]["truth"].add(text)
if pred != "none":
if html_path not in evaluation_dict[pred]:
evaluation_dict[pred][html_path] = {"truth": set(), "pred": set()}
evaluation_dict[pred][html_path]["pred"].add(text)
metric_str = "tag, num_truth, num_pred, precision, recall, f1\n"
for tag in evaluation_dict:
num_html_pages_with_truth = 0
num_html_pages_with_pred = 0
num_html_pages_with_correct = 0
for html_path in evaluation_dict[tag]:
result = evaluation_dict[tag][html_path]
if result["truth"]:
num_html_pages_with_truth += 1
if result["pred"]:
num_html_pages_with_pred += 1
if result["truth"] & result["pred"]: # 似乎这里是个交集...不能随便乱搞
num_html_pages_with_correct += 1
precision = num_html_pages_with_correct / (
num_html_pages_with_pred + 0.000001)
recall = num_html_pages_with_correct / (
num_html_pages_with_truth + 0.000001)
f1 = 2 * (precision * recall) / (precision + recall + 0.000001)
metric_str += "%s, %d, %d, %.2f, %.2f, %.2f\n" % (
tag, num_html_pages_with_truth, num_html_pages_with_pred, precision,
recall, f1)
all_precisions.append(precision)
all_recall.append(recall)
all_f1.append(f1)
output_path = os.path.join(sub_output_dir, "scores", f"{target_website}-final-scores.txt")
if not os.path.exists(os.path.dirname(output_path)):
os.makedirs(os.path.dirname(output_path))
with open(output_path, "w") as f:
f.write(metric_str)
print(f.name, file=sys.stderr)
print(metric_str, file=sys.stderr)
return sum(all_precisions) / len(all_precisions), sum(all_recall) / len(all_recall), sum(all_f1) / len(all_f1)
def site_level_voting(vertical, target_website, sub_output_dir, prev_voted_lines):
"""Adds the majority voting for the predictions."""
lines = prev_voted_lines
field_xpath_freq_dict = dict()
for line in lines:
items = line.split("\t")
assert len(items) >= 5, items
xpath = items[1]
pred = items[4]
if pred == "none":
continue
if pred not in field_xpath_freq_dict:
field_xpath_freq_dict[pred] = dict()
if xpath not in field_xpath_freq_dict[pred]:
field_xpath_freq_dict[pred][xpath] = 0
field_xpath_freq_dict[pred][xpath] += 1
most_frequent_xpaths = dict() # Site level voting.
for field, xpth_freq in field_xpath_freq_dict.items():
frequent_xpath = sorted(
xpth_freq.items(), key=lambda kv: kv[1], reverse=True)[0][0] # Top 1.
most_frequent_xpaths[field] = frequent_xpath
voted_lines = []
for line in lines:
items = line.split("\t")
assert len(items) >= 5, items
xpath = items[1]
flag = "none"
for field, most_freq_xpath in most_frequent_xpaths.items():
if xpath == most_freq_xpath:
flag = field
if items[4] == "none" and flag != "none":
items[4] = flag
voted_lines.append("\t".join(items))
output_path = os.path.join(sub_output_dir, "preds", f"{target_website}-final-preds.txt")
if not os.path.exists(os.path.dirname(output_path)):
os.makedirs(os.path.dirname(output_path))
with open(output_path, "w") as f:
f.write("\n".join(voted_lines))
return page_hits_level_metric( # re-eval with the voted prediction
vertical,
target_website,
sub_output_dir,
voted_lines
)
def page_level_constraint(vertical, target_website,
lines, sub_output_dir):
"""Takes the top highest prediction for empty field by ranking raw scores."""
"""
In this step, we make sure every node has a prediction
"""
tags = constants.ATTRIBUTES_PLUS_NONE[vertical]
site_field_truth_exist = dict()
page_field_max = dict()
page_field_pred_count = dict()
for line in lines:
items = line.split("\t")
assert len(items) >= 5, items
html_path = items[0]
truth = items[3]
pred = items[4]
if pred != "none":
if pred not in page_field_pred_count:
page_field_pred_count[pred] = 0
page_field_pred_count[pred] += 1
continue
raw_scores = [float(x) for x in items[5].split(",")]
assert len(raw_scores) == len(tags)
site_field_truth_exist[truth] = True
for index, score in enumerate(raw_scores):
if html_path not in page_field_max:
page_field_max[html_path] = {}
if tags[index] not in page_field_max[
html_path] or score >= page_field_max[html_path][tags[index]]:
page_field_max[html_path][tags[index]] = score
print(page_field_pred_count, file=sys.stderr)
voted_lines = []
for line in lines:
items = line.split("\t")
assert len(items) >= 5, items
html_path = items[0]
raw_scores = [float(x) for x in items[5].split(",")]
pred = items[4]
for index, tag in enumerate(tags):
if tag in site_field_truth_exist and tag not in page_field_pred_count:
if pred != "none":
continue
if raw_scores[index] >= page_field_max[html_path][tags[index]] - (1e-3):
items[4] = tag
voted_lines.append("\t".join(items))
return site_level_voting(
vertical, target_website, sub_output_dir, voted_lines)
| EXA-1-master | exa/models/unilm-master/markuplm/examples/fine_tuning/run_swde/eval_utils.py |
from transformers import CONFIG_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, \
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_NAMES_MAPPING, TOKENIZER_MAPPING
from transformers.convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS, RobertaConverter
from transformers.file_utils import PRESET_MIRROR_DICT
from .models.markuplm import (
MarkupLMConfig,
MarkupLMTokenizer,
MarkupLMForQuestionAnswering,
MarkupLMForTokenClassification,
MarkupLMTokenizerFast,
)
CONFIG_MAPPING.update(
[
("markuplm", MarkupLMConfig),
]
)
MODEL_NAMES_MAPPING.update([("markuplm", "MarkupLM")])
TOKENIZER_MAPPING.update(
[
(MarkupLMConfig, (MarkupLMTokenizer, MarkupLMTokenizerFast)),
]
)
SLOW_TO_FAST_CONVERTERS.update(
{"MarkupLMTokenizer": RobertaConverter}
)
MODEL_FOR_QUESTION_ANSWERING_MAPPING.update(
[(MarkupLMConfig, MarkupLMForQuestionAnswering)]
)
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.update(
[(MarkupLMConfig, MarkupLMForTokenClassification)]
)
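# A hedged usage note (behavior depends on the installed transformers version): importing
# this package runs the registrations above, after which the transformers Auto* classes
# should be able to resolve the "markuplm" model type, e.g.
#   import markuplmft  # noqa: F401
#   from transformers import AutoConfig
#   config = AutoConfig.from_pretrained("microsoft/markuplm-base")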
| EXA-1-master | exa/models/unilm-master/markuplm/markuplmft/__init__.py |
| EXA-1-master | exa/models/unilm-master/markuplm/markuplmft/models/__init__.py |
# coding=utf-8
# Copyright 2018 The Microsoft Research Asia MarkupLM Team Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch MarkupLM model. """
import math
import os
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from transformers.activations import ACT2FN
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward, \
replace_return_docstrings
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
MaskedLMOutput,
TokenClassifierOutput,
QuestionAnsweringModelOutput
)
from transformers.modeling_utils import (
PreTrainedModel,
apply_chunking_to_forward,
find_pruneable_heads_and_indices,
prune_linear_layer,
)
from transformers.utils import logging
from .configuration_markuplm import MarkupLMConfig
from typing import Optional, Union
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "MarkupLMConfig"
_TOKENIZER_FOR_DOC = "MarkupLMTokenizer"
MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST = [
"microsoft/markuplm-base",
"microsoft/markuplm-large",
]
MarkupLMLayerNorm = torch.nn.LayerNorm
class XPathEmbeddings(nn.Module):
"""Construct the embddings from xpath -- tag and subscript"""
# we drop tree-id in this version, as its info can be covered by xpath
def __init__(self, config):
super(XPathEmbeddings, self).__init__()
self.max_depth = config.max_depth
self.xpath_unitseq2_embeddings = nn.Linear(
config.xpath_unit_hidden_size * self.max_depth, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.activation = nn.ReLU()
self.xpath_unitseq2_inner = nn.Linear(config.xpath_unit_hidden_size * self.max_depth, 4 * config.hidden_size)
self.inner2emb = nn.Linear(4 * config.hidden_size, config.hidden_size)
self.xpath_tag_sub_embeddings = nn.ModuleList(
[nn.Embedding(config.max_xpath_tag_unit_embeddings, config.xpath_unit_hidden_size) for _ in
range(self.max_depth)])
self.xpath_subs_sub_embeddings = nn.ModuleList(
[nn.Embedding(config.max_xpath_subs_unit_embeddings, config.xpath_unit_hidden_size) for _ in
range(self.max_depth)])
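# Forward-pass summary (descriptive comment): each xpath is a fixed-depth sequence of
# (tag, subscript) units; every depth position has its own tag and subscript embedding
# table. The per-depth embeddings are concatenated along the hidden dimension, the tag
# and subscript parts are summed, and the result is projected back to hidden_size via
# a ReLU MLP (xpath_unitseq2_inner -> ReLU -> dropout -> inner2emb).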
def forward(self,
xpath_tags_seq=None,
xpath_subs_seq=None):
xpath_tags_embeddings = []
xpath_subs_embeddings = []
for i in range(self.max_depth):
xpath_tags_embeddings.append(self.xpath_tag_sub_embeddings[i](xpath_tags_seq[:, :, i]))
xpath_subs_embeddings.append(self.xpath_subs_sub_embeddings[i](xpath_subs_seq[:, :, i]))
xpath_tags_embeddings = torch.cat(xpath_tags_embeddings, dim=-1)
xpath_subs_embeddings = torch.cat(xpath_subs_embeddings, dim=-1)
xpath_embeddings = xpath_tags_embeddings + xpath_subs_embeddings
xpath_embeddings = self.inner2emb(
self.dropout(self.activation(self.xpath_unitseq2_inner(xpath_embeddings))))
return xpath_embeddings
class MarkupLMEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super(MarkupLMEmbeddings, self).__init__()
self.config = config
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.max_depth = config.max_depth
self.xpath_embeddings = XPathEmbeddings(config)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = MarkupLMLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
self.padding_idx = config.pad_token_id
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
)
def create_position_ids_from_inputs_embeds(self, inputs_embeds):
"""
We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
Args:
inputs_embeds: torch.Tensor
Returns: torch.Tensor
"""
input_shape = inputs_embeds.size()[:-1]
sequence_length = input_shape[1]
position_ids = torch.arange(
self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
)
return position_ids.unsqueeze(0).expand(input_shape)
def forward(
self,
input_ids=None,
xpath_tags_seq=None,
xpath_subs_seq=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
past_key_values_length=0
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
if input_ids is not None:
# Create the position ids from the input token ids. Any padded tokens remain padded.
position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx,
past_key_values_length)
else:
position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
# xpath seq prepare
if xpath_tags_seq is None:
xpath_tags_seq = 216 * torch.ones(tuple(list(input_shape) + [self.max_depth]), dtype=torch.long,
device=device)
if xpath_subs_seq is None:
xpath_subs_seq = 1001 * torch.ones(tuple(list(input_shape) + [self.max_depth]), dtype=torch.long,
device=device)
# xpath seq prepare
words_embeddings = inputs_embeds
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
xpath_embeddings = self.xpath_embeddings(xpath_tags_seq,
xpath_subs_seq)
embeddings = (
words_embeddings
+ position_embeddings
+ token_type_embeddings
+ xpath_embeddings
)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
# Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->MarkupLM
class MarkupLMSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertIntermediate
class MarkupLMIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->MarkupLM
class MarkupLMOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertPooler
class MarkupLMPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->MarkupLM
class MarkupLMPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->MarkupLM
class MarkupLMLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = MarkupLMPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
# Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->MarkupLM
class MarkupLMOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = MarkupLMLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class MarkupLMSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
self.max_position_embeddings = config.max_position_embeddings
self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
# If this is instantiated as a cross-attention module, the keys
# and values come from an encoder; the attention mask needs to be
# such that the encoder's padding tokens are not attended to.
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_layer, value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query":
seq_length = hidden_states.size()[1]
position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1)
position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1)
distance = position_ids_l - position_ids_r
positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1)
positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility
if self.position_embedding_type == "relative_key":
relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores
elif self.position_embedding_type == "relative_key_query":
relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding)
relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding)
attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask (precomputed for all layers in the MarkupLMModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
class MarkupLMAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = MarkupLMSelfAttention(config)
self.output = MarkupLMSelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class MarkupLMLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = MarkupLMAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = MarkupLMAttention(config)
self.intermediate = MarkupLMIntermediate(config)
self.output = MarkupLMOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
class MarkupLMEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([MarkupLMLayer(config) for _ in range(config.num_hidden_layers)])
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
class MarkupLMPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = MarkupLMConfig
pretrained_model_archive_map = MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST
base_model_prefix = "markuplm"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, MarkupLMLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
return super(MarkupLMPreTrainedModel, cls).from_pretrained(
pretrained_model_name_or_path, *model_args, **kwargs
)
MARKUPLM_START_DOCSTRING = r"""
The MarkupLM model was proposed in
----- NOTHING!!!!!! -----
This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config (:class:`~transformers.MarkupLMConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
MARKUPLM_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`transformers.MarkupLMTokenizer`. See
:func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
xpath_tags_seq (:obj:`torch.LongTensor` of shape :obj:`({0}, 50)`, `optional`):
None
xpath_subs_seq (:obj:`torch.LongTensor` of shape :obj:`({0}, 50)`, `optional`):
None
tree_index_seq (:obj:`torch.LongTensor` of shape :obj:`({0}, 50)`, `optional`):
None
attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for
tokens that are NOT MASKED, ``0`` for MASKED tokens.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token
`What are token type IDs? <../glossary.html#token-type-ids>`_
position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`_
head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1`
indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert `input_ids` indices into associated vectors
than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
If set to ``True``, the attentions tensors of all attention layers are returned. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
If set to ``True``, the hidden states of all layers are returned. See ``hidden_states`` under returned
tensors for more detail.
return_dict (:obj:`bool`, `optional`):
If set to ``True``, the model will return a :class:`~transformers.file_utils.ModelOutput` instead of a
plain tuple.
"""
@add_start_docstrings(
"The bare MarkupLM Model transformer outputting raw hidden-states without any specific head on top.",
MARKUPLM_START_DOCSTRING,
)
class MarkupLMModel(MarkupLMPreTrainedModel):
def __init__(self, config, add_pooling_layer=True):
super(MarkupLMModel, self).__init__(config)
self.config = config
self.embeddings = MarkupLMEmbeddings(config)
self.encoder = MarkupLMEncoder(config)
self.pooler = MarkupLMPooler(config) if add_pooling_layer else None
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_model_forward(MARKUPLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
xpath_tags_seq=None,
xpath_subs_seq=None,
):
r"""
Returns:
Examples::
No examples now !
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(
input_ids=input_ids,
xpath_tags_seq=xpath_tags_seq,
xpath_subs_seq=xpath_subs_seq,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
)
encoder_outputs = self.encoder(
embedding_output,
extended_attention_mask,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
@add_start_docstrings(
"""
MarkupLM Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
MARKUPLM_START_DOCSTRING,
)
class MarkupLMForQuestionAnswering(MarkupLMPreTrainedModel):
_keys_to_ignore_on_load_unexpected = [r"pooler"]
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.markuplm = MarkupLMModel(config, add_pooling_layer=False)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
@add_start_docstrings_to_model_forward(MARKUPLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
start_positions=None,
end_positions=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
xpath_tags_seq=None,
xpath_subs_seq=None,
):
r"""
        start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
        end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
Returns:
Examples:
            No usage example is provided here yet.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.markuplm(
input_ids,
xpath_tags_seq=xpath_tags_seq,
xpath_subs_seq=xpath_subs_seq,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1).contiguous()
end_logits = end_logits.squeeze(-1).contiguous()
total_loss = None
if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, the positions may carry an extra dimension; squeeze it
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
if not return_dict:
output = (start_logits, end_logits) + outputs[2:]
return ((total_loss,) + output) if total_loss is not None else output
return QuestionAnsweringModelOutput(
loss=total_loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class MarkupLMOnlyTokenClassificationHead(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.decoder = nn.Linear(config.hidden_size, config.node_type_size)
def forward(self, sequence_output):
# sequence_output : (bs,seq_len,dim)
sequence_output_x = self.dense(sequence_output)
sequence_output_x = self.transform_act_fn(sequence_output_x)
sequence_output_x = self.LayerNorm(sequence_output_x)
output_res = self.decoder(sequence_output_x)
# (bs,seq_len,node_type_size) here node_type_size is real+none
return output_res
@add_start_docstrings("""MarkupLM Model with a `token_classification` head on top. """, MARKUPLM_START_DOCSTRING)
class MarkupLMForTokenClassification(MarkupLMPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.markuplm = MarkupLMModel(config, add_pooling_layer=False)
self.token_cls = MarkupLMOnlyTokenClassificationHead(config)
self.init_weights()
@add_start_docstrings_to_model_forward(MARKUPLM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
xpath_tags_seq=None,
xpath_subs_seq=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[-100, 0, ...,
            config.node_type_size]`` (see the ``input_ids`` docstring). Tokens with indices set to ``-100`` are
            ignored (masked); the loss is only computed for the tokens with labels in ``[0, ..., config.node_type_size]``.
        Returns:
        Examples:
            No usage example is provided here yet.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.markuplm(
input_ids,
xpath_tags_seq=xpath_tags_seq,
xpath_subs_seq=xpath_subs_seq,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
prediction_scores = self.token_cls(sequence_output) # (bs,seq,node_type_size)
# pred_node_types = torch.argmax(prediction_scores,dim=2) # (bs,seq)
token_cls_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
token_cls_loss = loss_fct(
prediction_scores.view(-1, self.config.node_type_size),
labels.view(-1),
)
if not return_dict:
output = (prediction_scores,) + outputs[2:]
return ((token_cls_loss,) + output) if token_cls_loss is not None else output
return TokenClassifierOutput(
loss=token_cls_loss,
logits=prediction_scores, # (bs,seq,node_type_size)
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0):
"""
Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
are ignored. This is modified from fairseq's `utils.make_positions`.
    Args:
        input_ids (torch.Tensor): sequence of token ids.
        padding_idx (int): id of the padding token.
        past_key_values_length (int): length of previously cached key/values, used as an offset.
    Returns:
        torch.Tensor: position ids with the same shape as ``input_ids``.
"""
# The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
mask = input_ids.ne(padding_idx).int()
incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask
return incremental_indices.long() + padding_idx
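# NOTE (editor): the block below is a minimal, editor-added sketch of how
# `create_position_ids_from_input_ids` behaves; it is not part of the original MarkupLM
# release, and the token ids and padding index are made-up illustration values.
if __name__ == "__main__":
    import torch
    demo_input_ids = torch.tensor([[0, 5, 7, 1, 1]])  # two trailing pad tokens (padding_idx = 1)
    demo_position_ids = create_position_ids_from_input_ids(demo_input_ids, padding_idx=1)
    # Non-padding tokens get consecutive positions starting at padding_idx + 1, while
    # padding tokens keep padding_idx itself: tensor([[2, 3, 4, 1, 1]])
    print(demo_position_ids)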
| EXA-1-master | exa/models/unilm-master/markuplm/markuplmft/models/markuplm/modeling_markuplm.py |
# coding=utf-8
# Copyright 2018 The Microsoft Research Asia MarkupLM Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model MarkupLM."""
from transformers.utils import logging
from transformers import RobertaTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/vocab.json",
"markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/vocab.json",
},
"merges_file": {
"markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/merges.txt",
"markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/merges.txt",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"markuplm-base": 512,
"markuplm-large": 512,
}
class MarkupLMTokenizer(RobertaTokenizer):
r"""
Constructs a MarkupLM tokenizer.
    :class:`~transformers.MarkupLMTokenizer` is identical to :class:`~transformers.RobertaTokenizer` and runs end-to-end
tokenization.
Refer to superclass :class:`~transformers.RobertaTokenizer` for usage examples and documentation concerning
parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
| EXA-1-master | exa/models/unilm-master/markuplm/markuplmft/models/markuplm/tokenization_markuplm.py |
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from transformers.file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from .configuration_markuplm import MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP, MarkupLMConfig
from .tokenization_markuplm import MarkupLMTokenizer
from .modeling_markuplm import (
MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST,
MarkupLMForQuestionAnswering,
MarkupLMForTokenClassification,
MarkupLMModel,
)
_import_structure = {
"configuration_markuplm": ["MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "MarkupLMConfig"],
"tokenization_markuplm": ["MarkupLMTokenizer"],
}
if is_tokenizers_available():
_import_structure["tokenization_markuplm_fast"] = ["MarkupLMTokenizerFast"]
if is_torch_available():
_import_structure["modeling_markuplm"] = [
"MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"MarkupLMForQuestionAnswering",
"MarkupLMForTokenClassification",
"MarkupLMModel",
]
if TYPE_CHECKING:
from .configuration_markuplm import MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP, MarkupLMConfig
from .tokenization_markuplm import MarkupLMTokenizer
if is_tokenizers_available():
from .tokenization_markuplm_fast import MarkupLMTokenizerFast
if is_torch_available():
from .modeling_markuplm import (
MARKUPLM_PRETRAINED_MODEL_ARCHIVE_LIST,
MarkupLMForQuestionAnswering,
MarkupLMForTokenClassification,
MarkupLMModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| EXA-1-master | exa/models/unilm-master/markuplm/markuplmft/models/markuplm/__init__.py |
# coding=utf-8
# Copyright 2010, The Microsoft Research Asia MarkupLM Team authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" MarkupLM model configuration """
from transformers.utils import logging
from transformers.models.roberta.configuration_roberta import RobertaConfig
logger = logging.get_logger(__name__)
MARKUPLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json",
}
class MarkupLMConfig(RobertaConfig):
r"""
This is the configuration class to store the configuration of a :class:`~transformers.MarkupLMModel`. It is used to
instantiate a MarkupLM model according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the MarkupLM `markuplm-base-uncased
<https://huggingface.co/microsoft/markuplm-base-uncased>`__ architecture.
Configuration objects inherit from :class:`~transformers.BertConfig` and can be used to control the model outputs.
Read the documentation from :class:`~transformers.BertConfig` for more information.
Args:
vocab_size (:obj:`int`, `optional`, defaults to 30522):
            Vocabulary size of the MarkupLM model. Defines the number of different tokens that can be represented by
            the `inputs_ids` passed to the forward method of :class:`~transformers.MarkupLMModel`.
hidden_size (:obj:`int`, `optional`, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
num_hidden_layers (:obj:`int`, `optional`, defaults to 12):
Number of hidden layers in the Transformer encoder.
num_attention_heads (:obj:`int`, `optional`, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
intermediate_size (:obj:`int`, `optional`, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
hidden_act (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string,
:obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
The dropout ratio for the attention probabilities.
max_position_embeddings (:obj:`int`, `optional`, defaults to 512):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
type_vocab_size (:obj:`int`, `optional`, defaults to 2):
The vocabulary size of the :obj:`token_type_ids` passed into :class:`~transformers.MarkupLMModel`.
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
The epsilon used by the layer normalization layers.
gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
If True, use gradient checkpointing to save memory at the expense of slower backward pass.
        max_xpath_tag_unit_embeddings (:obj:`int`, `optional`, defaults to 256):
            The maximum value that the xpath tag unit embedding might ever be used with.
            Typically set this to something large just in case (e.g., 256).
        max_xpath_subs_unit_embeddings (:obj:`int`, `optional`, defaults to 1024):
            The maximum value that the xpath subscript unit embedding might ever be used with.
            Typically set this to something large just in case (e.g., 1024).
        xpath_unit_hidden_size (:obj:`int`, `optional`, defaults to 32):
            The hidden size of each xpath unit (tag or subscript). Since ``max_depth`` defaults to 50, one
            complete xpath embedding has (50 * xpath_unit_hidden_size) dimensions.
        max_depth (:obj:`int`, `optional`, defaults to 50):
            The maximum number of (tag, subscript) units kept for each xpath.
    Examples::
        No usage examples are provided here yet.
"""
model_type = "markuplm"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
gradient_checkpointing=False,
max_xpath_tag_unit_embeddings=256,
max_xpath_subs_unit_embeddings=1024,
xpath_unit_hidden_size=32,
max_depth=50,
**kwargs
):
super().__init__(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
pad_token_id=pad_token_id,
gradient_checkpointing=gradient_checkpointing,
**kwargs,
)
self.max_depth = max_depth
self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
self.xpath_unit_hidden_size = xpath_unit_hidden_size | EXA-1-master | exa/models/unilm-master/markuplm/markuplmft/models/markuplm/configuration_markuplm.py |
# coding=utf-8
# Copyright 2018 The Microsoft Research Asia MarkupLM Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model MarkupLM."""
from transformers.utils import logging
from transformers.models.roberta.tokenization_roberta_fast import RobertaTokenizerFast
from .tokenization_markuplm import MarkupLMTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/vocab.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/vocab.json",
},
"merges_file": {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/merges.txt",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/merges.txt",
},
"tokenizer_file": {
"microsoft/markuplm-base": "https://huggingface.co/microsoft/markuplm-base/resolve/main/tokenizer.json",
"microsoft/markuplm-large": "https://huggingface.co/microsoft/markuplm-large/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/markuplm-base": 512,
"microsoft/markuplm-large": 512,
}
class MarkupLMTokenizerFast(RobertaTokenizerFast):
r"""
Constructs a "Fast" MarkupLMTokenizer.
    :class:`~transformers.MarkupLMTokenizerFast` is identical to :class:`~transformers.RobertaTokenizerFast` and runs
end-to-end tokenization.
Refer to superclass :class:`~transformers.RobertaTokenizerFast` for usage examples and documentation concerning
parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = MarkupLMTokenizer
| EXA-1-master | exa/models/unilm-master/markuplm/markuplmft/models/markuplm/tokenization_markuplm_fast.py |
tags_dict = {'a': 0, 'abbr': 1, 'acronym': 2, 'address': 3, 'altGlyph': 4, 'altGlyphDef': 5, 'altGlyphItem': 6,
'animate': 7, 'animateColor': 8, 'animateMotion': 9, 'animateTransform': 10, 'applet': 11, 'area': 12,
'article': 13, 'aside': 14, 'audio': 15, 'b': 16, 'base': 17, 'basefont': 18, 'bdi': 19, 'bdo': 20,
'bgsound': 21, 'big': 22, 'blink': 23, 'blockquote': 24, 'body': 25, 'br': 26, 'button': 27, 'canvas': 28,
'caption': 29, 'center': 30, 'circle': 31, 'cite': 32, 'clipPath': 33, 'code': 34, 'col': 35,
'colgroup': 36, 'color-profile': 37, 'content': 38, 'cursor': 39, 'data': 40, 'datalist': 41, 'dd': 42,
'defs': 43, 'del': 44, 'desc': 45, 'details': 46, 'dfn': 47, 'dialog': 48, 'dir': 49, 'div': 50, 'dl': 51,
'dt': 52, 'ellipse': 53, 'em': 54, 'embed': 55, 'feBlend': 56, 'feColorMatrix': 57,
'feComponentTransfer': 58, 'feComposite': 59, 'feConvolveMatrix': 60, 'feDiffuseLighting': 61,
'feDisplacementMap': 62, 'feDistantLight': 63, 'feFlood': 64, 'feFuncA': 65, 'feFuncB': 66, 'feFuncG': 67,
'feFuncR': 68, 'feGaussianBlur': 69, 'feImage': 70, 'feMerge': 71, 'feMergeNode': 72, 'feMorphology': 73,
'feOffset': 74, 'fePointLight': 75, 'feSpecularLighting': 76, 'feSpotLight': 77, 'feTile': 78,
'feTurbulence': 79, 'fieldset': 80, 'figcaption': 81, 'figure': 82, 'filter': 83, 'font-face-format': 84,
'font-face-name': 85, 'font-face-src': 86, 'font-face-uri': 87, 'font-face': 88, 'font': 89, 'footer': 90,
'foreignObject': 91, 'form': 92, 'frame': 93, 'frameset': 94, 'g': 95, 'glyph': 96, 'glyphRef': 97,
'h1': 98, 'h2': 99, 'h3': 100, 'h4': 101, 'h5': 102, 'h6': 103, 'head': 104, 'header': 105, 'hgroup': 106,
'hkern': 107, 'hr': 108, 'html': 109, 'i': 110, 'iframe': 111, 'image': 112, 'img': 113, 'input': 114,
'ins': 115, 'kbd': 116, 'keygen': 117, 'label': 118, 'legend': 119, 'li': 120, 'line': 121,
'linearGradient': 122, 'link': 123, 'main': 124, 'map': 125, 'mark': 126, 'marker': 127, 'marquee': 128,
'mask': 129, 'math': 130, 'menu': 131, 'menuitem': 132, 'meta': 133, 'metadata': 134, 'meter': 135,
'missing-glyph': 136, 'mpath': 137, 'nav': 138, 'nobr': 139, 'noembed': 140, 'noframes': 141,
'noscript': 142, 'object': 143, 'ol': 144, 'optgroup': 145, 'option': 146, 'output': 147, 'p': 148,
'param': 149, 'path': 150, 'pattern': 151, 'picture': 152, 'plaintext': 153, 'polygon': 154,
'polyline': 155, 'portal': 156, 'pre': 157, 'progress': 158, 'q': 159, 'radialGradient': 160, 'rb': 161,
'rect': 162, 'rp': 163, 'rt': 164, 'rtc': 165, 'ruby': 166, 's': 167, 'samp': 168, 'script': 169,
'section': 170, 'select': 171, 'set': 172, 'shadow': 173, 'slot': 174, 'small': 175, 'source': 176,
'spacer': 177, 'span': 178, 'stop': 179, 'strike': 180, 'strong': 181, 'style': 182, 'sub': 183,
'summary': 184, 'sup': 185, 'svg': 186, 'switch': 187, 'symbol': 188, 'table': 189, 'tbody': 190,
'td': 191, 'template': 192, 'text': 193, 'textPath': 194, 'textarea': 195, 'tfoot': 196, 'th': 197,
'thead': 198, 'time': 199, 'title': 200, 'tr': 201, 'track': 202, 'tref': 203, 'tspan': 204, 'tt': 205,
'u': 206, 'ul': 207, 'use': 208, 'var': 209, 'video': 210, 'view': 211, 'vkern': 212, 'wbr': 213,
'xmp': 214}
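# NOTE (editor): the block below is a minimal, editor-added sketch of how `tags_dict` can
# map the tag names of an xpath onto integer ids. The xpath string, the splitting logic and
# the unknown-tag fallback id (len(tags_dict)) are illustrative assumptions, not part of
# the original file.
if __name__ == "__main__":
    demo_xpath = "/html/body/div/ul/li/a"
    demo_tag_names = [tag for tag in demo_xpath.split("/") if tag]
    unk_tag_id = len(tags_dict)  # hypothetical id reserved for tags missing from the dictionary
    demo_tag_ids = [tags_dict.get(tag, unk_tag_id) for tag in demo_tag_names]
    print(list(zip(demo_tag_names, demo_tag_ids)))  # e.g. ('html', 109), ('body', 25), ...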
| EXA-1-master | exa/models/unilm-master/markuplm/markuplmft/data/tag_utils.py |
EXA-1-master | exa/models/unilm-master/markuplm/markuplmft/data/__init__.py |
|
#!/usr/bin/env python3
from setuptools import find_packages, setup
setup(
name="layoutlmft",
version="0.1",
author="LayoutLM Team",
url="https://github.com/microsoft/unilm/tree/master/layoutlmft",
packages=find_packages(),
python_requires=">=3.7",
extras_require={"dev": ["flake8", "isort", "black"]},
) | EXA-1-master | exa/models/unilm-master/layoutlmft/setup.py |
import os
import re
import numpy as np
from transformers.utils import logging
logger = logging.get_logger(__name__)
PREFIX_CHECKPOINT_DIR = "checkpoint"
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")
def get_last_checkpoint(folder):
content = os.listdir(folder)
checkpoints = [
path
for path in content
if _re_checkpoint.search(path) is not None and os.path.isdir(os.path.join(folder, path))
]
if len(checkpoints) == 0:
return
return os.path.join(folder, max(checkpoints, key=lambda x: int(_re_checkpoint.search(x).groups()[0])))
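# NOTE (editor): minimal, editor-added sketch of `get_last_checkpoint`; the temporary
# directory layout below is made up purely for illustration.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp_dir:
        os.makedirs(os.path.join(tmp_dir, "checkpoint-500"))
        os.makedirs(os.path.join(tmp_dir, "checkpoint-1000"))
        # The checkpoint directory with the highest step number is returned.
        print(get_last_checkpoint(tmp_dir))  # .../checkpoint-1000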
def re_score(pred_relations, gt_relations, mode="strict"):
"""Evaluate RE predictions
Args:
pred_relations (list) : list of list of predicted relations (several relations in each sentence)
gt_relations (list) : list of list of ground truth relations
rel = { "head": (start_idx (inclusive), end_idx (exclusive)),
"tail": (start_idx (inclusive), end_idx (exclusive)),
"head_type": ent_type,
"tail_type": ent_type,
"type": rel_type}
        mode (str) : evaluation mode, either 'strict' or 'boundaries'."""
assert mode in ["strict", "boundaries"]
relation_types = [v for v in [0, 1] if not v == 0]
scores = {rel: {"tp": 0, "fp": 0, "fn": 0} for rel in relation_types + ["ALL"]}
# Count GT relations and Predicted relations
n_sents = len(gt_relations)
    n_rels = sum(len(sent) for sent in gt_relations)
    n_found = sum(len(sent) for sent in pred_relations)
# Count TP, FP and FN per type
for pred_sent, gt_sent in zip(pred_relations, gt_relations):
for rel_type in relation_types:
# strict mode takes argument types into account
if mode == "strict":
pred_rels = {
(rel["head"], rel["head_type"], rel["tail"], rel["tail_type"])
for rel in pred_sent
if rel["type"] == rel_type
}
gt_rels = {
(rel["head"], rel["head_type"], rel["tail"], rel["tail_type"])
for rel in gt_sent
if rel["type"] == rel_type
}
# boundaries mode only takes argument spans into account
elif mode == "boundaries":
pred_rels = {(rel["head"], rel["tail"]) for rel in pred_sent if rel["type"] == rel_type}
gt_rels = {(rel["head"], rel["tail"]) for rel in gt_sent if rel["type"] == rel_type}
scores[rel_type]["tp"] += len(pred_rels & gt_rels)
scores[rel_type]["fp"] += len(pred_rels - gt_rels)
scores[rel_type]["fn"] += len(gt_rels - pred_rels)
# Compute per entity Precision / Recall / F1
for rel_type in scores.keys():
if scores[rel_type]["tp"]:
scores[rel_type]["p"] = scores[rel_type]["tp"] / (scores[rel_type]["fp"] + scores[rel_type]["tp"])
scores[rel_type]["r"] = scores[rel_type]["tp"] / (scores[rel_type]["fn"] + scores[rel_type]["tp"])
else:
scores[rel_type]["p"], scores[rel_type]["r"] = 0, 0
if not scores[rel_type]["p"] + scores[rel_type]["r"] == 0:
scores[rel_type]["f1"] = (
2 * scores[rel_type]["p"] * scores[rel_type]["r"] / (scores[rel_type]["p"] + scores[rel_type]["r"])
)
else:
scores[rel_type]["f1"] = 0
# Compute micro F1 Scores
tp = sum([scores[rel_type]["tp"] for rel_type in relation_types])
fp = sum([scores[rel_type]["fp"] for rel_type in relation_types])
fn = sum([scores[rel_type]["fn"] for rel_type in relation_types])
if tp:
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1 = 2 * precision * recall / (precision + recall)
else:
precision, recall, f1 = 0, 0, 0
scores["ALL"]["p"] = precision
scores["ALL"]["r"] = recall
scores["ALL"]["f1"] = f1
scores["ALL"]["tp"] = tp
scores["ALL"]["fp"] = fp
scores["ALL"]["fn"] = fn
# Compute Macro F1 Scores
scores["ALL"]["Macro_f1"] = np.mean([scores[ent_type]["f1"] for ent_type in relation_types])
scores["ALL"]["Macro_p"] = np.mean([scores[ent_type]["p"] for ent_type in relation_types])
scores["ALL"]["Macro_r"] = np.mean([scores[ent_type]["r"] for ent_type in relation_types])
logger.info(f"RE Evaluation in *** {mode.upper()} *** mode")
logger.info(
"processed {} sentences with {} relations; found: {} relations; correct: {}.".format(
n_sents, n_rels, n_found, tp
)
)
logger.info(
"\tALL\t TP: {};\tFP: {};\tFN: {}".format(scores["ALL"]["tp"], scores["ALL"]["fp"], scores["ALL"]["fn"])
)
logger.info("\t\t(m avg): precision: {:.2f};\trecall: {:.2f};\tf1: {:.2f} (micro)".format(precision, recall, f1))
logger.info(
"\t\t(M avg): precision: {:.2f};\trecall: {:.2f};\tf1: {:.2f} (Macro)\n".format(
scores["ALL"]["Macro_p"], scores["ALL"]["Macro_r"], scores["ALL"]["Macro_f1"]
)
)
for rel_type in relation_types:
logger.info(
"\t{}: \tTP: {};\tFP: {};\tFN: {};\tprecision: {:.2f};\trecall: {:.2f};\tf1: {:.2f};\t{}".format(
rel_type,
scores[rel_type]["tp"],
scores[rel_type]["fp"],
scores[rel_type]["fn"],
scores[rel_type]["p"],
scores[rel_type]["r"],
scores[rel_type]["f1"],
scores[rel_type]["tp"] + scores[rel_type]["fp"],
)
)
return scores
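# NOTE (editor): minimal, editor-added sketch of `re_score` on a tiny, made-up example; the
# entity spans, entity types and the relation type id (1) are illustrative assumptions.
if __name__ == "__main__":
    gt = [[{"head": (0, 2), "head_type": "question", "tail": (3, 5), "tail_type": "answer", "type": 1}]]
    pred = [[
        {"head": (0, 2), "head_type": "question", "tail": (3, 5), "tail_type": "answer", "type": 1},  # correct
        {"head": (6, 8), "head_type": "question", "tail": (9, 10), "tail_type": "answer", "type": 1},  # spurious
    ]]
    demo_scores = re_score(pred, gt, mode="strict")
    # One true positive and one false positive -> precision 0.5, recall 1.0, f1 ~ 0.67
    print(demo_scores["ALL"]["p"], demo_scores["ALL"]["r"], demo_scores["ALL"]["f1"])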
| EXA-1-master | exa/models/unilm-master/layoutlmft/layoutlmft/evaluation.py |
from collections import OrderedDict
from transformers import CONFIG_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_NAMES_MAPPING, TOKENIZER_MAPPING
from transformers.convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS, BertConverter, XLMRobertaConverter
from transformers.models.auto.modeling_auto import auto_class_factory
from .models.layoutlmv2 import (
LayoutLMv2Config,
LayoutLMv2ForRelationExtraction,
LayoutLMv2ForTokenClassification,
LayoutLMv2Tokenizer,
LayoutLMv2TokenizerFast,
)
from .models.layoutxlm import (
LayoutXLMConfig,
LayoutXLMForRelationExtraction,
LayoutXLMForTokenClassification,
LayoutXLMTokenizer,
LayoutXLMTokenizerFast,
)
CONFIG_MAPPING.update([("layoutlmv2", LayoutLMv2Config), ("layoutxlm", LayoutXLMConfig)])
MODEL_NAMES_MAPPING.update([("layoutlmv2", "LayoutLMv2"), ("layoutxlm", "LayoutXLM")])
TOKENIZER_MAPPING.update(
[
(LayoutLMv2Config, (LayoutLMv2Tokenizer, LayoutLMv2TokenizerFast)),
(LayoutXLMConfig, (LayoutXLMTokenizer, LayoutXLMTokenizerFast)),
]
)
SLOW_TO_FAST_CONVERTERS.update({"LayoutLMv2Tokenizer": BertConverter, "LayoutXLMTokenizer": XLMRobertaConverter})
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.update(
[(LayoutLMv2Config, LayoutLMv2ForTokenClassification), (LayoutXLMConfig, LayoutXLMForTokenClassification)]
)
MODEL_FOR_RELATION_EXTRACTION_MAPPING = OrderedDict(
[(LayoutLMv2Config, LayoutLMv2ForRelationExtraction), (LayoutXLMConfig, LayoutXLMForRelationExtraction)]
)
AutoModelForTokenClassification = auto_class_factory(
"AutoModelForTokenClassification", MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, head_doc="token classification"
)
AutoModelForRelationExtraction = auto_class_factory(
"AutoModelForRelationExtraction", MODEL_FOR_RELATION_EXTRACTION_MAPPING, head_doc="relation extraction"
)
| EXA-1-master | exa/models/unilm-master/layoutlmft/layoutlmft/__init__.py |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple
import torch
from transformers.file_utils import ModelOutput
@dataclass
class ReOutput(ModelOutput):
loss: Optional[torch.FloatTensor] = None
logits: torch.FloatTensor = None
hidden_states: Optional[Tuple[torch.FloatTensor]] = None
attentions: Optional[Tuple[torch.FloatTensor]] = None
entities: Optional[Dict] = None
relations: Optional[Dict] = None
pred_relations: Optional[Dict] = None
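# NOTE (editor): minimal, editor-added sketch showing that `ReOutput` behaves like any other
# `ModelOutput` (attribute access, dict-style access, listing the populated fields); the
# tensor shape and the empty entity/relation containers are made-up illustration values.
if __name__ == "__main__":
    demo_output = ReOutput(
        logits=torch.zeros(2, 4),
        entities=[{}, {}],
        relations=[{}, {}],
        pred_relations=[[], []],
    )
    print(demo_output.logits.shape)     # torch.Size([2, 4])
    print(demo_output["logits"].shape)  # the same tensor, accessed dict-style
    print(list(demo_output.keys()))     # only the fields that are not None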
| EXA-1-master | exa/models/unilm-master/layoutlmft/layoutlmft/utils.py |
EXA-1-master | exa/models/unilm-master/layoutlmft/layoutlmft/models/__init__.py |
|
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
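# NOTE (editor): minimal, editor-added sketch of filling `ModelArguments` from command-line
# style arguments via `HfArgumentParser`; the model identifier below is only an example value.
if __name__ == "__main__":
    from transformers import HfArgumentParser
    parser = HfArgumentParser(ModelArguments)
    (demo_model_args,) = parser.parse_args_into_dataclasses(
        args=["--model_name_or_path", "microsoft/layoutlmv2-base-uncased"]
    )
    print(demo_model_args.model_name_or_path)
    print(demo_model_args.cache_dir)  # None unless --cache_dir is passed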
| EXA-1-master | exa/models/unilm-master/layoutlmft/layoutlmft/models/model_args.py |
# coding=utf-8
from transformers.models.layoutlm.tokenization_layoutlm import LayoutLMTokenizer
from transformers.utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/vocab.txt",
"microsoft/layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/layoutlmv2-base-uncased": 512,
"microsoft/layoutlmv2-large-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"microsoft/layoutlmv2-base-uncased": {"do_lower_case": True},
"microsoft/layoutlmv2-large-uncased": {"do_lower_case": True},
}
class LayoutLMv2Tokenizer(LayoutLMTokenizer):
r"""
Constructs a LayoutLMv2 tokenizer.
    :class:`~transformers.LayoutLMv2Tokenizer` is identical to :class:`~transformers.BertTokenizer` and runs end-to-end
tokenization: punctuation splitting + wordpiece.
Refer to superclass :class:`~transformers.BertTokenizer` for usage examples and documentation concerning
parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, model_max_length=512, **kwargs):
super().__init__(model_max_length=model_max_length, **kwargs)
| EXA-1-master | exa/models/unilm-master/layoutlmft/layoutlmft/models/layoutlmv2/tokenization_layoutlmv2.py |
from .configuration_layoutlmv2 import LayoutLMv2Config
from .modeling_layoutlmv2 import LayoutLMv2ForRelationExtraction, LayoutLMv2ForTokenClassification, LayoutLMv2Model
from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
| EXA-1-master | exa/models/unilm-master/layoutlmft/layoutlmft/models/layoutlmv2/__init__.py |
# -*- coding: utf-8 -*-
def add_layoutlmv2_config(cfg):
_C = cfg
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C.MODEL.MASK_ON = True
# When using pre-trained models in Detectron1 or any MSRA models,
# std has been absorbed into its conv1 weights, so the std needs to be set 1.
# Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)
_C.MODEL.PIXEL_STD = [57.375, 57.120, 58.395]
# ---------------------------------------------------------------------------- #
# Backbone options
# ---------------------------------------------------------------------------- #
_C.MODEL.BACKBONE.NAME = "build_resnet_fpn_backbone"
# ---------------------------------------------------------------------------- #
# FPN options
# ---------------------------------------------------------------------------- #
# Names of the input feature maps to be used by FPN
# They must have contiguous power of 2 strides
# e.g., ["res2", "res3", "res4", "res5"]
_C.MODEL.FPN.IN_FEATURES = ["res2", "res3", "res4", "res5"]
# ---------------------------------------------------------------------------- #
# Anchor generator options
# ---------------------------------------------------------------------------- #
# Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input.
# Format: list[list[float]]. SIZES[i] specifies the list of sizes
# to use for IN_FEATURES[i]; len(SIZES) == len(IN_FEATURES) must be true,
# or len(SIZES) == 1 is true and size list SIZES[0] is used for all
# IN_FEATURES.
_C.MODEL.ANCHOR_GENERATOR.SIZES = [[32], [64], [128], [256], [512]]
# ---------------------------------------------------------------------------- #
# RPN options
# ---------------------------------------------------------------------------- #
# Names of the input feature maps to be used by RPN
# e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN
_C.MODEL.RPN.IN_FEATURES = ["p2", "p3", "p4", "p5", "p6"]
# Number of top scoring RPN proposals to keep before applying NMS
# When FPN is used, this is *per FPN level* (not total)
_C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 2000
_C.MODEL.RPN.PRE_NMS_TOPK_TEST = 1000
# Number of top scoring RPN proposals to keep after applying NMS
# When FPN is used, this limit is applied per level and then again to the union
# of proposals from all levels
# NOTE: When FPN is used, the meaning of this config is different from Detectron1.
# It means per-batch topk in Detectron1, but per-image topk here.
# See the "find_top_rpn_proposals" function for details.
_C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 1000
_C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000
# ---------------------------------------------------------------------------- #
# ROI HEADS options
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_HEADS.NAME = "StandardROIHeads"
# Number of foreground classes
_C.MODEL.ROI_HEADS.NUM_CLASSES = 5
# Names of the input feature maps to be used by ROI heads
# Currently all heads (box, mask, ...) use the same input feature map list
# e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN
_C.MODEL.ROI_HEADS.IN_FEATURES = ["p2", "p3", "p4", "p5"]
# ---------------------------------------------------------------------------- #
# Box Head
# ---------------------------------------------------------------------------- #
    # C4 models don't use the head name option
# Options for non-C4 models: FastRCNNConvFCHead,
_C.MODEL.ROI_BOX_HEAD.NAME = "FastRCNNConvFCHead"
_C.MODEL.ROI_BOX_HEAD.NUM_FC = 2
_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14
# ---------------------------------------------------------------------------- #
# Mask Head
# ---------------------------------------------------------------------------- #
_C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead"
_C.MODEL.ROI_MASK_HEAD.NUM_CONV = 4 # The number of convs in the mask head
_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 7
# ---------------------------------------------------------------------------- #
    # ResNe[X]t options (ResNets = {ResNet, ResNeXt})
# Note that parts of a resnet may be used for both the backbone and the head
# These options apply to both
# ---------------------------------------------------------------------------- #
_C.MODEL.RESNETS.DEPTH = 101
_C.MODEL.RESNETS.SIZES = [[32], [64], [128], [256], [512]]
_C.MODEL.RESNETS.ASPECT_RATIOS = [[0.5, 1.0, 2.0]]
_C.MODEL.RESNETS.OUT_FEATURES = ["res2", "res3", "res4", "res5"] # res4 for C4 backbone, res2..5 for FPN backbone
# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
_C.MODEL.RESNETS.NUM_GROUPS = 32
# Baseline width of each group.
# Scaling this parameters will scale the width of all bottleneck layers.
_C.MODEL.RESNETS.WIDTH_PER_GROUP = 8
# Place the stride 2 conv on the 1x1 filter
# Use True only for the original MSRA ResNet; use False for C2 and Torch models
_C.MODEL.RESNETS.STRIDE_IN_1X1 = False
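# NOTE (editor): minimal, editor-added sketch showing how `add_layoutlmv2_config` is meant to
# be applied on top of detectron2's default config; it requires detectron2 to be installed
# and only prints a few of the overridden values.
if __name__ == "__main__":
    from detectron2.config import get_cfg
    demo_cfg = get_cfg()
    add_layoutlmv2_config(demo_cfg)
    print(demo_cfg.MODEL.BACKBONE.NAME)          # build_resnet_fpn_backbone
    print(demo_cfg.MODEL.RESNETS.DEPTH)          # 101
    print(demo_cfg.MODEL.ROI_HEADS.NUM_CLASSES)  # 5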
| EXA-1-master | exa/models/unilm-master/layoutlmft/layoutlmft/models/layoutlmv2/detectron2_config.py |
# coding=utf-8
import math
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
import detectron2
from detectron2.modeling import META_ARCH_REGISTRY
from transformers import PreTrainedModel
from transformers.modeling_outputs import (
BaseModelOutputWithPastAndCrossAttentions,
BaseModelOutputWithPoolingAndCrossAttentions,
TokenClassifierOutput,
)
from transformers.modeling_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMIntermediate as LayoutLMv2Intermediate
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMOutput as LayoutLMv2Output
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMPooler as LayoutLMv2Pooler
from transformers.models.layoutlm.modeling_layoutlm import LayoutLMSelfOutput as LayoutLMv2SelfOutput
from transformers.utils import logging
from ...modules.decoders.re import REDecoder
from ...utils import ReOutput
from .configuration_layoutlmv2 import LayoutLMv2Config
from .detectron2_config import add_layoutlmv2_config
logger = logging.get_logger(__name__)
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST = [
"layoutlmv2-base-uncased",
"layoutlmv2-large-uncased",
]
LayoutLMv2LayerNorm = torch.nn.LayerNorm
class LayoutLMv2Embeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super(LayoutLMv2Embeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.coordinate_size)
self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.shape_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = LayoutLMv2LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
def _cal_spatial_position_embeddings(self, bbox):
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
except IndexError as e:
            raise IndexError("The :obj:`bbox` coordinate values should be within the 0-1000 range.") from e
h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
spatial_position_embeddings = torch.cat(
[
left_position_embeddings,
upper_position_embeddings,
right_position_embeddings,
lower_position_embeddings,
h_position_embeddings,
w_position_embeddings,
],
dim=-1,
)
return spatial_position_embeddings
class LayoutLMv2SelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.fast_qkv = config.fast_qkv
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.has_relative_attention_bias = config.has_relative_attention_bias
self.has_spatial_attention_bias = config.has_spatial_attention_bias
if config.fast_qkv:
self.qkv_linear = nn.Linear(config.hidden_size, 3 * self.all_head_size, bias=False)
self.q_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
self.v_bias = nn.Parameter(torch.zeros(1, 1, self.all_head_size))
else:
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def compute_qkv(self, hidden_states):
if self.fast_qkv:
qkv = self.qkv_linear(hidden_states)
q, k, v = torch.chunk(qkv, 3, dim=-1)
if q.ndimension() == self.q_bias.ndimension():
q = q + self.q_bias
v = v + self.v_bias
else:
_sz = (1,) * (q.ndimension() - 1) + (-1,)
q = q + self.q_bias.view(*_sz)
v = v + self.v_bias.view(*_sz)
else:
q = self.query(hidden_states)
k = self.key(hidden_states)
v = self.value(hidden_states)
return q, k, v
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
q, k, v = self.compute_qkv(hidden_states)
# (B, L, H*D) -> (B, H, L, D)
query_layer = self.transpose_for_scores(q)
key_layer = self.transpose_for_scores(k)
value_layer = self.transpose_for_scores(v)
query_layer = query_layer / math.sqrt(self.attention_head_size)
# [BSZ, NAT, L, L]
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
if self.has_relative_attention_bias:
attention_scores += rel_pos
if self.has_spatial_attention_bias:
attention_scores += rel_2d_pos
attention_scores = attention_scores.float().masked_fill_(attention_mask.to(torch.bool), float("-inf"))
attention_probs = F.softmax(attention_scores, dim=-1, dtype=torch.float32).type_as(value_layer)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class LayoutLMv2Attention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = LayoutLMv2SelfAttention(config)
self.output = LayoutLMv2SelfOutput(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
heads, index = find_pruneable_heads_and_indices(
heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
)
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class LayoutLMv2Layer(nn.Module):
def __init__(self, config):
super().__init__()
self.chunk_size_feed_forward = config.chunk_size_feed_forward
self.seq_len_dim = 1
self.attention = LayoutLMv2Attention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added"
self.crossattention = LayoutLMv2Attention(config)
self.intermediate = LayoutLMv2Intermediate(config)
self.output = LayoutLMv2Output(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
rel_pos=None,
rel_2d_pos=None,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
attention_output = self_attention_outputs[0]
# if decoder, the last output is tuple of self-attn cache
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
# cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
# add cross-attn cache to positions 3,4 of present_key_value tuple
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
layer_output = apply_chunking_to_forward(
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
)
outputs = (layer_output,) + outputs
# if decoder, return the attn key/values as the last output
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
def feed_forward_chunk(self, attention_output):
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
return layer_output
def relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
ret = 0
if bidirectional:
num_buckets //= 2
ret += (relative_position > 0).long() * num_buckets
n = torch.abs(relative_position)
else:
n = torch.max(-relative_position, torch.zeros_like(relative_position))
# now n is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = n < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
val_if_large = max_exact + (
torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
).to(torch.long)
val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
ret += torch.where(is_small, n, val_if_large)
return ret
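# NOTE (editor): minimal, editor-added sketch of `relative_position_bucket`; the offsets below
# are made-up values. Small offsets keep their own bucket, larger offsets share logarithmically
# sized buckets, and in bidirectional mode positive and negative offsets land in different
# halves of the bucket range.
if __name__ == "__main__":
    demo_offsets = torch.tensor([-100, -4, -1, 0, 1, 4, 100])
    demo_buckets = relative_position_bucket(demo_offsets, bidirectional=True, num_buckets=32, max_distance=128)
    print(demo_buckets)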
class LayoutLMv2Encoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList([LayoutLMv2Layer(config) for _ in range(config.num_hidden_layers)])
self.has_relative_attention_bias = config.has_relative_attention_bias
self.has_spatial_attention_bias = config.has_spatial_attention_bias
if self.has_relative_attention_bias:
self.rel_pos_bins = config.rel_pos_bins
self.max_rel_pos = config.max_rel_pos
self.rel_pos_onehot_size = config.rel_pos_bins
self.rel_pos_bias = nn.Linear(self.rel_pos_onehot_size, config.num_attention_heads, bias=False)
if self.has_spatial_attention_bias:
self.max_rel_2d_pos = config.max_rel_2d_pos
self.rel_2d_pos_bins = config.rel_2d_pos_bins
self.rel_2d_pos_onehot_size = config.rel_2d_pos_bins
self.rel_pos_x_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)
self.rel_pos_y_bias = nn.Linear(self.rel_2d_pos_onehot_size, config.num_attention_heads, bias=False)
def _cal_1d_pos_emb(self, hidden_states, position_ids):
rel_pos_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)
rel_pos = relative_position_bucket(
rel_pos_mat,
num_buckets=self.rel_pos_bins,
max_distance=self.max_rel_pos,
)
rel_pos = F.one_hot(rel_pos, num_classes=self.rel_pos_onehot_size).type_as(hidden_states)
rel_pos = self.rel_pos_bias(rel_pos).permute(0, 3, 1, 2)
rel_pos = rel_pos.contiguous()
return rel_pos
def _cal_2d_pos_emb(self, hidden_states, bbox):
position_coord_x = bbox[:, :, 0]
position_coord_y = bbox[:, :, 3]
rel_pos_x_2d_mat = position_coord_x.unsqueeze(-2) - position_coord_x.unsqueeze(-1)
rel_pos_y_2d_mat = position_coord_y.unsqueeze(-2) - position_coord_y.unsqueeze(-1)
rel_pos_x = relative_position_bucket(
rel_pos_x_2d_mat,
num_buckets=self.rel_2d_pos_bins,
max_distance=self.max_rel_2d_pos,
)
rel_pos_y = relative_position_bucket(
rel_pos_y_2d_mat,
num_buckets=self.rel_2d_pos_bins,
max_distance=self.max_rel_2d_pos,
)
rel_pos_x = F.one_hot(rel_pos_x, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
rel_pos_y = F.one_hot(rel_pos_y, num_classes=self.rel_2d_pos_onehot_size).type_as(hidden_states)
rel_pos_x = self.rel_pos_x_bias(rel_pos_x).permute(0, 3, 1, 2)
rel_pos_y = self.rel_pos_y_bias(rel_pos_y).permute(0, 3, 1, 2)
rel_pos_x = rel_pos_x.contiguous()
rel_pos_y = rel_pos_y.contiguous()
rel_2d_pos = rel_pos_x + rel_pos_y
return rel_2d_pos
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=False,
output_hidden_states=False,
return_dict=True,
bbox=None,
position_ids=None,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
next_decoder_cache = () if use_cache else None
rel_pos = self._cal_1d_pos_emb(hidden_states, position_ids) if self.has_relative_attention_bias else None
rel_2d_pos = self._cal_2d_pos_emb(hidden_states, bbox) if self.has_spatial_attention_bias else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warn(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, past_key_value, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(layer_module),
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
else:
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
rel_pos=rel_pos,
rel_2d_pos=rel_2d_pos,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[-1],)
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(
v
for v in [
hidden_states,
next_decoder_cache,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_decoder_cache,
hidden_states=all_hidden_states,
attentions=all_self_attentions,
cross_attentions=all_cross_attentions,
)
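# Illustrative sketch (not part of the original file): the relative-position bias
# computation above in isolation, with hypothetical sizes. It reuses the helpers already
# imported or defined in this file (`torch`, `nn`, `F`, `relative_position_bucket`) and is
# a shape walkthrough rather than the authoritative API.
def _example_1d_bias_shapes():
    batch, seq_len, num_heads, num_buckets = 2, 5, 12, 32
    position_ids = torch.arange(seq_len).unsqueeze(0).expand(batch, seq_len)
    rel_mat = position_ids.unsqueeze(-2) - position_ids.unsqueeze(-1)          # (B, L, L)
    buckets = relative_position_bucket(rel_mat, num_buckets=num_buckets, max_distance=128)
    onehot = F.one_hot(buckets, num_classes=num_buckets).float()               # (B, L, L, 32)
    bias = nn.Linear(num_buckets, num_heads, bias=False)(onehot)               # (B, L, L, 12)
    return bias.permute(0, 3, 1, 2).contiguous()                               # (B, 12, L, L)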
class LayoutLMv2PreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = LayoutLMv2Config
pretrained_model_archive_map = LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST
base_model_prefix = "layoutlmv2"
_keys_to_ignore_on_load_missing = [r"position_ids"]
def _init_weights(self, module):
"""Initialize the weights"""
if isinstance(module, nn.Linear):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, LayoutLMv2LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def my_convert_sync_batchnorm(module, process_group=None):
    # Same as `nn.modules.SyncBatchNorm.convert_sync_batchnorm`, but also converts
    # `detectron2.layers.FrozenBatchNorm2d` layers.
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
return nn.modules.SyncBatchNorm.convert_sync_batchnorm(module, process_group)
module_output = module
if isinstance(module, detectron2.layers.FrozenBatchNorm2d):
module_output = torch.nn.SyncBatchNorm(
num_features=module.num_features,
eps=module.eps,
affine=True,
track_running_stats=True,
process_group=process_group,
)
module_output.weight = torch.nn.Parameter(module.weight)
module_output.bias = torch.nn.Parameter(module.bias)
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = torch.tensor(0, dtype=torch.long, device=module.running_mean.device)
for name, child in module.named_children():
module_output.add_module(name, my_convert_sync_batchnorm(child, process_group))
del module
return module_output
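# Note on `my_convert_sync_batchnorm`: detectron2's `FrozenBatchNorm2d` is not a subclass
# of `torch.nn.modules.batchnorm._BatchNorm`, so the stock converter would skip it. The
# branch above rebuilds it as a `SyncBatchNorm`, copying the frozen affine parameters and
# running statistics so multi-GPU training starts from the same state.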
class VisualBackbone(nn.Module):
def __init__(self, config):
super().__init__()
self.cfg = detectron2.config.get_cfg()
add_layoutlmv2_config(self.cfg)
meta_arch = self.cfg.MODEL.META_ARCHITECTURE
model = META_ARCH_REGISTRY.get(meta_arch)(self.cfg)
assert isinstance(model.backbone, detectron2.modeling.backbone.FPN)
self.backbone = model.backbone
if (
config.convert_sync_batchnorm
and torch.distributed.is_available()
and torch.distributed.is_initialized()
and torch.distributed.get_rank() > -1
):
self_rank = torch.distributed.get_rank()
node_size = torch.cuda.device_count()
world_size = torch.distributed.get_world_size()
assert world_size % node_size == 0
node_global_ranks = [
list(range(i * node_size, (i + 1) * node_size)) for i in range(world_size // node_size)
]
sync_bn_groups = [
torch.distributed.new_group(ranks=node_global_ranks[i]) for i in range(world_size // node_size)
]
node_rank = self_rank // node_size
assert self_rank in node_global_ranks[node_rank]
self.backbone = my_convert_sync_batchnorm(self.backbone, process_group=sync_bn_groups[node_rank])
assert len(self.cfg.MODEL.PIXEL_MEAN) == len(self.cfg.MODEL.PIXEL_STD)
num_channels = len(self.cfg.MODEL.PIXEL_MEAN)
self.register_buffer(
"pixel_mean",
torch.Tensor(self.cfg.MODEL.PIXEL_MEAN).view(num_channels, 1, 1),
)
self.register_buffer("pixel_std", torch.Tensor(self.cfg.MODEL.PIXEL_STD).view(num_channels, 1, 1))
self.out_feature_key = "p2"
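        # `torch.is_deterministic()` (deprecated in newer PyTorch in favour of
        # `torch.are_deterministic_algorithms_enabled()`) gates a fallback to a fixed
        # AvgPool2d, since the adaptive pooling backward pass is not deterministic on CUDA.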
if torch.is_deterministic():
logger.warning("using `AvgPool2d` instead of `AdaptiveAvgPool2d`")
input_shape = (224, 224)
backbone_stride = self.backbone.output_shape()[self.out_feature_key].stride
self.pool = nn.AvgPool2d(
(
math.ceil(math.ceil(input_shape[0] / backbone_stride) / config.image_feature_pool_shape[0]),
math.ceil(math.ceil(input_shape[1] / backbone_stride) / config.image_feature_pool_shape[1]),
)
)
else:
self.pool = nn.AdaptiveAvgPool2d(config.image_feature_pool_shape[:2])
if len(config.image_feature_pool_shape) == 2:
config.image_feature_pool_shape.append(self.backbone.output_shape()[self.out_feature_key].channels)
assert self.backbone.output_shape()[self.out_feature_key].channels == config.image_feature_pool_shape[2]
def forward(self, images):
images_input = ((images if torch.is_tensor(images) else images.tensor) - self.pixel_mean) / self.pixel_std
features = self.backbone(images_input)
features = features[self.out_feature_key]
features = self.pool(features).flatten(start_dim=2).transpose(1, 2).contiguous()
return features
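# With the default config the backbone's "p2" FPN map is pooled to a 7x7 grid, so
# `VisualBackbone.forward` returns (batch, 49, 256): one feature vector per grid cell,
# which `LayoutLMv2Model` below projects to `hidden_size` and appends after the text tokens.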
class LayoutLMv2Model(LayoutLMv2PreTrainedModel):
def __init__(self, config):
super(LayoutLMv2Model, self).__init__(config)
self.config = config
self.has_visual_segment_embedding = config.has_visual_segment_embedding
self.embeddings = LayoutLMv2Embeddings(config)
self.visual = VisualBackbone(config)
self.visual_proj = nn.Linear(config.image_feature_pool_shape[-1], config.hidden_size)
if self.has_visual_segment_embedding:
self.visual_segment_embedding = nn.Parameter(nn.Embedding(1, config.hidden_size).weight[0])
self.visual_LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.visual_dropout = nn.Dropout(config.hidden_dropout_prob)
self.encoder = LayoutLMv2Encoder(config)
self.pooler = LayoutLMv2Pooler(config)
self.init_weights()
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def _calc_text_embeddings(self, input_ids, bbox, position_ids, token_type_ids):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.embeddings.word_embeddings(input_ids)
position_embeddings = self.embeddings.position_embeddings(position_ids)
spatial_position_embeddings = self.embeddings._cal_spatial_position_embeddings(bbox)
token_type_embeddings = self.embeddings.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + spatial_position_embeddings + token_type_embeddings
embeddings = self.embeddings.LayerNorm(embeddings)
embeddings = self.embeddings.dropout(embeddings)
return embeddings
def _calc_img_embeddings(self, image, bbox, position_ids):
visual_embeddings = self.visual_proj(self.visual(image))
position_embeddings = self.embeddings.position_embeddings(position_ids)
spatial_position_embeddings = self.embeddings._cal_spatial_position_embeddings(bbox)
embeddings = visual_embeddings + position_embeddings + spatial_position_embeddings
if self.has_visual_segment_embedding:
embeddings += self.visual_segment_embedding
embeddings = self.visual_LayerNorm(embeddings)
embeddings = self.visual_dropout(embeddings)
return embeddings
def forward(
self,
input_ids=None,
bbox=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
device = input_ids.device if input_ids is not None else inputs_embeds.device
visual_shape = list(input_shape)
visual_shape[1] = self.config.image_feature_pool_shape[0] * self.config.image_feature_pool_shape[1]
visual_shape = torch.Size(visual_shape)
final_shape = list(input_shape)
final_shape[1] += visual_shape[1]
final_shape = torch.Size(final_shape)
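        # Assign one bounding box to each visual token: the page is treated as a
        # 1000x1000 coordinate space and split into an image_feature_pool_shape[0] x
        # image_feature_pool_shape[1] grid, each pooled feature taking its cell's box.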
visual_bbox_x = (
torch.arange(
0,
1000 * (self.config.image_feature_pool_shape[1] + 1),
1000,
device=device,
dtype=bbox.dtype,
)
// self.config.image_feature_pool_shape[1]
)
visual_bbox_y = (
torch.arange(
0,
1000 * (self.config.image_feature_pool_shape[0] + 1),
1000,
device=device,
dtype=bbox.dtype,
)
// self.config.image_feature_pool_shape[0]
)
visual_bbox = torch.stack(
[
visual_bbox_x[:-1].repeat(self.config.image_feature_pool_shape[0], 1),
visual_bbox_y[:-1].repeat(self.config.image_feature_pool_shape[1], 1).transpose(0, 1),
visual_bbox_x[1:].repeat(self.config.image_feature_pool_shape[0], 1),
visual_bbox_y[1:].repeat(self.config.image_feature_pool_shape[1], 1).transpose(0, 1),
],
dim=-1,
).view(-1, bbox.size(-1))
visual_bbox = visual_bbox.repeat(final_shape[0], 1, 1)
final_bbox = torch.cat([bbox, visual_bbox], dim=1)
if attention_mask is None:
attention_mask = torch.ones(input_shape, device=device)
visual_attention_mask = torch.ones(visual_shape, device=device)
final_attention_mask = torch.cat([attention_mask, visual_attention_mask], dim=1)
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if position_ids is None:
seq_length = input_shape[1]
position_ids = self.embeddings.position_ids[:, :seq_length]
position_ids = position_ids.expand_as(input_ids)
visual_position_ids = torch.arange(0, visual_shape[1], dtype=torch.long, device=device).repeat(
input_shape[0], 1
)
final_position_ids = torch.cat([position_ids, visual_position_ids], dim=1)
if bbox is None:
bbox = torch.zeros(tuple(list(input_shape) + [4]), dtype=torch.long, device=device)
text_layout_emb = self._calc_text_embeddings(
input_ids=input_ids,
bbox=bbox,
token_type_ids=token_type_ids,
position_ids=position_ids,
)
visual_emb = self._calc_img_embeddings(
image=image,
bbox=visual_bbox,
position_ids=visual_position_ids,
)
final_emb = torch.cat([text_layout_emb, visual_emb], dim=1)
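        # Broadcast the joint text+visual mask to (batch, 1, 1, total_len) and convert
        # padded positions into large negative additive biases on the attention scores.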
extended_attention_mask = final_attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.to(dtype=next(self.parameters()).dtype)
else:
head_mask = [None] * self.config.num_hidden_layers
encoder_outputs = self.encoder(
final_emb,
extended_attention_mask,
bbox=final_bbox,
position_ids=final_position_ids,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
if not return_dict:
return (sequence_output, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndCrossAttentions(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
cross_attentions=encoder_outputs.cross_attentions,
)
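# Illustrative usage sketch (not part of the original file). The tokenizer, the dummy
# zero bboxes and the blank 224x224 image are stand-ins; with the default 7x7 feature
# pool the output sequence is the text length plus 49 visual tokens.
def _example_run_layoutlmv2(model, tokenizer):
    enc = tokenizer("a tiny example", return_tensors="pt")
    seq_len = enc["input_ids"].size(1)
    bbox = torch.zeros(1, seq_len, 4, dtype=torch.long)   # one 0..1000 box per text token
    image = torch.zeros(1, 3, 224, 224)                   # dummy page image (raw pixels)
    out = model(
        input_ids=enc["input_ids"],
        bbox=bbox,
        image=image,
        attention_mask=enc["attention_mask"],
    )
    return out.last_hidden_state                          # (1, seq_len + 49, hidden_size)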
class LayoutLMv2ForTokenClassification(LayoutLMv2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.layoutlmv2 = LayoutLMv2Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def get_input_embeddings(self):
return self.layoutlmv2.embeddings.word_embeddings
def forward(
self,
input_ids=None,
bbox=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
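        # The encoder output is text tokens followed by the visual tokens; only the text
        # part (the first `seq_length` positions) is sent to the classification head.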
seq_length = input_ids.size(1)
sequence_output, image_output = outputs[0][:, :seq_length], outputs[0][:, seq_length:]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
class LayoutLMv2ForRelationExtraction(LayoutLMv2PreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.layoutlmv2 = LayoutLMv2Model(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.extractor = REDecoder(config)
self.init_weights()
def forward(
self,
input_ids,
bbox,
labels=None,
image=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
entities=None,
relations=None,
):
outputs = self.layoutlmv2(
input_ids=input_ids,
bbox=bbox,
image=image,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
)
seq_length = input_ids.size(1)
sequence_output, image_output = outputs[0][:, :seq_length], outputs[0][:, seq_length:]
sequence_output = self.dropout(sequence_output)
loss, pred_relations = self.extractor(sequence_output, entities, relations)
return ReOutput(
loss=loss,
entities=entities,
relations=relations,
pred_relations=pred_relations,
hidden_states=outputs[0],
)
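# Note: `REDecoder` (defined elsewhere in this package) scores candidate head/tail entity
# pairs from the text-token representations; `entities` and `relations` are the batched
# span/label structures it expects, and `pred_relations` are its decoded predictions.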
| EXA-1-master | exa/models/unilm-master/layoutlmft/layoutlmft/models/layoutlmv2/modeling_layoutlmv2.py |
# coding=utf-8
from transformers.models.layoutlm.tokenization_layoutlm_fast import LayoutLMTokenizerFast
from transformers.utils import logging
from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"microsoft/layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/vocab.txt",
"microsoft/layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"microsoft/layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/tokenizer.json",
"microsoft/layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"microsoft/layoutlmv2-base-uncased": 512,
"microsoft/layoutlmv2-large-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"microsoft/layoutlmv2-base-uncased": {"do_lower_case": True},
"microsoft/layoutlmv2-large-uncased": {"do_lower_case": True},
}
class LayoutLMv2TokenizerFast(LayoutLMTokenizerFast):
r"""
Constructs a "Fast" LayoutLMv2Tokenizer.
Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning
parameters.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
slow_tokenizer_class = LayoutLMv2Tokenizer
def __init__(self, model_max_length=512, **kwargs):
super().__init__(model_max_length=model_max_length, **kwargs)
| EXA-1-master | exa/models/unilm-master/layoutlmft/layoutlmft/models/layoutlmv2/tokenization_layoutlmv2_fast.py |
# coding=utf-8
from transformers.models.layoutlm.configuration_layoutlm import LayoutLMConfig
from transformers.utils import logging
logger = logging.get_logger(__name__)
LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"layoutlmv2-base-uncased": "https://huggingface.co/microsoft/layoutlmv2-base-uncased/resolve/main/config.json",
"layoutlmv2-large-uncased": "https://huggingface.co/microsoft/layoutlmv2-large-uncased/resolve/main/config.json",
}
class LayoutLMv2Config(LayoutLMConfig):
model_type = "layoutlmv2"
def __init__(
self,
vocab_size=30522,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02,
layer_norm_eps=1e-12,
pad_token_id=0,
gradient_checkpointing=False,
max_2d_position_embeddings=1024,
max_rel_pos=128,
rel_pos_bins=32,
fast_qkv=True,
max_rel_2d_pos=256,
rel_2d_pos_bins=64,
convert_sync_batchnorm=True,
image_feature_pool_shape=[7, 7, 256],
coordinate_size=128,
shape_size=128,
has_relative_attention_bias=True,
has_spatial_attention_bias=True,
has_visual_segment_embedding=False,
**kwargs
):
super().__init__(
vocab_size=vocab_size,
hidden_size=hidden_size,
num_hidden_layers=num_hidden_layers,
num_attention_heads=num_attention_heads,
intermediate_size=intermediate_size,
hidden_act=hidden_act,
hidden_dropout_prob=hidden_dropout_prob,
attention_probs_dropout_prob=attention_probs_dropout_prob,
max_position_embeddings=max_position_embeddings,
type_vocab_size=type_vocab_size,
initializer_range=initializer_range,
layer_norm_eps=layer_norm_eps,
pad_token_id=pad_token_id,
gradient_checkpointing=gradient_checkpointing,
**kwargs,
)
self.max_2d_position_embeddings = max_2d_position_embeddings
self.max_rel_pos = max_rel_pos
self.rel_pos_bins = rel_pos_bins
self.fast_qkv = fast_qkv
self.max_rel_2d_pos = max_rel_2d_pos
self.rel_2d_pos_bins = rel_2d_pos_bins
self.convert_sync_batchnorm = convert_sync_batchnorm
self.image_feature_pool_shape = image_feature_pool_shape
self.coordinate_size = coordinate_size
self.shape_size = shape_size
self.has_relative_attention_bias = has_relative_attention_bias
self.has_spatial_attention_bias = has_spatial_attention_bias
self.has_visual_segment_embedding = has_visual_segment_embedding
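# Illustrative sketch (not part of the original file): the layout-specific knobs sit on
# top of the BERT-style arguments inherited from `LayoutLMConfig`; the values below are
# just the defaults made explicit.
def _example_layoutlmv2_config():
    return LayoutLMv2Config(
        image_feature_pool_shape=[7, 7, 256],    # 7x7 pooled visual grid, 256-dim features
        has_relative_attention_bias=True,        # 1D relative position bias in attention
        has_spatial_attention_bias=True,         # separate x/y biases computed from bboxes
        has_visual_segment_embedding=False,      # optional learned embedding for visual tokens
    )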
| EXA-1-master | exa/models/unilm-master/layoutlmft/layoutlmft/models/layoutlmv2/configuration_layoutlmv2.py |
# coding=utf-8
from transformers.utils import logging
from ..layoutlmv2 import LayoutLMv2Config
logger = logging.get_logger(__name__)
LAYOUTXLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"layoutxlm-base": "https://huggingface.co/layoutxlm-base/resolve/main/config.json",
"layoutxlm-large": "https://huggingface.co/layoutxlm-large/resolve/main/config.json",
}
class LayoutXLMConfig(LayoutLMv2Config):
model_type = "layoutxlm"
| EXA-1-master | exa/models/unilm-master/layoutlmft/layoutlmft/models/layoutxlm/configuration_layoutxlm.py |
# coding=utf-8
from transformers import XLMRobertaTokenizerFast
from transformers.file_utils import is_sentencepiece_available
from transformers.utils import logging
if is_sentencepiece_available():
from .tokenization_layoutxlm import LayoutXLMTokenizer
else:
LayoutXLMTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"layoutxlm-base": "https://huggingface.co/layoutxlm-base/resolve/main/sentencepiece.bpe.model",
"layoutxlm-large": "https://huggingface.co/layoutxlm-large/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_file": {
"layoutxlm-base": "https://huggingface.co/layoutxlm-base/resolve/main/tokenizer.json",
"layoutxlm-large": "https://huggingface.co/layoutxlm-large/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"layoutxlm-base": 512,
"layoutxlm-large": 512,
}
class LayoutXLMTokenizerFast(XLMRobertaTokenizerFast):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = LayoutXLMTokenizer
def __init__(self, model_max_length=512, **kwargs):
super().__init__(model_max_length=model_max_length, **kwargs)
| EXA-1-master | exa/models/unilm-master/layoutlmft/layoutlmft/models/layoutxlm/tokenization_layoutxlm_fast.py |
# coding=utf-8
from transformers import XLMRobertaTokenizer
from transformers.utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"layoutxlm-base": "https://huggingface.co/layoutxlm-base/resolve/main/sentencepiece.bpe.model",
"layoutxlm-large": "https://huggingface.co/layoutxlm-large/resolve/main/sentencepiece.bpe.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"layoutxlm-base": 512,
"layoutxlm-large": 512,
}
class LayoutXLMTokenizer(XLMRobertaTokenizer):
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
def __init__(self, model_max_length=512, **kwargs):
super().__init__(model_max_length=model_max_length, **kwargs)
| EXA-1-master | exa/models/unilm-master/layoutlmft/layoutlmft/models/layoutxlm/tokenization_layoutxlm.py |
from .configuration_layoutxlm import LayoutXLMConfig
from .modeling_layoutxlm import LayoutXLMForRelationExtraction, LayoutXLMForTokenClassification, LayoutXLMModel
from .tokenization_layoutxlm import LayoutXLMTokenizer
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
| EXA-1-master | exa/models/unilm-master/layoutlmft/layoutlmft/models/layoutxlm/__init__.py |
# coding=utf-8
from transformers.utils import logging
from ..layoutlmv2 import LayoutLMv2ForRelationExtraction, LayoutLMv2ForTokenClassification, LayoutLMv2Model
from .configuration_layoutxlm import LayoutXLMConfig
logger = logging.get_logger(__name__)
LAYOUTXLM_PRETRAINED_MODEL_ARCHIVE_LIST = [
"layoutxlm-base",
"layoutxlm-large",
]
class LayoutXLMModel(LayoutLMv2Model):
config_class = LayoutXLMConfig
class LayoutXLMForTokenClassification(LayoutLMv2ForTokenClassification):
config_class = LayoutXLMConfig
class LayoutXLMForRelationExtraction(LayoutLMv2ForRelationExtraction):
config_class = LayoutXLMConfig
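# LayoutXLM reuses the LayoutLMv2 architecture unchanged; only `config_class` differs so
# that these classes load checkpoints whose config declares `model_type` "layoutxlm".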
| EXA-1-master | exa/models/unilm-master/layoutlmft/layoutlmft/models/layoutxlm/modeling_layoutxlm.py |
from transformers.models.layoutlm import *
| EXA-1-master | exa/models/unilm-master/layoutlmft/layoutlmft/models/layoutlm/__init__.py |