python_code | repo_name | file_path
---|---|---|
# --------------------------------------------------------
# The YiTrans End-to-End Speech Translation System for IWSLT 2022 Offline Shared Task (https://arxiv.org/abs/2206.05777)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/YiTrans
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq code bases
# https://github.com/facebookresearch/fairseq
# --------------------------------------------------------
from .multihead_attention import MultiheadAttention
from .relative_pos_enc import RelativePositionalEncoding
from .transformer_layer import TransformerEncoderLayerBase, TransformerDecoderLayerBase
from .w2v_encoder import TransformerEncoder, TransformerSentenceEncoderLayer
from .learned_positional_embedding import LearnedPositionalEmbedding
__all__ = [
"MultiheadAttention",
"RelativePositionalEncoding",
"TransformerEncoderLayerBase",
"TransformerDecoderLayerBase",
"TransformerEncoder",
"TransformerSentenceEncoderLayer"
]
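# Illustrative usage (not part of the original file; assumes the package import
# path `speechlm.modules` used elsewhere in this repo):
#   from speechlm.modules import MultiheadAttention, RelativePositionalEncoding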
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/modules/__init__.py |
# --------------------------------------------------------
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq code bases
# https://github.com/facebookresearch/fairseq
# --------------------------------------------------------
"""
Modified from https://github.com/facebookresearch/fairseq/blob/main/fairseq/modules/transformer_layer.py
https://github.com/microsoft/SpeechT5/blob/main/Speech2C/speech2c/models/modules/transformer_decoder_layer.py
"""
from typing import Dict, List, Optional
import torch
from torch import Tensor
from fairseq.modules import LayerNorm
from speechlm.modules.multihead_attention import MultiheadAttention
from fairseq.modules.transformer_layer import TransformerEncoderLayerBase as FairseqTransformerEncoderLayerBase
from fairseq.modules.transformer_layer import TransformerDecoderLayerBase as FairseqTransformerDecoderLayerBase
class TransformerEncoderLayerBase(FairseqTransformerEncoderLayerBase):
"""Encoder layer block.
In the original paper each operation (multi-head attention or FFN) is
postprocessed with: `dropout -> add residual -> layernorm`. In the
tensor2tensor code they suggest that learning is more robust when
preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*cfg.encoder.normalize_before* to ``True``.
Args:
cfg (fairseq.models.transformer.TransformerConfig): model configuration
"""
def __init__(self, cfg, has_relative_attention_bias=False, scaling_for_att=1.0):
self.scaling_for_att = scaling_for_att
super().__init__(cfg)
if has_relative_attention_bias:
self.norm_k = LayerNorm(self.embed_dim // cfg.encoder.attention_heads)
def build_self_attention(self, embed_dim, cfg, scaling_for_att=1.0):
return MultiheadAttention(
embed_dim,
cfg.encoder.attention_heads,
dropout=cfg.attention_dropout,
self_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
scaling_for_att=self.scaling_for_att,
)
def forward(
self,
x,
encoder_padding_mask: Optional[Tensor],
attn_mask: Optional[Tensor] = None,
pos_bias=None,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor): binary ByteTensor of shape
`(batch, seq_len)` where padding elements are indicated by ``1``.
attn_mask (ByteTensor): binary tensor of shape `(tgt_len, src_len)`,
where `tgt_len` is the length of output and `src_len` is the
length of input, though here both are equal to `seq_len`.
`attn_mask[tgt_i, src_j] = 1` means that when calculating the
embedding for `tgt_i`, we exclude (mask out) `src_j`. This is
useful for strided self-attention.
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
# anything in original attn_mask = 1, becomes -1e8
# anything in original attn_mask = 0, becomes 0
# Note that we cannot use -inf here, because at some edge cases,
# the attention weight (before softmax) for some padded element in query
# will become -inf, which results in NaN in model parameters
if attn_mask is not None:
attn_mask = attn_mask.masked_fill(
attn_mask.to(torch.bool), -1e8 if x.dtype == torch.float32 else -1e4
)
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if pos_bias is not None:
pos_bias = self.norm_k(pos_bias)
x, _ = self.self_attn(
query=x,
key=x,
value=x,
key_padding_mask=encoder_padding_mask,
need_weights=False,
attn_mask=attn_mask,
position_bias=pos_bias,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
x = self.fc2(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
return x
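# Illustrative note (not from the original file): the two residual orderings the
# class docstring refers to, written schematically for one sub-layer:
#   post-norm (normalize_before=False): x = layer_norm(residual + dropout(sublayer(x)))
#   pre-norm  (normalize_before=True):  x = residual + dropout(sublayer(layer_norm(x)))
# The forward() above implements both orderings via the `self.normalize_before` checks.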
class TransformerDecoderLayerBase(FairseqTransformerDecoderLayerBase):
"""Decoder layer block.
In the original paper each operation (multi-head attention, encoder
attention or FFN) is postprocessed with: `dropout -> add residual ->
layernorm`. In the tensor2tensor code they suggest that learning is more
robust when preprocessing each layer with layernorm and postprocessing with:
`dropout -> add residual`. We default to the approach in the paper, but the
tensor2tensor approach can be enabled by setting
*cfg.decoder.normalize_before* to ``True``.
Args:
cfg (fairseq.models.transformer.TransformerConfig): model configuration
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self, cfg, no_encoder_attn=False, add_bias_kv=False, add_zero_attn=False, has_relative_attention_bias=False, scaling_for_att=1.0,
):
self.scaling_for_att = scaling_for_att
super().__init__(cfg,
no_encoder_attn,
add_bias_kv,
add_zero_attn,
)
if has_relative_attention_bias:
self.norm_k = LayerNorm(self.embed_dim // cfg.decoder.attention_heads)
def build_self_attention(
self, embed_dim, cfg, add_bias_kv=False, add_zero_attn=False
):
return MultiheadAttention(
embed_dim,
cfg.decoder.attention_heads,
dropout=cfg.attention_dropout,
add_bias_kv=add_bias_kv,
add_zero_attn=add_zero_attn,
self_attention=not cfg.cross_self_attention,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
scaling_for_att=self.scaling_for_att,
)
def build_encoder_attention(self, embed_dim, cfg):
return MultiheadAttention(
embed_dim,
cfg.decoder.attention_heads,
kdim=cfg.encoder.embed_dim,
vdim=cfg.encoder.embed_dim,
dropout=cfg.attention_dropout,
encoder_decoder_attention=True,
q_noise=self.quant_noise,
qn_block_size=self.quant_noise_block_size,
scaling_for_att=self.scaling_for_att,
)
def forward(
self,
x,
encoder_out: Optional[torch.Tensor] = None,
encoder_padding_mask: Optional[torch.Tensor] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
prev_self_attn_state: Optional[List[torch.Tensor]] = None,
prev_attn_state: Optional[List[torch.Tensor]] = None,
self_attn_mask: Optional[torch.Tensor] = None,
self_attn_padding_mask: Optional[torch.Tensor] = None,
need_attn: bool = False,
need_head_weights: bool = False,
pos_bias=None,
):
"""
Args:
x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_padding_mask (ByteTensor, optional): binary
ByteTensor of shape `(batch, src_len)` where padding
elements are indicated by ``1``.
need_attn (bool, optional): return attention weights
need_head_weights (bool, optional): return attention weights
for each head (default: return average over heads).
Returns:
encoded output of shape `(seq_len, batch, embed_dim)`
"""
if need_head_weights:
need_attn = True
residual = x
if self.normalize_before:
x = self.self_attn_layer_norm(x)
if pos_bias is not None:
pos_bias = self.norm_k(pos_bias)
if prev_self_attn_state is not None:
prev_key, prev_value = prev_self_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_self_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_self_attn_state[2]
assert incremental_state is not None
self.self_attn._set_input_buffer(incremental_state, saved_state)
_self_attn_input_buffer = self.self_attn._get_input_buffer(incremental_state)
if self.cross_self_attention and not (
incremental_state is not None
and _self_attn_input_buffer is not None
and "prev_key" in _self_attn_input_buffer
):
if self_attn_mask is not None:
assert encoder_out is not None
self_attn_mask = torch.cat(
(x.new_zeros(x.size(0), encoder_out.size(0)), self_attn_mask), dim=1
)
if self_attn_padding_mask is not None:
if encoder_padding_mask is None:
assert encoder_out is not None
encoder_padding_mask = self_attn_padding_mask.new_zeros(
encoder_out.size(1), encoder_out.size(0)
)
self_attn_padding_mask = torch.cat(
(encoder_padding_mask, self_attn_padding_mask), dim=1
)
assert encoder_out is not None
y = torch.cat((encoder_out, x), dim=0)
else:
y = x
x, attn = self.self_attn(
query=x,
key=y,
value=y,
key_padding_mask=self_attn_padding_mask,
incremental_state=incremental_state,
need_weights=False,
attn_mask=self_attn_mask,
position_bias=pos_bias,
)
if self.c_attn is not None:
tgt_len, bsz = x.size(0), x.size(1)
x = x.view(tgt_len, bsz, self.nh, self.head_dim)
x = torch.einsum("tbhd,h->tbhd", x, self.c_attn)
x = x.reshape(tgt_len, bsz, self.embed_dim)
if self.attn_ln is not None:
x = self.attn_ln(x)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.self_attn_layer_norm(x)
if self.encoder_attn is not None and encoder_out is not None:
residual = x
if self.normalize_before:
x = self.encoder_attn_layer_norm(x)
if prev_attn_state is not None:
prev_key, prev_value = prev_attn_state[:2]
saved_state: Dict[str, Optional[Tensor]] = {
"prev_key": prev_key,
"prev_value": prev_value,
}
if len(prev_attn_state) >= 3:
saved_state["prev_key_padding_mask"] = prev_attn_state[2]
assert incremental_state is not None
self.encoder_attn._set_input_buffer(incremental_state, saved_state)
x, attn = self.encoder_attn(
query=x,
key=encoder_out,
value=encoder_out,
key_padding_mask=encoder_padding_mask,
incremental_state=incremental_state,
static_kv=True,
need_weights=need_attn or (not self.training and self.need_attn),
need_head_weights=need_head_weights,
)
x = self.dropout_module(x)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.encoder_attn_layer_norm(x)
residual = x
if self.normalize_before:
x = self.final_layer_norm(x)
x = self.activation_fn(self.fc1(x))
x = self.activation_dropout_module(x)
if self.ffn_layernorm is not None:
x = self.ffn_layernorm(x)
x = self.fc2(x)
x = self.dropout_module(x)
if self.w_resid is not None:
residual = torch.mul(self.w_resid, residual)
x = self.residual_connection(x, residual)
if not self.normalize_before:
x = self.final_layer_norm(x)
if self.onnx_trace and incremental_state is not None:
saved_state = self.self_attn._get_input_buffer(incremental_state)
assert saved_state is not None
if self_attn_padding_mask is not None:
self_attn_state = [
saved_state["prev_key"],
saved_state["prev_value"],
saved_state["prev_key_padding_mask"],
]
else:
self_attn_state = [saved_state["prev_key"], saved_state["prev_value"]]
return x, attn, self_attn_state
return x, attn, None
def make_generation_fast_(self, need_attn: bool = False, **kwargs):
self.need_attn = need_attn
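# Illustrative wiring sketch (an assumption, not part of the original file; it
# relies on fairseq's default TransformerConfig values):
#   from fairseq.models.transformer import TransformerConfig
#   cfg = TransformerConfig()                      # 512-dim embeddings, 8 heads by default
#   enc_layer = TransformerEncoderLayerBase(cfg)
#   x = torch.zeros(10, 2, cfg.encoder.embed_dim)  # (seq_len, batch, embed_dim)
#   y = enc_layer(x, encoder_padding_mask=None)    # output has the same shape as x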
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/modules/transformer_layer.py |
# --------------------------------------------------------
# The YiTrans End-to-End Speech Translation System for IWSLT 2022 Offline Shared Task (https://arxiv.org/abs/2206.05777)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/YiTrans
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq code bases
# https://github.com/facebookresearch/fairseq
# --------------------------------------------------------
"""
Modified from https://github.com/facebookresearch/fairseq/blob/main/fairseq/models/transformer/transformer_decoder.py
"""
import math
from typing import Any, Dict, List, Optional
import torch
import torch.nn as nn
from fairseq import utils
from fairseq.distributed import fsdp_wrap
from fairseq.models import FairseqIncrementalDecoder
from fairseq.models.transformer import TransformerConfig
from fairseq.modules import (
AdaptiveSoftmax,
BaseLayer,
FairseqDropout,
LayerDropModuleList,
LayerNorm,
PositionalEmbedding,
SinusoidalPositionalEmbedding,
)
from fairseq.modules.checkpoint_activations import checkpoint_wrapper
from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_
from torch import Tensor
from speechlm.modules import transformer_layer
from speechlm.modules.relative_pos_enc import RelativePositionalEncoding
# rewrite name for backward compatibility in `make_generation_fast_`
def module_name_fordropout(module_name: str) -> str:
if module_name == "TransformerDecoderBase":
return "TransformerDecoder"
else:
return module_name
class TransformerDecoderBase(FairseqIncrementalDecoder):
"""
Transformer decoder consisting of *cfg.decoder.layers* layers. Each layer
is a :class:`TransformerDecoderLayer`.
Args:
cfg (fairseq.models.transformer.TransformerConfig): model configuration
dictionary (~fairseq.data.Dictionary): decoding dictionary
embed_tokens (torch.nn.Embedding): output embedding
no_encoder_attn (bool, optional): whether to attend to encoder outputs
(default: False).
"""
def __init__(
self,
cfg,
dictionary,
embed_tokens,
no_encoder_attn=False,
output_projection=None,
use_rel_pos_enc=False,
):
self.cfg = cfg
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self._future_mask = torch.empty(0)
self.dropout_module = FairseqDropout(
cfg.dropout, module_name=module_name_fordropout(self.__class__.__name__)
)
self.decoder_layerdrop = cfg.decoder.layerdrop
self.share_input_output_embed = cfg.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = cfg.decoder.embed_dim
self.embed_dim = embed_dim
self.output_embed_dim = cfg.decoder.output_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = cfg.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if cfg.no_scale_embedding else math.sqrt(embed_dim)
if not cfg.adaptive_input and cfg.quant_noise.pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
cfg.quant_noise.pq,
cfg.quant_noise.pq_block_size,
)
else:
self.quant_noise = None
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
self.max_target_positions,
embed_dim,
self.padding_idx,
learned=cfg.decoder.learned_pos,
)
if not cfg.no_token_positional_embeddings
else None
)
if cfg.layernorm_embedding:
self.layernorm_embedding = LayerNorm(embed_dim, export=cfg.export)
else:
self.layernorm_embedding = None
self.cross_self_attention = cfg.cross_self_attention
if self.decoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.use_rel_pos_enc = use_rel_pos_enc
self.layers.extend(
[
self.build_decoder_layer(cfg, no_encoder_attn)
for _ in range(cfg.decoder.layers)
]
)
self.num_layers = len(self.layers)
if cfg.decoder.normalize_before and not cfg.no_decoder_final_norm:
self.layer_norm = LayerNorm(embed_dim, export=cfg.export)
else:
self.layer_norm = None
self.project_out_dim = (
Linear(embed_dim, self.output_embed_dim, bias=False)
if embed_dim != self.output_embed_dim and not cfg.tie_adaptive_weights
else None
)
self.adaptive_softmax = None
self.output_projection = output_projection
if self.output_projection is None:
self.build_output_projection(cfg, dictionary, embed_tokens)
if self.use_rel_pos_enc:
self.pos_emb = RelativePositionalEncoding(embed_dim // cfg.decoder.attention_heads, 24)
def build_output_projection(self, cfg, dictionary, embed_tokens):
if cfg.adaptive_softmax_cutoff is not None:
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
utils.eval_str_list(cfg.adaptive_softmax_cutoff, type=int),
dropout=cfg.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if cfg.tie_adaptive_weights else None,
factor=cfg.adaptive_softmax_factor,
tie_proj=cfg.tie_adaptive_proj,
)
elif self.share_input_output_embed:
self.output_projection = nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
self.output_projection.weight = self.embed_tokens.weight
else:
self.output_projection = nn.Linear(
self.output_embed_dim, len(dictionary), bias=False
)
nn.init.normal_(
self.output_projection.weight, mean=0, std=self.output_embed_dim ** -0.5
)
num_base_layers = cfg.base_layers
for i in range(num_base_layers):
self.layers.insert(
((i + 1) * cfg.decoder.layers) // (num_base_layers + 1),
BaseLayer(cfg),
)
def build_decoder_layer(self, cfg, no_encoder_attn=False):
layer = transformer_layer.TransformerDecoderLayerBase(cfg, no_encoder_attn, has_relative_attention_bias=self.use_rel_pos_enc)
checkpoint = cfg.checkpoint_activations
if checkpoint:
offload_to_cpu = cfg.offload_activations
layer = checkpoint_wrapper(layer, offload_to_cpu=offload_to_cpu)
# if we are checkpointing, enforce that FSDP always wraps the
# checkpointed layer, regardless of layer size
min_params_to_wrap = cfg.min_params_to_wrap if not checkpoint else 0
layer = fsdp_wrap(layer, min_num_params=min_params_to_wrap)
return layer
def forward(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention, should be of size T x B x C
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
if not features_only:
x = self.output_layer(x)
return x, extra
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
return self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
"""
A scriptable subclass of this class has an extract_features method and calls
super().extract_features, but super() is not supported in torchscript. A copy of
this function is made to be used in the subclass instead.
"""
def extract_features_scriptable(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]],
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
bs, slen = prev_output_tokens.size()
if alignment_layer is None:
alignment_layer = self.num_layers - 1
enc: Optional[Tensor] = None
padding_mask: Optional[Tensor] = None
if encoder_out is not None and len(encoder_out["encoder_out"]) > 0:
enc = encoder_out["encoder_out"][0]
assert (
enc.size()[1] == bs
), f"Expected enc.shape == (t, {bs}, c) got {enc.shape}"
if encoder_out is not None and len(encoder_out["encoder_padding_mask"]) > 0:
padding_mask = encoder_out["encoder_padding_mask"][0]
# embed positions
positions = None
if self.embed_positions is not None:
positions = self.embed_positions(
prev_output_tokens, incremental_state=incremental_state
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = self.dropout_module(x)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
if self.use_rel_pos_enc:
pos_seq = torch.arange(0, slen).long().to(x.device)
pos_seq = pos_seq[:, None] - pos_seq[None, :]
pos_k, _ = self.pos_emb(pos_seq, incremental_state)
else:
pos_k = None
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn: Optional[Tensor] = None
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn, _ = layer(
x,
enc,
padding_mask,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
pos_bias=pos_k,
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": [attn], "inner_states": inner_states}
def output_layer(self, features):
"""Project features to the vocabulary size."""
if self.adaptive_softmax is None:
# project back to size of vocabulary
return self.output_projection(features)
else:
return features
def max_positions(self):
"""Maximum output length supported by the decoder."""
if self.embed_positions is None:
return self.max_target_positions
return min(self.max_target_positions, self.embed_positions.max_positions)
def buffered_future_mask(self, tensor):
dim = tensor.size(0)
# self._future_mask.device != tensor.device is not working in TorchScript. This is a workaround.
if (
self._future_mask.size(0) == 0
or (not self._future_mask.device == tensor.device)
or self._future_mask.size(0) < dim
):
self._future_mask = torch.triu(
utils.fill_with_neg_inf(torch.zeros([dim, dim])), 1
)
self._future_mask = self._future_mask.to(tensor)
return self._future_mask[:dim, :dim]
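# Illustrative note (not from the original file): for dim=3, buffered_future_mask
# returns
#   [[0., -inf, -inf],
#    [0.,   0., -inf],
#    [0.,   0.,   0.]]
# i.e. position i may only attend to positions <= i; this additive causal mask is
# passed to each layer's self-attention unless full_context_alignment is True.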
def upgrade_state_dict_named(self, state_dict, name):
"""Upgrade a (possibly old) state dict for new versions of fairseq."""
if isinstance(self.embed_positions, SinusoidalPositionalEmbedding):
weights_key = "{}.embed_positions.weights".format(name)
if weights_key in state_dict:
del state_dict[weights_key]
state_dict[
"{}.embed_positions._float_tensor".format(name)
] = torch.FloatTensor(1)
if f"{name}.output_projection.weight" not in state_dict:
if self.share_input_output_embed:
embed_out_key = f"{name}.embed_tokens.weight"
else:
embed_out_key = f"{name}.embed_out"
if embed_out_key in state_dict:
state_dict[f"{name}.output_projection.weight"] = state_dict[
embed_out_key
]
if not self.share_input_output_embed:
del state_dict[embed_out_key]
for i in range(self.num_layers):
# update layer norms
layer_norm_map = {
"0": "self_attn_layer_norm",
"1": "encoder_attn_layer_norm",
"2": "final_layer_norm",
}
for old, new in layer_norm_map.items():
for m in ("weight", "bias"):
k = "{}.layers.{}.layer_norms.{}.{}".format(name, i, old, m)
if k in state_dict:
state_dict[
"{}.layers.{}.{}.{}".format(name, i, new, m)
] = state_dict[k]
del state_dict[k]
version_key = "{}.version".format(name)
if utils.item(state_dict.get(version_key, torch.Tensor([1]))[0]) <= 2:
# earlier checkpoints did not normalize after the stack of layers
self.layer_norm = None
self.normalize = False
state_dict[version_key] = torch.Tensor([1])
return state_dict
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m
class TransformerDecoderBaseScriptable(TransformerDecoderBase):
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
# call scriptable method from parent class
x, _ = self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
return x, None
class TransformerDecoder(TransformerDecoderBase):
def __init__(
self,
args,
dictionary,
embed_tokens,
no_encoder_attn=False,
output_projection=None,
):
self.args = args
super().__init__(
TransformerConfig.from_namespace(args),
dictionary,
embed_tokens,
no_encoder_attn=no_encoder_attn,
output_projection=output_projection,
use_rel_pos_enc=getattr(args, "use_rel_pos_enc", False),
)
def build_output_projection(self, args, dictionary, embed_tokens):
super().build_output_projection(
TransformerConfig.from_namespace(args), dictionary, embed_tokens
)
def build_decoder_layer(self, args, no_encoder_attn=False):
return super().build_decoder_layer(
TransformerConfig.from_namespace(args), no_encoder_attn=no_encoder_attn
)
class TransformerDecoderScriptable(TransformerDecoder):
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[Dict[str, List[Tensor]]] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
# call scriptable method from parent class
x, _ = self.extract_features_scriptable(
prev_output_tokens,
encoder_out,
incremental_state,
full_context_alignment,
alignment_layer,
alignment_heads,
)
return x, None
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/modules/transformer_decoder.py |
# --------------------------------------------------------
# Pre-Training Transformer Decoder for End-to-End ASR Model with Unpaired Speech Data (https://arxiv.org/abs/2203.17113)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/Speech2C
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Based on fairseq code bases
# https://github.com/pytorch/fairseq
# --------------------------------------------------------
import torch
class RelativePositionalEncoding(torch.nn.Module):
def __init__(self, d_model, maxlen=1000, embed_v=False):
super(RelativePositionalEncoding, self).__init__()
self.d_model = d_model
self.maxlen = maxlen
self.pe_k = torch.nn.Embedding(2*maxlen, d_model)
if embed_v:
self.pe_v = torch.nn.Embedding(2*maxlen, d_model)
self.embed_v = embed_v
def forward(self, pos_seq, incremental_state=None):
pos_seq[pos_seq < -self.maxlen] = -self.maxlen
pos_seq[pos_seq >= self.maxlen] = self.maxlen - 1
pos_seq = pos_seq + self.maxlen
if incremental_state is not None:
pos_seq = pos_seq[-1:]
if self.embed_v:
return self.pe_k(pos_seq), self.pe_v(pos_seq)
else:
return self.pe_k(pos_seq), None
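# Minimal smoke test (illustrative, not part of the original file). It mirrors how
# TransformerDecoderBase.extract_features_scriptable builds the relative-offset
# matrix before passing the embeddings to each layer as `pos_bias`.
if __name__ == "__main__":
    d_head, seq_len = 64, 5
    pe = RelativePositionalEncoding(d_head, maxlen=1000)
    pos_seq = torch.arange(seq_len)
    rel = pos_seq[:, None] - pos_seq[None, :]  # (seq_len, seq_len) relative offsets
    pos_k, pos_v = pe(rel)                     # pos_k: (seq_len, seq_len, d_head)
    assert pos_k.shape == (seq_len, seq_len, d_head) and pos_v is None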
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/modules/relative_pos_enc.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
import bisect
import numpy as np
from torch.utils.data.dataloader import default_collate
from fairseq.data import FairseqDataset
class ConcatDataset(FairseqDataset):
@staticmethod
def cumsum(sequence, sample_ratios):
r, s = [], 0
for e, ratio in zip(sequence, sample_ratios):
curr_len = int(ratio * len(e))
r.append(curr_len + s)
s += curr_len
return r
def __init__(self, datasets, sample_ratios=1):
super(ConcatDataset, self).__init__()
assert len(datasets) > 0, "datasets should not be an empty iterable"
self.datasets = list(datasets)
if isinstance(sample_ratios, int):
sample_ratios = [sample_ratios] * len(self.datasets)
self.sample_ratios = sample_ratios
self.cumulative_sizes = self.cumsum(self.datasets, sample_ratios)
self.real_sizes = [len(d) for d in self.datasets]
def __len__(self):
return self.cumulative_sizes[-1]
def __getitem__(self, idx):
dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
return self.datasets[dataset_idx][sample_idx]
def _get_dataset_and_sample_index(self, idx: int):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
sample_idx = sample_idx % self.real_sizes[dataset_idx]
return dataset_idx, sample_idx
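# Worked example (illustrative, not from the original file): with two datasets of
# lengths [4, 6] and sample_ratios [2, 1], cumsum() gives cumulative_sizes == [8, 14],
# so len(self) == 14. Index 10 maps to dataset 1 and sample index (10 - 8) % 6 == 2,
# while index 5 maps to dataset 0 and sample index 5 % 4 == 1.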
def collater(self, samples, **extra_args):
# For now this only supports datasets with the same underlying collater implementation
if hasattr(self.datasets[0], "collater"):
return self.datasets[0].collater(samples, **extra_args)
else:
return default_collate(samples, **extra_args)
def size(self, idx: int):
"""
Return an example's size as a float or tuple.
"""
dataset_idx, sample_idx = self._get_dataset_and_sample_index(idx)
return self.datasets[dataset_idx].size(sample_idx)
def num_tokens(self, index: int):
return np.max(self.size(index))
def attr(self, attr: str, index: int):
dataset_idx = bisect.bisect_right(self.cumulative_sizes, index)
return getattr(self.datasets[dataset_idx], attr, None)
@property
def sizes(self):
_dataset_sizes = []
for ds, sr in zip(self.datasets, self.sample_ratios):
if isinstance(ds.sizes, np.ndarray):
_dataset_sizes.append(np.tile(ds.sizes, sr))
else:
# Only support underlying dataset with single size array.
assert isinstance(ds.sizes, list)
_dataset_sizes.append(np.tile(ds.sizes[0], sr))
return np.concatenate(_dataset_sizes)
@property
def supports_prefetch(self):
return all(d.supports_prefetch for d in self.datasets)
def ordered_indices(self):
"""
Returns indices sorted by length, so that less padding is needed.
"""
if isinstance(self.sizes, np.ndarray) and len(self.sizes.shape) > 1:
# special handling for concatenating lang_pair_datasets
if getattr(self.datasets[0], "shuffle", False):
indices = np.random.permutation(len(self)).astype(np.int64)
else:
indices = np.arange(len(self), dtype=np.int64)
sizes = self.sizes
tgt_sizes = (
sizes[:, 1] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else None
)
src_sizes = (
sizes[:, 0] if len(sizes.shape) > 0 and sizes.shape[1] > 1 else sizes
)
# sort by target length, then source length
if tgt_sizes is not None:
indices = indices[np.argsort(tgt_sizes[indices], kind="mergesort")]
return indices[np.argsort(src_sizes[indices], kind="mergesort")]
else:
return np.argsort(self.sizes)
def prefetch(self, indices):
frm = 0
for to, ds in zip(self.cumulative_sizes, self.datasets):
real_size = len(ds)
if getattr(ds, "supports_prefetch", False):
ds.prefetch([(i - frm) % real_size for i in indices if frm <= i < to])
frm = to
@property
def can_reuse_epoch_itr_across_epochs(self):
return all(d.can_reuse_epoch_itr_across_epochs for d in self.datasets)
def set_epoch(self, epoch):
super().set_epoch(epoch)
for ds in self.datasets:
if hasattr(ds, "set_epoch"):
ds.set_epoch(epoch)
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/data/concat_dataset.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
import logging
import numpy as np
import torch
import os
import itertools
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
FairseqDataset,
PrependTokenDataset,
data_utils,
indexed_dataset,
)
logger = logging.getLogger(__name__)
def load_langtriple_dataset(
data_path,
split,
src,
src_dict,
ref,
ref_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
prepend_bos_src=None,
lang_format="[{}]",
):
assert not truncate_source
def split_exists(split, src, ref, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}-{}.{}".format(split, src, ref, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
ref_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, ref, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}-{}.".format(split_k, src, ref, tgt))
elif split_exists(split_k, tgt, ref, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}-{}.".format(split_k, tgt, ref, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
src_datasets.append(src_dataset)
ref_dataset = data_utils.load_indexed_dataset(
prefix + ref, ref_dict, dataset_impl
)
ref_datasets.append(ref_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{}-{} {} examples".format(
data_path, split_k, src, ref, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(ref_datasets)
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
ref_dataset = ref_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
ref_dataset = ConcatDataset(ref_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(ref_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
ref_dataset = PrependTokenDataset(ref_dataset, ref_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
elif prepend_bos_src is not None:
logger.info(f"prepending src bos: {prepend_bos_src}")
src_dataset = PrependTokenDataset(src_dataset, prepend_bos_src)
ref_dataset = PrependTokenDataset(ref_dataset, prepend_bos_src)
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index(lang_format.format(src))
)
ref_dataset = AppendTokenDataset(
ref_dataset, ref_dict.index(lang_format.format(ref))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index(lang_format.format(tgt))
)
eos = tgt_dict.index(lang_format.format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguageTripleDataset(
src_dataset,
src_dataset.sizes,
src_dict,
ref_dataset,
ref_dataset.sizes,
ref_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
)
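# Illustrative note on the file naming this loader expects (derived from
# split_exists/prefix above; the language codes here are hypothetical placeholders):
# for split="train", src="ltr", ref="kmu", tgt="spm" it looks for indexed datasets
#   {data_path}/train.ltr-kmu-spm.ltr   (source)
#   {data_path}/train.ltr-kmu-spm.kmu   (reference)
#   {data_path}/train.ltr-kmu-spm.spm   (target)
# or the same suffixes under the swapped prefix train.spm-kmu-ltr.* as a fallback.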
def collate(
samples,
pad_idx,
eos_idx,
left_pad_source=True,
left_pad_target=False,
input_feeding=True,
pad_to_length=None,
pad_to_multiple=1,
):
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx,
None,
left_pad,
move_eos_to_beginning,
pad_to_length=pad_to_length,
pad_to_multiple=pad_to_multiple,
)
def check_alignment(alignment, src_len, tgt_len):
if alignment is None or len(alignment) == 0:
return False
if (
alignment[:, 0].max().item() >= src_len - 1
or alignment[:, 1].max().item() >= tgt_len - 1
):
logger.warning("alignment size mismatch found, skipping alignment!")
return False
return True
def compute_alignment_weights(alignments):
"""
Given a tensor of shape [:, 2] containing the source-target indices
corresponding to the alignments, a weight vector containing the
inverse frequency of each target index is computed.
For example, if alignments = [[5, 7], [2, 3], [1, 3], [4, 2]], then
a tensor containing [1., 0.5, 0.5, 1] should be returned (since target
index 3 is repeated twice)
"""
align_tgt = alignments[:, 1]
_, align_tgt_i, align_tgt_c = torch.unique(
align_tgt, return_inverse=True, return_counts=True
)
align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]]
return 1.0 / align_weights.float()
id = torch.LongTensor([s["id"] for s in samples])
src_tokens = merge(
"source",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
ref_tokens = merge(
"reference",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
# sort by descending source length
src_lengths = torch.LongTensor(
[s["source"].ne(pad_idx).long().sum() for s in samples]
)
ref_lengths = torch.LongTensor(
[s["reference"].ne(pad_idx).long().sum() for s in samples]
)
src_lengths, sort_order = src_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
src_tokens = src_tokens.index_select(0, sort_order)
ref_lengths = ref_lengths.index_select(0, sort_order)
ref_tokens = ref_tokens.index_select(0, sort_order)
prev_output_tokens = None
target = None
if samples[0].get("target", None) is not None:
target = merge(
"target",
left_pad=left_pad_target,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
target = target.index_select(0, sort_order)
tgt_lengths = torch.LongTensor(
[s["target"].ne(pad_idx).long().sum() for s in samples]
).index_select(0, sort_order)
ntokens = tgt_lengths.sum().item()
if samples[0].get("prev_output_tokens", None) is not None:
prev_output_tokens = merge("prev_output_tokens", left_pad=left_pad_target)
elif input_feeding:
# we create a shifted version of targets for feeding the
# previous output token(s) into the next decoder step
prev_output_tokens = merge(
"target",
left_pad=left_pad_target,
move_eos_to_beginning=True,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
else:
ntokens = src_lengths.sum().item()
batch = {
"id": id,
"nsentences": len(samples),
"ntokens": ntokens,
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
},
"target": target,
"ref_tokens": ref_tokens,
"ref_lengths": ref_lengths,
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens.index_select(
0, sort_order
)
if samples[0].get("alignment", None) is not None:
bsz, tgt_sz = batch["target"].shape
src_sz = batch["net_input"]["src_tokens"].shape[1]
offsets = torch.zeros((len(sort_order), 2), dtype=torch.long)
offsets[:, 1] += torch.arange(len(sort_order), dtype=torch.long) * tgt_sz
if left_pad_source:
offsets[:, 0] += src_sz - src_lengths
if left_pad_target:
offsets[:, 1] += tgt_sz - tgt_lengths
alignments = [
alignment + offset
for align_idx, offset, src_len, tgt_len in zip(
sort_order, offsets, src_lengths, tgt_lengths
)
for alignment in [samples[align_idx]["alignment"].view(-1, 2)]
if check_alignment(alignment, src_len, tgt_len)
]
if len(alignments) > 0:
alignments = torch.cat(alignments, dim=0)
align_weights = compute_alignment_weights(alignments)
batch["alignments"] = alignments
batch["align_weights"] = align_weights
if samples[0].get("constraints", None) is not None:
# Collate the packed constraints across the samples, padding to
# the length of the longest sample.
lens = [sample.get("constraints").size(0) for sample in samples]
max_len = max(lens)
constraints = torch.zeros((len(samples), max(lens))).long()
for i, sample in enumerate(samples):
constraints[i, 0 : lens[i]] = samples[i].get("constraints")
batch["constraints"] = constraints.index_select(0, sort_order)
return batch
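# Illustrative sketch (hypothetical toy data, not part of the original file): the
# input `collate` expects and the batch it produces. Each sample dict is what
# LanguageTripleDataset.__getitem__ (below) returns.
#   samples = [
#       {"id": 0,
#        "source": torch.LongTensor([4, 5, 2]),
#        "reference": torch.LongTensor([6, 2]),
#        "target": torch.LongTensor([7, 8, 2])},
#   ]
#   batch = collate(samples, pad_idx=1, eos_idx=2)
#   # batch["net_input"]["src_tokens"]: (bsz, src_len), left-padded by default
#   # batch["net_input"]["prev_output_tokens"]: target shifted right for teacher forcing
#   # batch["target"], batch["ref_tokens"], batch["ref_lengths"] are also populated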
class LanguageTripleDataset(FairseqDataset):
"""
A triple of torch.utils.data.Datasets (source, reference, and optional target).
Args:
src (torch.utils.data.Dataset): source dataset to wrap
src_sizes (List[int]): source sentence lengths
src_dict (~fairseq.data.Dictionary): source vocabulary
ref (torch.utils.data.Dataset): reference dataset to wrap
ref_sizes (List[int]): reference sentence lengths
ref_dict (~fairseq.data.Dictionary): reference vocabulary
tgt (torch.utils.data.Dataset, optional): target dataset to wrap
tgt_sizes (List[int], optional): target sentence lengths
tgt_dict (~fairseq.data.Dictionary, optional): target vocabulary
left_pad_source (bool, optional): pad source tensors on the left side
(default: True).
left_pad_target (bool, optional): pad target tensors on the left side
(default: False).
shuffle (bool, optional): shuffle dataset elements before batching
(default: True).
input_feeding (bool, optional): create a shifted version of the targets
to be passed into the model for teacher forcing (default: True).
remove_eos_from_source (bool, optional): if set, removes eos from end
of source if it's present (default: False).
append_eos_to_target (bool, optional): if set, appends eos to end of
target if it's absent (default: False).
align_dataset (torch.utils.data.Dataset, optional): dataset
containing alignments.
constraints (Tensor, optional): 2d tensor with a concatenated, zero-
delimited list of constraints for each sentence.
append_bos (bool, optional): if set, appends bos to the beginning of
source/target sentence.
num_buckets (int, optional): if set to a value greater than 0, then
batches will be bucketed into the given number of batch shapes.
src_lang_id (int, optional): source language ID, if set, the collated batch
will contain a field 'src_lang_id' in 'net_input' which indicates the
source language of the samples.
tgt_lang_id (int, optional): target language ID, if set, the collated batch
will contain a field 'tgt_lang_id' which indicates the target language
of the samples.
"""
def __init__(
self,
src,
src_sizes,
src_dict,
ref,
ref_sizes,
ref_dict,
tgt=None,
tgt_sizes=None,
tgt_dict=None,
left_pad_source=True,
left_pad_target=False,
shuffle=True,
input_feeding=True,
remove_eos_from_source=False,
append_eos_to_target=False,
align_dataset=None,
constraints=None,
append_bos=False,
eos=None,
num_buckets=0,
src_lang_id=None,
tgt_lang_id=None,
pad_to_multiple=1,
):
if tgt_dict is not None:
assert src_dict.pad() == tgt_dict.pad()
assert src_dict.eos() == tgt_dict.eos()
assert src_dict.unk() == tgt_dict.unk()
if tgt is not None:
assert len(src) == len(
tgt
), "Source and target must contain the same number of examples"
assert len(src) == len(
ref
), "Source and reference must contain the same number of examples"
self.src = src
self.ref = ref
self.tgt = tgt
self.src_sizes = np.array(src_sizes)
self.ref_sizes = np.array(ref_sizes)
self.tgt_sizes = np.array(tgt_sizes) if tgt_sizes is not None else None
self.sizes = (
np.vstack((self.src_sizes, self.tgt_sizes)).T
if self.tgt_sizes is not None
else self.src_sizes
)
self.src_dict = src_dict
self.ref_dict = ref_dict
self.tgt_dict = tgt_dict
self.left_pad_source = left_pad_source
self.left_pad_target = left_pad_target
self.shuffle = shuffle
self.input_feeding = input_feeding
self.remove_eos_from_source = remove_eos_from_source
self.append_eos_to_target = append_eos_to_target
self.align_dataset = align_dataset
if self.align_dataset is not None:
assert (
self.tgt_sizes is not None
), "Both source and target needed when alignments are provided"
self.constraints = constraints
self.append_bos = append_bos
self.eos = eos if eos is not None else src_dict.eos()
self.src_lang_id = src_lang_id
self.tgt_lang_id = tgt_lang_id
if num_buckets > 0:
from fairseq.data import BucketPadLengthDataset
self.src = BucketPadLengthDataset(
self.src,
sizes=self.src_sizes,
num_buckets=num_buckets,
pad_idx=self.src_dict.pad(),
left_pad=self.left_pad_source,
)
self.src_sizes = self.src.sizes
logger.info("bucketing source lengths: {}".format(list(self.src.buckets)))
self.ref = BucketPadLengthDataset(
self.ref,
sizes=self.ref_sizes,
num_buckets=num_buckets,
pad_idx=self.ref_dict.pad(),
left_pad=self.left_pad_source,
)
self.ref_sizes = self.ref.sizes
logger.info("bucketing reference lengths: {}".format(list(self.src.buckets)))
if self.tgt is not None:
self.tgt = BucketPadLengthDataset(
self.tgt,
sizes=self.tgt_sizes,
num_buckets=num_buckets,
pad_idx=self.tgt_dict.pad(),
left_pad=self.left_pad_target,
)
self.tgt_sizes = self.tgt.sizes
logger.info(
"bucketing target lengths: {}".format(list(self.tgt.buckets))
)
# determine bucket sizes using self.num_tokens, which will return
# the padded lengths (thanks to BucketPadLengthDataset)
num_tokens = np.vectorize(self.num_tokens, otypes=[np.compat.long])
self.bucketed_num_tokens = num_tokens(np.arange(len(self.src)))
self.buckets = [
(None, num_tokens) for num_tokens in np.unique(self.bucketed_num_tokens)
]
else:
self.buckets = None
self.pad_to_multiple = pad_to_multiple
def get_batch_shapes(self):
return self.buckets
def __getitem__(self, index):
tgt_item = self.tgt[index] if self.tgt is not None else None
src_item = self.src[index]
ref_item = self.ref[index]
# Append EOS to end of tgt sentence if it does not have an EOS and remove
# EOS from end of src sentence if it exists. This is useful when we
# use existing datasets for opposite directions, i.e., when we want to
# use tgt_dataset as src_dataset and vice versa
if self.append_eos_to_target:
eos = self.tgt_dict.eos() if self.tgt_dict else self.src_dict.eos()
if self.tgt and self.tgt[index][-1] != eos:
tgt_item = torch.cat([self.tgt[index], torch.LongTensor([eos])])
if self.append_bos:
bos = self.tgt_dict.bos() if self.tgt_dict else self.src_dict.bos()
if self.tgt and self.tgt[index][0] != bos:
tgt_item = torch.cat([torch.LongTensor([bos]), self.tgt[index]])
bos = self.src_dict.bos()
if self.src[index][0] != bos:
src_item = torch.cat([torch.LongTensor([bos]), self.src[index]])
if self.ref[index][0] != bos:
ref_item = torch.cat([torch.LongTensor([bos]), self.ref[index]])
if self.remove_eos_from_source:
eos = self.src_dict.eos()
if self.src[index][-1] == eos:
src_item = self.src[index][:-1]
if self.ref[index][-1] == eos:
ref_item = self.ref[index][:-1]
example = {
"id": index,
"source": src_item,
"reference": ref_item,
"target": tgt_item,
}
if self.align_dataset is not None:
example["alignment"] = self.align_dataset[index]
if self.constraints is not None:
example["constraints"] = self.constraints[index]
return example
def __len__(self):
return len(self.src)
def collater(self, samples, pad_to_length=None):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[dict]): samples to collate
pad_to_length (dict, optional): a dictionary of
{'source': source_pad_to_length, 'target': target_pad_to_length}
to indicate the max length to pad to in source and target respectively.
Returns:
dict: a mini-batch with the following keys:
- `id` (LongTensor): example IDs in the original input order
- `ntokens` (int): total number of tokens in the batch
- `net_input` (dict): the input to the Model, containing keys:
- `src_tokens` (LongTensor): a padded 2D Tensor of tokens in
the source sentence of shape `(bsz, src_len)`. Padding will
appear on the left if *left_pad_source* is ``True``.
- `src_lengths` (LongTensor): 1D Tensor of the unpadded
lengths of each source sentence of shape `(bsz)`
- `prev_output_tokens` (LongTensor): a padded 2D Tensor of
tokens in the target sentence, shifted right by one
position for teacher forcing, of shape `(bsz, tgt_len)`.
This key will not be present if *input_feeding* is
``False``. Padding will appear on the left if
*left_pad_target* is ``True``.
- `src_lang_id` (LongTensor): a long Tensor which contains source
language IDs of each sample in the batch
- `target` (LongTensor): a padded 2D Tensor of tokens in the
target sentence of shape `(bsz, tgt_len)`. Padding will appear
on the left if *left_pad_target* is ``True``.
- `tgt_lang_id` (LongTensor): a long Tensor which contains target language
IDs of each sample in the batch
"""
res = collate(
samples,
pad_idx=self.src_dict.pad(),
eos_idx=self.eos,
left_pad_source=self.left_pad_source,
left_pad_target=self.left_pad_target,
input_feeding=self.input_feeding,
pad_to_length=pad_to_length,
pad_to_multiple=self.pad_to_multiple,
)
if self.src_lang_id is not None or self.tgt_lang_id is not None:
src_tokens = res["net_input"]["src_tokens"]
bsz = src_tokens.size(0)
if self.src_lang_id is not None:
res["net_input"]["src_lang_id"] = (
torch.LongTensor([[self.src_lang_id]]).expand(bsz, 1).to(src_tokens)
)
if self.tgt_lang_id is not None:
res["tgt_lang_id"] = (
torch.LongTensor([[self.tgt_lang_id]]).expand(bsz, 1).to(src_tokens)
)
return res
def num_tokens(self, index):
"""Return the number of tokens in a sample. This value is used to
enforce ``--max-tokens`` during batching."""
return max(
self.src_sizes[index],
self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
)
def num_tokens_vec(self, indices):
"""Return the number of tokens for a set of positions defined by indices.
This value is used to enforce ``--max-tokens`` during batching."""
sizes = self.src_sizes[indices]
if self.tgt_sizes is not None:
sizes = np.maximum(sizes, self.tgt_sizes[indices])
return sizes
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return (
self.src_sizes[index],
self.tgt_sizes[index] if self.tgt_sizes is not None else 0,
)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
indices = np.random.permutation(len(self)).astype(np.int64)
else:
indices = np.arange(len(self), dtype=np.int64)
if self.buckets is None:
# sort by target length, then source length
if self.tgt_sizes is not None:
indices = indices[np.argsort(self.tgt_sizes[indices], kind="mergesort")]
return indices[np.argsort(self.src_sizes[indices], kind="mergesort")]
else:
# sort by bucketed_num_tokens, which is:
# max(padded_src_len, padded_tgt_len)
return indices[
np.argsort(self.bucketed_num_tokens[indices], kind="mergesort")
]
@property
def supports_prefetch(self):
return getattr(self.src, "supports_prefetch", False) and (
getattr(self.tgt, "supports_prefetch", False) or self.tgt is None
)
def prefetch(self, indices):
self.src.prefetch(indices)
if self.tgt is not None:
self.tgt.prefetch(indices)
if self.align_dataset is not None:
self.align_dataset.prefetch(indices)
def filter_indices_by_size(self, indices, max_sizes):
"""Filter a list of sample indices. Remove those that are longer
than specified in max_sizes.
Args:
indices (np.array): original array of sample indices
max_sizes (int or list[int] or tuple[int]): max sample size,
can be defined separately for src and tgt (then list or tuple)
Returns:
np.array: filtered sample array
list: list of removed indices
"""
return data_utils.filter_paired_dataset_indices_by_size(
self.src_sizes,
self.tgt_sizes,
indices,
max_sizes,
)
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/data/language_trible_dataset.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
import logging
import time
from collections import OrderedDict
from typing import Any, Dict, List, Optional
import numpy as np
from fairseq.data import data_utils
from fairseq.data import FairseqDataset
logger = logging.getLogger(__name__)
class MultiCorpusDataset(FairseqDataset):
"""
see fairseq/fairseq/data/multi_corpus_dataset.__doc__
Args:
datasets: an OrderedDict of FairseqDataset instances.
max_positions: a Dict mapping each dataset key to its max positions, used to
filter sampled indices when check_length is True
distribution: a List containing the probability of drawing an utterance from
the corresponding dataset
max_tokens_ratio: a List of per-dataset max-tokens ratios
seed: random seed for sampling the datasets
sort_indices: if true, will sort the ordered indices by size
check_length: if true, filter sampled indices by max_positions (slow on large datasets)
"""
def __init__(
self,
datasets: Dict[str, FairseqDataset],
max_positions: Dict,
distribution: List[float],
max_tokens_ratio: List[float],
seed: int = 1234,
sort_indices: bool = False,
check_length: bool = False,
):
super().__init__()
assert isinstance(datasets, OrderedDict)
assert len(datasets) == len(distribution)
# assert sum(distribution) == 1
self.datasets = datasets
self.distribution = distribution
self.max_tokens_ratio = max_tokens_ratio
self.seed = seed
self.sort_indices = sort_indices
self.max_positions = max_positions
self.check_length = check_length
# Avoid repeated conversions to list later
self.dataset_list = list(datasets.values())
self.total_num_instances = 0
# first_dataset = self.dataset_list[0]
self.num_instances_per_dataset = []
self.dataset_offsets = []
for i, dataset in enumerate(self.dataset_list):
assert isinstance(dataset, FairseqDataset)
# assert type(dataset) is type(first_dataset)
self.num_instances_per_dataset.append(
0 if self.distribution[i] == 0 else len(dataset)
)
self.dataset_offsets.append(self.total_num_instances)
self.total_num_instances += self.num_instances_per_dataset[i]
def ordered_indices(self):
start = time.time()
with data_utils.numpy_seed(self.seed, self.epoch):
logger.info(f"sampling new dataset with seed {self.seed} epoch {self.epoch}")
sampled_indices = {}
# For each dataset i, sample self.distribution[i] * self.total_num_instances
for i, key in enumerate(self.datasets):
tp = time.time()
if self.distribution[i] == 0:
# skip dataset if sampling probability is 0
continue
if i < len(self.datasets) - 1:
num_instances = int(self.distribution[i] * self.total_num_instances)
high = self.dataset_offsets[i + 1]
else:
num_instances = int(self.distribution[i] * self.total_num_instances)
high = self.total_num_instances
logger.info(f"sampling {num_instances} from {key} dataset")
# First, add k copies of the dataset where k = num_instances // len(dataset).
# This ensures an equal distribution of the data points as much as possible.
# For the remaining entries randomly sample them
dataset_size = len(self.datasets[key])
num_copies = num_instances // dataset_size
dataset_indices = np.random.permutation(high - self.dataset_offsets[i])[: num_instances - num_copies * dataset_size]
if num_copies > 0:
dataset_indices = np.concatenate(
(
np.repeat(
np.arange(high - self.dataset_offsets[i]), num_copies
),
dataset_indices,
)
)
                # Filter by size. We usually skip this step by leaving check_length=False,
                # as it is very time-consuming on large datasets.
if self.max_positions[key] is not None and self.check_length:
dataset_indices, ignored = self.datasets[key].filter_indices_by_size(
dataset_indices,
self.max_positions[key],
)
if len(ignored) > 0:
logger.warning(
(
"{:,} samples have invalid sizes and will be skipped, "
"max_positions={}, first few sample ids={}"
).format(len(ignored), self.max_positions[key], ignored[:10])
)
if self.sort_indices:
logger.info(" - sampled indices took {}s".format(time.time() - tp))
tp = time.time()
dataset_indices = np.sort(dataset_indices)
ordered_indices = self.datasets[key].ordered_indices()
if isinstance(ordered_indices[0], np.ndarray): # chunked audio data
dataset_indices = [order_idx + self.dataset_offsets[i] for order_idx in ordered_indices]
assert self.dataset_offsets[i] == 0
                        # TODO: for chunked audio data we currently assume len(dataset_indices) == len(dataset), i.e. no data is filtered.
else:
dataset_indices = ordered_indices[dataset_indices] + self.dataset_offsets[i]
logger.info(" - ordered_indices took {}s".format(time.time() - tp))
else:
np.random.shuffle(dataset_indices)
sampled_indices[key] = dataset_indices
logger.info(
"multi_corpus_dataset ordered_indices took {}s".format(
time.time() - start
)
)
return sampled_indices
def _map_index(self, index: int):
"""
If dataset A has length N and dataset B has length M
then index 1 maps to index 1 of dataset A, and index N + 1
maps to index 1 of B.
"""
counter = 0
for num_instances, key in zip(self.num_instances_per_dataset, self.datasets):
if index < counter + num_instances:
return index - counter, key
counter += num_instances
raise ValueError(
"Invalid index: {}, max: {}".format(index, self.total_num_instances)
)
def __len__(self):
"""
Length of this dataset is the sum of individual datasets
"""
return self.total_num_instances
def __getitem__(self, index):
new_index, key = self._map_index(index)
try:
item = self.datasets[key][new_index]
item["full_id"] = index
return item
except Exception as e:
e.args = (f"Error from {key} dataset", *e.args)
raise
def collater(self, samples):
"""
If we are doing batch sampling, then pick the right collater to use.
Otherwise we assume all collaters are the same.
"""
if len(samples) == 0:
return None
samples_dict = {key: [] for key in self.datasets}
for s in samples:
_, key = self._map_index(s["full_id"])
samples_dict[key].append(s)
batch = {}
for key in samples_dict:
if len(samples_dict[key]) == 0:
continue
batch[key] = self.datasets[key].collater(samples_dict[key])
return batch
def num_tokens(self, index: int):
index, key = self._map_index(index)
return self.datasets[key].num_tokens(index)
def size(self, index: int):
index, key = self._map_index(index)
return self.datasets[key].size(index)
@property
def can_reuse_epoch_itr_across_epochs(self):
return False
def set_epoch(self, epoch, **unused):
super().set_epoch(epoch)
logger.info(f"setting epoch of multi_corpus_dataset to {epoch}")
for ds in self.dataset_list:
if hasattr(ds, "set_epoch"):
ds.set_epoch(epoch)
self.epoch = epoch
@property
def supports_prefetch(self):
return False
@property
def supports_fetch_outside_dataloader(self):
return all(
self.datasets[key].supports_fetch_outside_dataloader
for key in self.datasets
)
def batch_by_size(
self,
indices,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
):
dataset_indices = indices
batches_dict = {}
for n, key in enumerate(dataset_indices):
max_tokens_ratio = self.max_tokens_ratio[n]
if isinstance(dataset_indices[key][0], np.ndarray): # chunked audio data
cur_batches = self.datasets[key].batch_by_size(
dataset_indices[key],
round(max_tokens * max_tokens_ratio),
max_sentences,
required_batch_size_multiple,
)
logger.info(f"Created {sum([len(b) for b in cur_batches])} [{len(cur_batches)}] batches for dataset {key}")
else:
cur_batches = super().batch_by_size(
np.array(dataset_indices[key], dtype=np.int64),
round(max_tokens * max_tokens_ratio),
max_sentences,
required_batch_size_multiple,
)
logger.info(f"Created {len(cur_batches)} batches for dataset {key}")
batches_dict[key] = cur_batches
return batches_dict
def get_batch_sampler(
self,
indices,
num_shards,
seed,
max_tokens=None,
max_sentences=None,
required_batch_size_multiple=1,
split_modality_batch=False,
):
def batch_sampler(dataset, epoch):
start = time.time()
batches_dict = dataset.batch_by_size(
indices,
max_tokens=max_tokens,
max_sentences=max_sentences,
required_batch_size_multiple=required_batch_size_multiple,
)
logger.info(f"multi_corpus_dataset, batch_by_size took {time.time() - start}s")
start = time.time()
new_batches = []
            ### shuffle within inner groups, then split into speech/text batches
shuffled_batches_list = []
speech_batches = []
            ### speech batches are collected separately because the different speech
            ### datasets (e.g. ltr or km) need to be concatenated rather than loaded in parallel.
for name, batches in batches_dict.items():
if name.startswith("speech"):
if isinstance(batches[0], list): # chunked audio data
batches = self.datasets[name].shuffle_batches(list(batches), seed + epoch)
shuffled_batches_list.append(batches)
else:
batches = inner_bucket_shuffle(batches, seed+epoch, num_shards*10)
batches = batches[: (len(batches) // num_shards) * num_shards]
if len(batches) == 0:
logger.warning(f"Sample 0 batch for {name}, you should ensure that no {name} data provided.")
else:
speech_batches += batches
else:
batches = inner_bucket_shuffle(batches, seed+epoch, num_shards*10)
batches = batches[: (len(batches) // num_shards) * num_shards]
if len(batches) == 0:
logger.warning(f"Sample 0 batch for {name}, you should ensure that no {name} data provided.")
else:
batches = shuffle_buckets(batches, seed=seed+epoch, inner_shuf=False)
shuffled_batches_list.append(batches)
if len(speech_batches) > 0:
speech_batches = shuffle_buckets(speech_batches, seed=seed+epoch, inner_shuf=False)
shuffled_batches_list.append(speech_batches)
### create the final new_batches
num_batch = min(len(batches) for batches in shuffled_batches_list)
if split_modality_batch:
for i in range(0, num_batch, num_shards):
for batches in shuffled_batches_list:
new_batches += batches[i: i + num_shards]
else:
for i in range(num_batch):
new_batches.append(np.concatenate([batches[i] for batches in shuffled_batches_list]))
logger.info(f"multi_corpus_dataset sample {len(new_batches)} batches, took {time.time() - start}s")
return new_batches
def inner_bucket_shuffle(batches, seed, bucket_size=10, thr=0):
"""we assert batches is sorted form long to short.
shuffle samples in a buctet(e.g. 10 batches).
batches: a list of numpy array"""
num_batch = len(batches)
new_batches = []
num_buckets = len(batches) // bucket_size
i = 0
while i < num_batch:
if (i < bucket_size * thr or
i >= bucket_size * (num_buckets - thr)
):
new_batches.append(batches[i])
i += 1
else:
group = np.concatenate(batches[i: i+bucket_size])
with data_utils.numpy_seed(seed):
np.random.shuffle(group)
new_batches += np.array_split(group, bucket_size)
i += bucket_size
assert all([len(batch) > 0 for batch in new_batches])
return new_batches
def shuffle_buckets(batches, seed, inner_shuf=True):
if inner_shuf:
batches = inner_bucket_shuffle(batches, seed, num_shards*10)
batches = [batches[i: i + num_shards] for i in range(0, len(batches)-num_shards+1, num_shards)]
assert len(batches[-1]) == num_shards
new_batches = []
with data_utils.numpy_seed(seed):
np.random.shuffle(batches)
for group in batches:
new_batches += group
return new_batches
return batch_sampler
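# ----------------------------------------------------------------------------
# Minimal usage sketch (added for illustration; it is not part of the original
# SpeechLM code). The toy dataset below and all keys, sizes and ratios are
# hypothetical -- real training wires in speech/text FairseqDatasets built by
# the SpeechLM task. It only assumes a working fairseq installation.
# ----------------------------------------------------------------------------
if __name__ == "__main__":

    class _ToyDataset(FairseqDataset):
        """Hypothetical stand-in that yields fixed-length integer 'utterances'."""

        def __init__(self, n, length):
            self.n, self.length = n, length

        def __len__(self):
            return self.n

        def __getitem__(self, index):
            return {"id": index, "source": list(range(self.length))}

        def num_tokens(self, index):
            return self.length

        def size(self, index):
            return self.length

        def collater(self, samples):
            return samples

    toy = MultiCorpusDataset(
        datasets=OrderedDict(
            [("speech", _ToyDataset(100, 200)), ("text_paired", _ToyDataset(50, 30))]
        ),
        max_positions={"speech": None, "text_paired": None},
        distribution=[0.5, 0.5],
        max_tokens_ratio=[1.0, 1.0],
        seed=1234,
        sort_indices=True,
    )
    toy.set_epoch(1)
    indices = toy.ordered_indices()  # dict: dataset key -> sampled indices
    sampler_fn = toy.get_batch_sampler(
        indices, num_shards=1, seed=1234, max_tokens=2000, split_modality_batch=False
    )
    batches = sampler_fn(toy, epoch=1)  # list of numpy arrays of global sample ids
    print(len(batches), [len(b) for b in batches[:3]])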
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/data/multimodal_corpus_dataset.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
"""
Modified from https://github.com/facebookresearch/fairseq/blob/272c4c5197250997148fb12c0db6306035f166a4/fairseq/tasks/translation.py
1. Add a custom lang_format argument to load_langpair_dataset
2. If truncate_source is set (off by default), use RandomCropDataset instead of TruncateDataset
"""
import itertools
import logging
import os
from fairseq.data import (
AppendTokenDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
RandomCropDataset,
data_utils,
indexed_dataset,
)
from speechlm.data.concat_dataset import ConcatDataset
EVAL_BLEU_ORDER = 4
logger = logging.getLogger(__name__)
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
prepend_bos_src=None,
lang_format="[{}]",
input_feeding=True,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
if truncate_source:
src_dataset = AppendTokenDataset(
RandomCropDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
elif prepend_bos_src is not None:
logger.info(f"prepending src bos: {prepend_bos_src}")
src_dataset = PrependTokenDataset(src_dataset, prepend_bos_src)
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index(lang_format.format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index(lang_format.format(tgt))
)
eos = tgt_dict.index(lang_format.format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
input_feeding=input_feeding,
)
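# ----------------------------------------------------------------------------
# Illustrative call (added for clarity; the data path, language codes and
# dictionary files below are placeholders, not taken from the original repo).
# It assumes binarized "{split}.{src}-{tgt}.{lang}" files produced by
# fairseq-preprocess and `from fairseq.data import Dictionary` for the vocabularies.
# ----------------------------------------------------------------------------
#
# src_dict = Dictionary.load("/path/to/bin/dict.phn.txt")   # hypothetical path
# tgt_dict = Dictionary.load("/path/to/bin/dict.ltr.txt")   # hypothetical path
# dataset = load_langpair_dataset(
#     "/path/to/bin", "train",
#     "phn", src_dict, "ltr", tgt_dict,
#     combine=True, dataset_impl=None, upsample_primary=1,
#     left_pad_source=False, left_pad_target=False,
#     max_source_positions=3000, max_target_positions=3000,
#     truncate_source=True, append_source_id=True, lang_format="[{}]",
# )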
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/data/load_langpair_dataset.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
import itertools
import logging
import io
import os
import sys
import time
from pathlib import Path
from typing import Any, List, Optional, Union, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data import data_utils, Dictionary
from fairseq.data.fairseq_dataset import FairseqDataset
from fairseq.data.audio.audio_utils import (
read_from_stored_zip,
is_sf_audio_data,
)
FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS = {".npy", ".wav", ".flac", ".ogg"}
logger = logging.getLogger(__name__)
def parse_path(path: str) -> Tuple[str, List[int]]:
"""Parse data path which is either a path to
1. a .npy/.wav/.flac/.ogg file
2. a stored ZIP file with slicing info: "[zip_path]:[offset]:[length]"
Args:
path (str): the data path to parse
Returns:
file_path (str): the file path
slice_ptr (list of int): empty in case 1;
byte offset and length for the slice in case 2
"""
if Path(path).suffix in FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS:
_path, slice_ptr = path, []
else:
_path, *slice_ptr = path.split(":")
if not Path(_path).is_file():
raise FileNotFoundError(f"File not found: {_path}")
assert len(slice_ptr) in {0, 1, 2}, f"Invalid path: {path}"
slice_ptr = [int(i) for i in slice_ptr]
return _path, slice_ptr
def load_audio(manifest_path, max_keep, min_keep, retry_times=5):
n_long, n_short = 0, 0
names, inds, sizes, chunk_names, chunk_indices = [], [], [], [], []
for i in range(retry_times):
with open(manifest_path) as f:
root = f.readline().strip()
for ind, line in enumerate(f):
items = line.strip().split("\t")
assert len(items) == 2, line
sz = int(items[1])
if min_keep is not None and sz < min_keep:
n_short += 1
elif max_keep is not None and sz > max_keep:
n_long += 1
else:
fname = items[0].split(":")
if len(fname) > 2:
if len(chunk_names) == 0 or fname[0] != chunk_names[-1]:
chunk_names.append(fname[0])
chunk_indices.append(len(names))
names.append(items[0])
inds.append(ind)
sizes.append(sz)
if len(names) == 0:
logger.warn(f"Fail to load manifest for the {i} time")
time.sleep(1)
continue
else:
break
tot = ind + 1
logger.info(
(
f"max_keep={max_keep}, min_keep={min_keep}, "
f"loaded {len(names)}, skipped {n_short} short and {n_long} long, "
f"longest-loaded={max(sizes)}, shortest-loaded={min(sizes)}"
)
)
return root, names, inds, tot, sizes, chunk_names, chunk_indices
def load_label(label_path, inds, tot, retry_times=5):
for i in range(retry_times):
with open(label_path) as f:
labels = [line.rstrip() for line in f]
if len(labels) == 0:
logger.warn(f"Fail to load label for the {i} time")
time.sleep(1)
continue
else:
break
assert (
len(labels) == tot
), f"number of labels does not match ({len(labels)} != {tot})"
labels = [labels[i] for i in inds]
return labels
def load_label_offset(label_path, inds, tot, retry_times=5):
for i in range(retry_times):
with open(label_path) as f:
code_lengths = [len(line.encode("utf-8")) for line in f]
if len(code_lengths) == 0:
logger.warn(f"Fail to load label for the {i} time")
time.sleep(1)
continue
else:
break
assert (
len(code_lengths) == tot
), f"number of labels does not match ({len(code_lengths)} != {tot})"
offsets = list(itertools.accumulate([0] + code_lengths))
offsets = [(offsets[i], offsets[i + 1]) for i in inds]
return offsets
def verify_label_lengths(
audio_sizes,
audio_rate,
label_path,
label_rate,
inds,
tot,
tol=0.1, # tolerance in seconds
):
if label_rate < 0:
logger.info(f"{label_path} is sequence label. skipped")
return
with open(label_path) as f:
lengths = [len(line.rstrip().split()) for line in f]
assert len(lengths) == tot
lengths = [lengths[i] for i in inds]
num_invalid = 0
for i, ind in enumerate(inds):
dur_from_audio = audio_sizes[i] / audio_rate
dur_from_label = lengths[i] / label_rate
if abs(dur_from_audio - dur_from_label) > tol:
logger.warning(
(
f"audio and label duration differ too much "
f"(|{dur_from_audio} - {dur_from_label}| > {tol}) "
f"in line {ind+1} of {label_path}. Check if `label_rate` "
f"is correctly set (currently {label_rate}). "
f"num. of samples = {audio_sizes[i]}; "
f"label length = {lengths[i]}"
)
)
num_invalid += 1
if num_invalid > 0:
logger.warning(
f"total {num_invalid} (audio, label) pairs with mismatched lengths"
)
class HubertDataset(FairseqDataset):
def __init__(
self,
manifest_path: str,
sample_rate: float,
label_paths: List[str],
label_rates: Union[List[float], float], # -1 for sequence labels
pad_list: List[str],
eos_list: List[str],
label_processors: Optional[List[Any]] = None,
max_keep_sample_size: Optional[int] = None,
min_keep_sample_size: Optional[int] = None,
max_sample_size: Optional[int] = None,
shuffle: bool = True,
pad_audio: bool = False,
normalize: bool = False,
store_labels: bool = True,
random_crop: bool = False,
single_target: bool = False,
tgt_dict: Optional[Dictionary] = None,
add_decoder_target: bool = False,
fine_tuning: bool = False,
tgt_lang_idx: int = None,
tokenizer = None,
mbart_style_lang_id: bool = False,
retry_times: int = 5,
reduce_label_for_dec: bool = True,
):
self.audio_root, self.audio_names, inds, tot, self.wav_sizes, self.chunk_names, self.chunk_indices = load_audio(
manifest_path, max_keep_sample_size, min_keep_sample_size, retry_times
)
self.sample_rate = sample_rate
self.shuffle = shuffle
self.random_crop = random_crop
self.tgt_dict = tgt_dict
self.add_decoder_target = add_decoder_target
self.fine_tuning = fine_tuning
self.num_labels = len(label_paths)
self.pad_list = pad_list
self.eos_list = eos_list
self.label_processors = label_processors
self.single_target = single_target
self.epoch = 0
self.label_rates = (
[label_rates for _ in range(len(label_paths))]
if isinstance(label_rates, int)
else label_rates
)
self.store_labels = store_labels
if store_labels:
self.label_list = [load_label(p, inds, tot, retry_times) for p in label_paths]
else:
self.label_paths = label_paths
self.label_offsets_list = [
load_label_offset(p, inds, tot, retry_times) for p in label_paths
]
assert label_processors is None or len(label_processors) == self.num_labels
for label_path, label_rate in zip(label_paths, self.label_rates):
verify_label_lengths(
self.wav_sizes, sample_rate, label_path, label_rate, inds, tot
)
self.max_sample_size = (
max_sample_size if max_sample_size is not None else sys.maxsize
)
self.pad_audio = pad_audio
self.normalize = normalize
self.tgt_lang_idx = tgt_lang_idx
self.tokenizer = tokenizer
self.mbart_style_lang_id = mbart_style_lang_id
self.retry_times = retry_times
self.reduce_label_for_dec = reduce_label_for_dec
logger.info(
f"pad_audio={pad_audio}, random_crop={random_crop}, tgt_lang_idx={self.tgt_lang_idx}, reduce_label_for_dec={reduce_label_for_dec}, "
f"mbart_style_lang_id={mbart_style_lang_id}, normalize={normalize}, max_sample_size={self.max_sample_size}"
)
def set_epoch(self, epoch):
self.epoch = epoch
def batch_by_size(self, indices, max_tokens=None, max_sentences=None, required_batch_size_multiple=1):
self.max_tokens = max_tokens
self.max_sentences = max_sentences
self.required_batch_size_multiple = required_batch_size_multiple
if isinstance(indices[0], np.ndarray):
batch_list = []
for indice in indices:
batch = super(HubertDataset, self).batch_by_size(indice, max_tokens, max_sentences, required_batch_size_multiple)
batch_list.append(batch)
return batch_list
else:
return super(HubertDataset, self).batch_by_size(indices, max_tokens, max_sentences, required_batch_size_multiple)
def shuffle_batches(self, batches, seed):
if isinstance(batches[0], list):
new_batches = []
with data_utils.numpy_seed(seed):
np.random.shuffle(batches)
for batch in batches:
np.random.shuffle(batch)
new_batches.extend(batch)
return new_batches
else:
with data_utils.numpy_seed(seed):
np.random.shuffle(batches)
return batches
def get_audio(self, index):
import soundfile as sf
wav_path = os.path.join(self.audio_root, self.audio_names[index])
_path, slice_ptr = parse_path(wav_path)
if len(slice_ptr) == 1:
import kaldiio
feat = kaldiio.load_mat(wav_path)
feat = torch.from_numpy(feat).float()
if self.normalize:
with torch.no_grad():
feat = F.layer_norm(feat, feat.shape[-1])
return feat
else:
if len(slice_ptr) == 2:
byte_data = read_from_stored_zip(_path, slice_ptr[0], slice_ptr[1])
assert is_sf_audio_data(byte_data)
wav_path = io.BytesIO(byte_data)
for i in range(self.retry_times):
if i < self.retry_times - 1:
try:
wav, cur_sample_rate = sf.read(wav_path)
break
except Exception as e:
logger.warn(f"Fail to load wav for the {i} time")
logger.warn(e)
time.sleep(1)
continue
else:
wav, cur_sample_rate = sf.read(wav_path)
wav = torch.from_numpy(wav).float()
wav = self.postprocess(wav, cur_sample_rate)
return wav
def get_label(self, index, label_idx):
if self.store_labels:
label = self.label_list[label_idx][index]
else:
with open(self.label_paths[label_idx]) as f:
offset_s, offset_e = self.label_offsets_list[label_idx][index]
f.seek(offset_s)
label = f.read(offset_e - offset_s)
if self.tokenizer is not None and self.fine_tuning:
label = self.tokenizer.encode(label)
if self.label_processors is not None:
label = self.label_processors[label_idx](label)
return label
def get_labels(self, index):
return [self.get_label(index, i) for i in range(self.num_labels)]
def __getitem__(self, index):
wav = self.get_audio(index)
labels = self.get_labels(index)
return {"id": index, "source": wav, "label_list": labels}
def __len__(self):
return len(self.wav_sizes)
def crop_to_max_size(self, wav, target_size):
size = len(wav)
diff = size - target_size
if diff <= 0:
return wav, 0
start, end = 0, target_size
if self.random_crop:
start = np.random.randint(0, diff + 1)
end = size - diff + start
return wav[start:end], start
def collater(self, samples):
# target = max(sizes) -> random_crop not used
# target = max_sample_size -> random_crop used for long
samples = [s for s in samples if s["source"] is not None]
if len(samples) == 0:
return {}
audios = [s["source"] for s in samples]
audio_sizes = [len(s) for s in audios]
if self.pad_audio:
audio_size = min(max(audio_sizes), self.max_sample_size)
else:
audio_size = min(min(audio_sizes), self.max_sample_size)
feat_dim = audios[0].size(-1) if audios[0].dim() > 1 else 1
collated_audios, padding_mask, audio_starts = self.collater_audio(
audios, audio_size, feat_dim,
)
targets_by_label = [
[s["label_list"][i] for s in samples] for i in range(self.num_labels)
]
targets_list, lengths_list, ntokens_list = self.collater_label(
targets_by_label, audio_size, audio_starts
)
if self.add_decoder_target:
if self.fine_tuning:
decoder_label = [
torch.cat((targets_list[0][i, :lengths_list[0][i]], torch.tensor([self.tgt_dict.eos()])), 0).long()
for i in range(targets_list[0].size(0))
]
else:
if self.tokenizer is not None:
decoder_label = [
                        # Offset ids by 48 when mapping ints to chars, to avoid control characters such as "\n"
torch.cat(
(
torch.tensor(
self.tokenizer.sp.Encode(
"".join(
[chr(j + 48) for j in (
targets_list[0][i, :lengths_list[0][i]].unique_consecutive() if self.reduce_label_for_dec else targets_list[0][i, :lengths_list[0][i]]
).tolist()]
), out_type=int
)
),
torch.tensor([self.tgt_dict.eos()])
), dim=0
).long()
for i in range(targets_list[0].size(0))
]
else:
decoder_label = [
torch.cat((targets_list[0][i, :lengths_list[0][i]].unique_consecutive() if self.reduce_label_for_dec else targets_list[0][i, :lengths_list[0][i]], torch.tensor([self.tgt_dict.eos()])), 0).long()
for i in range(targets_list[0].size(0))
]
if self.mbart_style_lang_id:
decoder_label = [
torch.cat((decoder_label[i], torch.tensor([self.tgt_lang_idx])), 0).long()
for i in range(targets_list[0].size(0))
]
dec_ntokens = sum(x.size(0) for x in decoder_label)
decoder_target = data_utils.collate_tokens(
decoder_label,
self.tgt_dict.pad(),
self.tgt_dict.eos() if not self.mbart_style_lang_id else self.tgt_lang_idx,
left_pad=False,
move_eos_to_beginning=False,
)
decoder_target_lengths = torch.tensor(
[x.size(0) for x in decoder_label], dtype=torch.long
)
prev_output_tokens = data_utils.collate_tokens(
decoder_label,
self.tgt_dict.pad(),
self.tgt_dict.eos() if not self.mbart_style_lang_id else self.tgt_lang_idx,
left_pad=False,
move_eos_to_beginning=True,
)
if self.tgt_lang_idx is not None and not self.mbart_style_lang_id:
assert (prev_output_tokens[:, 0] != self.tgt_dict.eos()).sum() == 0
prev_output_tokens[:, 0] = self.tgt_lang_idx
net_input = {
"source": collated_audios,
"padding_mask": padding_mask,
"prev_output_tokens": prev_output_tokens,
}
batch = {
"id": torch.LongTensor([s["id"] for s in samples]),
"net_input": net_input,
"decoder_target": decoder_target,
"decoder_target_lengths": decoder_target_lengths,
"dec_ntokens": dec_ntokens,
"lang_idx": self.tgt_lang_idx,
}
else:
net_input = {"source": collated_audios, "padding_mask": padding_mask}
batch = {
"id": torch.LongTensor([s["id"] for s in samples]),
"net_input": net_input,
}
if self.single_target:
batch["target_lengths"] = lengths_list[0]
batch["ntokens"] = ntokens_list[0]
batch["target"] = targets_list[0]
else:
batch["target_lengths_list"] = lengths_list
batch["ntokens_list"] = ntokens_list
batch["target_list"] = targets_list
return batch
def collater_audio(self, audios, audio_size, feat_dim=1):
collated_audios = audios[0].new_zeros(len(audios), audio_size, feat_dim)
padding_mask = (
torch.BoolTensor(collated_audios.shape[0:2]).fill_(False)
# if self.pad_audio else None
)
audio_starts = [0 for _ in audios]
for i, audio in enumerate(audios):
audio = audio.view(-1, feat_dim)
diff = len(audio) - audio_size
if diff == 0:
collated_audios[i] = audio
elif diff < 0:
assert self.pad_audio
collated_audios[i] = torch.cat([audio, audio.new_full((-diff, feat_dim), 0.0)])
padding_mask[i, diff:] = True
else:
collated_audios[i], audio_starts[i] = self.crop_to_max_size(
audio, audio_size
)
return collated_audios.squeeze(-1), padding_mask, audio_starts
def collater_frm_label(self, targets, audio_size, audio_starts, label_rate, pad):
assert label_rate > 0
s2f = label_rate / self.sample_rate
frm_starts = [int(round(s * s2f)) for s in audio_starts]
frm_size = int(round(audio_size * s2f))
if not self.pad_audio:
rem_size = [len(t) - s for t, s in zip(targets, frm_starts)]
frm_size = min(frm_size, *rem_size)
targets = [t[s : s + frm_size] for t, s in zip(targets, frm_starts)]
logger.debug(f"audio_starts={audio_starts}")
logger.debug(f"frame_starts={frm_starts}")
logger.debug(f"frame_size={frm_size}")
lengths = torch.LongTensor([len(t) for t in targets])
ntokens = lengths.sum().item()
targets = data_utils.collate_tokens(targets, pad_idx=pad, left_pad=False)
return targets, lengths, ntokens
def collater_seq_label(self, targets, pad):
lengths = torch.LongTensor([len(t) for t in targets])
ntokens = lengths.sum().item()
targets = data_utils.collate_tokens(targets, pad_idx=pad, left_pad=False)
return targets, lengths, ntokens
def collater_label(self, targets_by_label, audio_size, audio_starts):
targets_list, lengths_list, ntokens_list = [], [], []
itr = zip(targets_by_label, self.label_rates, self.pad_list)
for targets, label_rate, pad in itr:
if label_rate == -1:
targets, lengths, ntokens = self.collater_seq_label(targets, pad)
else:
targets, lengths, ntokens = self.collater_frm_label(
targets, audio_size, audio_starts, label_rate, pad
)
targets_list.append(targets)
lengths_list.append(lengths)
ntokens_list.append(ntokens)
return targets_list, lengths_list, ntokens_list
def num_tokens(self, index):
return self.size(index)
def size(self, index):
if self.pad_audio:
return self.wav_sizes[index]
return min(self.wav_sizes[index], self.max_sample_size)
@property
def sizes(self):
return np.array(self.wav_sizes)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
if self.shuffle:
if len(self.chunk_names) > 0:
logger.info(f"ordered indices for epoch {self.epoch}")
with data_utils.numpy_seed(self.epoch):
self.chunk_order = np.random.permutation(len(self.chunk_names))
chunk_count = 0
tmp_sizes = []
tmp_indices = []
indice = []
for i in self.chunk_order:
chunk_count += 1
start = self.chunk_indices[i]
end = self.chunk_indices[i+1] if i < len(self.chunk_names) - 1 else len(self)
size = list(self.sizes[start:end])
tmp_indices.extend(list(np.arange(start, end)))
tmp_sizes.extend(size)
if chunk_count % 10 == 0 or i == self.chunk_order[0]:
order = [np.random.permutation(len(tmp_indices))]
order.append(
np.minimum(
np.array(tmp_sizes),
self.max_sample_size,
)
)
sort_idx = np.lexsort(order)[::-1]
indice.append(np.array([tmp_indices[k] for k in sort_idx]))
tmp_indices = []
                        tmp_sizes = []
return indice
else:
order = [np.random.permutation(len(self))]
order.append(
np.minimum(
np.array(self.sizes),
self.max_sample_size,
)
)
return np.lexsort(order)[::-1]
else:
return np.arange(len(self))
def postprocess(self, wav, cur_sample_rate):
if wav.dim() == 2:
wav = wav.mean(-1)
assert wav.dim() == 1, wav.dim()
if cur_sample_rate != self.sample_rate:
raise Exception(f"sr {cur_sample_rate} != {self.sample_rate}")
if self.normalize:
with torch.no_grad():
wav = F.layer_norm(wav, wav.shape)
return wav
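# ----------------------------------------------------------------------------
# Illustrative construction (added for clarity; every path and hyper-parameter
# below is a placeholder). The manifest is a TSV whose first line is the audio
# root directory, followed by "<relative_path>\t<num_samples>" rows; each label
# file holds one (frame-level) label sequence per manifest line.
# ----------------------------------------------------------------------------
#
# dicts = [Dictionary.load("/path/to/labels/dict.km.txt")]        # hypothetical
# dataset = HubertDataset(
#     manifest_path="/path/to/manifests/train.tsv",               # hypothetical
#     sample_rate=16000,
#     label_paths=["/path/to/labels/train.km"],                   # hypothetical
#     label_rates=50,                                             # 50 label frames per second
#     pad_list=[d.pad() for d in dicts],
#     eos_list=[d.eos() for d in dicts],
#     label_processors=None,
#     max_keep_sample_size=None, min_keep_sample_size=32000,
#     max_sample_size=250000, pad_audio=False, random_crop=True,
#     normalize=False, store_labels=False, single_target=False,
# )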
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/data/hubert_dataset.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
from pathlib import Path
from typing import List, Dict, Optional, Any
from dataclasses import dataclass
import numpy as np
import torch
from fairseq.data.audio.speech_to_text_dataset import (
SpeechToTextDataset,
SpeechToTextDatasetCreator,
S2TDataConfig,
_collate_frames,
get_features_or_waveform,
)
from fairseq.data import Dictionary, data_utils as fairseq_data_utils
@dataclass
class TextToUnitDatasetItem(object):
index: int
source: torch.Tensor
target: Optional[torch.Tensor] = None
speaker_id: Optional[int] = None
speaker_emb: Optional[torch.Tensor] = None
duration: Optional[torch.Tensor] = None
pitch: Optional[torch.Tensor] = None
energy: Optional[torch.Tensor] = None
class Text2UnitDataset(SpeechToTextDataset):
def __init__(
self,
split: str,
is_train_split: bool,
cfg: S2TDataConfig,
unit_labels: List[str],
n_frames: List[int],
src_texts: Optional[List[str]] = None,
tgt_texts: Optional[List[str]] = None,
speakers: Optional[List[str]] = None,
src_langs: Optional[List[str]] = None,
tgt_langs: Optional[List[str]] = None,
ids: Optional[List[str]] = None,
tgt_dict: Optional[Dictionary] = None,
pre_tokenizer=None,
bpe_tokenizer=None,
n_frames_per_step=1,
speaker_to_id=None,
durations: Optional[List[List[int]]] = None,
pitches: Optional[List[str]] = None,
energies: Optional[List[str]] = None,
):
super(Text2UnitDataset, self).__init__(
split,
is_train_split,
cfg,
unit_labels,
n_frames,
src_texts=src_texts,
tgt_texts=tgt_texts,
speakers=speakers,
src_langs=src_langs,
tgt_langs=tgt_langs,
ids=ids,
tgt_dict=tgt_dict,
pre_tokenizer=pre_tokenizer,
bpe_tokenizer=bpe_tokenizer,
n_frames_per_step=n_frames_per_step,
speaker_to_id=speaker_to_id,
)
self.durations = durations
self.pitches = pitches
self.energies = energies
self.unit_labels = unit_labels
self.feature_root = Path(cfg.audio_root)
self.spk_emb_type = cfg.config.get("speaker_embedding_type", None)
self.random_spk = cfg.config.get("random_speaker", False)
if self.spk_emb_type is not None:
self.spk_emb_choices = [i for i in (self.feature_root / self.spk_emb_type).glob("*.npy")]
self.spk_emb_num = len(self.spk_emb_choices)
def __getitem__(self, index: int) -> TextToUnitDatasetItem:
# s2t_item = super().__getitem__(index)
source = torch.LongTensor(self.unit_labels[index])
target = None
if self.tgt_texts is not None:
tokenized = self.get_tokenized_tgt_text(index)
target = self.tgt_dict.encode_line(
tokenized, add_if_not_exist=False, append_eos=self.append_eos
).long()
if self.cfg.prepend_tgt_lang_tag:
lang_tag_idx = self.get_lang_tag_idx(
self.tgt_langs[index], self.tgt_dict
)
target = torch.cat((torch.LongTensor([lang_tag_idx]), target), 0)
speaker_id = None
if self.speaker_to_id is not None:
speaker_id = self.speaker_to_id[self.speakers[index]]
speaker_emb = None
if self.spk_emb_type is not None:
if self.random_spk:
spk_emb_path = self.spk_emb_choices[np.random.choice(self.spk_emb_num)]
else:
spk_emb_path = self.feature_root / self.spk_emb_type / f"{self.ids[index]}.npy"
speaker_emb = get_features_or_waveform(spk_emb_path)
speaker_emb = torch.from_numpy(speaker_emb).float()
duration, pitch, energy = None, None, None
if self.durations is not None:
duration = torch.tensor(
self.durations[index] + [0], dtype=torch.long # pad 0 for EOS
)
if self.pitches is not None:
pitch = get_features_or_waveform(self.pitches[index])
pitch = torch.from_numpy(
np.concatenate((pitch, [0])) # pad 0 for EOS
).float()
if self.energies is not None:
energy = get_features_or_waveform(self.energies[index])
energy = torch.from_numpy(
np.concatenate((energy, [0])) # pad 0 for EOS
).float()
return TextToUnitDatasetItem(
index=index,
source=source,
target=target,
speaker_id=speaker_id,
speaker_emb=speaker_emb,
duration=duration,
pitch=pitch,
energy=energy,
)
def collater(self, samples: List[TextToUnitDatasetItem]) -> Dict[str, Any]:
if len(samples) == 0:
return {}
src_lengths, order = torch.tensor(
[s.target.shape[0] for s in samples], dtype=torch.long
).sort(descending=True)
id_ = torch.tensor([s.index for s in samples], dtype=torch.long).index_select(
0, order
)
        target = fairseq_data_utils.collate_tokens(
[s.source for s in samples],
self.tgt_dict.pad(),
).index_select(0, order)
target_lengths = torch.tensor(
[s.source.shape[0] for s in samples], dtype=torch.long
).index_select(0, order)
src_tokens = fairseq_data_utils.collate_tokens(
[s.target for s in samples],
self.tgt_dict.pad(),
self.tgt_dict.eos(),
left_pad=False,
move_eos_to_beginning=False,
).index_select(0, order)
speaker = None
if self.speaker_to_id is not None:
speaker = (
torch.tensor([s.speaker_id for s in samples], dtype=torch.long)
.index_select(0, order)
.view(-1, 1)
)
if self.spk_emb_type is not None:
speaker = torch.stack([s.speaker_emb for s in samples], dim=0).index_select(0, order)
        bsz, _ = target.size()
prev_output_tokens = torch.cat(
            (target.new_zeros((bsz, self.tgt_dict.bos())), target[:, :-1]), dim=1
)
durations, pitches, energies = None, None, None
if self.durations is not None:
durations = fairseq_data_utils.collate_tokens(
[s.duration for s in samples], 0
).index_select(0, order)
assert src_tokens.shape[1] == durations.shape[1]
if self.pitches is not None:
pitches = _collate_frames([s.pitch for s in samples], True)
pitches = pitches.index_select(0, order)
assert src_tokens.shape[1] == pitches.shape[1]
if self.energies is not None:
energies = _collate_frames([s.energy for s in samples], True)
energies = energies.index_select(0, order)
assert src_tokens.shape[1] == energies.shape[1]
src_texts = [self.tgt_dict.string(samples[i].target) for i in order]
return {
"id": id_,
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
"prev_output_tokens": prev_output_tokens,
},
"speaker": speaker,
"target": traget,
"durations": durations,
"pitches": pitches,
"energies": energies,
"target_lengths": target_lengths,
"ntokens": sum(target_lengths).item(),
"nsentences": len(samples),
"src_texts": src_texts,
}
class Text2UnitDatasetCreator(SpeechToTextDatasetCreator):
KEY_DURATION = "duration"
KEY_PITCH = "pitch"
KEY_ENERGY = "energy"
KEY_UNIT = "unit"
@classmethod
def _from_list(
cls,
split_name: str,
is_train_split,
samples: List[Dict],
cfg: S2TDataConfig,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
n_frames_per_step,
speaker_to_id,
) -> Text2UnitDataset:
audio_root = Path(cfg.audio_root)
ids = [s[cls.KEY_ID] for s in samples]
# audio_paths = [(audio_root / s[cls.KEY_AUDIO]).as_posix() for s in samples]
unit_labels = [s[cls.KEY_UNIT] for s in samples]
unit_labels = [
None if dd is None else [int(d) for d in dd.split(" ")] for dd in unit_labels
]
n_frames = [int(s[cls.KEY_N_FRAMES]) for s in samples]
tgt_texts = [s[cls.KEY_TGT_TEXT] for s in samples]
src_texts = [s.get(cls.KEY_SRC_TEXT, cls.DEFAULT_SRC_TEXT) for s in samples]
speakers = [s.get(cls.KEY_SPEAKER, cls.DEFAULT_SPEAKER) for s in samples]
src_langs = [s.get(cls.KEY_SRC_LANG, cls.DEFAULT_LANG) for s in samples]
tgt_langs = [s.get(cls.KEY_TGT_LANG, cls.DEFAULT_LANG) for s in samples]
durations = [s.get(cls.KEY_DURATION, None) for s in samples]
durations = [
None if dd is None else [int(d) for d in dd.split(" ")] for dd in durations
]
durations = None if any(dd is None for dd in durations) else durations
pitches = [s.get(cls.KEY_PITCH, None) for s in samples]
pitches = [
None if pp is None else (audio_root / pp).as_posix() for pp in pitches
]
pitches = None if any(pp is None for pp in pitches) else pitches
energies = [s.get(cls.KEY_ENERGY, None) for s in samples]
energies = [
None if ee is None else (audio_root / ee).as_posix() for ee in energies
]
energies = None if any(ee is None for ee in energies) else energies
return Text2UnitDataset(
split_name,
is_train_split,
cfg,
unit_labels,
n_frames,
src_texts,
tgt_texts,
speakers,
src_langs,
tgt_langs,
ids,
tgt_dict,
pre_tokenizer,
bpe_tokenizer,
n_frames_per_step,
speaker_to_id,
durations,
pitches,
energies,
)
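# ----------------------------------------------------------------------------
# Note (added for clarity): _from_list expects manifest rows with at least
# "id", "speaker", "n_frames", "tgt_text" (the reduced phone sequence) and
# "unit" (frame-level unit labels); optional "duration", "pitch" and "energy"
# columns enable the corresponding duration/pitch/energy inputs. These match
# the TSVs written by the manifest-preparation script elsewhere in this repo.
# ----------------------------------------------------------------------------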
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/data/text_to_unit_dataset.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
import logging
import math
import re
from dataclasses import dataclass, field
from typing import List, Optional
import numpy as np
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.criterions.label_smoothed_cross_entropy import label_smoothed_nll_loss
from fairseq.dataclass import FairseqDataclass
logger = logging.getLogger(__name__)
@dataclass
class HSTCriterionConfig(FairseqDataclass):
pred_masked_weight: float = field(
default=1.0,
metadata={"help": "weight for predictive loss for masked frames"},
)
pred_nomask_weight: float = field(
default=0.0,
metadata={"help": "weight for predictive loss for unmasked frames"},
)
loss_weights: Optional[List[float]] = field(
default=None,
metadata={"help": "weights for additional loss terms (not first one)"},
)
log_keys: List[str] = field(
default_factory=lambda: [],
metadata={"help": "output keys to log"},
)
text_ctc_weight: float = field(
default=0.1,
metadata={"help": "weights for text CTC Loss, loss will be (hubert_loss + dec_weight * CE_Loss + text_weight * (CE_Loss + CTC_loss))"},
)
text_mum_weight: float = field(
default=0.0,
metadata={"help": "masked unit modeling weight from the text end"},
)
report_accuracy: bool = field(
default=True,
metadata={"help": "report decoder accuracy metric"},
)
ignore_prefix_size: int = field(
default=0,
metadata={"help": "Ignore first N tokens"},
)
no_ctc_blank: bool = field(
default=False,
metadata={"help": "mask out the blank of ctc, only when dec_loss_type=ctc"},
)
@register_criterion("speechlm_criterion", dataclass=HSTCriterionConfig)
class SpeechLMCriterion(FairseqCriterion):
def __init__(
self,
task,
pred_masked_weight,
pred_nomask_weight,
loss_weights=None,
log_keys=None,
text_ctc_weight=0.1,
text_mum_weight=0,
report_accuracy=False,
ignore_prefix_size=0,
no_ctc_blank=False,
):
super().__init__(task)
self.pred_masked_weight = pred_masked_weight
self.pred_nomask_weight = pred_nomask_weight
self.loss_weights = loss_weights
self.log_keys = [] if log_keys is None else log_keys
self.text_ctc_weight = text_ctc_weight
self.text_mum_weight = text_mum_weight
self.report_accuracy = report_accuracy
self.ignore_prefix_size = ignore_prefix_size
self.no_ctc_blank = no_ctc_blank
self.padding_idx = task.dictionaries[0].pad()
self.eos_idx = task.dictionaries[0].eos()
self.blank_idx = task.dictionaries[0].bos()
def compute_hubert_loss(self, model, net_output, reduction, suffix=''):
loss = 0
sample_size = []
logging_output = {}
loss_m_list = []
logp_m_list = model.get_logits(net_output, True)
targ_m_list = model.get_targets(net_output, True)
assert self.pred_masked_weight == 0 or len(logp_m_list) > 0
for i, (logp_m, targ_m) in enumerate(zip(logp_m_list, targ_m_list)):
loss_m = F.cross_entropy(logp_m, targ_m, reduction=reduction)
loss_m_list.append(loss_m)
logging_output[f"loss_m_{i}{suffix}"] = loss_m.detach().item()
if self.pred_masked_weight > 0:
loss += self.pred_masked_weight * sum(loss_m_list)
sample_size.append(targ_m_list[0].numel())
loss_u_list = []
logp_u_list = model.get_logits(net_output, False)
targ_u_list = model.get_targets(net_output, False)
assert self.pred_nomask_weight == 0 or len(logp_u_list) > 0
for i, (logp_u, targ_u) in enumerate(zip(logp_u_list, targ_u_list)):
loss_u = F.cross_entropy(logp_u, targ_u, reduction=reduction)
loss_u_list.append(loss_u)
logging_output[f"loss_u_{i}{suffix}"] = loss_u.detach().item()
if self.pred_nomask_weight > 0:
loss += self.pred_nomask_weight * sum(loss_u_list)
sample_size.append(targ_u_list[0].numel())
sample_size = np.mean(sample_size)
def compute_correct(logits, targets):
if logits.numel() == 0:
return 0, 0
else:
assert logits.dim() > 1, logits.shape
max = logits.argmax(-1) == targets
min = logits.argmin(-1) == targets
both = max & min
corr = max.long().sum().item() - both.long().sum().item()
count = max.numel()
return corr, count
with torch.no_grad():
for i, (logp_m, targ_m) in enumerate(zip(logp_m_list, targ_m_list)):
corr_m, count_m = compute_correct(logp_m, targ_m)
logging_output[f"correct_m_{i}{suffix}"] = corr_m
logging_output[f"count_m_{i}{suffix}"] = count_m
for i, (logp_u, targ_u) in enumerate(zip(logp_u_list, targ_u_list)):
corr_u, count_u = compute_correct(logp_u, targ_u)
logging_output[f"correct_u_{i}{suffix}"] = corr_u
logging_output[f"count_u_{i}{suffix}"] = count_u
return loss, sample_size, logging_output
def forward(self, model, sample, reduce=True, log_pred=False):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
reduction = "sum" if reduce else "none"
if "net_input" in sample:
text_sample = None
else:
text_sample = sample.get("text_paired")
sample = sample.get("speech")
### 1. L_UMLM: do hubert forward and loss computation
sample["modality"] = "speech"
net_output = model(target_list=sample["target_list"], **sample["net_input"])
loss, sample_size, logging_output = self.compute_hubert_loss(
model,
net_output,
reduction,
)
if self.loss_weights is not None:
assert hasattr(model, "get_extra_losses")
extra_losses, names = model.get_extra_losses(net_output)
if torch.is_tensor(extra_losses):
extra_losses = [extra_losses]
names = [names]
if len(self.loss_weights) == 1 and len(extra_losses) != 1:
self.loss_weights = [self.loss_weights[0]] * len(extra_losses)
assert len(extra_losses) == len(
self.loss_weights
), f"{len(extra_losses)}, {len(self.loss_weights)}"
for p, n, coef in zip(extra_losses, names, self.loss_weights):
if coef != 0 and p is not None:
p = coef * p.float() * sample_size
loss += p
logging_output[f"loss_{n}"] = p.item()
for lk in self.log_keys:
if lk in net_output:
logging_output[lk] = float((net_output[lk]))
### 2. do text forward and loss computation
if text_sample is not None:
text_sample["modality"] = "text"
            ## 2.1 reload "target_list": by default target_list = [src_tokens];
            ## with the "unit-phone-char" structure, target_list is [ref_tokens] instead
text_sample["net_input"]["target_list"] = [
text_sample.get("ref_tokens", text_sample["net_input"]["src_tokens"].clone()),
]
text_net_output = model(**text_sample["net_input"])
### 2.2 L_UMLM (text-end, not applied by default)
if self.text_mum_weight > 0:
loss_u2t, sample_size_u2t, logging_output_u2t = self.compute_hubert_loss(
model,
text_net_output,
reduction,
suffix="_u2t",
)
loss += self.text_mum_weight * loss_u2t * sample_size / sample_size_u2t
logging_output.update(logging_output_u2t)
### 2.3 L_UCTC
text_sample_size = text_sample["ntokens"]
if self.text_ctc_weight > 0:
text_ctc_loss = self.compute_ctc_loss(model, text_net_output, text_sample["target"], reduction=reduction)
loss += self.text_ctc_weight * text_ctc_loss * sample_size / text_sample_size
logging_output["text_ctc_loss"] = utils.item(text_ctc_loss)
logging_output["text_sample_size"] = text_sample_size
logging_output = {
"loss": utils.item(loss) if reduce else loss,
"ntokens": sample_size,
"nsentences": sample["id"].numel() + (text_sample["id"].numel() if text_sample is not None else 0),
"sample_size": sample_size,
**logging_output,
}
return loss, sample_size, logging_output
def compute_ctc_loss(self, model, net_output, target, reduction):
logits = net_output["encoder_out_ctc"][0] # (T, B, C) from the code-encoder
if self.no_ctc_blank:
## set prob of <blank> to -inf
logits = logits.float()
logits[:, :, self.blank_idx] = -1000000.0
lprobs = F.log_softmax(logits.float(), dim=-1)
encoder_padding_mask = net_output["encoder_padding_mask"][0]
non_padding_mask = ~encoder_padding_mask
input_lengths = non_padding_mask.long().sum(-1)
pad_mask = (target != self.padding_idx) & (target != self.eos_idx)
targets_flat = target.masked_select(pad_mask)
target_lengths = pad_mask.sum(-1)
with torch.backends.cudnn.flags(enabled=False):
loss = F.ctc_loss(
lprobs,
targets_flat,
input_lengths,
target_lengths,
blank=self.blank_idx,
reduction=reduction,
zero_infinity=True,
)
return loss
def compute_ce_loss(self, model, net_output, sample, reduce=True):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
loss, nll_loss = label_smoothed_nll_loss(
lprobs,
target,
self.eps,
ignore_index=self.padding_idx,
reduce=reduce,
)
return loss, nll_loss
def compute_accuracy(self, model, net_output, sample):
lprobs, target = self.get_lprobs_and_target(model, net_output, sample)
mask = target.ne(self.padding_idx)
n_correct = torch.sum(
lprobs.argmax(1).masked_select(mask).eq(target.masked_select(mask))
)
total = torch.sum(mask)
return n_correct, total
def get_lprobs_and_target(self, model, net_output, sample):
lprobs = model.get_normalized_probs(net_output, log_probs=True)
if sample["modality"] == "speech":
target = sample["decoder_target"]
if self.ignore_prefix_size > 0:
if getattr(lprobs, "batch_first", False):
lprobs = lprobs[:, self.ignore_prefix_size :, :].contiguous()
target = target[:, self.ignore_prefix_size :].contiguous()
else:
lprobs = lprobs[self.ignore_prefix_size :, :, :].contiguous()
target = target[self.ignore_prefix_size :, :].contiguous()
else:
target = sample["target"]
return lprobs.view(-1, lprobs.size(-1)), target.view(-1)
@staticmethod
def reduce_metrics(logging_outputs) -> None:
"""Aggregate logging outputs from data parallel training (copied from normal cross entropy)."""
loss_sum = sum(log.get("loss", 0) for log in logging_outputs)
ntokens = sum(log.get("ntokens", 0) for log in logging_outputs)
sample_size = sum(log.get("sample_size", 0) for log in logging_outputs)
metrics.log_scalar(
"loss", loss_sum / sample_size / math.log(2), sample_size, round=3
)
if sample_size != ntokens:
metrics.log_scalar(
"nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3
)
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["nll_loss"].avg)
)
else:
metrics.log_derived(
"ppl", lambda meters: utils.get_perplexity(meters["loss"].avg)
)
counts = {}
for lk in logging_outputs[0].keys():
if lk.startswith("count_"):
val = sum(log.get(lk, 0) for log in logging_outputs)
metrics.log_scalar(lk, val)
counts[lk] = val
for lk in logging_outputs[0].keys():
if lk.startswith("loss_"):
val = sum(log.get(lk, 0) for log in logging_outputs)
metrics.log_scalar(lk, val / sample_size / math.log(2), round=3)
elif lk.startswith("correct_"):
val = sum(log.get(lk, 0) for log in logging_outputs)
metrics.log_scalar(lk, val / counts[re.sub("correct", "count", lk)])
if "text_sample_size" in logging_outputs[0]:
text_sample_size = sum(log.get("text_sample_size", 0) for log in logging_outputs)
for lk in logging_outputs[0].keys():
if lk.startswith("text_") and lk.endswith("_loss"):
val = sum(log.get(lk, 0) for log in logging_outputs)
metrics.log_scalar(lk, val / text_sample_size / math.log(2), round=3)
@staticmethod
def aggregate_logging_outputs(logging_outputs):
"""Aggregate logging outputs from data parallel training."""
raise NotImplementedError()
@staticmethod
def logging_outputs_can_be_summed() -> bool:
"""
Whether the logging outputs returned by `forward` can be summed
across workers prior to calling `reduce_metrics`. Setting this
        to True will improve distributed training speed.
"""
return False
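# ----------------------------------------------------------------------------
# Summary (added for clarity): for a speech batch the criterion computes
#     loss = pred_masked_weight * sum_i L_m_i + pred_nomask_weight * sum_i L_u_i
#            + sum_j loss_weights[j] * extra_loss_j * sample_size
# and, when a paired-text batch is also present,
#     loss += text_mum_weight * L_mum_text * sample_size / text_masked_size
#           + text_ctc_weight * L_ctc * sample_size / text_ntokens
# where sample_size is the (averaged) number of prediction targets on the
# speech side; see forward() and compute_hubert_loss() above for the details.
# ----------------------------------------------------------------------------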
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/criterions/speechlm_criterion.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
from typing import List, Dict, Any
from dataclasses import dataclass, field
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.data.data_utils import lengths_to_mask
from fairseq.models.fairseq_model import FairseqEncoderModel
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
if reduce:
        # guard against ignore_index=None, in which case pad_mask is undefined
        ntokens = (~pad_mask).sum() if ignore_index is not None else nll_loss.numel()
nll_loss = nll_loss.sum() / ntokens
smooth_loss = smooth_loss.sum() / ntokens
eps_i = epsilon / (lprobs.size(-1) - 1)
loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
return loss, nll_loss
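# Added note: with V = lprobs.size(-1) output classes, the function above computes
#     eps_i = epsilon / (V - 1)
#     loss  = (1 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
# where nll_loss / smooth_loss are averaged over non-padding tokens when reduce=True.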
@dataclass
class FastText2UnitCriterionConfig(FairseqDataclass):
label_smoothing: float = field(
default=0.0,
metadata={"help": "epsilon for label smoothing, 0 means no label smoothing"},
)
dur_loss_weight: float = field(
default=1.0,
metadata={"help": "scale of duration loss"},
)
report_accuracy: bool = field(
default=True,
metadata={"help": "report decoder accuracy metric"},
)
@register_criterion("fasttext2unit_criterion", dataclass=FastText2UnitCriterionConfig)
class FastText2UnitLoss(FairseqCriterion):
def __init__(self,
task,
label_smoothing=0,
dur_loss_weight=1.0,
report_accuracy=False,
):
super().__init__(task)
self.eps = label_smoothing
self.dur_loss_weight = dur_loss_weight
self.pad_idx = task.tgt_dict.pad()
self.report_accuracy = report_accuracy
def forward(self, model: FairseqEncoderModel, sample, reduction="mean"):
src_tokens = sample["net_input"]["src_tokens"]
src_lens = sample["net_input"]["src_lengths"]
tgt_lens = sample["target_lengths"]
_feat_out, _feat_out_post, out_lens, log_dur_out, pitch_out, energy_out = model(
src_tokens=src_tokens,
src_lengths=src_lens,
prev_output_tokens=sample["net_input"]["prev_output_tokens"],
incremental_state=None,
target_lengths=tgt_lens,
speaker=sample["speaker"],
durations=sample["durations"],
pitches=sample["pitches"],
energies=sample["energies"],
)
src_mask = lengths_to_mask(sample["net_input"]["src_lengths"])
tgt_mask = lengths_to_mask(sample["target_lengths"])
lprobs = model.get_normalized_probs((_feat_out,), log_probs=True)
target = sample["target"].long()
        ce_loss, nll_loss = label_smoothed_nll_loss(lprobs, target, self.eps, self.pad_idx, reduce=True)
pitches, energies = sample["pitches"], sample["energies"]
if pitches is not None:
pitch_out, pitches = pitch_out[src_mask], pitches[src_mask]
pitch_loss = F.mse_loss(pitch_out, pitches, reduction=reduction)
else:
pitch_loss = 0
if energies is not None:
energy_out, energies = energy_out[src_mask], energies[src_mask]
energy_loss = F.mse_loss(energy_out, energies, reduction=reduction)
else:
energy_loss = 0
log_dur_out = log_dur_out[src_mask]
dur = sample["durations"].float()
dur = dur.half() if log_dur_out.type().endswith(".HalfTensor") else dur
log_dur = torch.log(dur + 1)[src_mask]
dur_loss = F.mse_loss(log_dur_out, log_dur, reduction=reduction)
dur_loss = self.dur_loss_weight * dur_loss
loss = ce_loss + dur_loss + pitch_loss + energy_loss
sample_size = sample["nsentences"]
logging_output = {
"loss": utils.item(loss.data),
"ntokens": sample["ntokens"],
"nsentences": sample["nsentences"],
"sample_size": sample_size,
"ce_loss": utils.item(ce_loss.data),
"dur_loss": utils.item(dur_loss.data),
"pitch_loss": utils.item(pitch_loss),
"energy_loss": utils.item(energy_loss),
}
if self.report_accuracy:
n_correct = lprobs.argmax(-1).masked_select(tgt_mask).eq(target.masked_select(tgt_mask)).sum()
logging_output["n_correct"] = utils.item(n_correct.data)
logging_output["total"] = tgt_mask.sum()
return loss, 1, logging_output
@classmethod
def reduce_metrics(cls, logging_outputs: List[Dict[str, Any]]) -> None:
ns = [log.get("sample_size", 0) for log in logging_outputs]
ntot = sum(ns)
ws = [n / (ntot + 1e-8) for n in ns]
for key in [
"loss",
"ce_loss",
"dur_loss",
"pitch_loss",
"energy_loss",
]:
vals = [log.get(key, 0) for log in logging_outputs]
val = sum(val * w for val, w in zip(vals, ws))
metrics.log_scalar(key, val, ntot, round=3)
metrics.log_scalar("sample_size", ntot, len(logging_outputs))
total = utils.item(sum(log.get("total", 0) for log in logging_outputs))
if total > 0:
metrics.log_scalar("total", total)
n_correct = utils.item(
sum(log.get("n_correct", 0) for log in logging_outputs)
)
metrics.log_scalar("n_correct", n_correct)
metrics.log_derived(
"accuracy",
lambda meters: round(
meters["n_correct"].sum * 100.0 / meters["total"].sum, 3
)
if meters["total"].sum > 0
else float("nan"),
)
# inference metrics
if "targ_frames" not in logging_outputs[0]:
return
n = sum(log.get("targ_frames", 0) for log in logging_outputs)
for key, new_key in [
("mcd_loss", "mcd_loss"),
("pred_frames", "pred_ratio"),
("nins", "ins_rate"),
("ndel", "del_rate"),
]:
val = sum(log.get(key, 0) for log in logging_outputs)
metrics.log_scalar(new_key, val / n, n, round=3)
@staticmethod
def logging_outputs_can_be_summed() -> bool:
return False
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/criterions/fasttext2unit_loss.py |
import importlib
import os
for file in os.listdir(os.path.dirname(__file__)):
if file.endswith(".py") and not file.startswith("_"):
criterion_name = file[: file.find(".py")]
importlib.import_module(
"speechlm.criterions." + criterion_name
)
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/criterions/__init__.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
import argparse
import logging
from pathlib import Path
from collections import defaultdict
import pandas as pd
import torchaudio
from tqdm import tqdm
import numpy as np
import torch
from fairseq.data.audio.audio_utils import convert_waveform
from examples.speech_to_text.data_utils import save_df_to_tsv
from examples.speech_synthesis.data_utils import extract_pitch
log = logging.getLogger(__name__)
def get_duration(fa_phone):
"""fa_phone: force-aligned phone, 1-D numpy"""
same = np.concatenate(([True], fa_phone[:-1] != fa_phone[1:], [True]))
index = np.where(same)[0]
count = np.diff(index)
return count
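# Illustrative example (not part of the original script): for a frame-level force-aligned
# phone sequence fa_phone = np.array([3, 3, 3, 7, 7]), get_duration returns
# np.array([3, 2]), i.e. the run length of each consecutive phone.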
def process(args):
# assert "train" in args.splits
out_root = Path(args.output_root).absolute()
out_root.mkdir(exist_ok=True)
print("Fetching data...")
audio_manifest_root = Path(args.audio_manifest_root).absolute()
for s in args.splits:
if args.add_pitch:
pitch_root = out_root / "pitch" / s
            pitch_root.mkdir(parents=True, exist_ok=True)
manifest = defaultdict(list)
with open(audio_manifest_root / f"{s}.audio.tsv") as f1, \
open(audio_manifest_root / f"{s}.phn") as f2, \
open(audio_manifest_root / f"{s}.km") as f3:
audio_root = f1.readline().strip()
audio_root = Path(audio_root)
for audio_path, fa_phone, fa_unit in tqdm(zip(f1, f2, f3)):
record = True
audio_path, n_frames = audio_path.strip().split("\t")
fa_phone = fa_phone.strip().split()
fa_unit = fa_unit.strip()
uttid = audio_path.split("/")[-1].split(".")[0]
speaker = uttid.split("-")[0]
if args.add_duration:
assert len(fa_phone) == len(fa_unit.split())
fa_phone = np.array(list(map(int, fa_phone)))
duration = get_duration(fa_phone)
reduced_phone = torch.LongTensor(fa_phone).unique_consecutive().numpy()
if args.add_pitch:
pitch_path = pitch_root / f"{uttid}.npy"
if not pitch_path.is_file():
waveform, sample_rate = torchaudio.load(audio_root / audio_path)
waveform, sample_rate = convert_waveform(
waveform, sample_rate, normalize_volume=args.normalize_volume,
)
pitch = extract_pitch(
waveform, sample_rate, None,
hop_length=args.hop_length, log_scale=True,
phoneme_durations=duration
)
if pitch is not None:
np.save(pitch_path.as_posix(), pitch)
else:
record = False
else:
reduced_phone = fa_phone
if record:
manifest["id"].append(uttid)
manifest["speaker"].append(speaker)
manifest["n_frames"].append(len(fa_unit.split()))
manifest["tgt_text"].append(" ".join(map(str, reduced_phone)))
manifest["unit"].append(fa_unit)
if args.add_duration:
manifest["duration"].append(" ".join(map(str, duration)))
if args.add_pitch:
manifest["pitch"].append(f"pitch/{s}/{uttid}.npy")
save_df_to_tsv(
pd.DataFrame.from_dict(manifest),
out_root / f"{s}.tsv"
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--audio-manifest-root", "-m", type=str)
parser.add_argument("--output-root", "-o", required=True, type=str)
parser.add_argument("--splits", "-s", type=str, nargs="+",
default=["train", "dev", "test"])
parser.add_argument("--normalize-volume", "-n", action="store_true")
parser.add_argument("--hop-length", type=int, default=256)
parser.add_argument("--add-duration", action="store_true")
parser.add_argument("--add-pitch", action="store_true")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/data_process/get_t2u_manifest.py |
import sys
def main():
for line in sys.stdin:
line = line.replace("<unk>", "")
line = " ".join(line.strip().split())
line = line.replace(" ", "|").upper() + "|"
print(" ".join(line))
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/data_process/wrd2ltr.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
import argparse
import logging
from pathlib import Path
from collections import defaultdict
import pandas as pd
from tqdm import tqdm
import numpy as np
from examples.speech_to_text.data_utils import save_df_to_tsv
log = logging.getLogger(__name__)
def get_duration(fa_phone):
"""fa_phone: force-aligned phone, 1-D numpy"""
same = np.concatenate(([True], fa_phone[:-1] != fa_phone[1:], [True]))
index = np.where(same)[0]
count = np.diff(index)
return count
def process(args):
# assert "train" in args.splits
out_root = Path(args.output_root).absolute()
out_root.mkdir(exist_ok=True)
print("Fetching data...")
audio_manifest_root = Path(args.audio_manifest_root).absolute()
for s in args.splits:
manifest = defaultdict(list)
with open(audio_manifest_root / f"{s}.phn") as f1:
for i, reduced_phone in tqdm(enumerate(f1)):
reduced_phone = reduced_phone.strip()
uttid = f"librilm-{i}"
speaker = uttid.split("-")[0]
manifest["id"].append(uttid)
manifest["speaker"].append(speaker)
manifest["n_frames"].append(len(reduced_phone))
manifest["tgt_text"].append(reduced_phone)
manifest["unit"].append(0)
save_df_to_tsv(
pd.DataFrame.from_dict(manifest),
out_root / f"{s}.tsv"
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--audio-manifest-root", "-m", type=str)
parser.add_argument("--output-root", "-o", required=True, type=str)
parser.add_argument("--splits", "-s", type=str, nargs="+",
default=["train", "dev", "test"])
parser.add_argument("--add-fastspeech-targets", action="store_true")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/data_process/get_t2u_manifest_textonly.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
import os
import argparse
from tqdm import tqdm
import numpy as np
lg_label = "__label__{}"
def writefile(filename, lines):
with open(filename, 'w', encoding='utf-8') as f:
f.writelines(lines)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--input", "-i", required=True, type=str)
parser.add_argument("--output", "-o", required=True, type=str)
parser.add_argument("--src", "-s", required=True, type=str)
parser.add_argument("--tgt", "-t", required=True, type=str)
parser.add_argument("--max-len", "-m", default=2998, type=int)
args = parser.parse_args()
src_lines, tgt_lines = [], []
with open(f"{args.input}.{args.src}", 'r') as f1, open(f"{args.input}.{args.tgt}", 'r') as f2:
for src_line, tgt_line in tqdm(zip(f1, f2)):
src_len = len(src_line.strip().split())
tgt_len = len(tgt_line.strip().split())
if src_len < args.max_len and src_len > 0 and tgt_len < args.max_len and tgt_len > 0:
src_lines.append(src_line)
tgt_lines.append(tgt_line)
writefile(f"{args.output}.{args.src}", src_lines)
writefile(f"{args.output}.{args.tgt}", tgt_lines)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/data_process/filter_paireddata_by_len.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
"""
Modified from https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4/examples/wav2vec/unsupervised/scripts/phonemize_with_sil.py
"""
import argparse
import numpy as np
import sys
from g2p_en import G2p
from tqdm import tqdm
import logging
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=logging.INFO,
)
logger = logging.getLogger(__name__)
def get_parser():
parser = argparse.ArgumentParser(
description="converts words to phones adding optional silences around in between words"
)
parser.add_argument(
"--sil-prob",
"-s",
type=float,
default=0,
help="probability of inserting silence between each word",
)
parser.add_argument(
"--surround",
action="store_true",
help="if set, surrounds each example with silence",
)
parser.add_argument(
"--lexicon",
help="lexicon to convert to phones",
required=True,
)
parser.add_argument(
"--strict",
action="store_true",
help="if set, OOV words will raise a error (for train/valid set)",
)
parser.add_argument(
"--input",
"-i",
help="input text file",
required=True,
)
parser.add_argument(
"--output",
"-o",
help="input text file",
required=True,
)
return parser
def normalize_phn(phons):
"""
convert g2p style phone to 39-phone set
"""
return [p.rstrip('0123456789') for p in phons]
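# Illustrative example (not part of the original script): g2p output
# ['HH', 'AH0', 'L', 'OW1'] is normalized to ['HH', 'AH', 'L', 'OW'] by stripping the
# stress digits.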
def main():
parser = get_parser()
args = parser.parse_args()
sil_prob = args.sil_prob
surround = args.surround
sil = "<SIL>"
wrd_to_phn = {}
g2p = G2p()
with open(args.lexicon, "r") as lf:
for line in lf:
items = line.rstrip().split()
assert len(items) > 1, line
assert items[0] not in wrd_to_phn, items
wrd_to_phn[items[0]] = items[1:]
with open(args.input, "r") as fin, open(args.output, "w", encoding="utf-8") as fout:
for line in tqdm(fin):
words = line.strip().upper().split()
            if not all(w in wrd_to_phn for w in words):
                if args.strict:
                    # keep the line; OOV words will be phonemized with g2p below
                    # logger.warning(f"| Warning: OOV words found: {line}")
                    pass
                else:
                    # drop lines that contain OOV words
                    continue
phones = []
if surround:
phones.append(sil)
sample_sil_probs = None
if sil_prob > 0 and len(words) > 1:
sample_sil_probs = np.random.random(len(words) - 1)
for i, w in enumerate(words):
if w in wrd_to_phn:
phones.extend(wrd_to_phn[w])
else:
phones.extend(normalize_phn(g2p(w)))
if (
sample_sil_probs is not None
and i < len(sample_sil_probs)
and sample_sil_probs[i] < sil_prob
):
phones.append(sil)
if surround:
phones.append(sil)
print(" ".join(phones), file=fout)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/data_process/phoneize_with_sil.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
"""
Modified from: https://github.com/facebookresearch/fairseq/blob/272c4c5197250997148fb12c0db6306035f166a4/examples/speech_to_text/prep_covost_data.py
1. normalize the punctuation
2. instead of extracting fbank features, we directly use the 16 kHz waveform
"""
import argparse
import logging
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Optional, Tuple
import pandas as pd
import torchaudio
from examples.speech_to_text.data_utils import (
filter_manifest_df,
gen_config_yaml,
gen_vocab,
load_df_from_tsv,
save_df_to_tsv,
)
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import download_url, extract_archive
from tqdm import tqdm
from pydub import AudioSegment
import soundfile as sf
import sacremoses
log = logging.getLogger(__name__)
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text"]
def mp3_convert_wav(mp3_file, wav_file):
    sound = AudioSegment.from_mp3(mp3_file)
    # convert to 16 kHz, mono, 16-bit PCM before exporting
    sound = sound.set_frame_rate(16000)
    sound = sound.set_channels(1)
    sound = sound.set_sample_width(2)
    sound.export(wav_file, format="wav")
class CoVoST(Dataset):
"""Create a Dataset for CoVoST (https://github.com/facebookresearch/covost).
Args:
root (str): root path to the dataset and generated manifests/features
source_language (str): source (audio) language
target_language (str, optional): target (text) language,
None for no translation (default: None)
version (int, optional): CoVoST version. (default: 2)
download (bool, optional): Whether to download the dataset if it is not
found at root path. (default: ``False``).
"""
COVOST_URL_TEMPLATE = (
"https://dl.fbaipublicfiles.com/covost/"
"covost_v2.{src_lang}_{tgt_lang}.tsv.tar.gz"
)
VERSIONS = {2}
SPLITS = ["train", "dev", "test"]
XX_EN_LANGUAGES = {
1: ["fr", "de", "nl", "ru", "es", "it", "tr", "fa", "sv-SE", "mn", "zh-CN"],
2: [
"fr",
"de",
"es",
"ca",
"it",
"ru",
"zh-CN",
"pt",
"fa",
"et",
"mn",
"nl",
"tr",
"ar",
"sv-SE",
"lv",
"sl",
"ta",
"ja",
"id",
"cy",
],
}
EN_XX_LANGUAGES = {
1: [],
2: [
"de",
"tr",
"fa",
"sv-SE",
"mn",
"zh-CN",
"cy",
"ca",
"sl",
"et",
"id",
"ar",
"ta",
"lv",
"ja",
],
}
def __init__(
self,
root: str,
split: str,
source_language: str,
target_language: Optional[str] = None,
version: int = 2,
) -> None:
assert version in self.VERSIONS and split in self.SPLITS
assert source_language is not None
self.no_translation = target_language is None
if not self.no_translation:
assert "en" in {source_language, target_language}
if source_language == "en":
assert target_language in self.EN_XX_LANGUAGES[version]
else:
assert source_language in self.XX_EN_LANGUAGES[version]
else:
# Hack here so that we can get "split" column from CoVoST TSV.
# Note that we use CoVoST train split for ASR which is an extension
# to Common Voice train split.
target_language = "de" if source_language == "en" else "en"
self.root: Path = Path(root)
cv_tsv_path = self.root / "validated.tsv"
assert cv_tsv_path.is_file()
covost_url = self.COVOST_URL_TEMPLATE.format(
src_lang=source_language, tgt_lang=target_language
)
covost_archive = self.root / Path(covost_url).name
if not covost_archive.is_file():
download_url(covost_url, self.root.as_posix(), hash_value=None)
extract_archive(covost_archive.as_posix())
cv_tsv = load_df_from_tsv(cv_tsv_path)
covost_tsv = load_df_from_tsv(
self.root / Path(covost_url).name.replace(".tar.gz", "")
)
df = pd.merge(
left=cv_tsv[["path", "sentence", "client_id"]],
right=covost_tsv[["path", "translation", "split"]],
how="inner",
on="path",
)
if split == "train":
df = df[(df["split"] == split) | (df["split"] == f"{split}_covost")]
else:
df = df[df["split"] == split]
data = df.to_dict(orient="index").items()
data = [v for k, v in sorted(data, key=lambda x: x[0])]
self.data = []
for e in data:
try:
path = self.root / "clips" / e["path"]
_ = torchaudio.info(path.as_posix())
self.data.append(e)
except RuntimeError:
pass
self.normalizer = sacremoses.MosesPunctNormalizer(
lang=target_language,
pre_replace_unicode_punct=True,
post_remove_control_chars=True,
)
def __getitem__(
self, n: int
) -> Tuple[Tensor, int, str, str, Optional[str], str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
            tuple: ``(audio_path, -1, sentence, translation, speaker_id,
                sample_id)`` -- note: only the audio file path is returned here,
                the waveform itself is not loaded
"""
data = self.data[n]
path = self.root / "clips" / data["path"]
# waveform, sample_rate = torchaudio.load(path)
sentence = data["sentence"]
translation = None if self.no_translation else data["translation"]
translation = self.normalizer.normalize(translation)
speaker_id = data["client_id"]
_id = data["path"].replace(".mp3", "")
return path, -1, sentence, translation, speaker_id, _id
def __len__(self) -> int:
return len(self.data)
def process(args):
root = Path(args.data_root).absolute() / args.src_lang
outroot = root / f"{args.src_lang}-{args.tgt_lang}"
if args.vocab_type != "char":
outroot = root / f"{args.src_lang}-{args.tgt_lang}-{args.vocab_type}"
if not root.is_dir():
raise NotADirectoryError(f"{root} does not exist")
    # 1. Extract features
    # mp3-to-wav conversion can take a very long time; it is better to run it externally with multiple threads.
feature_root = root / "wav"
# feature_root.mkdir(exist_ok=True)
# for split in CoVoST.SPLITS:
# print(f"Fetching split {split}...")
# dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)
# print("Converting mp3 to wav...")
# handle = open(root / f"{split}.id", "w")
# for waveform, _, _, _, _, utt_id in tqdm(dataset):
# wav_file = feature_root / f"{utt_id}.wav"
# print(waveform, file=handle)
# mp3_convert_wav(waveform, wav_file)
#2. Generate TSV manifest
print("Generating manifest...")
train_text = []
task = f"asr_{args.src_lang}"
if args.tgt_lang is not None:
task = f"st_{args.src_lang}_{args.tgt_lang}"
for split in CoVoST.SPLITS:
manifest = {c: [] for c in MANIFEST_COLUMNS}
dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)
        for audio_path, _, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset):
wav_file = feature_root / f"{utt_id}.wav"
manifest["id"].append(utt_id)
manifest["audio"].append(wav_file.as_posix().replace("/data/", "/mnt/default/"))
manifest["n_frames"].append(sf.info(wav_file).frames)
manifest["tgt_text"].append(src_utt if args.tgt_lang is None else tgt_utt)
is_train_split = split.startswith("train")
if is_train_split:
train_text.extend(manifest["tgt_text"])
df = pd.DataFrame.from_dict(manifest)
df = filter_manifest_df(df, is_train_split=is_train_split, min_n_frames=320, max_n_frames=480000)
save_df_to_tsv(df, outroot / f"{split}_{task}.tsv")
# Generate vocab
vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{task}"
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
outroot / spm_filename_prefix,
args.vocab_type,
args.vocab_size
)
# Generate config YAML
# gen_config_yaml(
# outroot,
# spm_filename=spm_filename_prefix + ".model",
# yaml_filename=f"config_{task}.yaml",
# specaugment_policy="lb",
# )
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data-root", "-d", required=True, type=str,
help="data root with sub-folders for each language <root>/<src_lang>"
)
parser.add_argument(
"--vocab-type",
default="unigram",
required=True,
type=str,
choices=["bpe", "unigram", "char"],
    )
parser.add_argument("--vocab-size", default=1000, type=int)
parser.add_argument("--src-lang", "-s", required=True, type=str)
parser.add_argument("--tgt-lang", "-t", type=str)
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/data_process/covost2/prepare_covost_data.py |
import argparse
from tqdm import tqdm
from pydub import AudioSegment
import torchaudio
import os
def mp3_convert_wav(mp3_file, wav_file):
try:
sound = AudioSegment.from_mp3(mp3_file)
        sound = sound.set_frame_rate(16000)
        sound = sound.set_channels(1)
        sound = sound.set_sample_width(2)
sound.export(wav_file, format="wav")
except Exception as e:
print(e)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--input", "-i", required=True, type=str)
parser.add_argument("--shard", "-n", required=True, type=int)
parser.add_argument("--rank", "-r", required=True, type=int)
args = parser.parse_args()
assert args.rank < args.shard, f"rank: {args.rank} >= shard: {args.shard}"
with open(args.input, 'r') as f:
files = [line.strip() for line in f ]
mp3_files = files[args.rank::args.shard]
for mp3_file in tqdm(mp3_files):
wav_file = mp3_file.replace("/clips/", "/wav/").replace(".mp3", ".wav")
if os.path.exists(wav_file):
try:
torchaudio.info(wav_file)
except Exception as e:
print(e)
mp3_convert_wav(mp3_file, wav_file)
else:
mp3_convert_wav(mp3_file, wav_file)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/data_process/covost2/mp3_to_wav.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
import os
import tqdm
import argparse
import numpy as np
parser = argparse.ArgumentParser()
parser.add_argument("--input", "-i", required=True, type=str)
parser.add_argument("--output", "-o", required=True, type=str)
parser.add_argument("--lexicon", default='align_lexicon.txt', type=str)
args = parser.parse_args()
sil_prob = 0.25
if not os.path.exists(args.lexicon):
print(f"| Warning: lexicon {args.lexicon} not found, downloading ...")
try:
os.system(f"wget --no-check-certificate 'https://drive.google.com/uc?export=download&id=1QVeyCpLXLnujBUAickpo-jaSVY-vKLnT' -O {args.lexicon}")
except Exception as e:
print(e)
print(f"| Error downloading {args.lexicon}, please download it from https://drive.google.com/file/d/1QVeyCpLXLnujBUAickpo-jaSVY-vKLnT/view?usp=sharing")
exit(1)
dict = {}
f = open(args.lexicon)
for l in f:
dict[l.split()[0]] = l.split()[2:]
assert l.split()[0] == l.split()[1]
f = open(args.input, 'r')
w_f = open(f'{args.output}.kaldi_phn_sil025', 'w')
w_oov = open(f'{args.output}.kaldi_phn_sil025.oov', 'w')
oov_nums = 0
total_nums = 0
for l in tqdm.tqdm(f):
words = l.strip().replace(" ", "").split("|")
# words = l.strip().upper().split()
words = [w for w in words if w != '']
phones = []
phones.extend(dict['!SIL'])
sample_sil_probs = None
if sil_prob > 0 and len(words) > 1:
sample_sil_probs = np.random.random(len(words) - 1)
for i, w in enumerate(words):
total_nums += 1
        if w not in dict:
            # record the original OOV word before replacing it with <UNK>
            oov_nums += 1
            w_oov.write(w + '\n')
            w = '<UNK>'
        phones.extend(dict[w])
if (
sample_sil_probs is not None
and i < len(sample_sil_probs)
and sample_sil_probs[i] < sil_prob
):
phones.extend(dict['!SIL'])
phones.extend(dict['!SIL'])
w_f.write(' '.join(phones) + '\n')
w_oov.write(f'{oov_nums}\n')
print(f"OOV rate: {oov_nums}/{total_nums}")
# !!! After processing, use this command to adjust the SIL tokens:
### sed -i 's/SIL_S/SIL/g' your_file
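# Illustrative end-to-end usage (file names are assumed, not part of the original script):
#   python ltr2kaldi_phn_sil025.py -i train.ltr -o train --lexicon align_lexicon.txt
# reads letter-format transcripts from train.ltr and writes the phonemized output to
# train.kaldi_phn_sil025, plus train.kaldi_phn_sil025.oov listing the OOV words
# (with the final OOV count appended).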
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/data_process/phoneme_tokenizer/ltr2kaldi_phn_sil025.py |
# ----------------------------------------------------------------------------
# SpeechLM: Enhanced Speech Pre-Training with Unpaired Textual Data (https://arxiv.org/abs/2209.15329)
# Github source: https://github.com/microsoft/SpeechT5/tree/main/SpeechLM
# Code based on fairseq: https://github.com/facebookresearch/fairseq/tree/272c4c5197250997148fb12c0db6306035f166a4
#
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# ----------------------------------------------------------------------------
import sys, json, tqdm
import numpy as np
input_file = sys.argv[1]
mean_and_std_file = sys.argv[2]
out_file = sys.argv[3]
mean_and_std = json.load(open(mean_and_std_file, 'r'))
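# Assumed structure of the phone-duration stats file (values are illustrative):
#   {"AH": [4.2, 1.9], "SIL": [12.0, 8.5], ...}   i.e. {phone: [mean_duration, std_duration]}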
with open(input_file, 'r') as f, open(out_file, 'w') as w:
for line in tqdm.tqdm(f):
l = line.split()
new_l = []
for phn in l:
if phn not in mean_and_std:
mean_and_std[phn] = [5, 2.5]
print(f'unk phone {phn}')
n = max(1, round(np.random.normal(loc=mean_and_std[phn][0], scale=mean_and_std[phn][1])))
new_l.extend([phn] * int(n))
        # if the expanded sequence is too long, shrink it by lowering every phone's mean
        # duration step by step (assumes the input line itself has fewer than 4375 phones,
        # otherwise this loop would not terminate)
        minus = 0
        while len(new_l) >= 4375:
            minus += 1
            new_l = []
            for phn in l:
                n = max(1, round(mean_and_std[phn][0] - minus))
                new_l.extend([phn] * n)
            print(f"line too long, retrying with mean durations reduced by {minus}")
w.write(' '.join(new_l)+'\n')
| EXA-1-master | exa/models/unilm-master/speechlm/speechlm/data_process/phoneme_tokenizer/repeat_withou_insert_sil_less_4375.py |
import json
import sys
import argparse
sys.path.insert(0, './src')
from logger_config import logger
from metrics import compute_mrr, trec_eval
from utils import save_json_to_file
from data_utils import load_qrels, load_msmarco_predictions
parser = argparse.ArgumentParser(description='compute metrics for ms-marco predictions')
parser.add_argument('--in-path', default='', type=str, metavar='N',
help='path to predictions in msmarco output format')
parser.add_argument('--qrels', default='./data/msmarco/dev_qrels.txt', type=str, metavar='N',
help='path to qrels')
args = parser.parse_args()
logger.info('Args={}'.format(json.dumps(args.__dict__, ensure_ascii=False, indent=4)))
def main():
qrels = load_qrels(path=args.qrels)
predictions = load_msmarco_predictions(args.in_path)
all_metrics = trec_eval(qrels=qrels, predictions=predictions)
all_metrics['mrr'] = compute_mrr(qrels=qrels, predictions=predictions)
logger.info(json.dumps(all_metrics, ensure_ascii=False, indent=4))
save_json_to_file(all_metrics, '{}.metrics.json'.format(args.in_path))
if __name__ == '__main__':
main()
| EXA-1-master | exa/models/unilm-master/simlm/misc/compute_metrics_marco.py |
import json
import os
import sys
import tqdm
import argparse
sys.path.insert(0, './src')
from typing import List, Dict
from utils import save_json_to_file
from logger_config import logger
from data_utils import load_qrels, load_corpus, load_queries, load_msmarco_predictions, ScoredDoc
from metrics import get_rel_threshold
parser = argparse.ArgumentParser(description='convert ms-marco predictions to a human-readable format')
parser.add_argument('--in-path', default='', type=str, metavar='N',
help='path to predictions in msmarco output format')
parser.add_argument('--split', default='dev', type=str, metavar='N',
help='which split to use')
parser.add_argument('--data-dir', default='./data/msmarco/', type=str, metavar='N',
help='data dir')
args = parser.parse_args()
logger.info('Args={}'.format(json.dumps(args.__dict__, ensure_ascii=False, indent=4)))
def main(topk: int = 10):
predictions: Dict[str, List[ScoredDoc]] = load_msmarco_predictions(path=args.in_path)
path_qrels = '{}/{}_qrels.txt'.format(args.data_dir, args.split)
qrels = load_qrels(path=path_qrels) if os.path.exists(path_qrels) else None
queries = load_queries(path='{}/{}_queries.tsv'.format(args.data_dir, args.split))
corpus = load_corpus(path='{}/passages.jsonl.gz'.format(args.data_dir))
pred_infos = []
out_path = '{}.details.json'.format(args.in_path)
rel_threshold = get_rel_threshold(qrels) if qrels else -1
for qid in tqdm.tqdm(queries):
pred_docs = []
for scored_doc in predictions[qid][:topk]:
correct = qrels is not None and scored_doc.pid in qrels[qid] and qrels[qid][scored_doc.pid] >= rel_threshold
pred_docs.append({'id': scored_doc.pid,
'contents': corpus[int(scored_doc.pid)]['contents'],
'title': corpus[int(scored_doc.pid)]['title'],
'score': scored_doc.score})
if qrels is not None:
pred_docs[-1]['correct'] = correct
if correct: break
gold_rank, gold_score = -1, -1
for idx, scored_doc in enumerate(predictions[qid]):
if qrels is None:
break
if scored_doc.pid in qrels[qid] and qrels[qid][scored_doc.pid] >= rel_threshold:
gold_rank = idx + 1
gold_score = scored_doc.score
break
pred_info = {'query_id': qid,
'query': queries[qid],
'pred_docs': pred_docs}
if qrels is not None:
pred_info.update({
'gold_docs': [corpus[int(doc_id)] for doc_id in qrels[qid] if qrels[qid][doc_id] >= rel_threshold],
'gold_score': gold_score,
'gold_rank': gold_rank
})
pred_infos.append(pred_info)
save_json_to_file(pred_infos, out_path)
logger.info('Save prediction details to {}'.format(out_path))
if __name__ == '__main__':
main()
| EXA-1-master | exa/models/unilm-master/simlm/misc/marco_pred_to_cases.py |
import os
import io
import gzip
import json
import random
import argparse
import ir_datasets
import numpy as np
import sys
sys.path.insert(0, 'src/')
from tqdm import tqdm
from typing import Dict, List
from datasets import Dataset
from logger_config import logger
from utils import save_json_to_file
from data_utils import load_msmarco_predictions, load_queries, load_qrels, load_corpus, \
ScoredDoc, save_to_readable_format
parser = argparse.ArgumentParser(description='data preprocessing')
parser.add_argument('--out-dir', default='./data/msmarco/', type=str, metavar='N',
help='output directory')
parser.add_argument('--train-pred-path', default='./preds/official/train.msmarco.txt',
type=str, metavar='N', help='path to train predictions to construct negatives')
parser.add_argument('--dev-pred-path', default='./preds/official/dev.msmarco.txt',
type=str, metavar='N', help='path to dev predictions to construct negatives')
parser.add_argument('--num-negatives', default=210, type=int, metavar='N',
help='number of negative passages')
parser.add_argument('--num-random-neg', default=10, type=int, metavar='N',
help='number of random negatives to use')
parser.add_argument('--depth', default=200, type=int, metavar='N',
help='depth to choose negative passages from')
parser.add_argument('--title-path', default='./data/msmarco/para.title.txt',
type=str, metavar='N', help='path to titles data')
parser.add_argument('--create-train-dev-only', action='store_true',
                    help='only create the prepared train/dev files (skip exporting corpus, queries and qrels)')
parser.add_argument('--filter-noisy-positives', action='store_true', help='filter noisy positives or not')
args = parser.parse_args()
os.makedirs(args.out_dir, exist_ok=True)
logger.info('Args: {}'.format(json.dumps(args.__dict__, ensure_ascii=False, indent=4)))
def _write_corpus_to_disk():
dataset = ir_datasets.load('msmarco-passage/train')
titles = []
if os.path.exists(args.title_path):
titles = [line.strip().split('\t')[1] for line in tqdm(open(args.title_path).readlines(), desc='load title')]
logger.info('Load {} titles from {}'.format(len(titles), args.title_path))
else:
logger.warning('No title data found: {}'.format(args.title_path))
title_idx = 0
out_path = os.path.join(args.out_dir, 'passages.jsonl.gz')
with gzip.open(out_path, 'wb') as output:
with io.TextIOWrapper(output, encoding='utf-8') as writer:
for doc in tqdm(dataset.docs_iter()):
ex = {'id': doc.doc_id, 'contents': doc.text}
if titles:
ex['title'] = titles[title_idx]
title_idx += 1
writer.write(json.dumps(ex, ensure_ascii=False, separators=(',', ':')))
writer.write('\n')
if titles:
assert title_idx == len(titles), '{} != {}'.format(title_idx, len(titles))
def _write_queries_to_disk(split: str, out_path: str):
dataset = ir_datasets.load("msmarco-passage/{}".format(split))
with open(out_path, 'w', encoding='utf-8') as writer:
for query in dataset.queries_iter():
writer.write('{}\t{}\n'.format(query.query_id, query.text))
logger.info('Write {} queries to {}'.format(split, out_path))
def _write_qrels_to_disk(split: str, out_path: str):
dataset = ir_datasets.load("msmarco-passage/{}".format(split))
with open(out_path, 'w', encoding='utf-8') as writer:
for qrel in dataset.qrels_iter():
# query_id, iteration, doc_id, relevance
writer.write('{}\t{}\t{}\t{}\n'
.format(qrel.query_id, qrel.iteration, qrel.doc_id, qrel.relevance))
logger.info('Write {} qrels to {}'.format(split, out_path))
def _write_prepared_data_to_disk(out_path: str,
corpus: Dataset,
queries: Dict[str, str],
qrels: Dict[str, Dict[str, int]],
preds: Dict[str, List[ScoredDoc]],
is_train: bool = False):
cnt_noisy_positive = 0
cnt_output = 0
with open(out_path, 'w', encoding='utf-8') as writer:
for query_id in tqdm(qrels, mininterval=2):
positive_doc_ids: Dict = qrels.get(query_id)
if not positive_doc_ids:
logger.warning('No positive found for query_id={}'.format(query_id))
continue
if is_train and args.filter_noisy_positives \
and all(sd.pid not in positive_doc_ids for sd in preds.get(query_id, [])):
cnt_noisy_positive += 1
continue
# For official triples, only use those with negative doc ids
if not preds.get(query_id, []):
continue
doc_id_to_score = {scored_doc.pid: scored_doc.score for scored_doc in preds.get(query_id, [])}
negative_scored_docs = [scored_doc for scored_doc in preds.get(query_id, [])
if scored_doc.pid not in positive_doc_ids][:args.depth]
np.random.shuffle(negative_scored_docs)
negative_scored_docs = negative_scored_docs[:(args.num_negatives - args.num_random_neg)]
if len(negative_scored_docs) < args.num_negatives:
if not negative_scored_docs:
                    logger.warning('No negatives found for query_id={} ({}), will use random negatives'
                                   .format(query_id, queries[query_id]))
while len(negative_scored_docs) < args.num_negatives:
sd = ScoredDoc(qid=query_id, pid=str(random.randint(0, len(corpus) - 1)), rank=args.depth)
if sd.pid not in positive_doc_ids and sd.pid not in doc_id_to_score:
negative_scored_docs.append(sd)
np.random.shuffle(negative_scored_docs)
example = {'query_id': query_id,
'query': queries[query_id],
'positives': {'doc_id': list(positive_doc_ids),
'score': [doc_id_to_score.get(doc_id, -1.) for doc_id in positive_doc_ids]
},
'negatives': {'doc_id': [scored_doc.pid for scored_doc in negative_scored_docs],
'score': [scored_doc.score for scored_doc in negative_scored_docs]
},
}
writer.write(json.dumps(example, ensure_ascii=False, separators=(',', ':')))
writer.write('\n')
cnt_output += 1
if is_train and args.filter_noisy_positives:
logger.info('Filter {} noisy positives'.format(cnt_noisy_positive))
logger.info('Write {} examples to {}'.format(cnt_output, out_path))
if __name__ == '__main__':
if not args.create_train_dev_only:
_write_queries_to_disk(split='dev/small', out_path=os.path.join(args.out_dir, 'dev_queries.tsv'))
_write_queries_to_disk(split='eval/small', out_path=os.path.join(args.out_dir, 'test_queries.tsv'))
_write_queries_to_disk(split='trec-dl-2019/judged',
out_path=os.path.join(args.out_dir, 'trec_dl2019_queries.tsv'))
_write_queries_to_disk(split='trec-dl-2020/judged',
out_path=os.path.join(args.out_dir, 'trec_dl2020_queries.tsv'))
_write_queries_to_disk(split='train/judged', out_path=os.path.join(args.out_dir, 'train_queries.tsv'))
_write_qrels_to_disk(split='dev/small', out_path=os.path.join(args.out_dir, 'dev_qrels.txt'))
_write_qrels_to_disk(split='trec-dl-2019/judged',
out_path=os.path.join(args.out_dir, 'trec_dl2019_qrels.txt'))
_write_qrels_to_disk(split='trec-dl-2020/judged',
out_path=os.path.join(args.out_dir, 'trec_dl2020_qrels.txt'))
_write_qrels_to_disk(split='train/judged', out_path=os.path.join(args.out_dir, 'train_qrels.txt'))
_write_corpus_to_disk()
corpus = load_corpus(path=os.path.join(args.out_dir, 'passages.jsonl.gz'))
_write_prepared_data_to_disk(out_path=os.path.join(args.out_dir, 'dev.jsonl'),
corpus=corpus,
queries=load_queries(path=os.path.join(args.out_dir, 'dev_queries.tsv')),
qrels=load_qrels(path=os.path.join(args.out_dir, 'dev_qrels.txt')),
preds=load_msmarco_predictions(path=args.dev_pred_path))
_write_prepared_data_to_disk(out_path=os.path.join(args.out_dir, 'train.jsonl'),
corpus=corpus,
queries=load_queries(path=os.path.join(args.out_dir, 'train_queries.tsv')),
qrels=load_qrels(path=os.path.join(args.out_dir, 'train_qrels.txt')),
preds=load_msmarco_predictions(path=args.train_pred_path),
is_train=True)
save_to_readable_format(in_path=os.path.join(args.out_dir, 'dev.jsonl'), corpus=corpus)
save_to_readable_format(in_path=os.path.join(args.out_dir, 'train.jsonl'), corpus=corpus)
save_json_to_file(args.__dict__, path=os.path.join(args.out_dir, 'train_dev_create_args.json'))
src_path = args.dev_pred_path
dst_path = '{}/{}'.format(args.out_dir, os.path.basename(args.dev_pred_path))
logger.info('copy {} to {}'.format(src_path, dst_path))
os.system('cp {} {}'.format(src_path, dst_path))
for trec_split in ['trec_dl2019', 'trec_dl2020', 'test']:
trec_pred_path = '{}/{}.msmarco.txt'.format(os.path.dirname(args.dev_pred_path), trec_split)
dst_path = '{}/{}'.format(args.out_dir, os.path.basename(trec_pred_path))
if not os.path.exists(trec_pred_path):
logger.warning('{} does not exist'.format(trec_pred_path))
continue
logger.info('copy {} to {}'.format(trec_pred_path, dst_path))
os.system('cp {} {}'.format(trec_pred_path, dst_path))
| EXA-1-master | exa/models/unilm-master/simlm/misc/prepare_msmarco_data.py |
import os
import json
import argparse
import sys
import numpy as np
sys.path.insert(0, 'src/')
from tqdm import tqdm
from typing import Dict, Any
from logger_config import logger
from data_utils import load_query_answers, load_corpus, save_to_readable_format
parser = argparse.ArgumentParser(description='data preprocessing for NQ & TriviaQA in DPR paper')
parser.add_argument('--out-dir', default='./data/dpr/', type=str, metavar='N',
help='output directory')
parser.add_argument('--task', default='nq', type=str, metavar='N',
help='task name, nq or tq')
parser.add_argument('--train-pred-path', default='amlt/0621_cont_100k_psg16_ft/nq/nq_train.dpr.json',
type=str, metavar='N', help='path to train predictions to construct negatives')
parser.add_argument('--dev-pred-path', default='amlt/0621_cont_100k_psg16_ft/nq/nq_dev.dpr.json',
type=str, metavar='N', help='path to dev predictions to construct negatives')
parser.add_argument('--num-negatives', default=100, type=int, metavar='N',
help='number of negative passages')
parser.add_argument('--depth', default=100, type=int, metavar='N',
help='depth to choose negative passages from')
args = parser.parse_args()
os.makedirs(args.out_dir, exist_ok=True)
logger.info('Args: {}'.format(json.dumps(args.__dict__, ensure_ascii=False, indent=4)))
assert args.task in ['nq', 'tq']
def _load_qid_to_positive(path: str) -> Dict[str, str]:
if args.task != 'nq':
logger.warning('Only NQ has manually labeled positives')
return {}
examples = json.load(open(path, 'r', encoding='utf-8'))
qid_to_pos_id = {}
for ex in examples:
positive_ctxs = ex['positive_ctxs']
if len(positive_ctxs) > 0:
qid_to_pos_id[ex['question']] = str(int(positive_ctxs[0]['passage_id']) - 1)
logger.info('Get {} manually labeled positives from {}'.format(len(qid_to_pos_id), path))
return qid_to_pos_id
def _write_prepared_data_to_disk(out_path: str,
split: str,
queries: Dict[str, Dict[str, Any]],
preds_path: str):
qid_to_pos_id = _load_qid_to_positive(path='{}/biencoder-nq-{}.json'.format(args.out_dir, split))
cnt_filtered = 0
preds = json.load(open(preds_path, 'r', encoding='utf-8'))
with open(out_path, 'w', encoding='utf-8') as writer:
for query_id in tqdm(queries, mininterval=1, desc='prepare {} data'.format(split)):
cur_pred: dict = preds[query_id] if query_id in preds else preds[query_id.strip()]
positive_ids, negative_ids = [], []
manual_positive_id = qid_to_pos_id.get(query_id, None)
if manual_positive_id:
positive_ids.append(manual_positive_id)
for ctx in cur_pred['contexts'][:args.depth]:
doc_id = str(ctx['docid'])
if doc_id == manual_positive_id:
continue
elif ctx['has_answer']:
positive_ids.append(doc_id)
else:
negative_ids.append(doc_id)
if not positive_ids or not negative_ids:
cnt_filtered += 1
continue
np.random.shuffle(negative_ids)
negative_ids = negative_ids[:args.num_negatives]
doc_id_to_score = {str(ctx['docid']): float(ctx['score']) for ctx in cur_pred['contexts']}
            if manual_positive_id:
                # give the manually labeled positive the highest score
                doc_id_to_score[manual_positive_id] = 1000.
example = {
'query_id': query_id,
'query': queries[query_id]['query'],
'answers': queries[query_id]['answers'],
'positives': {'doc_id': positive_ids,
'score': [doc_id_to_score.get(doc_id, -1.) for doc_id in positive_ids]
},
'negatives': {'doc_id': negative_ids,
'score': [doc_id_to_score.get(doc_id, -1.) for doc_id in negative_ids]
},
}
writer.write(json.dumps(example, ensure_ascii=False, separators=(',', ':')))
writer.write('\n')
if cnt_filtered > 0:
logger.info('{} questions are filtered out'.format(cnt_filtered))
logger.info('Done write {} data to {}'.format(split, out_path))
if __name__ == '__main__':
for split in ['dev', 'train']:
_write_prepared_data_to_disk(
out_path=os.path.join(args.out_dir, '{}_hard_{}.jsonl'.format(args.task, split)),
split=split,
queries=load_query_answers(path=os.path.join(args.out_dir, '{}_{}_queries.tsv'.format(args.task, split))),
preds_path=(args.train_pred_path if split == 'train' else args.dev_pred_path)
)
corpus = load_corpus(path=os.path.join(args.out_dir, 'passages.jsonl.gz'))
for split in ['dev', 'train']:
save_to_readable_format(in_path=os.path.join(args.out_dir, '{}_hard_{}.jsonl'.format(args.task, split)),
corpus=corpus)
logger.info('Done')
| EXA-1-master | exa/models/unilm-master/simlm/misc/dpr/mine_hard_negatives.py |
import os
import argparse
import json
import sys
sys.path.insert(0, 'src/')
from tqdm import tqdm
from typing import Dict, Any
from datasets import Dataset
from evaluate_dpr_retrieval import has_answers, SimpleTokenizer, evaluate_retrieval
from data_utils import load_query_answers, load_corpus
from utils import save_json_to_file
from logger_config import logger
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert an TREC run to DPR retrieval result json.')
parser.add_argument('--data-dir', required=True, help='data dir')
parser.add_argument('--topics', required=True, help='topic name')
parser.add_argument('--topk', type=int, nargs='+', help="topk to evaluate")
parser.add_argument('--input', required=True, help='Input TREC run file.')
parser.add_argument('--store-raw', action='store_true', help='Store raw text of passage')
parser.add_argument('--regex', action='store_true', default=False, help="regex match")
parser.add_argument('--output', required=True, help='Output DPR Retrieval json file.')
args = parser.parse_args()
qas = load_query_answers(path=args.topics)
corpus = load_corpus(path=os.path.join(args.data_dir, 'passages.jsonl.gz'))
retrieval = {}
tokenizer = SimpleTokenizer()
predictions = []
for line in tqdm(open(args.input), mininterval=1):
question_id, doc_idx, _, score = line.strip().split('\t')[:4]
predictions.append({'question_id': question_id,
'doc_idx': int(doc_idx),
'score': score})
dataset = Dataset.from_dict({'question_id': [ex['question_id'] for ex in predictions],
'doc_idx': [ex['doc_idx'] for ex in predictions],
'score': [ex['score'] for ex in predictions]})
logger.info('Get {} predictions in total'.format(len(dataset)))
def _map_func(example: Dict[str, Any]) -> dict:
question_id, doc_idx, score = example['question_id'], example['doc_idx'], example['score']
question = qas[question_id]['query']
answers = qas[question_id]['answers']
title, text = corpus[doc_idx]['title'], corpus[doc_idx]['contents']
ctx = '{}\n{}'.format(title, text)
answer_exist = has_answers(text, answers, tokenizer, args.regex)
example['question'] = question
example['answers'] = answers
example['docid'] = doc_idx
example['has_answer'] = answer_exist
if args.store_raw:
example['text'] = ctx
return example
dataset = dataset.map(_map_func,
num_proc=min(os.cpu_count(), 16))
retrieval = {}
for ex in tqdm(dataset, mininterval=2, desc='convert to dpr format'):
question_id, question, answers = ex['question_id'], ex['question'], ex['answers']
if question_id not in retrieval:
retrieval[question_id] = {'question': question, 'answers': answers, 'contexts': []}
retrieval[question_id]['contexts'].append(
{k: ex[k] for k in ['docid', 'score', 'text', 'has_answer'] if k in ex}
)
save_json_to_file(retrieval, path=args.output)
logger.info('Convert {} to {} done'.format(args.input, args.output))
metrics = evaluate_retrieval(retrieval_file=args.output,
topk=args.topk,
regex=args.regex)
logger.info('{} recall metrics: {}'.format(
os.path.basename(args.output),
json.dumps(metrics, ensure_ascii=False, indent=4)))
base_dir, base_name = os.path.dirname(args.output), os.path.basename(args.output)
save_json_to_file(metrics, path='{}/metrics_{}'.format(base_dir, base_name))
| EXA-1-master | exa/models/unilm-master/simlm/misc/dpr/format_and_evaluate.py |
"""
Most of the tokenization code here is copied from Facebook/DPR & DrQA codebase to avoid adding an extra dependency
"""
import argparse
import copy
import json
import logging
import re
import unicodedata
from tqdm import tqdm
import numpy as np
import regex
logger = logging.getLogger(__name__)
class Tokens(object):
"""A class to represent a list of tokenized text."""
TEXT = 0
TEXT_WS = 1
SPAN = 2
POS = 3
LEMMA = 4
NER = 5
def __init__(self, data, annotators, opts=None):
self.data = data
self.annotators = annotators
self.opts = opts or {}
def __len__(self):
"""The number of tokens."""
return len(self.data)
def slice(self, i=None, j=None):
"""Return a view of the list of tokens from [i, j)."""
new_tokens = copy.copy(self)
new_tokens.data = self.data[i: j]
return new_tokens
def untokenize(self):
"""Returns the original text (with whitespace reinserted)."""
return ''.join([t[self.TEXT_WS] for t in self.data]).strip()
def words(self, uncased=False):
"""Returns a list of the text of each token
Args:
uncased: lower cases text
"""
if uncased:
return [t[self.TEXT].lower() for t in self.data]
else:
return [t[self.TEXT] for t in self.data]
def offsets(self):
"""Returns a list of [start, end) character offsets of each token."""
return [t[self.SPAN] for t in self.data]
def pos(self):
"""Returns a list of part-of-speech tags of each token.
Returns None if this annotation was not included.
"""
if 'pos' not in self.annotators:
return None
return [t[self.POS] for t in self.data]
def lemmas(self):
"""Returns a list of the lemmatized text of each token.
Returns None if this annotation was not included.
"""
if 'lemma' not in self.annotators:
return None
return [t[self.LEMMA] for t in self.data]
def entities(self):
"""Returns a list of named-entity-recognition tags of each token.
Returns None if this annotation was not included.
"""
if 'ner' not in self.annotators:
return None
return [t[self.NER] for t in self.data]
def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):
"""Returns a list of all ngrams from length 1 to n.
Args:
n: upper limit of ngram length
uncased: lower cases text
filter_fn: user function that takes in an ngram list and returns
True or False to keep or not keep the ngram
as_string: return the ngram as a string vs list
"""
def _skip(gram):
if not filter_fn:
return False
return filter_fn(gram)
words = self.words(uncased)
ngrams = [(s, e + 1)
for s in range(len(words))
for e in range(s, min(s + n, len(words)))
if not _skip(words[s:e + 1])]
# Concatenate into strings
if as_strings:
ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]
return ngrams
def entity_groups(self):
"""Group consecutive entity tokens with the same NER tag."""
entities = self.entities()
if not entities:
return None
non_ent = self.opts.get('non_ent', 'O')
groups = []
idx = 0
while idx < len(entities):
ner_tag = entities[idx]
# Check for entity tag
if ner_tag != non_ent:
# Chomp the sequence
start = idx
while (idx < len(entities) and entities[idx] == ner_tag):
idx += 1
groups.append((self.slice(start, idx).untokenize(), ner_tag))
else:
idx += 1
return groups
class Tokenizer(object):
"""Base tokenizer class.
Tokenizers implement tokenize, which should return a Tokens class.
"""
def tokenize(self, text):
raise NotImplementedError
def shutdown(self):
pass
def __del__(self):
self.shutdown()
class SimpleTokenizer(Tokenizer):
ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'
NON_WS = r'[^\p{Z}\p{C}]'
def __init__(self, **kwargs):
"""
Args:
annotators: None or empty set (only tokenizes).
"""
self._regexp = regex.compile(
'(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS),
flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
)
if len(kwargs.get('annotators', {})) > 0:
logger.warning('%s only tokenizes! Skipping annotators: %s' %
(type(self).__name__, kwargs.get('annotators')))
self.annotators = set()
def tokenize(self, text):
data = []
matches = [m for m in self._regexp.finditer(text)]
for i in range(len(matches)):
# Get text
token = matches[i].group()
# Get whitespace
span = matches[i].span()
start_ws = span[0]
if i + 1 < len(matches):
end_ws = matches[i + 1].span()[0]
else:
end_ws = span[1]
# Format data
data.append((
token,
text[start_ws: end_ws],
span,
))
return Tokens(data, self.annotators)
def regex_match(text, pattern):
"""Test if a regex pattern is contained within a text."""
try:
pattern = re.compile(
pattern,
flags=re.IGNORECASE + re.UNICODE + re.MULTILINE,
)
except BaseException:
return False
return pattern.search(text) is not None
def _normalize(text):
return unicodedata.normalize('NFD', text)
def has_answers(text, answers, tokenizer, regex=False):
text = _normalize(text)
if regex:
for ans in answers:
ans = _normalize(ans)
if regex_match(text, ans):
return True
else:
text = tokenizer.tokenize(text).words(uncased=True)
for ans in answers:
ans = _normalize(ans)
ans = tokenizer.tokenize(ans).words(uncased=True)
for i in range(0, len(text) - len(ans) + 1):
if ans == text[i: i + len(ans)]:
return True
return False
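# Illustrative example (not part of the original module):
#   has_answers("Paris is the capital of France.", ["paris"], SimpleTokenizer())
# returns True, since matching is done on lower-cased tokens; with regex=True the
# answers are treated as regular expression patterns instead.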
def evaluate_retrieval(retrieval_file, topk, regex=False) -> dict:
tokenizer = SimpleTokenizer()
retrieval = json.load(open(retrieval_file))
accuracy = { k : [] for k in topk }
max_k = max(topk)
for qid in tqdm(list(retrieval.keys())):
answers = retrieval[qid]['answers']
contexts = retrieval[qid]['contexts']
has_ans_idx = max_k # first index in contexts that has answers
for idx, ctx in enumerate(contexts):
if idx >= max_k:
break
if 'has_answer' in ctx:
if ctx['has_answer']:
has_ans_idx = idx
break
else:
text = ctx['text'].split('\n')[1] # [0] is title, [1] is text
if has_answers(text, answers, tokenizer, regex):
has_ans_idx = idx
break
for k in topk:
accuracy[k].append(0 if has_ans_idx >= k else 1)
metrics = {}
for k in topk:
metrics['Acc{}'.format(k)] = np.mean(accuracy[k])
return metrics
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--retrieval', type=str, metavar='path',
help="Path to retrieval output file.")
parser.add_argument('--topk', type=int, nargs='+', help="topk to evaluate")
parser.add_argument('--regex', action='store_true', default=False, help="regex match")
args = parser.parse_args()
metrics = evaluate_retrieval(args.retrieval, args.topk, args.regex)
print('eval metrics: {}'.format(json.dumps(metrics, ensure_ascii=False, indent=4)))
| EXA-1-master | exa/models/unilm-master/simlm/misc/dpr/evaluate_dpr_retrieval.py |
import torch
import pytrec_eval
from typing import List, Dict, Tuple
from data_utils import ScoredDoc
from logger_config import logger
def trec_eval(qrels: Dict[str, Dict[str, int]],
predictions: Dict[str, List[ScoredDoc]],
k_values: Tuple[int] = (10, 50, 100, 200, 1000)) -> Dict[str, float]:
ndcg, _map, recall = {}, {}, {}
for k in k_values:
ndcg[f"NDCG@{k}"] = 0.0
_map[f"MAP@{k}"] = 0.0
recall[f"Recall@{k}"] = 0.0
map_string = "map_cut." + ",".join([str(k) for k in k_values])
ndcg_string = "ndcg_cut." + ",".join([str(k) for k in k_values])
recall_string = "recall." + ",".join([str(k) for k in k_values])
results: Dict[str, Dict[str, float]] = {}
for query_id, scored_docs in predictions.items():
results.update({query_id: {sd.pid: sd.score for sd in scored_docs}})
evaluator = pytrec_eval.RelevanceEvaluator(qrels, {map_string, ndcg_string, recall_string})
scores = evaluator.evaluate(results)
for query_id in scores:
for k in k_values:
ndcg[f"NDCG@{k}"] += scores[query_id]["ndcg_cut_" + str(k)]
_map[f"MAP@{k}"] += scores[query_id]["map_cut_" + str(k)]
recall[f"Recall@{k}"] += scores[query_id]["recall_" + str(k)]
def _normalize(m: dict) -> dict:
return {k: round(v / len(scores), 5) for k, v in m.items()}
ndcg = _normalize(ndcg)
_map = _normalize(_map)
recall = _normalize(recall)
all_metrics = {}
for mt in [ndcg, _map, recall]:
all_metrics.update(mt)
return all_metrics
@torch.no_grad()
def accuracy(output: torch.tensor, target: torch.tensor, topk=(1,)) -> List[float]:
"""Computes the accuracy over the k top predictions for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].contiguous().view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size).item())
return res
@torch.no_grad()
def batch_mrr(output: torch.tensor, target: torch.tensor) -> float:
assert len(output.shape) == 2
assert len(target.shape) == 1
sorted_score, sorted_indices = torch.sort(output, dim=-1, descending=True)
_, rank = torch.nonzero(sorted_indices.eq(target.unsqueeze(-1)).long(), as_tuple=True)
assert rank.shape[0] == output.shape[0]
rank = rank + 1
mrr = torch.sum(100 / rank.float()) / rank.shape[0]
return mrr.item()
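# Illustrative example (not part of the original module): for
#   output = torch.tensor([[0.1, 0.9], [0.8, 0.2]]) and target = torch.tensor([0, 0]),
# the gold labels are ranked 2nd and 1st respectively, so batch_mrr returns
# (100/2 + 100/1) / 2 = 75.0 (the reciprocal ranks are scaled by 100 here).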
def get_rel_threshold(qrels: Dict[str, Dict[str, int]]) -> int:
# For ms-marco passage ranking, score >= 1 is relevant
# for trec dl 2019 & 2020, score >= 2 is relevant
rel_labels = set()
for q_id in qrels:
for doc_id, label in qrels[q_id].items():
rel_labels.add(label)
logger.info('relevance labels: {}'.format(rel_labels))
return 2 if max(rel_labels) >= 3 else 1
def compute_mrr(qrels: Dict[str, Dict[str, int]],
predictions: Dict[str, List[ScoredDoc]],
k: int = 10) -> float:
threshold = get_rel_threshold(qrels)
mrr = 0
for qid in qrels:
scored_docs = predictions.get(qid, [])
for idx, scored_doc in enumerate(scored_docs[:k]):
if scored_doc.pid in qrels[qid] and qrels[qid][scored_doc.pid] >= threshold:
mrr += 1 / (idx + 1)
break
return round(mrr / len(qrels) * 100, 4)
| EXA-1-master | exa/models/unilm-master/simlm/src/metrics.py |
import logging
import torch
from typing import Dict
from transformers.utils.logging import enable_explicit_format
from transformers.trainer_callback import PrinterCallback
from transformers import (
AutoTokenizer,
HfArgumentParser,
EvalPrediction,
Trainer,
set_seed,
PreTrainedTokenizerFast
)
from logger_config import logger, LoggerCallback
from config import Arguments
from trainers.reranker_trainer import RerankerTrainer
from loaders import CrossEncoderDataLoader
from collators import CrossEncoderCollator
from metrics import accuracy
from models import Reranker
def _common_setup(args: Arguments):
if args.process_index > 0:
logger.setLevel(logging.WARNING)
enable_explicit_format()
set_seed(args.seed)
def _compute_metrics(eval_pred: EvalPrediction) -> Dict:
preds = eval_pred.predictions
if isinstance(preds, tuple):
preds = preds[-1]
logits = torch.tensor(preds).float()
labels = torch.tensor(eval_pred.label_ids).long()
acc = accuracy(output=logits, target=labels)[0]
return {'acc': acc}
def main():
parser = HfArgumentParser((Arguments,))
args: Arguments = parser.parse_args_into_dataclasses()[0]
_common_setup(args)
logger.info('Args={}'.format(str(args)))
tokenizer: PreTrainedTokenizerFast = AutoTokenizer.from_pretrained(args.model_name_or_path)
model: Reranker = Reranker.from_pretrained(
all_args=args,
pretrained_model_name_or_path=args.model_name_or_path,
num_labels=1)
logger.info(model)
logger.info('Vocab size: {}'.format(len(tokenizer)))
data_collator = CrossEncoderCollator(
tokenizer=tokenizer,
pad_to_multiple_of=8 if args.fp16 else None)
rerank_data_loader = CrossEncoderDataLoader(args=args, tokenizer=tokenizer)
train_dataset = rerank_data_loader.train_dataset
eval_dataset = rerank_data_loader.eval_dataset
trainer: Trainer = RerankerTrainer(
model=model,
args=args,
train_dataset=train_dataset if args.do_train else None,
eval_dataset=eval_dataset if args.do_eval else None,
data_collator=data_collator,
compute_metrics=_compute_metrics,
tokenizer=tokenizer,
)
trainer.remove_callback(PrinterCallback)
trainer.add_callback(LoggerCallback)
rerank_data_loader.trainer = trainer
if args.do_train:
train_result = trainer.train()
trainer.save_model()
metrics = train_result.metrics
metrics["train_samples"] = len(train_dataset)
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
if args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate(metric_key_prefix="eval")
metrics["eval_samples"] = len(eval_dataset)
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
return
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/simlm/src/train_cross_encoder.py |
import os
import torch
from dataclasses import dataclass, field
from typing import Optional
from transformers import TrainingArguments
from logger_config import logger
@dataclass
class Arguments(TrainingArguments):
model_name_or_path: str = field(
default='bert-base-uncased',
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
data_dir: str = field(
default=None, metadata={"help": "Path to train directory"}
)
task_type: str = field(
default='ir', metadata={"help": "task type: ir / qa"}
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a jsonlines file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input evaluation data file to evaluate the metrics on (a jsonlines file)."
},
)
train_n_passages: int = field(
default=8,
metadata={"help": "number of passages for each example (including both positive and negative passages)"}
)
share_encoder: bool = field(
default=True,
metadata={"help": "no weight sharing between qry passage encoders"}
)
use_first_positive: bool = field(
default=False,
metadata={"help": "Always use the first positive passage"}
)
use_scaled_loss: bool = field(
default=True,
metadata={"help": "Use scaled loss or not"}
)
loss_scale: float = field(
default=-1.,
metadata={"help": "loss scale, -1 will use world_size"}
)
add_pooler: bool = field(default=False)
out_dimension: int = field(
default=768,
metadata={"help": "output dimension for pooler"}
)
t: float = field(default=0.05, metadata={"help": "temperature of biencoder training"})
l2_normalize: bool = field(default=True, metadata={"help": "L2 normalize embeddings or not"})
t_warmup: bool = field(default=False, metadata={"help": "warmup temperature"})
full_contrastive_loss: bool = field(default=True, metadata={"help": "use full contrastive loss or not"})
# following arguments are used for encoding documents
do_encode: bool = field(default=False, metadata={"help": "run the encoding loop"})
encode_in_path: str = field(default=None, metadata={"help": "Path to data to encode"})
encode_save_dir: str = field(default=None, metadata={"help": "where to save the encoded embeddings"})
encode_shard_size: int = field(default=int(2 * 10**6))
encode_batch_size: int = field(default=256)
# used for index search
do_search: bool = field(default=False, metadata={"help": "run the index search loop"})
search_split: str = field(default='dev', metadata={"help": "which split to search"})
search_batch_size: int = field(default=128, metadata={"help": "query batch size for index search"})
search_topk: int = field(default=200, metadata={"help": "return topk search results"})
search_out_dir: str = field(default='', metadata={"help": "output directory for writing search results"})
# used for reranking
do_rerank: bool = field(default=False, metadata={"help": "run the reranking loop"})
rerank_max_length: int = field(default=256, metadata={"help": "max length for rerank inputs"})
rerank_in_path: str = field(default='', metadata={"help": "Path to predictions for rerank"})
rerank_out_path: str = field(default='', metadata={"help": "Path to write rerank results"})
rerank_split: str = field(default='dev', metadata={"help": "which split to rerank"})
rerank_batch_size: int = field(default=128, metadata={"help": "rerank batch size"})
rerank_depth: int = field(default=1000, metadata={"help": "rerank depth, useful for debugging purpose"})
rerank_forward_factor: int = field(
default=1,
metadata={"help": "forward n passages, then select top n/factor passages for backward"}
)
rerank_use_rdrop: bool = field(default=False, metadata={"help": "use R-Drop regularization for re-ranker"})
# used for knowledge distillation
do_kd_gen_score: bool = field(default=False, metadata={"help": "run the score generation for distillation"})
kd_gen_score_split: str = field(default='dev', metadata={
"help": "Which split to use for generation of teacher score"
})
kd_gen_score_batch_size: int = field(default=128, metadata={"help": "batch size for teacher score generation"})
kd_gen_score_n_neg: int = field(default=30, metadata={"help": "number of negatives to compute teacher scores"})
do_kd_biencoder: bool = field(default=False, metadata={"help": "knowledge distillation to biencoder"})
kd_mask_hn: bool = field(default=True, metadata={"help": "mask out hard negatives for distillation"})
kd_cont_loss_weight: float = field(default=1.0, metadata={"help": "weight for contrastive loss"})
rlm_generator_model_name: Optional[str] = field(
default='google/electra-base-generator',
metadata={"help": "generator for replace LM pre-training"}
)
rlm_freeze_generator: Optional[bool] = field(
default=True,
metadata={'help': 'freeze generator params or not'}
)
rlm_generator_mlm_weight: Optional[float] = field(
default=0.2,
metadata={'help': 'weight for generator MLM loss'}
)
all_use_mask_token: Optional[bool] = field(
default=False,
metadata={'help': 'Do not use 80:10:10 mask, use [MASK] for all places'}
)
rlm_num_eval_samples: Optional[int] = field(
default=4096,
metadata={"help": "number of evaluation samples pre-training"}
)
rlm_max_length: Optional[int] = field(
default=144,
metadata={"help": "max length for MatchLM pre-training"}
)
rlm_decoder_layers: Optional[int] = field(
default=2,
metadata={"help": "number of transformer layers for MatchLM decoder part"}
)
rlm_encoder_mask_prob: Optional[float] = field(
default=0.3,
metadata={'help': 'mask rate for encoder'}
)
rlm_decoder_mask_prob: Optional[float] = field(
default=0.5,
metadata={'help': 'mask rate for decoder'}
)
q_max_len: int = field(
default=32,
metadata={
"help": "The maximum total input sequence length after tokenization for query."
},
)
p_max_len: int = field(
default=144,
metadata={
"help": "The maximum total input sequence length after tokenization for passage."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
dry_run: Optional[bool] = field(
default=False,
metadata={'help': 'Set dry_run to True for debugging purpose'}
)
def __post_init__(self):
assert os.path.exists(self.data_dir)
assert torch.cuda.is_available(), 'Only support running on GPUs'
assert self.task_type in ['ir', 'qa']
if self.dry_run:
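# shrink every knob so a full train/eval cycle finishes quickly for debugging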
self.logging_steps = 1
self.max_train_samples = self.max_train_samples or 128
self.num_train_epochs = 1
self.per_device_train_batch_size = min(2, self.per_device_train_batch_size)
self.train_n_passages = min(4, self.train_n_passages)
self.rerank_forward_factor = 1
self.gradient_accumulation_steps = 1
self.rlm_num_eval_samples = min(256, self.rlm_num_eval_samples)
self.max_steps = 30
self.save_steps = self.eval_steps = 30
logger.warning('Dry run: set logging_steps=1')
if self.do_encode:
assert self.encode_save_dir
os.makedirs(self.encode_save_dir, exist_ok=True)
assert os.path.exists(self.encode_in_path)
if self.do_search:
assert os.path.exists(self.encode_save_dir)
assert self.search_out_dir
os.makedirs(self.search_out_dir, exist_ok=True)
if self.do_rerank:
assert os.path.exists(self.rerank_in_path)
logger.info('Rerank result will be written to {}'.format(self.rerank_out_path))
assert self.train_n_passages > 1, 'Training a re-ranker requires negative passages, not positives only'
assert self.train_n_passages % self.rerank_forward_factor == 0
if self.do_kd_gen_score:
assert os.path.exists('{}/{}.jsonl'.format(self.data_dir, self.kd_gen_score_split))
if self.do_kd_biencoder:
if self.use_scaled_loss:
assert not self.kd_mask_hn, 'Scaled loss only works without masking out hard negatives'
if torch.cuda.device_count() <= 1:
self.logging_steps = min(10, self.logging_steps)
super(Arguments, self).__post_init__()
if self.output_dir:
os.makedirs(self.output_dir, exist_ok=True)
| EXA-1-master | exa/models/unilm-master/simlm/src/config.py |
import os
import logging
from transformers.trainer_callback import TrainerCallback
def _setup_logger():
log_format = logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setFormatter(log_format)
data_dir = './data/'
os.makedirs(data_dir, exist_ok=True)
file_handler = logging.FileHandler('{}/log.txt'.format(data_dir))
file_handler.setFormatter(log_format)
logger.handlers = [console_handler, file_handler]
return logger
logger = _setup_logger()
class LoggerCallback(TrainerCallback):
def on_log(self, args, state, control, logs=None, **kwargs):
_ = logs.pop("total_flos", None)
if state.is_world_process_zero:
logger.info(logs)
| EXA-1-master | exa/models/unilm-master/simlm/src/logger_config.py |
| EXA-1-master | exa/models/unilm-master/simlm/src/__init__.py |
import logging
import numpy as np
from typing import Dict
from transformers.utils.logging import enable_explicit_format
from transformers.trainer_callback import PrinterCallback
from transformers import (
AutoTokenizer,
HfArgumentParser,
set_seed,
PreTrainedTokenizerFast,
EvalPrediction,
)
from logger_config import logger, LoggerCallback
from config import Arguments
from loaders import ReplaceLMDataloader
from collators import DataCollatorForReplaceLM
from trainers import ReplaceLMTrainer
from models import ReplaceLM
def _common_setup(args: Arguments):
if args.process_index > 0:
logger.setLevel(logging.WARNING)
enable_explicit_format()
set_seed(args.seed)
def _compute_metrics(eval_pred: EvalPrediction) -> Dict[str, float]:
preds = eval_pred.predictions
avg_enc_mlm_loss = float(np.mean(preds[0]))
avg_dec_mlm_loss = float(np.mean(preds[1]))
avg_g_mlm_loss = float(np.mean(preds[2]))
avg_replace_ratio = float(np.mean(preds[3]))
return {'avg_enc_mlm_loss': round(avg_enc_mlm_loss, 4),
'avg_dec_mlm_loss': round(avg_dec_mlm_loss, 4),
'avg_g_mlm_loss': round(avg_g_mlm_loss, 4),
'avg_replace_ratio': round(avg_replace_ratio, 4)}
def main():
parser = HfArgumentParser((Arguments,))
args: Arguments = parser.parse_args_into_dataclasses()[0]
_common_setup(args)
logger.info('Args={}'.format(str(args)))
tokenizer: PreTrainedTokenizerFast = AutoTokenizer.from_pretrained(args.model_name_or_path)
model: ReplaceLM = ReplaceLM.from_pretrained(
all_args=args, model_name_or_path=args.model_name_or_path)
logger.info(model)
logger.info('Vocab size: {}'.format(len(tokenizer)))
dataloader = ReplaceLMDataloader(args=args, tokenizer=tokenizer)
train_dataset, eval_dataset = dataloader.train_dataset, dataloader.eval_dataset
data_collator = DataCollatorForReplaceLM(
tokenizer,
pad_to_multiple_of=8 if args.fp16 else None,
args=args,
)
trainer: ReplaceLMTrainer = ReplaceLMTrainer(
model=model,
args=args,
train_dataset=train_dataset if args.do_train else None,
eval_dataset=eval_dataset if args.do_eval else None,
data_collator=data_collator,
compute_metrics=_compute_metrics,
tokenizer=tokenizer,
)
trainer.remove_callback(PrinterCallback)
trainer.add_callback(LoggerCallback)
model.trainer = trainer
if args.do_train:
train_result = trainer.train()
trainer.save_model()
metrics = train_result.metrics
metrics["train_samples"] = len(train_dataset)
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
if args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
metrics["eval_samples"] = len(eval_dataset)
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
return
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/simlm/src/train_rlm.py |
import os
import random
import tqdm
import json
from typing import Dict, List, Any
from datasets import load_dataset, Dataset
from dataclasses import dataclass, field
from logger_config import logger
from config import Arguments
from utils import save_json_to_file
@dataclass
class ScoredDoc:
qid: str
pid: str
rank: int
score: float = field(default=-1)
def load_qrels(path: str) -> Dict[str, Dict[str, int]]:
assert path.endswith('.txt')
# qid -> pid -> score
qrels = {}
for line in open(path, 'r', encoding='utf-8'):
qid, _, pid, score = line.strip().split('\t')
if qid not in qrels:
qrels[qid] = {}
qrels[qid][pid] = int(score)
logger.info('Load {} queries {} qrels from {}'.format(len(qrels), sum(len(v) for v in qrels.values()), path))
return qrels
def load_queries(path: str, task_type: str = 'ir') -> Dict[str, str]:
assert path.endswith('.tsv')
if task_type == 'qa':
qid_to_query = load_query_answers(path)
qid_to_query = {k: v['query'] for k, v in qid_to_query.items()}
elif task_type == 'ir':
qid_to_query = {}
for line in open(path, 'r', encoding='utf-8'):
qid, query = line.strip().split('\t')
qid_to_query[qid] = query
else:
raise ValueError('Unknown task type: {}'.format(task_type))
logger.info('Load {} queries from {}'.format(len(qid_to_query), path))
return qid_to_query
def normalize_qa_text(text: str) -> str:
# TriviaQA has some weird formats
# For example: """What breakfast food gets its name from the German word for """"stirrup""""?"""
while text.startswith('"') and text.endswith('"'):
text = text[1:-1].replace('""', '"')
return text
def get_question_key(question: str) -> str:
# For QA dataset, we'll use normalized question strings as dict key
return question
def load_query_answers(path: str) -> Dict[str, Dict[str, Any]]:
assert path.endswith('.tsv')
qid_to_query = {}
for line in open(path, 'r', encoding='utf-8'):
query, answers = line.strip().split('\t')
query = normalize_qa_text(query)
answers = normalize_qa_text(answers)
qid = get_question_key(query)
if qid in qid_to_query:
logger.warning('Duplicate question: {} vs {}'.format(query, qid_to_query[qid]['query']))
continue
qid_to_query[qid] = {}
qid_to_query[qid]['query'] = query
qid_to_query[qid]['answers'] = list(eval(answers))
logger.info('Load {} queries from {}'.format(len(qid_to_query), path))
return qid_to_query
def load_corpus(path: str) -> Dataset:
assert path.endswith('.jsonl') or path.endswith('.jsonl.gz')
# two fields: id, contents
corpus = load_dataset('json', data_files=path)['train']
logger.info('Load {} documents from {} with columns {}'.format(len(corpus), path, corpus.column_names))
logger.info('A random document: {}'.format(random.choice(corpus)))
return corpus
def load_msmarco_predictions(path: str) -> Dict[str, List[ScoredDoc]]:
assert path.endswith('.txt')
qid_to_scored_doc = {}
for line in tqdm.tqdm(open(path, 'r', encoding='utf-8'), desc='load prediction', mininterval=3):
fs = line.strip().split('\t')
qid, pid, rank = fs[:3]
rank = int(rank)
score = round(1 / rank, 4) if len(fs) == 3 else float(fs[3])
if qid not in qid_to_scored_doc:
qid_to_scored_doc[qid] = []
scored_doc = ScoredDoc(qid=qid, pid=pid, rank=rank, score=score)
qid_to_scored_doc[qid].append(scored_doc)
qid_to_scored_doc = {qid: sorted(scored_docs, key=lambda sd: sd.rank)
for qid, scored_docs in qid_to_scored_doc.items()}
logger.info('Load {} query predictions from {}'.format(len(qid_to_scored_doc), path))
return qid_to_scored_doc
def save_preds_to_msmarco_format(preds: Dict[str, List[ScoredDoc]], out_path: str):
with open(out_path, 'w', encoding='utf-8') as writer:
for qid in preds:
for idx, scored_doc in enumerate(preds[qid]):
writer.write('{}\t{}\t{}\t{}\n'.format(qid, scored_doc.pid, idx + 1, round(scored_doc.score, 3)))
logger.info('Successfully saved to {}'.format(out_path))
def save_to_readable_format(in_path: str, corpus: Dataset):
out_path = '{}/readable_{}'.format(os.path.dirname(in_path), os.path.basename(in_path))
dataset: Dataset = load_dataset('json', data_files=in_path)['train']
max_to_keep = 5
def _create_readable_field(samples: Dict[str, List]) -> List:
readable_ex = []
for idx in range(min(len(samples['doc_id']), max_to_keep)):
doc_id = samples['doc_id'][idx]
readable_ex.append({'doc_id': doc_id,
'title': corpus[int(doc_id)].get('title', ''),
'contents': corpus[int(doc_id)]['contents'],
'score': samples['score'][idx]})
return readable_ex
def _mp_func(ex: Dict) -> Dict:
ex['positives'] = _create_readable_field(ex['positives'])
ex['negatives'] = _create_readable_field(ex['negatives'])
return ex
dataset = dataset.map(_mp_func, num_proc=8)
dataset.to_json(out_path, force_ascii=False, lines=False, indent=4)
logger.info('Done convert {} to readable format in {}'.format(in_path, out_path))
def get_rerank_shard_path(args: Arguments, worker_idx: int) -> str:
return '{}_shard_{}'.format(args.rerank_out_path, worker_idx)
def merge_rerank_predictions(args: Arguments, gpu_count: int):
from metrics import trec_eval, compute_mrr
qid_to_scored_doc: Dict[str, List[ScoredDoc]] = {}
for worker_idx in range(gpu_count):
path = get_rerank_shard_path(args, worker_idx)
for line in tqdm.tqdm(open(path, 'r', encoding='utf-8'), 'merge results', mininterval=3):
fs = line.strip().split('\t')
qid, pid, _, score = fs
score = float(score)
if qid not in qid_to_scored_doc:
qid_to_scored_doc[qid] = []
scored_doc = ScoredDoc(qid=qid, pid=pid, rank=-1, score=score)
qid_to_scored_doc[qid].append(scored_doc)
qid_to_scored_doc = {k: sorted(v, key=lambda sd: sd.score, reverse=True) for k, v in qid_to_scored_doc.items()}
ori_preds = load_msmarco_predictions(path=args.rerank_in_path)
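# passages below the rerank depth are appended after the reranked list with strictly decreasing synthetic scores, so their original relative order is preserved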
for query_id in list(qid_to_scored_doc.keys()):
remain_scored_docs = ori_preds[query_id][args.rerank_depth:]
for idx, sd in enumerate(remain_scored_docs):
# make sure the order is not broken
sd.score = qid_to_scored_doc[query_id][-1].score - idx - 1
qid_to_scored_doc[query_id] += remain_scored_docs
assert len(set([sd.pid for sd in qid_to_scored_doc[query_id]])) == len(qid_to_scored_doc[query_id])
save_preds_to_msmarco_format(qid_to_scored_doc, out_path=args.rerank_out_path)
path_qrels = '{}/{}_qrels.txt'.format(args.data_dir, args.rerank_split)
if os.path.exists(path_qrels):
qrels = load_qrels(path=path_qrels)
all_metrics = trec_eval(qrels=qrels, predictions=qid_to_scored_doc)
all_metrics['mrr'] = compute_mrr(qrels=qrels, predictions=qid_to_scored_doc)
logger.info('{} trec metrics = {}'.format(args.rerank_split, json.dumps(all_metrics, ensure_ascii=False, indent=4)))
metrics_out_path = '{}/metrics_rerank_{}.json'.format(os.path.dirname(args.rerank_out_path), args.rerank_split)
save_json_to_file(all_metrics, metrics_out_path)
else:
logger.warning('No qrels found for {}'.format(args.rerank_split))
# cleanup some intermediate results
for worker_idx in range(gpu_count):
path = get_rerank_shard_path(args, worker_idx)
os.remove(path)
if __name__ == '__main__':
load_qrels('./data/msmarco/dev_qrels.txt')
load_queries('./data/msmarco/dev_queries.tsv')
corpus = load_corpus('./data/msmarco/passages.jsonl.gz')
preds = load_msmarco_predictions('./data/bm25.msmarco.txt')
| EXA-1-master | exa/models/unilm-master/simlm/src/data_utils.py |
import json
import torch
import torch.distributed as dist
from typing import List, Union, Optional, Tuple, Mapping, Dict
def save_json_to_file(objects: Union[List, dict], path: str, line_by_line: bool = False):
if line_by_line:
assert isinstance(objects, list), 'Only list can be saved in line by line format'
with open(path, 'w', encoding='utf-8') as writer:
if not line_by_line:
json.dump(objects, writer, ensure_ascii=False, indent=4, separators=(',', ':'))
else:
for obj in objects:
writer.write(json.dumps(obj, ensure_ascii=False, separators=(',', ':')))
writer.write('\n')
def move_to_cuda(sample):
if len(sample) == 0:
return {}
def _move_to_cuda(maybe_tensor):
if torch.is_tensor(maybe_tensor):
return maybe_tensor.cuda(non_blocking=True)
elif isinstance(maybe_tensor, dict):
return {key: _move_to_cuda(value) for key, value in maybe_tensor.items()}
elif isinstance(maybe_tensor, list):
return [_move_to_cuda(x) for x in maybe_tensor]
elif isinstance(maybe_tensor, tuple):
return tuple([_move_to_cuda(x) for x in maybe_tensor])
elif isinstance(maybe_tensor, Mapping):
return type(maybe_tensor)({k: _move_to_cuda(v) for k, v in maybe_tensor.items()})
else:
return maybe_tensor
return _move_to_cuda(sample)
def dist_gather_tensor(t: Optional[torch.Tensor]) -> Optional[torch.Tensor]:
if t is None:
return None
t = t.contiguous()
all_tensors = [torch.empty_like(t) for _ in range(dist.get_world_size())]
dist.all_gather(all_tensors, t)
all_tensors[dist.get_rank()] = t
all_tensors = torch.cat(all_tensors, dim=0)
return all_tensors
@torch.no_grad()
def select_grouped_indices(scores: torch.Tensor,
group_size: int,
start: int = 0) -> torch.Tensor:
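# for each query row i, build the column indices of its own passage group inside the flattened score matrix: [start + i * group_size, start + (i + 1) * group_size)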
assert len(scores.shape) == 2
batch_size = scores.shape[0]
assert batch_size * group_size <= scores.shape[1]
indices = torch.arange(0, group_size, dtype=torch.long)
indices = indices.repeat(batch_size, 1)
indices += torch.arange(0, batch_size, dtype=torch.long).unsqueeze(-1) * group_size
indices += start
return indices.to(scores.device)
def full_contrastive_scores_and_labels(
query: torch.Tensor,
key: torch.Tensor,
use_all_pairs: bool = True) -> Tuple[torch.Tensor, torch.Tensor]:
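# in-batch contrastive scores: query-passage (qk) scores always; optionally extended with passage-query (kq), query-query (qq) and passage-passage (kk) blocks whose self-similarity diagonals are masked with -inf
# labels point at each query's positive passage column (first passage of its group)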
assert key.shape[0] % query.shape[0] == 0, '{} % {} > 0'.format(key.shape[0], query.shape[0])
train_n_passages = key.shape[0] // query.shape[0]
labels = torch.arange(0, query.shape[0], dtype=torch.long, device=query.device)
labels = labels * train_n_passages
# batch_size x (batch_size x n_psg)
qk = torch.mm(query, key.t())
if not use_all_pairs:
return qk, labels
# batch_size x dim
sliced_key = key.index_select(dim=0, index=labels)
assert query.shape[0] == sliced_key.shape[0]
# batch_size x batch_size
kq = torch.mm(sliced_key, query.t())
kq.fill_diagonal_(float('-inf'))
qq = torch.mm(query, query.t())
qq.fill_diagonal_(float('-inf'))
kk = torch.mm(sliced_key, sliced_key.t())
kk.fill_diagonal_(float('-inf'))
scores = torch.cat([qk, kq, qq, kk], dim=-1)
return scores, labels
def slice_batch_dict(batch_dict: Dict[str, torch.Tensor], prefix: str) -> dict:
return {k[len(prefix):]: v for k, v in batch_dict.items() if k.startswith(prefix)}
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name: str, round_digits: int = 3):
self.name = name
self.round_digits = round_digits
self.reset()
def reset(self):
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
return '{}: {}'.format(self.name, round(self.avg, self.round_digits))
if __name__ == '__main__':
query = torch.randn(4, 16)
key = torch.randn(4 * 3, 16)
scores, labels = full_contrastive_scores_and_labels(query, key)
print(scores.shape)
print(labels)
| EXA-1-master | exa/models/unilm-master/simlm/src/utils.py |
import logging
import torch
from typing import Dict
from functools import partial
from transformers.utils.logging import enable_explicit_format
from transformers.trainer_callback import PrinterCallback
from transformers import (
AutoTokenizer,
HfArgumentParser,
EvalPrediction,
Trainer,
set_seed,
PreTrainedTokenizerFast
)
from logger_config import logger, LoggerCallback
from config import Arguments
from trainers import BiencoderTrainer
from loaders import RetrievalDataLoader
from collators import BiencoderCollator
from metrics import accuracy, batch_mrr
from models import BiencoderModel
def _common_setup(args: Arguments):
if args.process_index > 0:
logger.setLevel(logging.WARNING)
enable_explicit_format()
set_seed(args.seed)
def _compute_metrics(args: Arguments, eval_pred: EvalPrediction) -> Dict[str, float]:
# field consistent with BiencoderOutput
preds = eval_pred.predictions
scores = torch.tensor(preds[-1]).float()
labels = torch.arange(0, scores.shape[0], dtype=torch.long) * args.train_n_passages
labels = labels % scores.shape[1]
topk_metrics = accuracy(output=scores, target=labels, topk=(1, 3))
mrr = batch_mrr(output=scores, target=labels)
return {'mrr': mrr, 'acc1': topk_metrics[0], 'acc3': topk_metrics[1]}
def main():
parser = HfArgumentParser((Arguments,))
args: Arguments = parser.parse_args_into_dataclasses()[0]
_common_setup(args)
logger.info('Args={}'.format(str(args)))
tokenizer: PreTrainedTokenizerFast = AutoTokenizer.from_pretrained(args.model_name_or_path)
model: BiencoderModel = BiencoderModel.build(args=args)
logger.info(model)
logger.info('Vocab size: {}'.format(len(tokenizer)))
data_collator = BiencoderCollator(
tokenizer=tokenizer,
pad_to_multiple_of=8 if args.fp16 else None)
retrieval_data_loader = RetrievalDataLoader(args=args, tokenizer=tokenizer)
train_dataset = retrieval_data_loader.train_dataset
eval_dataset = retrieval_data_loader.eval_dataset
trainer: Trainer = BiencoderTrainer(
model=model,
args=args,
train_dataset=train_dataset if args.do_train else None,
eval_dataset=eval_dataset if args.do_eval else None,
data_collator=data_collator,
compute_metrics=partial(_compute_metrics, args),
tokenizer=tokenizer,
)
trainer.remove_callback(PrinterCallback)
trainer.add_callback(LoggerCallback)
retrieval_data_loader.trainer = trainer
model.trainer = trainer
if args.do_train:
train_result = trainer.train()
trainer.save_model()
metrics = train_result.metrics
metrics["train_samples"] = len(train_dataset)
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
if args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate(metric_key_prefix="eval")
metrics["eval_samples"] = len(eval_dataset)
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
return
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/simlm/src/train_biencoder.py |
import os
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from dataclasses import dataclass
from typing import Optional, Dict, Tuple
from torch import Tensor
from transformers import (
AutoModel,
PreTrainedModel,
)
from transformers.modeling_outputs import ModelOutput
from config import Arguments
from logger_config import logger
from utils import dist_gather_tensor, select_grouped_indices, full_contrastive_scores_and_labels
@dataclass
class BiencoderOutput(ModelOutput):
q_reps: Optional[Tensor] = None
p_reps: Optional[Tensor] = None
loss: Optional[Tensor] = None
labels: Optional[Tensor] = None
scores: Optional[Tensor] = None
class BiencoderModel(nn.Module):
def __init__(self, args: Arguments,
lm_q: PreTrainedModel,
lm_p: PreTrainedModel):
super().__init__()
self.lm_q = lm_q
self.lm_p = lm_p
self.cross_entropy = nn.CrossEntropyLoss(reduction='mean')
self.kl_loss_fn = torch.nn.KLDivLoss(reduction="batchmean", log_target=True)
self.args = args
self.pooler = nn.Linear(self.lm_q.config.hidden_size, args.out_dimension) if args.add_pooler else nn.Identity()
from trainers import BiencoderTrainer
self.trainer: Optional[BiencoderTrainer] = None
def forward(self, query: Dict[str, Tensor] = None,
passage: Dict[str, Tensor] = None):
assert self.args.process_index >= 0
scores, labels, q_reps, p_reps, all_scores, all_labels = self._compute_scores(query, passage)
start = self.args.process_index * q_reps.shape[0]
group_indices = select_grouped_indices(scores=scores,
group_size=self.args.train_n_passages,
start=start * self.args.train_n_passages)
if not self.args.do_kd_biencoder:
# training biencoder from scratch
if self.args.use_scaled_loss:
loss = self.cross_entropy(all_scores, all_labels)
loss *= self.args.world_size if self.args.loss_scale <= 0 else self.args.loss_scale
else:
loss = self.cross_entropy(scores, labels)
else:
# training biencoder with kd
# batch_size x train_n_passages
group_scores = torch.gather(input=scores, dim=1, index=group_indices)
assert group_scores.shape[1] == self.args.train_n_passages
group_log_scores = torch.log_softmax(group_scores, dim=-1)
kd_log_target = torch.log_softmax(query['kd_labels'], dim=-1)
kd_loss = self.kl_loss_fn(input=group_log_scores, target=kd_log_target)
# (optionally) mask out hard negatives
if self.training and self.args.kd_mask_hn:
scores = torch.scatter(input=scores, dim=1, index=group_indices[:, 1:], value=float('-inf'))
if self.args.use_scaled_loss:
ce_loss = self.cross_entropy(all_scores, all_labels)
ce_loss *= self.args.world_size if self.args.loss_scale <= 0 else self.args.loss_scale
else:
ce_loss = self.cross_entropy(scores, labels)
loss = self.args.kd_cont_loss_weight * ce_loss + kd_loss
total_n_psg = self.args.world_size * q_reps.shape[0] * self.args.train_n_passages
return BiencoderOutput(loss=loss, q_reps=q_reps, p_reps=p_reps,
labels=labels.contiguous(),
scores=scores[:, :total_n_psg].contiguous())
def _compute_scores(self, query: Dict[str, Tensor] = None,
passage: Dict[str, Tensor] = None) -> Tuple:
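# encode queries and passages locally, gather embeddings from all ranks, compute the full contrastive score matrix (with temperature scaling when embeddings are L2-normalized), then slice out the rows belonging to this rank's queries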
q_reps = self._encode(self.lm_q, query)
p_reps = self._encode(self.lm_p, passage)
all_q_reps = dist_gather_tensor(q_reps)
all_p_reps = dist_gather_tensor(p_reps)
assert all_p_reps.shape[0] == self.args.world_size * q_reps.shape[0] * self.args.train_n_passages
all_scores, all_labels = full_contrastive_scores_and_labels(
query=all_q_reps, key=all_p_reps,
use_all_pairs=self.args.full_contrastive_loss)
if self.args.l2_normalize:
if self.args.t_warmup:
scale = 1 / self.args.t * min(1.0, self.trainer.state.global_step / self.args.warmup_steps)
scale = max(1.0, scale)
else:
scale = 1 / self.args.t
all_scores = all_scores * scale
start = self.args.process_index * q_reps.shape[0]
local_query_indices = torch.arange(start, start + q_reps.shape[0], dtype=torch.long).to(q_reps.device)
# batch_size x (world_size x batch_size x train_n_passages)
scores = all_scores.index_select(dim=0, index=local_query_indices)
labels = all_labels.index_select(dim=0, index=local_query_indices)
return scores, labels, q_reps, p_reps, all_scores, all_labels
def _encode(self, encoder: PreTrainedModel, input_dict: dict) -> Optional[torch.Tensor]:
if not input_dict:
return None
outputs = encoder(**{k: v for k, v in input_dict.items() if k not in ['kd_labels']}, return_dict=True)
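# CLS pooling: take the first token's hidden state, optionally project it with the linear pooler and L2-normalize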
hidden_state = outputs.last_hidden_state
embeds = hidden_state[:, 0]
embeds = self.pooler(embeds)
if self.args.l2_normalize:
embeds = F.normalize(embeds, dim=-1)
return embeds.contiguous()
@classmethod
def build(cls, args: Arguments, **hf_kwargs):
# load local
if os.path.isdir(args.model_name_or_path):
if not args.share_encoder:
_qry_model_path = os.path.join(args.model_name_or_path, 'query_model')
_psg_model_path = os.path.join(args.model_name_or_path, 'passage_model')
if not os.path.exists(_qry_model_path):
_qry_model_path = args.model_name_or_path
_psg_model_path = args.model_name_or_path
logger.info(f'loading query model weight from {_qry_model_path}')
lm_q = AutoModel.from_pretrained(_qry_model_path, **hf_kwargs)
logger.info(f'loading passage model weight from {_psg_model_path}')
lm_p = AutoModel.from_pretrained(_psg_model_path, **hf_kwargs)
else:
logger.info(f'loading shared model weight from {args.model_name_or_path}')
lm_q = AutoModel.from_pretrained(args.model_name_or_path, **hf_kwargs)
lm_p = lm_q
# load pre-trained
else:
lm_q = AutoModel.from_pretrained(args.model_name_or_path, **hf_kwargs)
lm_p = copy.deepcopy(lm_q) if not args.share_encoder else lm_q
model = cls(args=args, lm_q=lm_q, lm_p=lm_p)
return model
def save(self, output_dir: str):
if not self.args.share_encoder:
os.makedirs(os.path.join(output_dir, 'query_model'), exist_ok=True)
os.makedirs(os.path.join(output_dir, 'passage_model'), exist_ok=True)
self.lm_q.save_pretrained(os.path.join(output_dir, 'query_model'))
self.lm_p.save_pretrained(os.path.join(output_dir, 'passage_model'))
else:
self.lm_q.save_pretrained(output_dir)
if self.args.add_pooler:
torch.save(self.pooler.state_dict(), os.path.join(output_dir, 'pooler.pt'))
class BiencoderModelForInference(BiencoderModel):
def __init__(self, args: Arguments,
lm_q: PreTrainedModel,
lm_p: PreTrainedModel):
nn.Module.__init__(self)
self.args = args
self.lm_q = lm_q
self.lm_p = lm_p
self.pooler = nn.Linear(self.lm_q.config.hidden_size, args.out_dimension) if args.add_pooler else nn.Identity()
@torch.no_grad()
def forward(self, query: Dict[str, Tensor] = None,
passage: Dict[str, Tensor] = None):
q_reps = self._encode(self.lm_q, query)
p_reps = self._encode(self.lm_p, passage)
return BiencoderOutput(q_reps=q_reps, p_reps=p_reps)
@classmethod
def build(cls, args: Arguments, **hf_kwargs):
model_name_or_path = args.model_name_or_path
# load local
if os.path.isdir(model_name_or_path):
_qry_model_path = os.path.join(model_name_or_path, 'query_model')
_psg_model_path = os.path.join(model_name_or_path, 'passage_model')
if os.path.exists(_qry_model_path):
logger.info(f'found separate weight for query/passage encoders')
logger.info(f'loading query model weight from {_qry_model_path}')
lm_q = AutoModel.from_pretrained(_qry_model_path, **hf_kwargs)
logger.info(f'loading passage model weight from {_psg_model_path}')
lm_p = AutoModel.from_pretrained(_psg_model_path, **hf_kwargs)
else:
logger.info(f'try loading tied weight')
logger.info(f'loading model weight from {model_name_or_path}')
lm_q = AutoModel.from_pretrained(model_name_or_path, **hf_kwargs)
lm_p = lm_q
else:
logger.info(f'try loading tied weight {model_name_or_path}')
lm_q = AutoModel.from_pretrained(model_name_or_path, **hf_kwargs)
lm_p = lm_q
model = cls(args=args, lm_q=lm_q, lm_p=lm_p)
pooler_path = os.path.join(args.model_name_or_path, 'pooler.pt')
if os.path.exists(pooler_path):
logger.info('loading pooler weights from local files')
state_dict = torch.load(pooler_path, map_location="cpu")
model.pooler.load_state_dict(state_dict)
else:
assert not args.add_pooler
logger.info('No pooler will be loaded')
return model
| EXA-1-master | exa/models/unilm-master/simlm/src/models/biencoder_model.py |
import copy
import os
import torch
import torch.nn as nn
from contextlib import nullcontext
from torch import Tensor
from torch.distributions import Categorical
from typing import Dict, Optional, Tuple
from dataclasses import dataclass
from transformers import AutoModelForMaskedLM, ElectraModel
from transformers.modeling_outputs import MaskedLMOutput, ModelOutput
from transformers.models.bert import BertForMaskedLM
from logger_config import logger
from config import Arguments
from utils import slice_batch_dict
@dataclass
class ReplaceLMOutput(ModelOutput):
loss: Optional[Tensor] = None
encoder_mlm_loss: Optional[Tensor] = None
decoder_mlm_loss: Optional[Tensor] = None
g_mlm_loss: Optional[Tensor] = None
replace_ratio: Optional[Tensor] = None
class ReplaceLM(nn.Module):
def __init__(self, args: Arguments,
bert: BertForMaskedLM):
super(ReplaceLM, self).__init__()
self.encoder = bert
self.decoder = copy.deepcopy(self.encoder.bert.encoder.layer[-args.rlm_decoder_layers:])
self.cross_entropy = nn.CrossEntropyLoss(reduction='mean')
self.generator: ElectraModel = AutoModelForMaskedLM.from_pretrained(args.rlm_generator_model_name)
if args.rlm_freeze_generator:
self.generator.eval()
self.generator.requires_grad_(False)
self.args = args
from trainers.rlm_trainer import ReplaceLMTrainer
self.trainer: Optional[ReplaceLMTrainer] = None
def forward(self, model_input: Dict[str, torch.Tensor]) -> ReplaceLMOutput:
enc_prefix, dec_prefix = 'enc_', 'dec_'
encoder_inputs = slice_batch_dict(model_input, enc_prefix)
decoder_inputs = slice_batch_dict(model_input, dec_prefix)
labels = model_input['labels']
enc_sampled_input_ids, g_mlm_loss = self._replace_tokens(encoder_inputs)
if self.args.rlm_freeze_generator:
g_mlm_loss = torch.tensor(0, dtype=torch.float, device=g_mlm_loss.device)
dec_sampled_input_ids, _ = self._replace_tokens(decoder_inputs, no_grad=True)
encoder_inputs['input_ids'] = enc_sampled_input_ids
decoder_inputs['input_ids'] = dec_sampled_input_ids
# use the un-masked version of labels
encoder_inputs['labels'] = labels
decoder_inputs['labels'] = labels
is_replaced = (encoder_inputs['input_ids'] != labels) & (labels >= 0)
replace_cnt = is_replaced.long().sum().item()
total_cnt = (encoder_inputs['attention_mask'] == 1).long().sum().item()
replace_ratio = torch.tensor(replace_cnt / total_cnt, device=g_mlm_loss.device)
encoder_out: MaskedLMOutput = self.encoder(
**encoder_inputs,
output_hidden_states=True,
return_dict=True)
# batch_size x 1 x hidden_dim
cls_hidden = encoder_out.hidden_states[-1][:, :1]
# batch_size x seq_length x embed_dim
dec_inputs_embeds = self.encoder.bert.embeddings(decoder_inputs['input_ids'])
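# bottleneck: the decoder input is the encoder's [CLS] vector at position 0 plus the (typically more heavily masked) decoder token embeddings, so passage-level information must flow through [CLS]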
hiddens = torch.cat([cls_hidden, dec_inputs_embeds[:, 1:]], dim=1)
attention_mask = self.encoder.get_extended_attention_mask(
encoder_inputs['attention_mask'],
encoder_inputs['attention_mask'].shape,
encoder_inputs['attention_mask'].device
)
for layer in self.decoder:
layer_out = layer(hiddens, attention_mask)
hiddens = layer_out[0]
decoder_mlm_loss = self.mlm_loss(hiddens, labels)
loss = decoder_mlm_loss + encoder_out.loss + g_mlm_loss * self.args.rlm_generator_mlm_weight
return ReplaceLMOutput(loss=loss,
encoder_mlm_loss=encoder_out.loss.detach(),
decoder_mlm_loss=decoder_mlm_loss.detach(),
g_mlm_loss=g_mlm_loss.detach(),
replace_ratio=replace_ratio)
def _replace_tokens(self, batch_dict: Dict[str, torch.Tensor],
no_grad: bool = False) -> Tuple:
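# ELECTRA-style replacement: run the generator MLM on the masked input and sample replacement tokens at the masked positions; unmasked positions keep their original ids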
with torch.no_grad() if self.args.rlm_freeze_generator or no_grad else nullcontext():
outputs: MaskedLMOutput = self.generator(
**batch_dict,
return_dict=True)
with torch.no_grad():
sampled_input_ids = Categorical(logits=outputs.logits).sample()
is_mask = (batch_dict['labels'] >= 0).long()
sampled_input_ids = batch_dict['input_ids'] * (1 - is_mask) + sampled_input_ids * is_mask
return sampled_input_ids.long(), outputs.loss
def mlm_loss(self, hiddens: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
pred_scores = self.encoder.cls(hiddens)
mlm_loss = self.cross_entropy(
pred_scores.view(-1, self.encoder.config.vocab_size),
labels.view(-1))
return mlm_loss
@classmethod
def from_pretrained(cls, all_args: Arguments,
model_name_or_path: str, *args, **kwargs):
hf_model = AutoModelForMaskedLM.from_pretrained(model_name_or_path, *args, **kwargs)
model = cls(all_args, hf_model)
decoder_save_path = os.path.join(model_name_or_path, 'decoder.pt')
if os.path.exists(decoder_save_path):
logger.info('loading extra weights from local files')
state_dict = torch.load(decoder_save_path, map_location="cpu")
model.decoder.load_state_dict(state_dict)
return model
def save_pretrained(self, output_dir: str):
self.encoder.save_pretrained(output_dir)
torch.save(self.decoder.state_dict(), os.path.join(output_dir, 'decoder.pt'))
| EXA-1-master | exa/models/unilm-master/simlm/src/models/rlm.py |
from .biencoder_model import BiencoderModel, BiencoderModelForInference, BiencoderOutput
from .cross_encoder_model import Reranker, RerankerForInference
from .rlm import ReplaceLM, ReplaceLMOutput
| EXA-1-master | exa/models/unilm-master/simlm/src/models/__init__.py |
import torch
import torch.nn as nn
from typing import Optional, Dict
from transformers import (
PreTrainedModel,
AutoModelForSequenceClassification
)
from transformers.modeling_outputs import SequenceClassifierOutput
from config import Arguments
class Reranker(nn.Module):
def __init__(self, hf_model: PreTrainedModel, args: Arguments):
super().__init__()
self.hf_model = hf_model
self.args = args
self.cross_entropy = nn.CrossEntropyLoss(reduction='mean')
self.kl_loss_fn = torch.nn.KLDivLoss(reduction="batchmean", log_target=True)
def forward(self, batch: Dict[str, torch.Tensor]) -> SequenceClassifierOutput:
input_batch_dict = {k: v for k, v in batch.items() if k != 'labels'}
if self.args.rerank_forward_factor > 1:
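# two-stage forward: first score all train_n_passages candidates without gradients, always keep the positive at index 0, then run the gradient-enabled forward only on the top n/factor passages per query to save memory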
assert torch.sum(batch['labels']).long().item() == 0
assert all(len(v.shape) == 2 for v in input_batch_dict.values())
is_train = self.hf_model.training
self.hf_model.eval()
with torch.no_grad():
outputs: SequenceClassifierOutput = self.hf_model(**input_batch_dict, return_dict=True)
outputs.logits = outputs.logits.view(-1, self.args.train_n_passages)
# make sure the target passage is not masked out
outputs.logits[:, 0].fill_(float('inf'))
k = self.args.train_n_passages // self.args.rerank_forward_factor
_, topk_indices = torch.topk(outputs.logits, k=k, dim=-1, largest=True)
topk_indices += self.args.train_n_passages * torch.arange(0, topk_indices.shape[0],
dtype=torch.long,
device=topk_indices.device).unsqueeze(-1)
topk_indices = topk_indices.view(-1)
input_batch_dict = {k: v.index_select(dim=0, index=topk_indices) for k, v in input_batch_dict.items()}
self.hf_model.train(is_train)
n_psg_per_query = self.args.train_n_passages // self.args.rerank_forward_factor
if self.args.rerank_use_rdrop and self.training:
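# R-Drop: duplicate every example so it is scored twice with different dropout masks; a symmetric KL term below pulls the two predictions together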
input_batch_dict = {k: torch.cat([v, v], dim=0) for k, v in input_batch_dict.items()}
outputs: SequenceClassifierOutput = self.hf_model(**input_batch_dict, return_dict=True)
if self.args.rerank_use_rdrop and self.training:
logits = outputs.logits.view(2, -1, n_psg_per_query)
outputs.logits = logits[0, :, :].contiguous()
log_prob = torch.log_softmax(logits, dim=2)
log_prob1, log_prob2 = log_prob[0, :, :], log_prob[1, :, :]
rdrop_loss = 0.5 * (self.kl_loss_fn(log_prob1, log_prob2) + self.kl_loss_fn(log_prob2, log_prob1))
ce_loss = 0.5 * (self.cross_entropy(log_prob1, batch['labels'])
+ self.cross_entropy(log_prob2, batch['labels']))
outputs.loss = rdrop_loss + ce_loss
else:
outputs.logits = outputs.logits.view(-1, n_psg_per_query)
loss = self.cross_entropy(outputs.logits, batch['labels'])
outputs.loss = loss
return outputs
@classmethod
def from_pretrained(cls, all_args: Arguments, *args, **kwargs):
hf_model = AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)
return cls(hf_model, all_args)
def save_pretrained(self, output_dir: str):
self.hf_model.save_pretrained(output_dir)
class RerankerForInference(nn.Module):
def __init__(self, hf_model: Optional[PreTrainedModel] = None):
super().__init__()
self.hf_model = hf_model
self.hf_model.eval()
@torch.no_grad()
def forward(self, batch) -> SequenceClassifierOutput:
return self.hf_model(**batch)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: str):
hf_model = AutoModelForSequenceClassification.from_pretrained(pretrained_model_name_or_path)
return cls(hf_model)
| EXA-1-master | exa/models/unilm-master/simlm/src/models/cross_encoder_model.py |
from .biencoder_trainer import BiencoderTrainer
from .reranker_trainer import RerankerTrainer
from .rlm_trainer import ReplaceLMTrainer
| EXA-1-master | exa/models/unilm-master/simlm/src/trainers/__init__.py |
import os
from typing import Optional
from transformers.trainer import Trainer
from logger_config import logger
from models import ReplaceLM, ReplaceLMOutput
from utils import AverageMeter
class ReplaceLMTrainer(Trainer):
def __init__(self, *pargs, **kwargs):
super(ReplaceLMTrainer, self).__init__(*pargs, **kwargs)
self.model: ReplaceLM
self.enc_mlm_loss = AverageMeter('enc_mlm_loss', round_digits=3)
self.dec_mlm_loss = AverageMeter('dec_mlm_loss', round_digits=3)
self.g_mlm_loss = AverageMeter('g_mlm_loss', round_digits=3)
self.replace_ratio = AverageMeter('replace_ratio', round_digits=3)
self.last_epoch = 0
def _save(self, output_dir: Optional[str] = None, state_dict=None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to {}".format(output_dir))
self.model.save_pretrained(output_dir)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
def compute_loss(self, model, inputs, return_outputs=False):
outputs: ReplaceLMOutput = model(model_input=inputs)
loss = outputs.loss
if self.model.training:
self.enc_mlm_loss.update(outputs.encoder_mlm_loss.item())
self.dec_mlm_loss.update(outputs.decoder_mlm_loss.item())
self.g_mlm_loss.update(outputs.g_mlm_loss.item())
self.replace_ratio.update(outputs.replace_ratio.item())
if self.state.global_step > 0 and self.state.global_step % self.args.logging_steps == 0:
log_info = ', '.join(map(str, [self.enc_mlm_loss, self.dec_mlm_loss, self.g_mlm_loss, self.replace_ratio]))
logger.info('step: {}, {}'.format(self.state.global_step, log_info))
self._reset_meters_if_needed()
return (loss, outputs) if return_outputs else loss
def _reset_meters_if_needed(self):
if int(self.state.epoch) != self.last_epoch:
self.last_epoch = int(self.state.epoch)
self.enc_mlm_loss.reset()
self.dec_mlm_loss.reset()
self.g_mlm_loss.reset()
self.replace_ratio.reset()
| EXA-1-master | exa/models/unilm-master/simlm/src/trainers/rlm_trainer.py |
import os
from typing import Optional, Union
from transformers.trainer import Trainer
from transformers.modeling_outputs import SequenceClassifierOutput
from logger_config import logger
from metrics import accuracy
from utils import AverageMeter
class RerankerTrainer(Trainer):
def __init__(self, *pargs, **kwargs):
super(RerankerTrainer, self).__init__(*pargs, **kwargs)
self.acc_meter = AverageMeter('acc', round_digits=2)
self.last_epoch = 0
def _save(self, output_dir: Optional[str] = None, state_dict=None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to {}".format(output_dir))
self.model.save_pretrained(output_dir)
if self.tokenizer is not None and self.is_world_process_zero():
self.tokenizer.save_pretrained(output_dir)
def compute_loss(self, model, inputs, return_outputs=False):
outputs: SequenceClassifierOutput = model(inputs)
loss = outputs.loss
if self.model.training:
labels = inputs['labels']
step_acc = accuracy(output=outputs.logits.detach(), target=labels)[0]
self.acc_meter.update(step_acc)
if self.state.global_step > 0 and self.state.global_step % self.args.logging_steps == 0:
logger.info('step: {}, {}'.format(self.state.global_step, self.acc_meter))
self._reset_meters_if_needed()
return (loss, outputs) if return_outputs else loss
def _reset_meters_if_needed(self):
if int(self.state.epoch) != self.last_epoch:
self.last_epoch = int(self.state.epoch)
self.acc_meter.reset()
| EXA-1-master | exa/models/unilm-master/simlm/src/trainers/reranker_trainer.py |
import os
import torch
from typing import Optional, Dict, Tuple
from transformers.trainer import Trainer
from logger_config import logger
from metrics import accuracy, batch_mrr
from models import BiencoderOutput, BiencoderModel
from utils import AverageMeter
def _unpack_qp(inputs: Dict[str, torch.Tensor]) -> Tuple:
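# split the collated batch into query and passage sub-batches by stripping the 'q_' / 'd_' key prefixes; optional distillation labels are attached to the query dict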
q_prefix, d_prefix, kd_labels_key = 'q_', 'd_', 'kd_labels'
query_batch_dict = {k[len(q_prefix):]: v for k, v in inputs.items() if k.startswith(q_prefix)}
doc_batch_dict = {k[len(d_prefix):]: v for k, v in inputs.items() if k.startswith(d_prefix)}
if kd_labels_key in inputs:
assert len(query_batch_dict) > 0
query_batch_dict[kd_labels_key] = inputs[kd_labels_key]
if not query_batch_dict:
query_batch_dict = None
if not doc_batch_dict:
doc_batch_dict = None
return query_batch_dict, doc_batch_dict
class BiencoderTrainer(Trainer):
def __init__(self, *pargs, **kwargs):
super(BiencoderTrainer, self).__init__(*pargs, **kwargs)
self.model: BiencoderModel
self.acc1_meter = AverageMeter('Acc@1', round_digits=2)
self.acc3_meter = AverageMeter('Acc@3', round_digits=2)
self.mrr_meter = AverageMeter('mrr', round_digits=2)
self.last_epoch = 0
def _save(self, output_dir: Optional[str] = None, state_dict=None):
output_dir = output_dir if output_dir is not None else self.args.output_dir
os.makedirs(output_dir, exist_ok=True)
logger.info("Saving model checkpoint to {}".format(output_dir))
self.model.save(output_dir)
if self.tokenizer is not None:
self.tokenizer.save_pretrained(output_dir)
def compute_loss(self, model, inputs, return_outputs=False):
query, passage = _unpack_qp(inputs)
outputs: BiencoderOutput = model(query=query, passage=passage)
loss = outputs.loss
if self.model.training:
step_acc1, step_acc3 = accuracy(output=outputs.scores.detach(), target=outputs.labels, topk=(1, 3))
step_mrr = batch_mrr(output=outputs.scores.detach(), target=outputs.labels)
self.acc1_meter.update(step_acc1)
self.acc3_meter.update(step_acc3)
self.mrr_meter.update(step_mrr)
if self.state.global_step > 0 and self.state.global_step % self.args.logging_steps == 0:
log_info = ', '.join(map(str, [self.mrr_meter, self.acc1_meter, self.acc3_meter]))
logger.info('step: {}, {}'.format(self.state.global_step, log_info))
self._reset_meters_if_needed()
return (loss, outputs) if return_outputs else loss
def _reset_meters_if_needed(self):
if int(self.state.epoch) != self.last_epoch:
self.last_epoch = int(self.state.epoch)
self.acc1_meter.reset()
self.acc3_meter.reset()
self.mrr_meter.reset()
| EXA-1-master | exa/models/unilm-master/simlm/src/trainers/biencoder_trainer.py |
import json
import os
import glob
import tqdm
import torch
from contextlib import nullcontext
from torch.utils.data import DataLoader
from functools import partial
from collections import defaultdict
from datasets import Dataset
from typing import Dict, List, Tuple
from transformers.file_utils import PaddingStrategy
from transformers import (
AutoTokenizer,
PreTrainedTokenizerFast,
DataCollatorWithPadding,
HfArgumentParser,
BatchEncoding
)
from config import Arguments
from logger_config import logger
from utils import move_to_cuda, save_json_to_file
from metrics import compute_mrr, trec_eval, ScoredDoc
from data_utils import load_queries, load_qrels, load_msmarco_predictions, save_preds_to_msmarco_format
from models import BiencoderModelForInference, BiencoderOutput
parser = HfArgumentParser((Arguments,))
args: Arguments = parser.parse_args_into_dataclasses()[0]
assert os.path.exists(args.encode_save_dir)
def _get_all_shards_path() -> List[str]:
path_list = glob.glob('{}/shard_*_*'.format(args.encode_save_dir))
assert len(path_list) > 0
def _parse_worker_idx_shard_idx(p: str) -> Tuple:
worker_idx, shard_idx = [int(f) for f in os.path.basename(p).split('_')[-2:]]
return worker_idx, shard_idx
path_list = sorted(path_list, key=lambda path: _parse_worker_idx_shard_idx(path))
logger.info('Embeddings path list: {}'.format(path_list))
return path_list
def _get_topk_result_save_path(worker_idx: int) -> str:
return '{}/top{}_{}_{}.txt'.format(args.search_out_dir, args.search_topk, args.search_split, worker_idx)
def _query_transform_func(tokenizer: PreTrainedTokenizerFast,
examples: Dict[str, List]) -> BatchEncoding:
batch_dict = tokenizer(examples['query'],
max_length=args.q_max_len,
padding=PaddingStrategy.DO_NOT_PAD,
truncation=True)
return batch_dict
@torch.no_grad()
def _worker_encode_queries(gpu_idx: int) -> Tuple:
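# each GPU worker encodes a contiguous shard of the query set with the biencoder query encoder and returns the embeddings for the search step below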
# fail fast if shard does not exist
_get_all_shards_path()
query_id_to_text = load_queries(path=os.path.join(args.data_dir, '{}_queries.tsv'.format(args.search_split)),
task_type=args.task_type)
query_ids = sorted(list(query_id_to_text.keys()))
queries = [query_id_to_text[query_id] for query_id in query_ids]
dataset = Dataset.from_dict({'query_id': query_ids,
'query': queries})
dataset = dataset.shard(num_shards=torch.cuda.device_count(),
index=gpu_idx,
contiguous=True)
# only keep data for current shard
query_ids = dataset['query_id']
query_id_to_text = {qid: query_id_to_text[qid] for qid in query_ids}
logger.info('GPU {} needs to process {} examples'.format(gpu_idx, len(dataset)))
torch.cuda.set_device(gpu_idx)
tokenizer: PreTrainedTokenizerFast = AutoTokenizer.from_pretrained(args.model_name_or_path)
model: BiencoderModelForInference = BiencoderModelForInference.build(args)
model.eval()
model.cuda()
dataset.set_transform(partial(_query_transform_func, tokenizer))
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
data_loader = DataLoader(
dataset,
batch_size=512,
shuffle=False,
drop_last=False,
num_workers=args.dataloader_num_workers,
collate_fn=data_collator,
pin_memory=True)
encoded_embeds = []
for batch_dict in tqdm.tqdm(data_loader, desc='query encoding', mininterval=5):
batch_dict = move_to_cuda(batch_dict)
with torch.cuda.amp.autocast() if args.fp16 else nullcontext():
outputs: BiencoderOutput = model(query=batch_dict, passage=None)
encoded_embeds.append(outputs.q_reps)
query_embeds = torch.cat(encoded_embeds, dim=0)
logger.info('Done query encoding for worker {}'.format(gpu_idx))
return query_embeds, query_ids, query_id_to_text
@torch.no_grad()
def _worker_batch_search(gpu_idx: int):
embeds_path_list = _get_all_shards_path()
query_embeds, query_ids, query_id_to_text = _worker_encode_queries(gpu_idx)
assert query_embeds.shape[0] == len(query_ids), '{} != {}'.format(query_embeds.shape[0], len(query_ids))
query_id_to_topk = defaultdict(list)
psg_idx_offset = 0
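# brute-force search: iterate over passage-embedding shards, score query batches against each shard, and maintain a running global top-k per query (passage ids are offset by the shard's position in the corpus)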
for shard_idx, shard_path in enumerate(embeds_path_list):
shard_psg_embed = torch.load(shard_path, map_location=lambda storage, loc: storage).to(query_embeds.device)
logger.info('Load {} passage embeddings from {}'.format(shard_psg_embed.shape[0], shard_path))
for start in tqdm.tqdm(range(0, len(query_ids), args.search_batch_size),
desc="search shard {}".format(shard_idx),
mininterval=5):
batch_query_embed = query_embeds[start:(start + args.search_batch_size)]
batch_query_ids = query_ids[start:(start + args.search_batch_size)]
batch_score = torch.mm(batch_query_embed, shard_psg_embed.t())
batch_sorted_score, batch_sorted_indices = torch.topk(batch_score, k=args.search_topk, dim=-1, largest=True)
for batch_idx, query_id in enumerate(batch_query_ids):
cur_scores = batch_sorted_score[batch_idx].cpu().tolist()
cur_indices = [idx + psg_idx_offset for idx in batch_sorted_indices[batch_idx].cpu().tolist()]
query_id_to_topk[query_id] += list(zip(cur_scores, cur_indices))
query_id_to_topk[query_id] = sorted(query_id_to_topk[query_id], key=lambda t: (-t[0], t[1]))
query_id_to_topk[query_id] = query_id_to_topk[query_id][:args.search_topk]
psg_idx_offset += shard_psg_embed.shape[0]
out_path = _get_topk_result_save_path(worker_idx=gpu_idx)
with open(out_path, 'w', encoding='utf-8') as writer:
for query_id in query_id_to_text:
for rank, (score, doc_id) in enumerate(query_id_to_topk[query_id]):
writer.write('{}\t{}\t{}\t{}\n'.format(query_id, doc_id, rank + 1, round(score, 4)))
logger.info('Write scores to {} done'.format(out_path))
def _compute_and_save_metrics(worker_cnt: int):
preds: Dict[str, List[ScoredDoc]] = {}
for worker_idx in range(worker_cnt):
path = _get_topk_result_save_path(worker_idx)
preds.update(load_msmarco_predictions(path))
out_path = os.path.join(args.search_out_dir, '{}.msmarco.txt'.format(args.search_split))
save_preds_to_msmarco_format(preds, out_path)
logger.info('Merge done: save {} predictions to {}'.format(len(preds), out_path))
path_qrels = os.path.join(args.data_dir, '{}_qrels.txt'.format(args.search_split))
if os.path.exists(path_qrels):
qrels = load_qrels(path=path_qrels)
all_metrics = trec_eval(qrels=qrels, predictions=preds)
all_metrics['mrr'] = compute_mrr(qrels=qrels, predictions=preds)
logger.info('{} trec metrics = {}'.format(args.search_split, json.dumps(all_metrics, ensure_ascii=False, indent=4)))
save_json_to_file(all_metrics, os.path.join(args.search_out_dir, 'metrics_{}.json'.format(args.search_split)))
else:
logger.warning('No qrels found for {}'.format(args.search_split))
# do some cleanup
for worker_idx in range(worker_cnt):
path = _get_topk_result_save_path(worker_idx)
os.remove(path)
def _batch_search_queries():
logger.info('Args={}'.format(str(args)))
gpu_count = torch.cuda.device_count()
if gpu_count == 0:
logger.error('No gpu available')
return
logger.info('Use {} gpus'.format(gpu_count))
torch.multiprocessing.spawn(_worker_batch_search, args=(), nprocs=gpu_count)
logger.info('Done batch search queries')
_compute_and_save_metrics(gpu_count)
if __name__ == '__main__':
_batch_search_queries()
| EXA-1-master | exa/models/unilm-master/simlm/src/inference/search_main.py |
import os
import tqdm
import torch
from contextlib import nullcontext
from torch.utils.data import DataLoader
from functools import partial
from datasets import Dataset
from typing import Dict, List
from transformers.file_utils import PaddingStrategy
from transformers.modeling_outputs import SequenceClassifierOutput
from transformers import (
AutoTokenizer,
PreTrainedTokenizerFast,
DataCollatorWithPadding,
HfArgumentParser,
BatchEncoding
)
from config import Arguments
from logger_config import logger
from utils import move_to_cuda
from models import RerankerForInference
from data_utils import load_msmarco_predictions, load_corpus, load_queries, \
merge_rerank_predictions, get_rerank_shard_path
parser = HfArgumentParser((Arguments,))
args: Arguments = parser.parse_args_into_dataclasses()[0]
def _rerank_transform_func(tokenizer: PreTrainedTokenizerFast,
corpus: Dataset,
queries: Dict[str, str],
examples: Dict[str, List]) -> BatchEncoding:
input_docs: List[str] = []
# ATTENTION: this code should be consistent with RerankDataLoader
for doc_id in examples['doc_id']:
doc_id = int(doc_id)
prefix = ''
if corpus[doc_id].get('title', ''):
prefix = corpus[doc_id]['title'] + ': '
input_docs.append(prefix + corpus[doc_id]['contents'])
input_queries = [queries[query_id] for query_id in examples['query_id']]
batch_dict = tokenizer(input_queries,
text_pair=input_docs,
max_length=args.rerank_max_length,
padding=PaddingStrategy.DO_NOT_PAD,
truncation=True)
return batch_dict
@torch.no_grad()
def _worker_compute_reranker_score(gpu_idx: int):
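# each GPU worker rescores a contiguous shard of (query, passage) pairs from the first-stage predictions with the cross-encoder and writes raw scores to a per-worker file that is merged afterwards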
preds = load_msmarco_predictions(args.rerank_in_path)
query_ids = sorted(list(preds.keys()))
qid_pid = []
for query_id in tqdm.tqdm(query_ids, desc='load qid-pid', mininterval=2):
qid_pid += [(scored_doc.qid, scored_doc.pid) for scored_doc in preds[query_id]
if scored_doc.rank <= args.rerank_depth]
dataset = Dataset.from_dict({'query_id': [t[0] for t in qid_pid],
'doc_id': [t[1] for t in qid_pid]})
dataset = dataset.shard(num_shards=torch.cuda.device_count(),
index=gpu_idx,
contiguous=True)
logger.info('GPU {} needs to process {} examples'.format(gpu_idx, len(dataset)))
torch.cuda.set_device(gpu_idx)
query_ids, doc_ids = dataset['query_id'], dataset['doc_id']
assert len(dataset) == len(query_ids)
tokenizer: PreTrainedTokenizerFast = AutoTokenizer.from_pretrained(args.model_name_or_path)
model: RerankerForInference = RerankerForInference.from_pretrained(args.model_name_or_path)
model.eval()
model.cuda()
corpus: Dataset = load_corpus(path=os.path.join(args.data_dir, 'passages.jsonl.gz'))
queries = load_queries(path='{}/{}_queries.tsv'.format(args.data_dir, args.rerank_split),
task_type=args.task_type)
dataset.set_transform(partial(_rerank_transform_func, tokenizer, corpus, queries))
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if args.fp16 else None)
data_loader = DataLoader(
dataset,
batch_size=args.rerank_batch_size,
shuffle=False,
drop_last=False,
num_workers=args.dataloader_num_workers,
collate_fn=data_collator,
pin_memory=True)
scores = []
for batch_dict in tqdm.tqdm(data_loader, desc='passage rerank', mininterval=5):
batch_dict = move_to_cuda(batch_dict)
with torch.cuda.amp.autocast() if args.fp16 else nullcontext():
outputs: SequenceClassifierOutput = model(batch_dict)
scores.append(outputs.logits.squeeze(dim=-1).cpu())
assert len(scores[-1].shape) == 1
all_scores = torch.cat(scores, dim=-1)
assert all_scores.shape[0] == len(query_ids), '{} != {}'.format(all_scores.shape[0], len(query_ids))
all_scores = all_scores.tolist()
with open(get_rerank_shard_path(args, gpu_idx), 'w', encoding='utf-8') as writer:
for idx in range(len(query_ids)):
# dummy rank, since a query may be split across different workers
writer.write('{}\t{}\t{}\t{}\n'.format(query_ids[idx], doc_ids[idx], -1, round(all_scores[idx], 5)))
logger.info('Done computing rerank score for worker {}'.format(gpu_idx))
def _batch_compute_reranker_score():
logger.info('Args={}'.format(str(args)))
gpu_count = torch.cuda.device_count()
if gpu_count == 0:
logger.error('No gpu available')
return
logger.info('Use {} gpus'.format(gpu_count))
torch.multiprocessing.spawn(_worker_compute_reranker_score, args=(), nprocs=gpu_count)
logger.info('Done batch compute rerank score')
merge_rerank_predictions(args, gpu_count)
logger.info('Done merge results')
if __name__ == '__main__':
_batch_compute_reranker_score()
| EXA-1-master | exa/models/unilm-master/simlm/src/inference/rerank_main.py |
import os
import tqdm
import torch
from contextlib import nullcontext
from torch.utils.data import DataLoader
from functools import partial
from datasets import Dataset, load_dataset
from typing import Dict, List
from transformers.file_utils import PaddingStrategy
from transformers.modeling_outputs import SequenceClassifierOutput
from transformers import (
AutoTokenizer,
PreTrainedTokenizerFast,
DataCollatorWithPadding,
HfArgumentParser,
BatchEncoding
)
from config import Arguments
from logger_config import logger
from utils import move_to_cuda
from models import RerankerForInference
from data_utils import load_corpus, load_queries, save_to_readable_format
parser = HfArgumentParser((Arguments,))
args: Arguments = parser.parse_args_into_dataclasses()[0]
kd_gen_score_in_path = os.path.join(args.data_dir, '{}.jsonl'.format(args.kd_gen_score_split))
kd_gen_score_out_path = os.path.join(args.data_dir, 'kd_{}.jsonl'.format(args.kd_gen_score_split))
def _kd_gen_score_transform_func(tokenizer: PreTrainedTokenizerFast,
corpus: Dataset,
queries: Dict[str, str],
examples: Dict[str, List]) -> BatchEncoding:
input_docs: List[str] = []
# ATTENTION: this code should be consistent with CrossEncoderDataLoader
for doc_id in examples['doc_id']:
doc_id = int(doc_id)
prefix = ''
if corpus[doc_id].get('title', ''):
prefix = corpus[doc_id]['title'] + ': '
input_docs.append(prefix + corpus[doc_id]['contents'])
input_queries = [queries[query_id] for query_id in examples['query_id']]
batch_dict = tokenizer(input_queries,
text_pair=input_docs,
max_length=args.rerank_max_length,
padding=PaddingStrategy.DO_NOT_PAD,
truncation=True)
return batch_dict
def _get_shard_path(worker_idx: int) -> str:
return '{}_shard_{}'.format(kd_gen_score_in_path, worker_idx)
@torch.no_grad()
def _worker_gen_teacher_score(gpu_idx: int):
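# Per-GPU worker for distillation: read the training split, build (query_id, doc_id)
# pairs from every positive and the first `kd_gen_score_n_neg` negatives, score each
# pair with the cross-encoder teacher, and write "qid \t pid \t score" lines to a
# per-worker shard file.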
dataset = load_dataset('json', data_files=kd_gen_score_in_path)['train']
if args.dry_run:
dataset = dataset.select(range(100))
dataset = dataset.shard(num_shards=torch.cuda.device_count(),
index=gpu_idx,
contiguous=True)
qid_pids = []
for ex in tqdm.tqdm(dataset, desc='get qid-pid pairs', mininterval=3):
for pos_doc_id in ex['positives']['doc_id']:
qid_pids.append((ex['query_id'], pos_doc_id))
for neg_doc_id in ex['negatives']['doc_id'][:args.kd_gen_score_n_neg]:
qid_pids.append((ex['query_id'], neg_doc_id))
dataset = Dataset.from_dict({'query_id': [t[0] for t in qid_pids],
'doc_id': [t[1] for t in qid_pids]})
query_ids, doc_ids = dataset['query_id'], dataset['doc_id']
logger.info('GPU {} needs to process {} examples'.format(gpu_idx, len(dataset)))
torch.cuda.set_device(gpu_idx)
tokenizer: PreTrainedTokenizerFast = AutoTokenizer.from_pretrained(args.model_name_or_path)
model: RerankerForInference = RerankerForInference.from_pretrained(args.model_name_or_path)
model.eval()
model.cuda()
corpus: Dataset = load_corpus(path=os.path.join(args.data_dir, 'passages.jsonl.gz'))
queries = load_queries(path='{}/{}_queries.tsv'.format(args.data_dir, args.kd_gen_score_split),
task_type=args.task_type)
dataset.set_transform(partial(_kd_gen_score_transform_func, tokenizer, corpus, queries))
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if args.fp16 else None)
data_loader = DataLoader(
dataset,
batch_size=args.kd_gen_score_batch_size,
shuffle=False,
drop_last=False,
num_workers=args.dataloader_num_workers,
collate_fn=data_collator,
pin_memory=True)
scores = []
for batch_dict in tqdm.tqdm(data_loader, desc='generate teacher score', mininterval=5):
batch_dict = move_to_cuda(batch_dict)
with torch.cuda.amp.autocast() if args.fp16 else nullcontext():
outputs: SequenceClassifierOutput = model(batch_dict)
scores.append(outputs.logits.squeeze(dim=-1).cpu())
assert len(scores[-1].shape) == 1
all_scores = torch.cat(scores, dim=-1)
assert all_scores.shape[0] == len(dataset), '{} != {}'.format(all_scores.shape[0], len(dataset))
all_scores = all_scores.tolist()
with open(_get_shard_path(gpu_idx), 'w', encoding='utf-8') as writer:
for idx in range(len(query_ids)):
writer.write('{}\t{}\t{}\n'.format(query_ids[idx], doc_ids[idx], round(all_scores[idx], 5)))
logger.info('Done computing teacher score for worker {}'.format(gpu_idx))
def _merge_teacher_scores(worker_cnt: int):
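# Merge the per-worker shard files into a query_id -> {doc_id: score} map, drop any
# negative that did not receive a teacher score, attach scores to each example's
# positives/negatives, and write the result to kd_<split>.jsonl.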
qid_to_pid_to_score = {}
for worker_idx in range(worker_cnt):
shard_path = _get_shard_path(worker_idx)
for line in tqdm.tqdm(open(shard_path, 'r', encoding='utf-8'),
desc='Load shard {} score'.format(worker_idx), mininterval=3):
fs = line.strip().split('\t')
assert len(fs) == 3
qid, pid, score = fs
if qid not in qid_to_pid_to_score:
qid_to_pid_to_score[qid] = {}
qid_to_pid_to_score[qid][pid] = float(score)
os.remove(shard_path)
dataset = load_dataset('json', data_files=kd_gen_score_in_path)['train']
if args.dry_run:
dataset = dataset.select(range(100))
def _update_score(ex: Dict) -> Dict:
query_id = ex['query_id']
pid_to_score = qid_to_pid_to_score[query_id]
ex['negatives']['doc_id'] = [neg_doc_id for neg_doc_id in ex['negatives']['doc_id'] if neg_doc_id in pid_to_score]
ex['positives']['score'] = [pid_to_score[pos_doc_id] for pos_doc_id in ex['positives']['doc_id']]
ex['negatives']['score'] = [pid_to_score[neg_doc_id] for neg_doc_id in ex['negatives']['doc_id']]
return ex
dataset = dataset.map(_update_score, num_proc=4)
logger.info('Writing teacher score to {}'.format(kd_gen_score_out_path))
dataset.to_json(kd_gen_score_out_path, force_ascii=False, lines=True)
def _batch_compute_teacher_score():
logger.info('Args={}'.format(str(args)))
gpu_count = torch.cuda.device_count()
if gpu_count == 0:
logger.error('No gpu available')
return
logger.info('Use {} gpus'.format(gpu_count))
torch.multiprocessing.spawn(_worker_gen_teacher_score, args=(), nprocs=gpu_count)
logger.info('Done batch generate teacher score')
_merge_teacher_scores(gpu_count)
logger.info('Done merge results')
corpus = load_corpus(path=os.path.join(args.data_dir, 'passages.jsonl.gz'))
save_to_readable_format(in_path=kd_gen_score_out_path, corpus=corpus)
if __name__ == '__main__':
_batch_compute_teacher_score()
| EXA-1-master | exa/models/unilm-master/simlm/src/inference/gen_teacher_scores.py |
| EXA-1-master | exa/models/unilm-master/simlm/src/inference/__init__.py |
import os
import tqdm
import torch
from contextlib import nullcontext
from torch.utils.data import DataLoader
from functools import partial
from datasets import load_dataset
from typing import Dict, List
from transformers.file_utils import PaddingStrategy
from transformers import (
AutoTokenizer,
PreTrainedTokenizerFast,
DataCollatorWithPadding,
HfArgumentParser,
BatchEncoding
)
from config import Arguments
from logger_config import logger
from utils import move_to_cuda
from models import BiencoderModelForInference, BiencoderOutput
parser = HfArgumentParser((Arguments,))
args: Arguments = parser.parse_args_into_dataclasses()[0]
def _psg_transform_func(tokenizer: PreTrainedTokenizerFast,
examples: Dict[str, List]) -> BatchEncoding:
batch_dict = tokenizer(examples['title'],
text_pair=examples['contents'],
max_length=args.p_max_len,
padding=PaddingStrategy.DO_NOT_PAD,
truncation=True)
# for co-Condenser reproduction purposes only
if args.model_name_or_path.startswith('Luyu/'):
del batch_dict['token_type_ids']
return batch_dict
@torch.no_grad()
def _worker_encode_passages(gpu_idx: int):
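# Per-GPU worker: encode this GPU's contiguous shard of the passage collection with the
# biencoder and flush the accumulated embeddings to disk as shard_<gpu>_<idx> files once
# `encode_shard_size` passages have been encoded.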
def _get_out_path(shard_idx: int = 0) -> str:
return '{}/shard_{}_{}'.format(args.encode_save_dir, gpu_idx, shard_idx)
if os.path.exists(_get_out_path(0)):
logger.error('{} already exists, will skip encoding'.format(_get_out_path(0)))
return
dataset = load_dataset('json', data_files=args.encode_in_path)['train']
if args.dry_run:
dataset = dataset.select(range(4096))
dataset = dataset.shard(num_shards=torch.cuda.device_count(),
index=gpu_idx,
contiguous=True)
logger.info('GPU {} needs to process {} examples'.format(gpu_idx, len(dataset)))
torch.cuda.set_device(gpu_idx)
tokenizer: PreTrainedTokenizerFast = AutoTokenizer.from_pretrained(args.model_name_or_path)
model: BiencoderModelForInference = BiencoderModelForInference.build(args)
model.eval()
model.cuda()
dataset.set_transform(partial(_psg_transform_func, tokenizer))
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if args.fp16 else None)
data_loader = DataLoader(
dataset,
batch_size=args.encode_batch_size,
shuffle=False,
drop_last=False,
num_workers=args.dataloader_num_workers,
collate_fn=data_collator,
pin_memory=True)
num_encoded_docs, encoded_embeds, cur_shard_idx = 0, [], 0
for batch_dict in tqdm.tqdm(data_loader, desc='passage encoding', mininterval=8):
batch_dict = move_to_cuda(batch_dict)
with torch.cuda.amp.autocast() if args.fp16 else nullcontext():
outputs: BiencoderOutput = model(query=None, passage=batch_dict)
encoded_embeds.append(outputs.p_reps.cpu())
num_encoded_docs += outputs.p_reps.shape[0]
if num_encoded_docs >= args.encode_shard_size:
out_path = _get_out_path(cur_shard_idx)
concat_embeds = torch.cat(encoded_embeds, dim=0)
logger.info('GPU {} save {} embeds to {}'.format(gpu_idx, concat_embeds.shape[0], out_path))
torch.save(concat_embeds, out_path)
cur_shard_idx += 1
num_encoded_docs = 0
encoded_embeds.clear()
if num_encoded_docs > 0:
out_path = _get_out_path(cur_shard_idx)
concat_embeds = torch.cat(encoded_embeds, dim=0)
logger.info('GPU {} save {} embeds to {}'.format(gpu_idx, concat_embeds.shape[0], out_path))
torch.save(concat_embeds, out_path)
logger.info('Done computing score for worker {}'.format(gpu_idx))
def _batch_encode_passages():
logger.info('Args={}'.format(str(args)))
gpu_count = torch.cuda.device_count()
if gpu_count == 0:
logger.error('No gpu available')
return
logger.info('Use {} gpus'.format(gpu_count))
torch.multiprocessing.spawn(_worker_encode_passages, args=(), nprocs=gpu_count)
logger.info('Done batch encode passages')
if __name__ == '__main__':
_batch_encode_passages()
| EXA-1-master | exa/models/unilm-master/simlm/src/inference/encode_main.py |
from typing import List, Dict
def _slice_with_mod(elements: List, offset: int, cnt: int) -> List:
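# Wrap-around slice: take `cnt` elements starting at `offset`, modulo the list length,
# e.g. _slice_with_mod([1, 2, 3], offset=2, cnt=2) -> [3, 1].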
return [elements[(offset + idx) % len(elements)] for idx in range(cnt)]
def group_doc_ids(examples: Dict[str, List],
negative_size: int,
offset: int,
use_first_positive: bool = False) -> List[int]:
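# Return a flat list of doc ids: for every example, one sampled positive followed by
# `negative_size` negatives, so len(result) == num_examples * (1 + negative_size).
# The epoch-dependent `offset` rotates which positive and negatives get picked.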
pos_doc_ids: List[int] = []
positives: List[Dict[str, List]] = examples['positives']
for idx, ex_pos in enumerate(positives):
all_pos_doc_ids = ex_pos['doc_id']
if use_first_positive:
# keep the first positive, plus positives scored at least as high as the first one or higher than all negatives
all_pos_doc_ids = [doc_id for p_idx, doc_id in enumerate(all_pos_doc_ids)
if p_idx == 0 or ex_pos['score'][p_idx] >= ex_pos['score'][0]
or ex_pos['score'][p_idx] > max(examples['negatives'][idx]['score'])]
cur_pos_doc_id = _slice_with_mod(all_pos_doc_ids, offset=offset, cnt=1)[0]
pos_doc_ids.append(int(cur_pos_doc_id))
neg_doc_ids: List[List[int]] = []
negatives: List[Dict[str, List]] = examples['negatives']
for ex_neg in negatives:
cur_neg_doc_ids = _slice_with_mod(ex_neg['doc_id'],
offset=offset * negative_size,
cnt=negative_size)
cur_neg_doc_ids = [int(doc_id) for doc_id in cur_neg_doc_ids]
neg_doc_ids.append(cur_neg_doc_ids)
assert len(pos_doc_ids) == len(neg_doc_ids), '{} != {}'.format(len(pos_doc_ids), len(neg_doc_ids))
assert all(len(doc_ids) == negative_size for doc_ids in neg_doc_ids)
input_doc_ids: List[int] = []
for pos_doc_id, neg_ids in zip(pos_doc_ids, neg_doc_ids):
input_doc_ids.append(pos_doc_id)
input_doc_ids += neg_ids
return input_doc_ids
| EXA-1-master | exa/models/unilm-master/simlm/src/loaders/loader_utils.py |
import os
import random
from typing import Tuple, Dict, List, Optional
from datasets import load_dataset, DatasetDict, Dataset
from transformers.file_utils import PaddingStrategy
from transformers import PreTrainedTokenizerFast, Trainer
from config import Arguments
from logger_config import logger
from .loader_utils import group_doc_ids
class RetrievalDataLoader:
def __init__(self, args: Arguments, tokenizer: PreTrainedTokenizerFast):
self.args = args
self.negative_size = args.train_n_passages - 1
assert self.negative_size > 0
self.tokenizer = tokenizer
corpus_path = os.path.join(args.data_dir, 'passages.jsonl.gz')
self.corpus: Dataset = load_dataset('json', data_files=corpus_path)['train']
self.train_dataset, self.eval_dataset = self._get_transformed_datasets()
# use its state to decide which positives/negatives to sample
self.trainer: Optional[Trainer] = None
def _transform_func(self, examples: Dict[str, List]) -> Dict[str, List]:
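# Build one training instance per query: tokenize the query, tokenize its sampled
# positive + negatives as (title, contents) pairs, and regroup the flat passage features
# into per-query lists of length train_n_passages. Keys get 'q_'/'d_' prefixes so the
# collator can pad queries and passages separately; optional 'kd_labels' carry teacher
# scores for distillation.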
current_epoch = int(self.trainer.state.epoch or 0)
input_doc_ids: List[int] = group_doc_ids(
examples=examples,
negative_size=self.negative_size,
offset=current_epoch + self.args.seed,
use_first_positive=self.args.use_first_positive
)
assert len(input_doc_ids) == len(examples['query']) * self.args.train_n_passages
input_docs: List[str] = [self.corpus[doc_id]['contents'] for doc_id in input_doc_ids]
input_titles: List[str] = [self.corpus[doc_id]['title'] for doc_id in input_doc_ids]
query_batch_dict = self.tokenizer(examples['query'],
max_length=self.args.q_max_len,
padding=PaddingStrategy.DO_NOT_PAD,
truncation=True)
doc_batch_dict = self.tokenizer(input_titles,
text_pair=input_docs,
max_length=self.args.p_max_len,
padding=PaddingStrategy.DO_NOT_PAD,
truncation=True)
merged_dict = {'q_{}'.format(k): v for k, v in query_batch_dict.items()}
step_size = self.args.train_n_passages
for k, v in doc_batch_dict.items():
k = 'd_{}'.format(k)
merged_dict[k] = []
for idx in range(0, len(v), step_size):
merged_dict[k].append(v[idx:(idx + step_size)])
if self.args.do_kd_biencoder:
qid_to_doc_id_to_score = {}
def _update_qid_pid_score(q_id: str, ex: Dict):
assert len(ex['doc_id']) == len(ex['score'])
if q_id not in qid_to_doc_id_to_score:
qid_to_doc_id_to_score[q_id] = {}
for doc_id, score in zip(ex['doc_id'], ex['score']):
qid_to_doc_id_to_score[q_id][int(doc_id)] = score
for idx, query_id in enumerate(examples['query_id']):
_update_qid_pid_score(query_id, examples['positives'][idx])
_update_qid_pid_score(query_id, examples['negatives'][idx])
merged_dict['kd_labels'] = []
for idx in range(0, len(input_doc_ids), step_size):
qid = examples['query_id'][idx // step_size]
cur_kd_labels = [qid_to_doc_id_to_score[qid][doc_id] for doc_id in input_doc_ids[idx:idx + step_size]]
merged_dict['kd_labels'].append(cur_kd_labels)
assert len(merged_dict['kd_labels']) == len(examples['query_id']), \
'{} != {}'.format(len(merged_dict['kd_labels']), len(examples['query_id']))
# Custom formatting function must return a dict
return merged_dict
def _get_transformed_datasets(self) -> Tuple:
data_files = {}
if self.args.train_file is not None:
data_files["train"] = self.args.train_file.split(',')
if self.args.validation_file is not None:
data_files["validation"] = self.args.validation_file
raw_datasets: DatasetDict = load_dataset('json', data_files=data_files)
train_dataset, eval_dataset = None, None
if self.args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if self.args.max_train_samples is not None:
train_dataset = train_dataset.select(range(self.args.max_train_samples))
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
train_dataset.set_transform(self._transform_func)
if self.args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation"]
eval_dataset.set_transform(self._transform_func)
return train_dataset, eval_dataset
| EXA-1-master | exa/models/unilm-master/simlm/src/loaders/biencoder_dataloader.py |
import random
from typing import Tuple
from transformers import PreTrainedTokenizerFast
from datasets import Dataset, load_dataset
from config import Arguments
from logger_config import logger
def split_dataset(dataset: Dataset,
num_eval_examples: int,
max_train_samples: int = None) -> Tuple[Dataset, Dataset]:
indices = list(range(len(dataset)))
random.Random(123).shuffle(indices)
eval_dataset = dataset.select(indices[:num_eval_examples])
train_dataset = dataset.select(indices[num_eval_examples:])
if max_train_samples is not None:
train_dataset = train_dataset.select(range(max_train_samples))
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
return train_dataset, eval_dataset
class ReplaceLMDataloader:
def __init__(self, args: Arguments, tokenizer: PreTrainedTokenizerFast):
self.args = args
self.tokenizer = tokenizer
data_files = args.train_file.strip().split(',')
self.corpus: Dataset = load_dataset('json', data_files=data_files)['train']
self.train_dataset, self.eval_dataset = split_dataset(
self.corpus,
num_eval_examples=args.rlm_num_eval_samples,
max_train_samples=args.max_train_samples)
| EXA-1-master | exa/models/unilm-master/simlm/src/loaders/rlm_dataloader.py |
from .biencoder_dataloader import RetrievalDataLoader
from .cross_encoder_dataloader import CrossEncoderDataLoader
from .rlm_dataloader import ReplaceLMDataloader
| EXA-1-master | exa/models/unilm-master/simlm/src/loaders/__init__.py |
import os.path
import random
from typing import Tuple, Dict, List, Optional
from datasets import load_dataset, DatasetDict, Dataset
from transformers.file_utils import PaddingStrategy
from transformers import PreTrainedTokenizerFast, Trainer
from config import Arguments
from logger_config import logger
from .loader_utils import group_doc_ids
class CrossEncoderDataLoader:
def __init__(self, args: Arguments, tokenizer: PreTrainedTokenizerFast):
self.args = args
self.negative_size = args.train_n_passages - 1
assert self.negative_size > 0
self.tokenizer = tokenizer
corpus_path = os.path.join(args.data_dir, 'passages.jsonl.gz')
self.corpus: Dataset = load_dataset('json', data_files=corpus_path)['train']
self.train_dataset, self.eval_dataset = self._get_transformed_datasets()
# use its state to decide which positives/negatives to sample
self.trainer: Optional[Trainer] = None
def _transform_func(self, examples: Dict[str, List]) -> Dict[str, List]:
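# Build train_n_passages (query, passage) pairs per query, positive first, with passages
# rendered as "title: contents" when a title exists; then re-pack the flat features so
# each example holds the lists belonging to one query.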
current_epoch = int(self.trainer.state.epoch or 0)
input_doc_ids = group_doc_ids(
examples=examples,
negative_size=self.negative_size,
offset=current_epoch + self.args.seed,
use_first_positive=self.args.use_first_positive
)
assert len(input_doc_ids) == len(examples['query']) * self.args.train_n_passages
input_queries, input_docs = [], []
for idx, doc_id in enumerate(input_doc_ids):
prefix = ''
if self.corpus[doc_id].get('title', ''):
prefix = self.corpus[doc_id]['title'] + ': '
input_docs.append(prefix + self.corpus[doc_id]['contents'])
input_queries.append(examples['query'][idx // self.args.train_n_passages])
batch_dict = self.tokenizer(input_queries,
text_pair=input_docs,
max_length=self.args.rerank_max_length,
padding=PaddingStrategy.DO_NOT_PAD,
truncation=True)
packed_batch_dict = {}
for k in batch_dict:
packed_batch_dict[k] = []
assert len(examples['query']) * self.args.train_n_passages == len(batch_dict[k])
for idx in range(len(examples['query'])):
start = idx * self.args.train_n_passages
packed_batch_dict[k].append(batch_dict[k][start:(start + self.args.train_n_passages)])
return packed_batch_dict
def _get_transformed_datasets(self) -> Tuple:
data_files = {}
if self.args.train_file is not None:
data_files["train"] = self.args.train_file.split(',')
if self.args.validation_file is not None:
data_files["validation"] = self.args.validation_file
raw_datasets: DatasetDict = load_dataset('json', data_files=data_files)
train_dataset, eval_dataset = None, None
if self.args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if self.args.max_train_samples is not None:
train_dataset = train_dataset.select(range(self.args.max_train_samples))
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
train_dataset.set_transform(self._transform_func)
if self.args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation"]
eval_dataset.set_transform(self._transform_func)
return train_dataset, eval_dataset
| EXA-1-master | exa/models/unilm-master/simlm/src/loaders/cross_encoder_dataloader.py |
import copy
from dataclasses import dataclass
from typing import List, Dict, Optional, Any
from transformers import BatchEncoding, BertTokenizerFast
from transformers.data.data_collator import _torch_collate_batch
from transformers.file_utils import PaddingStrategy
from config import Arguments
from .collator_utils import whole_word_mask, torch_mask_tokens, merge_batch_dict
from logger_config import logger
@dataclass
class DataCollatorForReplaceLM:
tokenizer: BertTokenizerFast
pad_to_multiple_of: Optional[int] = None
args: Arguments = None
def __post_init__(self):
if self.tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. "
"You should pass `mlm=False` to train on causal language modeling instead."
)
def __call__(self, features: List[Dict]):
return self.torch_call(features)
def torch_call(self, examples: List[Dict[str, Any]]) -> BatchEncoding:
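# Tokenize the batch, then build two whole-word masks: the encoder is masked at
# rlm_encoder_mask_prob, and the decoder reuses the encoder mask plus extra independent
# masking so its overall mask rate reaches rlm_decoder_mask_prob. The clean input ids
# (with special tokens set to -100) are kept under 'labels'.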
if 'title' in examples[0]:
text, text_pair = [ex['title'] for ex in examples], [ex['contents'] for ex in examples]
else:
text, text_pair = [ex['contents'] for ex in examples], None
batch_dict = self.tokenizer(text,
text_pair=text_pair,
max_length=self.args.rlm_max_length,
padding=PaddingStrategy.DO_NOT_PAD,
truncation=True)
encoder_mask_labels = []
decoder_mask_labels = []
extra_mlm_prob = self.args.rlm_decoder_mask_prob - self.args.rlm_encoder_mask_prob
# mlm_prob + (1 - mlm_prob) x = decoder_prob
# => x = (decoder_prob - mlm_prob) / (1 - mlm_prob)
# since we mask twice independently, we need to adjust extra_mlm_prob accordingly
extra_mlm_prob = extra_mlm_prob / (1 - self.args.rlm_encoder_mask_prob)
for input_ids in batch_dict['input_ids']:
ref_tokens = []
for token_id in input_ids:
token = self.tokenizer._convert_id_to_token(token_id)
ref_tokens.append(token)
encoder_mask_labels.append(whole_word_mask(self.tokenizer, ref_tokens,
mlm_prob=self.args.rlm_encoder_mask_prob))
decoder_mask = encoder_mask_labels[-1][:]
# overlapping mask
if extra_mlm_prob > 1e-4:
decoder_mask = [max(m1, m2) for m1, m2 in zip(decoder_mask,
whole_word_mask(self.tokenizer, ref_tokens, mlm_prob=extra_mlm_prob))]
assert len(decoder_mask) == len(encoder_mask_labels[-1])
decoder_mask_labels.append(decoder_mask)
encoder_batch_mask = _torch_collate_batch(encoder_mask_labels, self.tokenizer,
pad_to_multiple_of=self.pad_to_multiple_of)
encoder_batch_dict = self.tokenizer.pad(batch_dict,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors="pt")
encoder_inputs, encoder_labels = torch_mask_tokens(
self.tokenizer, encoder_batch_dict['input_ids'], encoder_batch_mask,
all_use_mask_token=self.args.all_use_mask_token)
clean_input_ids = encoder_batch_dict['input_ids'].clone()
encoder_batch_dict['input_ids'] = encoder_inputs
encoder_batch_dict['labels'] = encoder_labels
merged_batch_dict = BatchEncoding()
merge_batch_dict(encoder_batch_dict, merged_batch_dict, prefix='enc_')
decoder_batch_dict = copy.deepcopy(encoder_batch_dict)
if extra_mlm_prob > 1e-4:
decoder_batch_mask = _torch_collate_batch(decoder_mask_labels, self.tokenizer,
pad_to_multiple_of=self.pad_to_multiple_of)
decoder_inputs, decoder_labels = torch_mask_tokens(
self.tokenizer, clean_input_ids, decoder_batch_mask,
all_use_mask_token=self.args.all_use_mask_token)
decoder_batch_dict['input_ids'] = decoder_inputs
decoder_batch_dict['labels'] = decoder_labels
merge_batch_dict(decoder_batch_dict, merged_batch_dict, prefix='dec_')
# simple integrity check
# logger.info('encoder mask cnt: {}, decoder mask cnt: {}, non-equal input_ids cnt: {}'.format(
# (merged_batch_dict['enc_labels'] > 0).long().sum(),
# (merged_batch_dict['dec_labels'] > 0).long().sum(),
# (merged_batch_dict['dec_input_ids'] != merged_batch_dict['enc_input_ids']).long().sum()))
labels = clean_input_ids.clone()
for special_id in self.tokenizer.all_special_ids:
labels[labels == special_id] = -100
merged_batch_dict['labels'] = labels
return merged_batch_dict
| EXA-1-master | exa/models/unilm-master/simlm/src/collators/rlm_collator.py |
from .biencoder_collator import BiencoderCollator
from .cross_encoder_collator import CrossEncoderCollator
from .rlm_collator import DataCollatorForReplaceLM
| EXA-1-master | exa/models/unilm-master/simlm/src/collators/__init__.py |
import torch
from dataclasses import dataclass
from typing import List, Dict, Any
from transformers import BatchEncoding, DataCollatorWithPadding
@dataclass
class CrossEncoderCollator(DataCollatorWithPadding):
def __call__(self, features: List[Dict[str, Any]]) -> BatchEncoding:
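# Each feature holds train_n_passages tokenized (query, passage) pairs; unpack them into
# individual examples, pad everything into one batch, and attach all-zero labels (the
# data loader places the positive passage at index 0 within each group).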
unpack_features = []
for ex in features:
keys = list(ex.keys())
# assert all(len(ex[k]) == 8 for k in keys)
for idx in range(len(ex[keys[0]])):
unpack_features.append({k: ex[k][idx] for k in keys})
collated_batch_dict = self.tokenizer.pad(
unpack_features,
padding=self.padding,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=self.return_tensors)
collated_batch_dict['labels'] = torch.zeros(len(features), dtype=torch.long)
return collated_batch_dict
| EXA-1-master | exa/models/unilm-master/simlm/src/collators/cross_encoder_collator.py |
import torch
from dataclasses import dataclass
from typing import List, Dict, Any
from transformers import DataCollatorWithPadding, BatchEncoding
def _unpack_doc_values(features: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
doc_examples = []
for f in features:
keys = list(f.keys())
lists_per_key = len(f[keys[0]])
for idx in range(lists_per_key):
doc_examples.append({k: f[k][idx] for k in keys})
return doc_examples
@dataclass
class BiencoderCollator(DataCollatorWithPadding):
def __call__(self, features: List[Dict[str, Any]]) -> BatchEncoding:
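# Split each feature into its 'q_'-prefixed query fields and 'd_'-prefixed passage
# fields, pad queries and passages separately, then merge back into a single
# BatchEncoding together with optional 'kd_labels' and dummy 'labels'.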
q_prefix, d_prefix = 'q_', 'd_'
query_examples = [{k[len(q_prefix):]: v for k, v in f.items() if k.startswith(q_prefix)} for f in features]
doc_examples = _unpack_doc_values(
[{k[len(d_prefix):]: v for k, v in f.items() if k.startswith(d_prefix)} for f in features])
assert len(doc_examples) % len(query_examples) == 0, \
'{} doc and {} queries'.format(len(doc_examples), len(query_examples))
# already truncated during tokenization
q_collated = self.tokenizer.pad(
query_examples,
padding=self.padding,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=self.return_tensors)
d_collated = self.tokenizer.pad(
doc_examples,
padding=self.padding,
pad_to_multiple_of=self.pad_to_multiple_of,
return_tensors=self.return_tensors)
# merge into a single BatchEncoding by adding prefix
for k in list(q_collated.keys()):
q_collated[q_prefix + k] = q_collated[k]
del q_collated[k]
for k in d_collated:
q_collated[d_prefix + k] = d_collated[k]
merged_batch_dict = q_collated
# dummy placeholder for field "labels", won't use it to compute loss
labels = torch.zeros(len(query_examples), dtype=torch.long)
merged_batch_dict['labels'] = labels
if 'kd_labels' in features[0]:
kd_labels = torch.stack([torch.tensor(f['kd_labels']) for f in features], dim=0).float()
merged_batch_dict['kd_labels'] = kd_labels
return merged_batch_dict
| EXA-1-master | exa/models/unilm-master/simlm/src/collators/biencoder_collator.py |
import torch
import random
import warnings
from transformers import BertTokenizer, BertTokenizerFast, BatchEncoding
from typing import List, Union, Tuple, Any, Dict
def whole_word_mask(tokenizer: Union[BertTokenizer, BertTokenizerFast],
input_tokens: List[str],
mlm_prob: float,
max_predictions=512) -> List[int]:
"""
Get 0/1 labels for masked tokens with whole word mask proxy
"""
if not isinstance(tokenizer, (BertTokenizer, BertTokenizerFast)):
warnings.warn(
"DataCollatorForWholeWordMask is only suitable for BertTokenizer-like tokenizers. "
"Please refer to the documentation for more information."
)
cand_indexes = []
for (i, token) in enumerate(input_tokens):
if token == "[CLS]" or token == "[SEP]":
continue
if len(cand_indexes) >= 1 and token.startswith("##"):
cand_indexes[-1].append(i)
else:
cand_indexes.append([i])
random.shuffle(cand_indexes)
num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * mlm_prob))))
masked_lms = []
covered_indexes = set()
for index_set in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
# If adding a whole-word mask would exceed the maximum number of
# predictions, then just skip this candidate.
if len(masked_lms) + len(index_set) > num_to_predict:
continue
is_any_index_covered = False
for index in index_set:
if index in covered_indexes:
is_any_index_covered = True
break
if is_any_index_covered:
continue
for index in index_set:
covered_indexes.add(index)
masked_lms.append(index)
if len(covered_indexes) != len(masked_lms):
raise ValueError("Length of covered_indexes is not equal to length of masked_lms.")
mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))]
return mask_labels
def torch_mask_tokens(tokenizer: Union[BertTokenizer, BertTokenizerFast],
inputs: torch.Tensor,
mask_labels: torch.Tensor,
all_use_mask_token: bool = False) -> Tuple[Any, Any]:
"""
Prepare masked token inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original.
Setting 'mask_labels' means we use whole word masking (wwm): indices are masked directly according to the reference mask.
"""
if tokenizer.mask_token is None:
raise ValueError(
"This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer."
)
labels = inputs.clone()
masked_inputs = inputs.clone()
# We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability, which defaults to 0.15 in BERT/RoBERTa)
probability_matrix = mask_labels.clone()
special_tokens_mask = [
tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()
]
probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
if tokenizer._pad_token is not None:
padding_mask = labels.eq(tokenizer.pad_token_id)
probability_matrix.masked_fill_(padding_mask, value=0.0)
masked_indices = probability_matrix.bool()
labels[~masked_indices] = -100 # We only compute loss on masked tokens
if all_use_mask_token:
masked_inputs[masked_indices] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
return masked_inputs, labels
# 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK])
indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
masked_inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
# 10% of the time, we replace masked input tokens with random word
indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
masked_inputs[indices_random] = random_words[indices_random]
# The rest of the time (10% of the time) we keep the masked input tokens unchanged
return masked_inputs, labels
def merge_batch_dict(src_batch_dict: Union[Dict, BatchEncoding],
tgt_batch_dict: Union[Dict, BatchEncoding],
prefix: str = None):
for key in src_batch_dict:
tgt_batch_dict[(prefix or '') + key] = src_batch_dict[key].clone()
| EXA-1-master | exa/models/unilm-master/simlm/src/collators/collator_utils.py |
#!/usr/bin/env python3
import torch
from setuptools import find_packages, setup
torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
assert torch_ver >= [1, 4], "Requires PyTorch >= 1.4"
setup(
name="layoutlm",
version="0.0",
author="Yiheng Xu",
url="https://github.com/microsoft/unilm/tree/master/layoutlm",
description="LayoutLM",
packages=find_packages(exclude=("configs", "tests")),
python_requires=">=3.6",
install_requires=[
"transformers==2.9.0",
"tensorboardX==2.0",
"lxml==4.9.1",
"seqeval==0.0.12",
"Pillow==9.3.0",
],
extras_require={
"dev": ["flake8==3.8.2", "isort==4.3.21", "black==19.10b0", "pre-commit==2.4.0"]
},
)
| EXA-1-master | exa/models/unilm-master/layoutlm/deprecated/setup.py |
# coding=utf-8
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForSequenceClassification,
BertTokenizerFast,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
from layoutlm import LayoutlmConfig, LayoutlmForSequenceClassification
from layoutlm.data.rvl_cdip import CdipProcessor, load_and_cache_examples
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (BertConfig, RobertaConfig, LayoutlmConfig)
),
(),
)
MODEL_CLASSES = {
"bert": (BertConfig, BertForSequenceClassification, BertTokenizerFast),
"roberta": (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
"layoutlm": (LayoutlmConfig, LayoutlmForSequenceClassification, BertTokenizerFast),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def train(args, train_dataset, model, tokenizer): # noqa C901
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter(comment="_" + os.path.basename(args.output_dir))
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = (
RandomSampler(train_dataset)
if args.local_rank == -1
else DistributedSampler(train_dataset)
)
train_dataloader = DataLoader(
train_dataset, sampler=train_sampler, batch_size=args.train_batch_size
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = (
args.max_steps
// (len(train_dataloader) // args.gradient_accumulation_steps)
+ 1
)
else:
t_total = (
len(train_dataloader)
// args.gradient_accumulation_steps
* args.num_train_epochs
)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon
)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
)
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.fp16_opt_level
)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(
" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size
)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
)
set_seed(args)  # Added here for reproducibility (even between Python 2 and 3)
for _ in train_iterator:
epoch_iterator = tqdm(
train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]
)
for step, batch in enumerate(epoch_iterator):
model.train()
if args.model_type != "layoutlm":
batch = batch[:4]
batch = tuple(t.to(args.device) for t in batch)
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
}
if args.model_type == "layoutlm":
inputs["bbox"] = batch[4]
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "layoutlm"] else None
) # RoBERTa doesn't use segment_ids
outputs = model(**inputs)
loss = outputs[0]  # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), args.max_grad_norm
)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if (
args.local_rank in [-1, 0]
and args.logging_steps > 0
and global_step % args.logging_steps == 0
):
# Log metrics
if (
args.local_rank == -1 and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer, "val")
for key, value in results.items():
tb_writer.add_scalar(
"eval_{}".format(key), value, global_step
)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar(
"loss",
(tr_loss - logging_loss) / args.logging_steps,
global_step,
)
logging_loss = tr_loss
if (
args.local_rank in [-1, 0]
and args.save_steps > 0
and global_step % args.save_steps == 0
):
# Save model checkpoint
output_dir = os.path.join(
args.output_dir, "checkpoint-{}".format(global_step)
)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
tokenizer.save_pretrained(output_dir)
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, mode, prefix=""):
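# Run sequential evaluation on the given split, compute accuracy, and write
# <mode>_results.txt plus <mode>_compare.txt (per-example prediction vs. label)
# under the output directory.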
results = {}
eval_dataset = load_and_cache_examples(args, tokenizer, mode=mode)
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(
eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size
)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
if args.model_type != "layoutlm":
batch = batch[:4]
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
}
if args.model_type == "layoutlm":
inputs["bbox"] = batch[4]
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "layoutlm"] else None
) # RoBERTa doesn't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(
out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0
)
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=1)
result = {"acc": simple_accuracy(preds=preds, labels=out_label_ids)}
results.update(result)
output_eval_file = os.path.join(
args.output_dir, prefix, "{}_results.txt".format(mode)
)
with open(output_eval_file, "w") as writer:
logger.info("***** {} results {} *****".format(mode, prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
output_eval_file = os.path.join(
args.output_dir, prefix, "{}_compare.txt".format(mode)
)
with open(output_eval_file, "w") as writer:
for p, l in zip(preds, out_label_ids):
writer.write("%s %s\n" % (p, l))
return results
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: "
+ ", ".join(ALL_MODELS),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
## Other parameters
parser.add_argument(
"--config_name",
default="",
type=str,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--do_train", action="store_true", help="Whether to run training."
)
parser.add_argument(
"--do_eval", action="store_true", help="Whether to run eval on the dev set."
)
parser.add_argument(
"--do_test", action="store_true", help="Whether to run test on the test set."
)
parser.add_argument(
"--evaluate_during_training",
action="store_true",
help="Run evaluation during training at each logging step.",
)
parser.add_argument(
"--do_lower_case",
action="store_true",
help="Set this flag if you are using an uncased model.",
)
parser.add_argument(
"--per_gpu_train_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_gpu_eval_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--weight_decay", default=0.0, type=float, help="Weight decay if we apply some."
)
parser.add_argument(
"--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer."
)
parser.add_argument(
"--max_grad_norm", default=1.0, type=float, help="Max gradient norm."
)
parser.add_argument(
"--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument(
"--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps."
)
parser.add_argument(
"--logging_steps", type=int, default=50, help="Log every X updates steps."
)
parser.add_argument(
"--save_steps",
type=int,
default=50,
help="Save checkpoint every X updates steps.",
)
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number",
)
parser.add_argument(
"--no_cuda", action="store_true", help="Avoid using CUDA when available"
)
parser.add_argument(
"--overwrite_output_dir",
action="store_true",
help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache",
action="store_true",
help="Overwrite the cached training and evaluation sets",
)
parser.add_argument(
"--seed", type=int, default=42, help="random seed for initialization"
)
parser.add_argument(
"--tpu",
action="store_true",
help="Whether to run on the TPU defined in the environment variables",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="For distributed training: local_rank",
)
parser.add_argument(
"--server_ip", type=str, default="", help="For distant debugging."
)
parser.add_argument(
"--server_port", type=str, default="", help="For distant debugging."
)
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(
address=(args.server_ip, args.server_port), redirect_output=True
)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda:0" if torch.cuda.is_available() and not args.no_cuda else "cpu"
)
if torch.cuda.is_available():
torch.cuda.set_device(device)
args.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
processor = CdipProcessor()
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, tokenizer, mode="train")
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Load a trained model and vocabulary that you have fine-tuned
model = model_class.from_pretrained(args.output_dir)
tokenizer = tokenizer_class.from_pretrained(
args.output_dir, do_lower_case=args.do_lower_case
)
model.to(args.device)
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(
args.output_dir, do_lower_case=args.do_lower_case
)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c)
for c in sorted(
glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)
)
)
logging.getLogger("transformers.modeling_utils").setLevel(
logging.WARN
) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = (
checkpoint.split("/")[-1]
if checkpoint.find("checkpoint") != -1 and args.eval_all_checkpoints
else ""
)
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, mode="val", prefix=prefix)
result = dict(
("val_" + k + "_{}".format(global_step), v) for k, v in result.items()
)
results.update(result)
if args.do_test and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(
args.output_dir, do_lower_case=args.do_lower_case
)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c)
for c in sorted(
glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)
)
)
logging.getLogger("transformers.modeling_utils").setLevel(
logging.WARN
) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = (
checkpoint.split("/")[-1]
if checkpoint.find("checkpoint") != -1 and args.eval_all_checkpoints
else ""
)
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result = evaluate(args, model, tokenizer, mode="test", prefix=prefix)
result = dict(
("test_" + k + "_{}".format(global_step), v) for k, v in result.items()
)
results.update(result)
return results
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/layoutlm/deprecated/examples/classification/run_classification.py |
import argparse
import json
import os
from PIL import Image
from transformers import AutoTokenizer
def bbox_string(box, width, length):
return (
str(int(1000 * (box[0] / width)))
+ " "
+ str(int(1000 * (box[1] / length)))
+ " "
+ str(int(1000 * (box[2] / width)))
+ " "
+ str(int(1000 * (box[3] / length)))
)
def actual_bbox_string(box, width, length):
return (
str(box[0])
+ " "
+ str(box[1])
+ " "
+ str(box[2])
+ " "
+ str(box[3])
+ "\t"
+ str(width)
+ " "
+ str(length)
)
def convert(args):
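# Convert FUNSD-style annotation json files into three aligned token-per-line files:
# <split>.txt.tmp (token + BIOES tag), <split>_box.txt.tmp (token + bbox normalized to
# a 0-1000 grid), and <split>_image.txt.tmp (token + absolute bbox + page size + image name).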
with open(
os.path.join(args.output_dir, args.data_split + ".txt.tmp"),
"w",
encoding="utf8",
) as fw, open(
os.path.join(args.output_dir, args.data_split + "_box.txt.tmp"),
"w",
encoding="utf8",
) as fbw, open(
os.path.join(args.output_dir, args.data_split + "_image.txt.tmp"),
"w",
encoding="utf8",
) as fiw:
for file in os.listdir(args.data_dir):
file_path = os.path.join(args.data_dir, file)
with open(file_path, "r", encoding="utf8") as f:
data = json.load(f)
image_path = file_path.replace("annotations", "images")
image_path = image_path.replace("json", "png")
file_name = os.path.basename(image_path)
image = Image.open(image_path)
width, length = image.size
for item in data["form"]:
words, label = item["words"], item["label"]
words = [w for w in words if w["text"].strip() != ""]
if len(words) == 0:
continue
if label == "other":
for w in words:
fw.write(w["text"] + "\tO\n")
fbw.write(
w["text"]
+ "\t"
+ bbox_string(w["box"], width, length)
+ "\n"
)
fiw.write(
w["text"]
+ "\t"
+ actual_bbox_string(w["box"], width, length)
+ "\t"
+ file_name
+ "\n"
)
else:
if len(words) == 1:
fw.write(words[0]["text"] + "\tS-" + label.upper() + "\n")
fbw.write(
words[0]["text"]
+ "\t"
+ bbox_string(words[0]["box"], width, length)
+ "\n"
)
fiw.write(
words[0]["text"]
+ "\t"
+ actual_bbox_string(words[0]["box"], width, length)
+ "\t"
+ file_name
+ "\n"
)
else:
fw.write(words[0]["text"] + "\tB-" + label.upper() + "\n")
fbw.write(
words[0]["text"]
+ "\t"
+ bbox_string(words[0]["box"], width, length)
+ "\n"
)
fiw.write(
words[0]["text"]
+ "\t"
+ actual_bbox_string(words[0]["box"], width, length)
+ "\t"
+ file_name
+ "\n"
)
for w in words[1:-1]:
fw.write(w["text"] + "\tI-" + label.upper() + "\n")
fbw.write(
w["text"]
+ "\t"
+ bbox_string(w["box"], width, length)
+ "\n"
)
fiw.write(
w["text"]
+ "\t"
+ actual_bbox_string(w["box"], width, length)
+ "\t"
+ file_name
+ "\n"
)
fw.write(words[-1]["text"] + "\tE-" + label.upper() + "\n")
fbw.write(
words[-1]["text"]
+ "\t"
+ bbox_string(words[-1]["box"], width, length)
+ "\n"
)
fiw.write(
words[-1]["text"]
+ "\t"
+ actual_bbox_string(words[-1]["box"], width, length)
+ "\t"
+ file_name
+ "\n"
)
fw.write("\n")
fbw.write("\n")
fiw.write("\n")
def seg_file(file_path, tokenizer, max_len):
subword_len_counter = 0
output_path = file_path[:-4]
with open(file_path, "r", encoding="utf8") as f_p, open(
output_path, "w", encoding="utf8"
) as fw_p:
for line in f_p:
line = line.rstrip()
if not line:
fw_p.write(line + "\n")
subword_len_counter = 0
continue
token = line.split("\t")[0]
current_subwords_len = len(tokenizer.tokenize(token))
# Token contains strange control characters like \x96 or \x95
# Just filter out the complete line
if current_subwords_len == 0:
continue
if (subword_len_counter + current_subwords_len) > max_len:
fw_p.write("\n" + line + "\n")
subword_len_counter = current_subwords_len
continue
subword_len_counter += current_subwords_len
fw_p.write(line + "\n")
def seg(args):
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path, do_lower_case=True
)
seg_file(
os.path.join(args.output_dir, args.data_split + ".txt.tmp"),
tokenizer,
args.max_len,
)
seg_file(
os.path.join(args.output_dir, args.data_split + "_box.txt.tmp"),
tokenizer,
args.max_len,
)
seg_file(
os.path.join(args.output_dir, args.data_split + "_image.txt.tmp"),
tokenizer,
args.max_len,
)
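# max_len defaults to 510 below, leaving room for the [CLS]/[SEP] tokens added later
# during fine-tuning within BERT's 512-token limit.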
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--data_dir", type=str, default="data/training_data/annotations"
)
parser.add_argument("--data_split", type=str, default="train")
parser.add_argument("--output_dir", type=str, default="data")
parser.add_argument("--model_name_or_path", type=str, default="bert-base-uncased")
parser.add_argument("--max_len", type=int, default=510)
args = parser.parse_args()
convert(args)
seg(args)
| EXA-1-master | exa/models/unilm-master/layoutlm/deprecated/examples/seq_labeling/preprocess.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fine-tuning the library models for named entity recognition on CoNLL-2003 (Bert or Roberta). """
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import shutil
import numpy as np
import torch
from seqeval.metrics import (
classification_report,
f1_score,
precision_score,
recall_score,
)
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForTokenClassification,
BertTokenizer,
RobertaConfig,
RobertaForTokenClassification,
RobertaTokenizer,
get_linear_schedule_with_warmup,
)
from layoutlm import FunsdDataset, LayoutlmConfig, LayoutlmForTokenClassification
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (BertConfig, RobertaConfig, LayoutlmConfig)
),
(),
)
MODEL_CLASSES = {
"bert": (BertConfig, BertForTokenClassification, BertTokenizer),
"roberta": (RobertaConfig, RobertaForTokenClassification, RobertaTokenizer),
"layoutlm": (LayoutlmConfig, LayoutlmForTokenClassification, BertTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def collate_fn(data):
batch = [i for i in zip(*data)]
for i in range(len(batch)):
if i < len(batch) - 2:
batch[i] = torch.stack(batch[i], 0)
return tuple(batch)
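# collate_fn stacks every field of a batch except the last two; note that the dataloaders
# below are constructed with collate_fn=None, so PyTorch's default collation is what runs.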
def get_labels(path):
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
def train( # noqa C901
args, train_dataset, model, tokenizer, labels, pad_token_label_id
):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter(logdir="runs/" + os.path.basename(args.output_dir))
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = (
RandomSampler(train_dataset)
if args.local_rank == -1
else DistributedSampler(train_dataset)
)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=args.train_batch_size,
collate_fn=None,
)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = (
args.max_steps
// (len(train_dataloader) // args.gradient_accumulation_steps)
+ 1
)
else:
t_total = (
len(train_dataloader)
// args.gradient_accumulation_steps
* args.num_train_epochs
)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": args.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon
)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training."
)
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.fp16_opt_level
)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True,
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(
" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size
)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
)
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
epoch_iterator = tqdm(
train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]
)
for step, batch in enumerate(epoch_iterator):
model.train()
inputs = {
"input_ids": batch[0].to(args.device),
"attention_mask": batch[1].to(args.device),
"labels": batch[3].to(args.device),
}
if args.model_type in ["layoutlm"]:
inputs["bbox"] = batch[4].to(args.device)
inputs["token_type_ids"] = (
batch[2].to(args.device) if args.model_type in ["bert", "layoutlm"] else None
            )  # RoBERTa doesn't use segment_ids
outputs = model(**inputs)
# model outputs are always tuple in pytorch-transformers (see doc)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), args.max_grad_norm
)
else:
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.max_grad_norm
)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if (
args.local_rank in [-1, 0]
and args.logging_steps > 0
and global_step % args.logging_steps == 0
):
# Log metrics
if (
args.local_rank in [-1, 0] and args.evaluate_during_training
): # Only evaluate when single GPU otherwise metrics may not average well
results, _ = evaluate(
args,
model,
tokenizer,
labels,
pad_token_label_id,
mode="dev",
)
for key, value in results.items():
tb_writer.add_scalar(
"eval_{}".format(key), value, global_step
)
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar(
"loss",
(tr_loss - logging_loss) / args.logging_steps,
global_step,
)
logging_loss = tr_loss
if (
args.local_rank in [-1, 0]
and args.save_steps > 0
and global_step % args.save_steps == 0
):
# Save model checkpoint
output_dir = os.path.join(
args.output_dir, "checkpoint-{}".format(global_step)
)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
eval_dataset = FunsdDataset(args, tokenizer, labels, pad_token_label_id, mode=mode)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(
eval_dataset,
sampler=eval_sampler,
batch_size=args.eval_batch_size,
collate_fn=None,
)
# Eval!
logger.info("***** Running evaluation %s *****", prefix)
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
with torch.no_grad():
inputs = {
"input_ids": batch[0].to(args.device),
"attention_mask": batch[1].to(args.device),
"labels": batch[3].to(args.device),
}
if args.model_type in ["layoutlm"]:
inputs["bbox"] = batch[4].to(args.device)
inputs["token_type_ids"] = (
batch[2].to(args.device)
if args.model_type in ["bert", "layoutlm"]
else None
            )  # RoBERTa doesn't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
tmp_eval_loss = (
tmp_eval_loss.mean()
) # mean() to average on multi-gpu parallel evaluating
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(
out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0
)
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"loss": eval_loss,
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list),
}
report = classification_report(out_label_list, preds_list)
logger.info("\n" + report)
logger.info("***** Eval results %s *****", prefix)
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
return results, preds_list
def main(): # noqa C901
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
)
parser.add_argument(
"--model_type",
default=None,
type=str,
required=True,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()),
)
parser.add_argument(
"--model_name_or_path",
default=None,
type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: "
+ ", ".join(ALL_MODELS),
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
## Other parameters
parser.add_argument(
"--labels",
default="",
type=str,
help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.",
)
parser.add_argument(
"--config_name",
default="",
type=str,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--cache_dir",
default="",
type=str,
help="Where do you want to store the pre-trained models downloaded from s3",
)
parser.add_argument(
"--max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.",
)
parser.add_argument(
"--do_train", action="store_true", help="Whether to run training."
)
parser.add_argument(
"--do_eval", action="store_true", help="Whether to run eval on the dev set."
)
parser.add_argument(
"--do_predict",
action="store_true",
help="Whether to run predictions on the test set.",
)
parser.add_argument(
"--evaluate_during_training",
action="store_true",
help="Whether to run evaluation during training at each logging step.",
)
parser.add_argument(
"--do_lower_case",
action="store_true",
help="Set this flag if you are using an uncased model.",
)
parser.add_argument(
"--per_gpu_train_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--per_gpu_eval_batch_size",
default=8,
type=int,
help="Batch size per GPU/CPU for evaluation.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--weight_decay", default=0.0, type=float, help="Weight decay if we apply some."
)
parser.add_argument(
"--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer."
)
parser.add_argument(
"--max_grad_norm", default=1.0, type=float, help="Max gradient norm."
)
parser.add_argument(
"--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
parser.add_argument(
"--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps."
)
parser.add_argument(
"--logging_steps", type=int, default=50, help="Log every X updates steps."
)
parser.add_argument(
"--save_steps",
type=int,
default=50,
help="Save checkpoint every X updates steps.",
)
parser.add_argument(
"--eval_all_checkpoints",
action="store_true",
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
)
parser.add_argument(
"--no_cuda", action="store_true", help="Avoid using CUDA when available"
)
parser.add_argument(
"--overwrite_output_dir",
action="store_true",
help="Overwrite the content of the output directory",
)
parser.add_argument(
"--overwrite_cache",
action="store_true",
help="Overwrite the cached training and evaluation sets",
)
parser.add_argument(
"--seed", type=int, default=42, help="random seed for initialization"
)
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
)
parser.add_argument(
"--fp16_opt_level",
type=str,
default="O1",
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html",
)
parser.add_argument(
"--local_rank",
type=int,
default=-1,
help="For distributed training: local_rank",
)
parser.add_argument(
"--server_ip", type=str, default="", help="For distant debugging."
)
parser.add_argument(
"--server_port", type=str, default="", help="For distant debugging."
)
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
):
if not args.overwrite_output_dir:
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(
args.output_dir
)
)
else:
if args.local_rank in [-1, 0]:
shutil.rmtree(args.output_dir)
if not os.path.exists(args.output_dir) and (args.do_eval or args.do_predict):
raise ValueError(
"Output directory ({}) does not exist. Please train and save the model before inference stage.".format(
args.output_dir
)
)
if (
not os.path.exists(args.output_dir)
and args.do_train
and args.local_rank in [-1, 0]
):
os.makedirs(args.output_dir)
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(
address=(args.server_ip, args.server_port), redirect_output=True
)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu"
)
args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend="nccl")
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
filename=os.path.join(args.output_dir, "train.log")
if args.local_rank in [-1, 0]
else None,
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank,
device,
args.n_gpu,
bool(args.local_rank != -1),
args.fp16,
)
# Set seed
set_seed(args)
labels = get_labels(args.labels)
num_labels = len(labels)
# Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later
pad_token_label_id = CrossEntropyLoss().ignore_index
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None,
)
model = model_class.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
cache_dir=args.cache_dir if args.cache_dir else None,
)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = FunsdDataset(
args, tokenizer, labels, pad_token_label_id, mode="train"
)
global_step, tr_loss = train(
args, train_dataset, model, tokenizer, labels, pad_token_label_id
)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Evaluation
results = {}
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(
args.output_dir, do_lower_case=args.do_lower_case
)
checkpoints = [args.output_dir]
if args.eval_all_checkpoints:
checkpoints = list(
os.path.dirname(c)
for c in sorted(
glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)
)
)
logging.getLogger("pytorch_transformers.modeling_utils").setLevel(
logging.WARN
) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
model = model_class.from_pretrained(checkpoint)
model.to(args.device)
result, _ = evaluate(
args,
model,
tokenizer,
labels,
pad_token_label_id,
mode="test",
prefix=global_step,
)
if global_step:
result = {"{}_{}".format(global_step, k): v for k, v in result.items()}
results.update(result)
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
for key in sorted(results.keys()):
writer.write("{} = {}\n".format(key, str(results[key])))
if args.do_predict and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(
args.model_name_or_path, do_lower_case=args.do_lower_case
)
model = model_class.from_pretrained(args.output_dir)
model.to(args.device)
result, predictions = evaluate(
args, model, tokenizer, labels, pad_token_label_id, mode="test"
)
# Save results
output_test_results_file = os.path.join(args.output_dir, "test_results.txt")
with open(output_test_results_file, "w") as writer:
for key in sorted(result.keys()):
writer.write("{} = {}\n".format(key, str(result[key])))
# Save predictions
output_test_predictions_file = os.path.join(
args.output_dir, "test_predictions.txt"
)
with open(output_test_predictions_file, "w", encoding="utf8") as writer:
with open(
os.path.join(args.data_dir, "test.txt"), "r", encoding="utf8"
) as f:
example_id = 0
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
writer.write(line)
if not predictions[example_id]:
example_id += 1
elif predictions[example_id]:
output_line = (
line.split()[0]
+ " "
+ predictions[example_id].pop(0)
+ "\n"
)
writer.write(output_line)
else:
logger.warning(
"Maximum sequence length exceeded: No prediction for '%s'.",
line.split()[0],
)
return results
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/layoutlm/deprecated/examples/seq_labeling/run_seq_labeling.py |
# flake8: noqa
from .data.funsd import FunsdDataset
from .modeling.layoutlm import (
LayoutlmConfig,
LayoutlmForSequenceClassification,
LayoutlmForTokenClassification,
)
| EXA-1-master | exa/models/unilm-master/layoutlm/deprecated/layoutlm/__init__.py |
| EXA-1-master | exa/models/unilm-master/layoutlm/deprecated/layoutlm/modeling/__init__.py |
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import BertConfig, BertModel, BertPreTrainedModel
from transformers.modeling_bert import BertLayerNorm
logger = logging.getLogger(__name__)
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_MAP = {}
LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
class LayoutlmConfig(BertConfig):
pretrained_config_archive_map = LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP
model_type = "bert"
def __init__(self, max_2d_position_embeddings=1024, **kwargs):
super().__init__(**kwargs)
self.max_2d_position_embeddings = max_2d_position_embeddings
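# LayoutlmEmbeddings extends the standard BERT embeddings with 2-D layout information:
# separate tables embed the left/right x-coordinates, the top/bottom y-coordinates, and the
# box height and width, and all of these are summed with the word, 1-D position and
# token-type embeddings before LayerNorm and dropout.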
class LayoutlmEmbeddings(nn.Module):
def __init__(self, config):
super(LayoutlmEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(
config.vocab_size, config.hidden_size, padding_idx=0
)
self.position_embeddings = nn.Embedding(
config.max_position_embeddings, config.hidden_size
)
self.x_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.y_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.h_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.w_position_embeddings = nn.Embedding(
config.max_2d_position_embeddings, config.hidden_size
)
self.token_type_embeddings = nn.Embedding(
config.type_vocab_size, config.hidden_size
)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(
self,
input_ids,
bbox,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(
seq_length, dtype=torch.long, device=input_ids.device
)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
h_position_embeddings = self.h_position_embeddings(
bbox[:, :, 3] - bbox[:, :, 1]
)
w_position_embeddings = self.w_position_embeddings(
bbox[:, :, 2] - bbox[:, :, 0]
)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = (
words_embeddings
+ position_embeddings
+ left_position_embeddings
+ upper_position_embeddings
+ right_position_embeddings
+ lower_position_embeddings
+ h_position_embeddings
+ w_position_embeddings
+ token_type_embeddings
)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class LayoutlmModel(BertModel):
config_class = LayoutlmConfig
pretrained_model_archive_map = LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "bert"
def __init__(self, config):
super(LayoutlmModel, self).__init__(config)
self.embeddings = LayoutlmEmbeddings(config)
self.init_weights()
def forward(
self,
input_ids,
bbox,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = (
head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
)
head_mask = head_mask.expand(
self.config.num_hidden_layers, -1, -1, -1, -1
)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(self.parameters()).dtype
                )  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(
input_ids, bbox, position_ids=position_ids, token_type_ids=token_type_ids
)
encoder_outputs = self.encoder(
embedding_output, extended_attention_mask, head_mask=head_mask
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
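# Token-level head: dropout plus a linear classifier over each position's hidden state; when
# labels are provided, the cross-entropy loss is restricted to positions with attention_mask == 1.
# Minimal usage sketch (illustrative only; the checkpoint path is hypothetical):
#   config = LayoutlmConfig.from_pretrained("path/to/layoutlm-base-uncased", num_labels=7)
#   model = LayoutlmForTokenClassification.from_pretrained("path/to/layoutlm-base-uncased", config=config)
#   loss, logits = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, labels=labels)[:2]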
class LayoutlmForTokenClassification(BertPreTrainedModel):
config_class = LayoutlmConfig
pretrained_model_archive_map = LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "bert"
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = LayoutlmModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(
self,
input_ids,
bbox,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
outputs = self.bert(
input_ids=input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[
2:
] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
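# Sequence-level head: classifies the pooled [CLS] representation; uses an MSE regression
# loss when num_labels == 1 and cross-entropy otherwise.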
class LayoutlmForSequenceClassification(BertPreTrainedModel):
config_class = LayoutlmConfig
pretrained_model_archive_map = LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "bert"
def __init__(self, config):
super(LayoutlmForSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = LayoutlmModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
def forward(
self,
input_ids,
bbox,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
):
outputs = self.bert(
input_ids=input_ids,
bbox=bbox,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[
2:
] # add hidden states and attention if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
| EXA-1-master | exa/models/unilm-master/layoutlm/deprecated/layoutlm/modeling/layoutlm.py |
# coding=utf-8
import copy
import json
import logging
import os
import re
from multiprocessing import Pool
import torch
from lxml import html
from torch.utils.data import TensorDataset
from tqdm import tqdm
from transformers import DataProcessor
logger = logging.getLogger(__name__)
def get_text(node):
textnodes = node.xpath(".//text()")
s = "".join([text for text in textnodes])
return re.sub(r"\s+", " ", s).strip()
def get_prop(node, name):
title = node.get("title")
props = title.split(";")
for prop in props:
(key, args) = prop.split(None, 1)
args = args.strip('"')
if key == name:
return args
return None
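# The helpers above parse hOCR output: get_text() collapses the text nodes under an element
# and get_prop() extracts a named property (e.g. "bbox") from the element's title attribute.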
class DocExample(object):
def __init__(self, guid, text_a, text_b=None, bbox=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.bbox = bbox
self.label = label
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class CdipProcessor(DataProcessor):
"""Processor for the CDIP data set."""
def worker(self, line):
file, label = line.split()
text, bbox = self.read_hocr_file(self.data_dir, file)
return [text, bbox, label]
def get_examples(self, data_dir, mode):
self.data_dir = data_dir
with open(os.path.join(data_dir, "labels", "{}.txt".format(mode))) as f:
lines = f.readlines()
examples = []
with tqdm(lines, desc="Gettting {} examples".format(mode)) as t, Pool(24) as p:
for example in p.imap(self.worker, lines):
examples.append(example)
t.update()
return self._create_examples(examples, mode)
def _get_examples(self, data_dir, mode):
with open(os.path.join(data_dir, "labels", "{}.txt".format(mode))) as f:
lines = []
for line in tqdm(f.readlines(), desc="Gettting {} examples".format(mode)):
file, label = line.split()
text, bbox = self.read_hocr_file(data_dir, file)
lines.append([text, bbox, label])
return self._create_examples(lines, mode)
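    # read_hocr_file() loads one hOCR page, collects every ocrx_word with its box, and
    # rescales the pixel coordinates to the 0-1000 grid expected by the LayoutLM embeddings.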
def read_hocr_file(self, data_dir, file):
hocr_file = os.path.join(data_dir, "images", file[:-4] + ".xml")
text_buffer = []
bbox_buffer = []
try:
doc = html.parse(hocr_file)
except AssertionError:
logger.warning(
"%s is empty or its format is unacceptable. Skipped.", hocr_file
)
return [], []
for page in doc.xpath("//*[@class='ocr_page']"):
page_bbox = [int(x) for x in get_prop(page, "bbox").split()]
width, height = page_bbox[2], page_bbox[3]
for word in doc.xpath("//*[@class='ocrx_word']"):
textnodes = word.xpath(".//text()")
s = "".join([text for text in textnodes])
text = re.sub(r"\s+", " ", s).strip()
if text:
text_buffer.append(text)
bbox = [int(x) for x in get_prop(word, "bbox").split()]
bbox = [
bbox[0] / width,
bbox[1] / height,
bbox[2] / width,
bbox[3] / height,
]
bbox = [int(x * 1000) for x in bbox]
bbox_buffer.append(bbox)
return text_buffer, bbox_buffer
def get_labels(self):
return list(map(str, list(range(16))))
def _create_examples(self, lines, mode):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (mode, i)
text = line[0]
bbox = line[1]
label = line[2]
examples.append(
DocExample(guid=guid, text_a=text, text_b=None, bbox=bbox, label=label)
)
return examples
class DocFeature(object):
def __init__(self, input_ids, bboxes, attention_mask, token_type_ids, label):
        assert all(
            0 <= coord <= 1000 for bbox in bboxes for coord in bbox
), "Error with input bbox ({}): the coordinate value is not between 0 and 1000".format(
bboxes
)
self.input_ids = input_ids
self.bboxes = bboxes
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.label = label
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
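# convert_examples_to_features() tokenizes each word, repeats the word's box for every
# sub-token, adds [CLS]/[SEP] with the sentinel boxes [0, 0, 0, 0] and [1000, 1000, 1000, 1000],
# and pads input_ids, bboxes, attention_mask and token_type_ids to max_length.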
def convert_examples_to_features(
examples,
tokenizer,
max_length=512,
label_list=None,
pad_on_left=False,
pad_token="[PAD]",
pad_token_id=0,
pad_token_segment_id=0,
mask_padding_with_zero=True,
):
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(tqdm(examples)):
tokens = []
bboxes = []
if len(example.text_a) == 0:
bboxes.append([0, 0, 0, 0])
tokens.append(pad_token)
for token, bbox in zip(example.text_a, example.bbox):
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
bboxes.append(bbox)
tokens.append(sub_token)
tokens = tokens[: max_length - 2]
bboxes = bboxes[: max_length - 2]
bboxes = [[0, 0, 0, 0]] + bboxes + [[1000, 1000, 1000, 1000]]
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_ids = [tokenizer.cls_token_id] + input_ids + [tokenizer.sep_token_id]
token_type_ids = [0] * len(input_ids)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token_id] * padding_length) + input_ids
bboxes = ([[0, 0, 0, 0]] * padding_length) + bboxes
attention_mask = (
[0 if mask_padding_with_zero else 1] * padding_length
) + attention_mask
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token_id] * padding_length)
bboxes = bboxes + ([[0, 0, 0, 0]] * padding_length)
attention_mask = attention_mask + (
[0 if mask_padding_with_zero else 1] * padding_length
)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(
len(input_ids), max_length
)
assert len(bboxes) == max_length, "Error with input length {} vs {}".format(
len(bboxes), max_length
)
assert (
len(attention_mask) == max_length
), "Error with input length {} vs {}".format(len(attention_mask), max_length)
assert (
len(token_type_ids) == max_length
), "Error with input length {} vs {}".format(len(token_type_ids), max_length)
label = label_map[example.label]
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_ids: %s" % " ".join([str(x) for x in bboxes]))
logger.info(
"attention_mask: %s" % " ".join([str(x) for x in attention_mask])
)
logger.info(
"token_type_ids: %s" % " ".join([str(x) for x in token_type_ids])
)
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(
DocFeature(
input_ids=input_ids,
bboxes=bboxes,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=label,
)
)
return features
def load_and_cache_examples(args, tokenizer, mode="train"):
if args.local_rank not in [-1, 0] and mode == "train":
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
processor = CdipProcessor()
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}".format(
mode,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
examples = processor.get_examples(args.data_dir, mode)
features = convert_examples_to_features(
examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
pad_on_left=bool(args.model_type in ["xlnet"]),
# pad on the left for xlnet
pad_token=tokenizer.pad_token,
pad_token_id=tokenizer.pad_token_id,
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and mode == "train":
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_bboxes = torch.tensor([f.bboxes for f in features], dtype=torch.long)
all_attention_mask = torch.tensor(
[f.attention_mask for f in features], dtype=torch.long
)
all_token_type_ids = torch.tensor(
[f.token_type_ids for f in features], dtype=torch.long
)
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
dataset = TensorDataset(
all_input_ids, all_attention_mask, all_token_type_ids, all_labels, all_bboxes
)
return dataset
if __name__ == "__main__":
import argparse
from transformers import BertTokenizerFast
parser = argparse.ArgumentParser()
args = parser.parse_args()
args.local_rank = -1
args.data_dir = "data"
args.model_name_or_path = "bert-base-uncased"
args.max_seq_length = 512
args.model_type = "bert"
args.overwrite_cache = True
tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
dataset = load_and_cache_examples(args, tokenizer, mode="test")
print(len(dataset))
| EXA-1-master | exa/models/unilm-master/layoutlm/deprecated/layoutlm/data/rvl_cdip.py |
# flake8: noqa
from .funsd import FunsdDataset
| EXA-1-master | exa/models/unilm-master/layoutlm/deprecated/layoutlm/data/__init__.py |
import logging
import os
import torch
from torch.utils.data import Dataset
logger = logging.getLogger(__name__)
class FunsdDataset(Dataset):
def __init__(self, args, tokenizer, labels, pad_token_label_id, mode):
if args.local_rank not in [-1, 0] and mode == "train":
            torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}".format(
mode,
list(filter(None, args.model_name_or_path.split("/"))).pop(),
str(args.max_seq_length),
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
examples = read_examples_from_file(args.data_dir, mode)
features = convert_examples_to_features(
examples,
labels,
args.max_seq_length,
tokenizer,
cls_token_at_end=bool(args.model_type in ["xlnet"]),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(args.model_type in ["roberta"]),
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left=bool(args.model_type in ["xlnet"]),
# pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
pad_token_label_id=pad_token_label_id,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and mode == "train":
            torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache
self.features = features
# Convert to Tensors and build dataset
self.all_input_ids = torch.tensor(
[f.input_ids for f in features], dtype=torch.long
)
self.all_input_mask = torch.tensor(
[f.input_mask for f in features], dtype=torch.long
)
self.all_segment_ids = torch.tensor(
[f.segment_ids for f in features], dtype=torch.long
)
self.all_label_ids = torch.tensor(
[f.label_ids for f in features], dtype=torch.long
)
self.all_bboxes = torch.tensor([f.boxes for f in features], dtype=torch.long)
def __len__(self):
return len(self.features)
def __getitem__(self, index):
return (
self.all_input_ids[index],
self.all_input_mask[index],
self.all_segment_ids[index],
self.all_label_ids[index],
self.all_bboxes[index],
)
class InputExample(object):
"""A single training/test example for token classification."""
def __init__(self, guid, words, labels, boxes, actual_bboxes, file_name, page_size):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
words: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.words = words
self.labels = labels
self.boxes = boxes
self.actual_bboxes = actual_bboxes
self.file_name = file_name
self.page_size = page_size
class InputFeatures(object):
"""A single set of features of data."""
def __init__(
self,
input_ids,
input_mask,
segment_ids,
label_ids,
boxes,
actual_bboxes,
file_name,
page_size,
):
        assert all(
            0 <= coord <= 1000 for box in boxes for coord in box
), "Error with input bbox ({}): the coordinate value is not between 0 and 1000".format(
boxes
)
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
self.boxes = boxes
self.actual_bboxes = actual_bboxes
self.file_name = file_name
self.page_size = page_size
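# read_examples_from_file() consumes the three aligned files produced by preprocess.py
# ({mode}.txt, {mode}_box.txt, {mode}_image.txt); a blank or -DOCSTART- line closes an example.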
def read_examples_from_file(data_dir, mode):
file_path = os.path.join(data_dir, "{}.txt".format(mode))
box_file_path = os.path.join(data_dir, "{}_box.txt".format(mode))
image_file_path = os.path.join(data_dir, "{}_image.txt".format(mode))
guid_index = 1
examples = []
with open(file_path, encoding="utf-8") as f, open(
box_file_path, encoding="utf-8"
) as fb, open(image_file_path, encoding="utf-8") as fi:
words = []
boxes = []
actual_bboxes = []
file_name = None
page_size = None
labels = []
for line, bline, iline in zip(f, fb, fi):
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(
InputExample(
guid="{}-{}".format(mode, guid_index),
words=words,
labels=labels,
boxes=boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
guid_index += 1
words = []
boxes = []
actual_bboxes = []
file_name = None
page_size = None
labels = []
else:
splits = line.split("\t")
bsplits = bline.split("\t")
isplits = iline.split("\t")
assert len(splits) == 2
assert len(bsplits) == 2
assert len(isplits) == 4
assert splits[0] == bsplits[0]
words.append(splits[0])
if len(splits) > 1:
labels.append(splits[-1].replace("\n", ""))
box = bsplits[-1].replace("\n", "")
box = [int(b) for b in box.split()]
boxes.append(box)
actual_bbox = [int(b) for b in isplits[1].split()]
actual_bboxes.append(actual_bbox)
page_size = [int(i) for i in isplits[2].split()]
file_name = isplits[3].strip()
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(
InputExample(
guid="%s-%d".format(mode, guid_index),
words=words,
labels=labels,
boxes=boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
return examples
def convert_examples_to_features(
examples,
label_list,
max_seq_length,
tokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
cls_token_box=[0, 0, 0, 0],
sep_token_box=[1000, 1000, 1000, 1000],
pad_token_box=[0, 0, 0, 0],
pad_token_segment_id=0,
pad_token_label_id=-1,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
file_name = example.file_name
page_size = example.page_size
width, height = page_size
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
tokens = []
token_boxes = []
actual_bboxes = []
label_ids = []
for word, label, box, actual_bbox in zip(
example.words, example.labels, example.boxes, example.actual_bboxes
):
word_tokens = tokenizer.tokenize(word)
tokens.extend(word_tokens)
token_boxes.extend([box] * len(word_tokens))
actual_bboxes.extend([actual_bbox] * len(word_tokens))
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend(
[label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1)
)
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
token_boxes = token_boxes[: (max_seq_length - special_tokens_count)]
actual_bboxes = actual_bboxes[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
token_boxes += [sep_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
token_boxes += [sep_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
token_boxes += [cls_token_box]
actual_bboxes += [[0, 0, width, height]]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
token_boxes = [cls_token_box] + token_boxes
actual_bboxes = [[0, 0, width, height]] + actual_bboxes
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = (
[0 if mask_padding_with_zero else 1] * padding_length
) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
token_boxes = ([pad_token_box] * padding_length) + token_boxes
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
token_boxes += [pad_token_box] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
assert len(token_boxes) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s", example.guid)
logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
logger.info("boxes: %s", " ".join([str(x) for x in token_boxes]))
logger.info("actual_bboxes: %s", " ".join([str(x) for x in actual_bboxes]))
features.append(
InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_ids=label_ids,
boxes=token_boxes,
actual_bboxes=actual_bboxes,
file_name=file_name,
page_size=page_size,
)
)
return features
| EXA-1-master | exa/models/unilm-master/layoutlm/deprecated/layoutlm/data/funsd.py |
from setuptools import setup, find_packages
setup(
name = "adalm",
version = "0.0",
author = "Microsoft",
author_email = "",
description = "domain adaptation toolkit",
keywords = "domain adaptation with extended vocab",
license='Apache',
url = "https://github.com/littlefive5/AdaLM",
packages=find_packages(exclude=["*.tests", "*.tests.*",
"tests.*", "tests"]),
install_requires=['numpy',
'boto3',
'requests',
'tqdm',
'urllib3==1.26.5'],
python_requires='>=3.5.0',
tests_require=['pytest'],
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
) | EXA-1-master | exa/models/unilm-master/adalm/setup.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa)."""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import json
import numpy as np
import torch
from seqeval.metrics import f1_score, precision_score, recall_score
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from torch.nn import CrossEntropyLoss
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForTokenClassification,
BertTokenizer,
DistilBertConfig,
DistilBertForTokenClassification,
DistilBertTokenizer,
RobertaConfig,
RobertaForTokenClassification,
RobertaTokenizer,
XLMRobertaConfig,
XLMRobertaForTokenClassification,
XLMRobertaTokenizer
)
from transformers import AdamW, get_linear_schedule_with_warmup
from utils_ner import convert_examples_to_features, get_labels, read_examples_from_file
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (BertConfig, RobertaConfig, DistilBertConfig, XLMRobertaConfig)
),
(),
)
MODEL_CLASSES = {
"bert": (BertConfig, BertForTokenClassification, BertTokenizer),
"roberta": (RobertaConfig, RobertaForTokenClassification, RobertaTokenizer),
"distilbert": (DistilBertConfig, DistilBertForTokenClassification, DistilBertTokenizer),
"xlmroberta": (XLMRobertaConfig, XLMRobertaForTokenClassification, XLMRobertaTokenizer),
}
TOKENIZER_ARGS = ["do_lower_case", "strip_accents", "keep_accents", "use_fast"]
def set_seed(args):
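    """Seed Python, NumPy and torch RNGs (and CUDA, when GPUs are used) for reproducible runs."""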
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
if args.warmup_ratio > 0:
args.warmup_steps = int(t_total*args.warmup_ratio)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args) # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
if args.disable_tqdm:
epoch_iterator = train_dataloader
else:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'unilm', 'adapterbert'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
epoch_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(json.dumps({**logs, **{'step': global_step}}))
if args.max_steps > 0 and global_step > args.max_steps:
if not args.disable_tqdm:
epoch_iterator.close()
break
if args.local_rank in [-1, 0]:
logs = {}
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
                results, _eval_preds = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix='epoch-{}'.format(_ + 1))
for key, value in results.items():
eval_key = 'eval_{}'.format(key)
logs[eval_key] = value
if metric_for_best is None:
metric_for_best = key
if best_epoch is None or best_performance[metric_for_best] < results[metric_for_best]:
best_epoch = 'epoch-{}'.format(_ + 1)
best_performance = results
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{'step': global_step}}))
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'epoch-{}'.format(_ + 1))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, choosed by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
return global_step, tr_loss / global_step
def save_best_result(best_epoch, best_performance, output_dir):
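    """Record the best checkpoint name and its metrics in best_performance.json under output_dir."""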
best_performance["checkpoint"] = best_epoch
with open(os.path.join(output_dir, "best_performance.json"), mode="w") as writer:
writer.write(json.dumps(best_performance, indent=2))
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
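    """Evaluate `model` on the `mode` split of args.data_dir.

    Computes the average loss and seqeval precision/recall/F1 over the predicted
    label sequences, writes the predictions to eval_out.txt in args.output_dir,
    and returns (results, preds_list).
    """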
eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation %s *****", prefix)
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "adapterbert"] else None
                ) # XLM and RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"loss": eval_loss,
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list),
}
logger.info("***** Eval results %s *****", prefix)
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
output_file = os.path.join(args.output_dir, "eval_out.txt")
with open(output_file, "w+", encoding="utf-8") as f:
for line in tqdm(preds_list):
line = " ".join(line) + "\n"
f.write(line)
return results, preds_list
def test(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
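    """Run prediction on the test split.

    Mirrors evaluate() but always loads the "test" split, writes the metrics to
    predict_results.txt and the predicted label sequences to predict.txt in
    args.output_dir, and returns (results, preds_list).
    """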
test_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="test")
args.test_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
test_sampler = SequentialSampler(test_dataset) if args.local_rank == -1 else DistributedSampler(test_dataset)
test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=args.test_batch_size)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
logger.info("***** Running Prediction %s *****", prefix)
logger.info(" Num examples = %d", len(test_dataset))
logger.info(" Batch size = %d", args.test_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
for batch in tqdm(test_dataloader, desc="Prediction"):
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet","adapterbert"] else None
                ) # XLM and RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"loss": eval_loss,
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list),
}
print(out_label_list[0])
print(preds_list[0])
out_file = os.path.join(args.output_dir, "predict.txt")
logger.info("write results into {}".format(out_file))
output_eval_file = os.path.join(args.output_dir, "predict_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Predict results {} *****".format(prefix))
writer.write(json.dumps(results, indent=2))
logger.info("Result = %s" % json.dumps(results, indent=2))
with open(out_file, "w+", encoding="utf-8") as f:
for line in preds_list:
line = " ".join(line) + "\n"
f.write(line)
return results, preds_list
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode):
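    """Build (or load from cache) the TensorDataset for the `mode` split.

    Features are created from <data_dir>/<mode>.txt and cached as
    cached_<mode>_<model>_<max_seq_length>; the returned dataset holds the
    input_ids, input_mask, segment_ids and label_ids tensors.
    """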
    if args.local_rank not in [-1, 0] and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}".format(
mode, list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length)
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
examples = read_examples_from_file(args.data_dir, mode)
features = convert_examples_to_features(
examples,
labels,
args.max_seq_length,
tokenizer,
cls_token_at_end=bool(args.model_type in ["xlnet"]),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(args.model_type in ["roberta"]),
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left=bool(args.model_type in ["xlnet"]),
# pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
pad_token_label_id=pad_token_label_id,
mode=mode,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
    if args.local_rank == 0 and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--model_type", default="unilm", type=str,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument('--disable_tqdm', action='store_true',
help='Disable the tqdm bar. ')
## Other parameters
parser.add_argument("--labels", default="", type=str,
help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.")
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Rul evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument(
"--keep_accents", action="store_const", const=True, help="Set this flag if model is trained with accents."
)
parser.add_argument(
"--strip_accents", action="store_const", const=True, help="Set this flag if model is trained without accents."
)
parser.add_argument("--use_fast", action="store_const", const=True, help="Set this flag to use fast tokenization.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_ratio", default=0.1, type=float,
help="Linear warmup over warmup_ratio.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--metric_for_choose_best_checkpoint', type=str, default=None,
help="Set the metric to choose the best checkpoint")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Prepare CONLL-2003 task
labels = get_labels(args.labels)
num_labels = len(labels)
# Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later
pad_token_label_id = CrossEntropyLoss().ignore_index
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
id2label={str(i): label for i, label in enumerate(labels)},
label2id={label: i for i, label in enumerate(labels)},
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer_args = {k: v for k, v in vars(args).items() if v is not None and k in TOKENIZER_ARGS}
logger.info("Tokenizer arguments: %s", tokenizer_args)
tokenizer_name = args.tokenizer_name if args.tokenizer_name else args.model_name_or_path
tokenizer = tokenizer_class.from_pretrained(
tokenizer_name,
cache_dir=args.cache_dir if args.cache_dir else None,
**tokenizer_args,
)
if not hasattr(config, 'need_pooler') or config.need_pooler is not True:
setattr(config, 'need_pooler', True)
model = model_class.from_pretrained(
args.model_name_or_path, config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train")
global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
tokenizer.save_pretrained(args.output_dir)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Evaluation
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, **tokenizer_args)
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
for checkpoint in checkpoints:
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
checkpoint_config = config_class.from_pretrained(checkpoint)
model = model_class.from_pretrained(checkpoint, config=checkpoint_config)
model.to(args.device)
            result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=prefix)
if metric_for_best is None:
metric_for_best = list(result.keys())[-1]
if best_epoch is None:
best_epoch = checkpoint
best_performance = result
else:
if best_performance[metric_for_best] < result[metric_for_best]:
best_performance = result
best_epoch = checkpoint
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, choosed by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
checkpoint = best_epoch
checkpoint_config = config_class.from_pretrained(checkpoint)
model = model_class.from_pretrained(checkpoint, config=checkpoint_config)
model.to(args.device)
result, _ = test(args, model, tokenizer, labels, pad_token_label_id, mode="test", prefix=global_step)
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/unilm-master/adalm/finetune/run_ner.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Named entity recognition fine-tuning: utilities to work with CoNLL-2003 task. """
import logging
import os
from tqdm import tqdm
logger = logging.getLogger(__name__)
class InputExample(object):
"""A single training/test example for token classification."""
def __init__(self, guid, words, labels=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
words: list. The words of the sequence.
labels: (Optional) list. The labels for each word of the sequence. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.words = words
self.labels = labels
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_ids = None):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
def read_examples_from_file(data_dir, mode):
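    """Read <data_dir>/<mode>.txt, where each line holds a space-separated word
    sequence and its label sequence separated by a tab, and return a list of
    InputExample objects.
    """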
file_path = os.path.join(data_dir, "{}.txt".format(mode))
guid_index = 1
examples = []
with open(file_path, encoding="utf-8") as f:
for line in f.readlines():
line = line.strip().split("\t")
words = line[0].split()
labels = line[1].split()
assert len(words) == len(labels)
guid_index +=1
examples.append(InputExample(guid=guid_index, words=words, labels=labels))
return examples
def convert_examples_to_features(
examples,
label_list,
max_seq_length,
tokenizer,
cls_token_at_end=False,
cls_token="[CLS]",
cls_token_segment_id=1,
sep_token="[SEP]",
sep_token_extra=False,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
pad_token_label_id=-100,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
mode="train",
):
""" Loads a data file into a list of `InputBatch`s
`cls_token_at_end` define the location of the CLS token:
- False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
- True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
`cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
"""
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(tqdm(examples)):
if ex_index % 10000 == 0:
logger.info("Writing example %d of %d", ex_index, len(examples))
tokens = []
label_ids = []
for word, label in zip(example.words, example.labels):
word_tokens = tokenizer.tokenize(word)
tokens.extend(word_tokens)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
special_tokens_count = 3 if sep_token_extra else 2
if len(tokens) > max_seq_length - special_tokens_count:
tokens = tokens[: (max_seq_length - special_tokens_count)]
label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
        # used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
tokens = [cls_token] + tokens
label_ids = [pad_token_label_id] + label_ids
segment_ids = [cls_token_segment_id] + segment_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s", example.guid)
logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
features.append(
InputFeatures(input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids)
)
return features
def get_labels(path):
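    """Return the label list read from `path` (one label per line, with "O" prepended
    if missing); fall back to the CoNLL-2003 label set when no path is given.
    """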
if path:
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] | EXA-1-master | exa/models/unilm-master/adalm/finetune/utils_ner.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa)."""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import json
import numpy as np
import torch
from sklearn.metrics import matthews_corrcoef, f1_score
from sklearn.metrics import cohen_kappa_score, precision_score, recall_score, precision_recall_fscore_support
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from torch.nn import CrossEntropyLoss
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForTokenClassification,
BertTokenizer,
DistilBertConfig,
DistilBertForTokenClassification,
DistilBertTokenizer,
RobertaConfig,
RobertaForTokenClassification,
RobertaTokenizer,
XLMRobertaConfig,
XLMRobertaForTokenClassification,
XLMRobertaTokenizer
)
from transformers import AdamW, get_linear_schedule_with_warmup
from utils_ner import convert_examples_to_features, get_labels, read_examples_from_file
logger = logging.getLogger(__name__)
ALL_MODELS = sum(
(
tuple(conf.pretrained_config_archive_map.keys())
for conf in (BertConfig, RobertaConfig, DistilBertConfig, XLMRobertaConfig)
),
(),
)
MODEL_CLASSES = {
"bert": (BertConfig, BertForTokenClassification, BertTokenizer),
"roberta": (RobertaConfig, RobertaForTokenClassification, RobertaTokenizer),
"distilbert": (DistilBertConfig, DistilBertForTokenClassification, DistilBertTokenizer),
"xlmroberta": (XLMRobertaConfig, XLMRobertaForTokenClassification, XLMRobertaTokenizer),
}
TOKENIZER_ARGS = ["do_lower_case", "strip_accents", "keep_accents", "use_fast"]
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def get_f1(prec, rec):
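    """Harmonic mean of precision and recall, i.e. the per-class F1 score."""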
return 2*prec*rec/(prec+rec)
def token_f1(true, pred, labels):
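    """Token-level macro F1 over the non-"O" classes.

    Per-class precision/recall come from sklearn; the summed per-class F1 is
    divided by a hard-coded 3, which appears to assume exactly three non-"O"
    labels in this task.
    """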
print(true[:30])
print(pred[:30])
print(labels)
total_f1 = 0.0
    class_scores = zip(labels, precision_score(true, pred, labels=labels, average=None), recall_score(true, pred, labels=labels, average=None))
for label, prec, rec in class_scores:
print('Label: %s' %label)
if label != 'O':
total_f1 += get_f1(prec, rec)
print('\tf1 = %f' %get_f1(prec, rec))
print('\tprecision = %f' %prec)
print('\trecall = %f' %rec)
return total_f1/3
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
if args.warmup_ratio > 0:
args.warmup_steps = int(t_total*args.warmup_ratio)
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args) # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
if args.disable_tqdm:
epoch_iterator = train_dataloader
else:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'unilm', 'adapterbert'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
epoch_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(json.dumps({**logs, **{'step': global_step}}))
if args.max_steps > 0 and global_step > args.max_steps:
if not args.disable_tqdm:
epoch_iterator.close()
break
if args.local_rank in [-1, 0]:
logs = {}
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
                results, _eval_preds = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix='epoch-{}'.format(_ + 1))
for key, value in results.items():
eval_key = 'eval_{}'.format(key)
logs[eval_key] = value
if metric_for_best is None:
metric_for_best = key
if best_epoch is None or best_performance[metric_for_best] < results[metric_for_best]:
best_epoch = 'epoch-{}'.format(_ + 1)
best_performance = results
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{'step': global_step}}))
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'epoch-{}'.format(_ + 1))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, choosed by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
return global_step, tr_loss / global_step
def save_best_result(best_epoch, best_performance, output_dir):
best_performance["checkpoint"] = best_epoch
with open(os.path.join(output_dir, "best_performance.json"), mode="w") as writer:
writer.write(json.dumps(best_performance, indent=2))
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
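    """Evaluate `model` on the `mode` split: reports the average loss and the
    token-level macro F1 from token_f1(), writes the predictions to eval_out.txt
    in args.output_dir, and returns (results, preds_list).
    """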
eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation %s *****", prefix)
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "adapterbert"] else None
                ) # XLM and RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
out_labels = [i for item in out_label_list for i in item]
preds_labels = [i for item in preds_list for i in item]
results = {
"loss": eval_loss,
"f1": token_f1(true = out_labels,pred = preds_labels, labels = labels),
}
logger.info("***** Eval results %s *****", prefix)
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
output_file = os.path.join(args.output_dir, "eval_out.txt")
with open(output_file, "w+", encoding="utf-8") as f:
for line in tqdm(preds_list):
line = " ".join(line) + "\n"
f.write(line)
return results, preds_list
def test(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
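    """Predict on the test split: reports loss and token-level F1, writes the metrics
    to predict_results.txt and the predicted label sequences to predict.txt in
    args.output_dir, and returns (results, preds_list).
    """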
test_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="test")
args.test_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
test_sampler = SequentialSampler(test_dataset) if args.local_rank == -1 else DistributedSampler(test_dataset)
test_dataloader = DataLoader(test_dataset, sampler=test_sampler, batch_size=args.test_batch_size)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
logger.info("***** Running Prediction %s *****", prefix)
logger.info(" Num examples = %d", len(test_dataset))
logger.info(" Batch size = %d", args.test_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
for batch in tqdm(test_dataloader, desc="Prediction"):
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "adapterbert"] else None
                ) # XLM and RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
out_file = os.path.join(args.output_dir, "predict.txt")
out_labels = [i for item in out_label_list for i in item]
preds_labels = [i for item in preds_list for i in item]
results = {
"loss": eval_loss,
"f1": token_f1(true = out_labels,pred = preds_labels, labels = labels),
}
print(out_label_list[0])
print(preds_list[0])
logger.info("write results into {}".format(out_file))
output_eval_file = os.path.join(args.output_dir, "predict_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Predict results {} *****".format(prefix))
writer.write(json.dumps(results, indent=2))
logger.info("Result = %s" % json.dumps(results, indent=2))
with open(out_file, "w+", encoding="utf-8") as f:
for line in preds_list:
line = " ".join(line) + "\n"
f.write(line)
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
return results, preds_list
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode):
    if args.local_rank not in [-1, 0] and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}".format(
mode, list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length)
),
)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
examples = read_examples_from_file(args.data_dir, mode)
features = convert_examples_to_features(
examples,
labels,
args.max_seq_length,
tokenizer,
cls_token_at_end=bool(args.model_type in ["xlnet"]),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(args.model_type in ["roberta"]),
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
pad_on_left=bool(args.model_type in ["xlnet"]),
# pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
pad_token_label_id=pad_token_label_id,
mode=mode,
)
if args.local_rank in [-1, 0]:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
    if args.local_rank == 0 and mode == "train":
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
return dataset
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--model_type", default="unilm", type=str,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument('--disable_tqdm', action='store_true',
help='Disable the tqdm bar. ')
## Other parameters
parser.add_argument("--labels", default="", type=str,
help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.")
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Rul evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument(
"--keep_accents", action="store_const", const=True, help="Set this flag if model is trained with accents."
)
parser.add_argument(
"--strip_accents", action="store_const", const=True, help="Set this flag if model is trained without accents."
)
parser.add_argument("--use_fast", action="store_const", const=True, help="Set this flag to use fast tokenization.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_ratio", default=0.1, type=float,
help="Linear warmup over warmup_ratio.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--metric_for_choose_best_checkpoint', type=str, default=None,
help="Set the metric to choose the best checkpoint")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Prepare CONLL-2003 task
labels = get_labels(args.labels)
num_labels = len(labels)
# Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later
pad_token_label_id = CrossEntropyLoss().ignore_index
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
id2label={str(i): label for i, label in enumerate(labels)},
label2id={label: i for i, label in enumerate(labels)},
cache_dir=args.cache_dir if args.cache_dir else None,
)
tokenizer_args = {k: v for k, v in vars(args).items() if v is not None and k in TOKENIZER_ARGS}
logger.info("Tokenizer arguments: %s", tokenizer_args)
tokenizer_name = args.tokenizer_name if args.tokenizer_name else args.model_name_or_path
tokenizer = tokenizer_class.from_pretrained(
args.tokenizer_name if args.tokenizer_name else args.model_name_or_path,
cache_dir=args.cache_dir if args.cache_dir else None,
**tokenizer_args,
)
if not hasattr(config, 'need_pooler') or config.need_pooler is not True:
setattr(config, 'need_pooler', True)
model = model_class.from_pretrained(
args.model_name_or_path, config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train")
global_step, tr_loss = train(args, train_dataset, model, tokenizer, labels, pad_token_label_id)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
tokenizer.save_pretrained(args.output_dir)
# Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
# Create output directory if needed
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
# Good practice: save your training arguments together with the trained model
torch.save(args, os.path.join(args.output_dir, "training_args.bin"))
# Evaluation
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, **tokenizer_args)
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
for checkpoint in checkpoints:
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
checkpoint_config = config_class.from_pretrained(checkpoint)
model = model_class.from_pretrained(checkpoint, config=checkpoint_config)
model.to(args.device)
            result, _ = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix=prefix)
if metric_for_best is None:
metric_for_best = list(result.keys())[-1]
if best_epoch is None:
best_epoch = checkpoint
best_performance = result
else:
if best_performance[metric_for_best] < result[metric_for_best]:
best_performance = result
best_epoch = checkpoint
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, choosed by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
            checkpoint = best_epoch
            prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
            checkpoint_config = config_class.from_pretrained(checkpoint)
            model = model_class.from_pretrained(checkpoint, config=checkpoint_config)
            model.to(args.device)
            result, _ = test(args, model, tokenizer, labels, pad_token_label_id, mode="test", prefix=prefix)
if __name__ == "__main__":
main()
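# --- Illustrative usage (added comment; not part of the original script) ---
# A typical invocation for token-level PICO fine-tuning might look like the shell
# command below. All paths and the checkpoint name are placeholders, not files
# shipped with this repository:
#
#   python run_pico.py \
#       --data_dir ./data/pico \
#       --model_type unilm \
#       --model_name_or_path ./pretrained/unilm-base-cased \
#       --output_dir ./output/pico \
#       --do_train --do_eval --do_predict \
#       --per_gpu_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs 3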
| EXA-1-master | exa/models/unilm-master/adalm/finetune/run_pico.py |
EXA-1-master | exa/models/unilm-master/adalm/finetune/__init__.py |
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa)."""
from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import json
import time
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (WEIGHTS_NAME, BertConfig,
BertForSequenceClassification, BertTokenizer,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
XLMConfig, XLMForSequenceClassification,
XLMTokenizer, XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizer,
)
from transformers import AdamW, get_linear_schedule_with_warmup
from nlu_finetune.utils_for_glue import glue_compute_metrics as compute_metrics
from nlu_finetune.utils_for_glue import glue_output_modes as output_modes
from nlu_finetune.utils_for_glue import glue_processors as processors
from nlu_finetune.utils_for_glue import glue_convert_examples_to_features as convert_examples_to_features
logger = logging.getLogger(__name__)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig, XLNetConfig, XLMConfig,
RobertaConfig, DistilBertConfig)), ())
MODEL_CLASSES = {
'bert': (BertConfig, BertForSequenceClassification, BertTokenizer),
'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
'distilbert': (DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizer),
'albert': (AlbertConfig, AlbertForSequenceClassification, AlbertTokenizer),
'xlm-roberta': (XLMRobertaConfig, XLMRobertaForSequenceClassification, XLMRobertaTokenizer),
}
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def train(args, train_dataset, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=1)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
warmup_steps = t_total * args.warmup_ratio
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args) # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
if args.disable_tqdm:
epoch_iterator = train_dataloader
else:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'unilm', 'adapterbert'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(json.dumps({**logs, **{'step': global_step}}))
if args.max_steps > 0 and global_step > args.max_steps:
if not args.disable_tqdm:
epoch_iterator.close()
break
if args.local_rank in [-1, 0]:
logs = {}
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer, prefix='epoch-{}'.format(_ + 1))
for key, value in results.items():
eval_key = 'eval_{}'.format(key)
logs[eval_key] = value
if metric_for_best is None:
metric_for_best = key
if best_epoch is None or best_performance[metric_for_best] < results[metric_for_best]:
best_epoch = 'epoch-{}'.format(_ + 1)
best_performance = results
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{'step': global_step}}))
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'epoch-{}'.format(_ + 1))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, choosed by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
return global_step, tr_loss / global_step
def save_best_result(best_epoch, best_performance, output_dir):
best_performance["checkpoint"] = best_epoch
with open(os.path.join(output_dir, "best_performance.json"), mode="w") as writer:
writer.write(json.dumps(best_performance, indent=2))
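# Illustrative note (added comment): `save_best_result` writes a small JSON summary,
# e.g. something like {"acc": 0.87, "f1": 0.85, "checkpoint": "epoch-3"}, to
# <output_dir>/best_performance.json; the exact keys depend on the task metrics.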
def evaluate(args, model, tokenizer, prefix=""):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
cached_dev_file = args.cached_dev_file
if cached_dev_file is not None:
cached_dev_file = cached_dev_file + '_' + eval_task
eval_dataset = load_and_cache_examples(
args, eval_task, tokenizer, cached_features_file=cached_dev_file, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
if args.disable_tqdm:
epoch_iterator = eval_dataloader
else:
epoch_iterator = tqdm(eval_dataloader, desc="Evaluating")
for batch in epoch_iterator:
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'adapterbert'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
processor = processors[eval_task]()
        result = compute_metrics(eval_task, preds, out_label_ids, processor.get_labels()[1:])
results[eval_task] = result
eval_output_dir = os.path.join(eval_output_dir, prefix)
if not os.path.exists(eval_output_dir):
os.makedirs(eval_output_dir)
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
writer.write(json.dumps(result, indent=2))
logger.info("Result = %s" % json.dumps(result, indent=2))
return results
def load_and_cache_examples(args, task, tokenizer, cached_features_file=None, evaluate=False):
if args.local_rank not in [-1, 0] and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
processor = processors[task]()
output_mode = output_modes[task]
examples = None
if cached_features_file is None:
if args.disable_auto_cache and args.local_rank != -1:
logger.warning("Please cache the features in DDP mode !")
raise RuntimeError()
if not args.disable_auto_cache:
# Load data features from cache or dataset file
cached_features_file = os.path.join(args.data_dir, 'cached_{}_{}_{}_{}'.format(
'dev' if evaluate else 'train',
list(filter(None, args.model_name_or_path.split('/'))).pop(),
str(args.max_seq_length),
str(task)))
if cached_features_file is not None and os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("Creating features from dataset file at %s", args.data_dir)
label_list = processor.get_labels()
if task in ['mnli', 'mnli-mm'] and args.model_type in ['roberta', 'xlmroberta']:
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1], label_list[2] = label_list[2], label_list[1]
examples = processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir)
features = convert_examples_to_features(examples,
tokenizer,
label_list=label_list,
max_length=args.max_seq_length,
output_mode=output_mode,
pad_on_left=bool(args.model_type in ['xlnet']), # pad on the left for xlnet
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
pad_token_segment_id=4 if args.model_type in ['xlnet'] else 0,
)
if args.local_rank in [-1, 0] and cached_features_file is not None:
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
if args.local_rank == 0 and not evaluate:
torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
if output_mode == "classification":
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
elif output_mode == "regression":
all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--model_type", default="unilm", type=str,
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()))
parser.add_argument("--model_name_or_path", default=None, type=str, required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS))
parser.add_argument("--task_name", default=None, type=str, required=True,
help="The name of the task to train selected in the list: " + ", ".join(processors.keys()))
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--cached_train_file", default=None, type=str,
help="Path to cache the train set features. ")
parser.add_argument("--cached_dev_file", default=None, type=str,
help="Path to cache the dev set features. ")
parser.add_argument('--disable_auto_cache', action='store_true',
                        help='Disable automatic caching of the training/dev features.')
parser.add_argument('--disable_tqdm', action='store_true',
help='Disable the tqdm bar. ')
## Other parameters
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--sentencepieces_model_path", default=None, type=str,
help="File path to the sentencepieces model, will repleace the default tokenizer and --tokenizer_name. ")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=128, type=int,
help="The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--evaluate_during_training", action='store_true',
help="Rul evaluation during training at each logging step.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--num_train_epochs", default=3.0, type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--warmup_ratio", default=0.1, type=float,
help="Linear warmup over warmup_ratio.")
parser.add_argument('--logging_steps', type=int, default=50,
help="Log every X updates steps.")
parser.add_argument("--eval_all_checkpoints", action='store_true',
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument('--overwrite_output_dir', action='store_true',
help="Overwrite the content of the output directory")
parser.add_argument('--overwrite_cache', action='store_true',
help="Overwrite the cached training and evaluation sets")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--metric_for_choose_best_checkpoint', type=str, default=None,
help="Set the metric to choose the best checkpoint")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O1',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--server_ip', type=str, default='', help="For distant debugging.")
parser.add_argument('--server_port', type=str, default='', help="For distant debugging.")
args = parser.parse_args()
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.".format(args.output_dir))
# Setup distant debugging if needed
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
ptvsd.wait_for_attach()
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
# Set seed
set_seed(args)
# Prepare GLUE task
args.task_name = args.task_name.lower()
if args.task_name not in processors:
raise ValueError("Task not found: %s" % (args.task_name))
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
# Load pretrained model and tokenizer
if args.local_rank not in [-1, 0]:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
args.model_type = args.model_type.lower()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
config = config_class.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
num_labels=num_labels,
finetuning_task=args.task_name,
cache_dir=args.cache_dir if args.cache_dir else None)
tokenizer_name = args.tokenizer_name if args.tokenizer_name else args.model_name_or_path
tokenizer = tokenizer_class.from_pretrained(tokenizer_name,
do_lower_case=args.do_lower_case,
cache_dir=args.cache_dir if args.cache_dir else None)
if not hasattr(config, 'need_pooler') or config.need_pooler is not True:
setattr(config, 'need_pooler', True)
model = model_class.from_pretrained(
args.model_name_or_path, config=config,
cache_dir=args.cache_dir if args.cache_dir else None)
if args.local_rank == 0:
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
# Training
if args.do_train:
train_dataset = load_and_cache_examples(
args, args.task_name, tokenizer, cached_features_file=args.cached_train_file, evaluate=False)
global_step, tr_loss = train(args, train_dataset, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
tokenizer.save_pretrained(args.output_dir)
# Evaluation
if args.do_eval and args.local_rank in [-1, 0]:
tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case)
checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info("Evaluate the following checkpoints: %s", checkpoints)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
for checkpoint in checkpoints:
prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""
checkpoint_config = config_class.from_pretrained(checkpoint)
model = model_class.from_pretrained(checkpoint, config=checkpoint_config)
model.to(args.device)
result = evaluate(args, model, tokenizer, prefix=prefix)
if metric_for_best is None:
metric_for_best = list(list(result.values())[0].keys())[0]
if best_epoch is None:
best_epoch = checkpoint
best_performance = result
else:
for eval_task in result:
if best_performance[eval_task][metric_for_best] < result[eval_task][metric_for_best]:
best_performance[eval_task] = result[eval_task]
best_epoch = checkpoint
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, choosed by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
if __name__ == "__main__":
main()
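# --- Illustrative usage (added comment; not part of the original script) ---
# A hypothetical invocation for in-domain classification (e.g. the "chemprot" task
# registered in nlu_finetune.utils_for_glue) might look like the shell command below;
# the data directory and checkpoint path are placeholders:
#
#   python run_classifier.py \
#       --data_dir ./data/chemprot \
#       --model_type bert \
#       --model_name_or_path ./pretrained/adapted-bio-base \
#       --task_name chemprot \
#       --output_dir ./output/chemprot \
#       --do_train --do_eval --evaluate_during_training \
#       --max_seq_length 128 --per_gpu_train_batch_size 16 --learning_rate 3e-5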
| EXA-1-master | exa/models/unilm-master/adalm/finetune/run_classifier.py |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" GLUE processors and helpers """
import logging
import os
import csv
import sys
import copy
import json
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
from sklearn.preprocessing import MultiLabelBinarizer
logger = logging.getLogger(__name__)
class InputExample(object):
"""
A single training/test example for simple sequence classification.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
def __init__(self, guid, text_a, text_b=None, label=None):
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
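# Illustrative example (added comment): a sentence-pair example for a binary task
# could be built as
#   InputExample(guid="dev-1",
#                text_a="The cat sat on the mat.",
#                text_b="A cat is sitting on a mat.",
#                label="1")
# For single-sentence tasks, `text_b` is simply left as None.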
class InputFeatures(object):
"""
A single set of features of data.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens.
token_type_ids: Segment token indices to indicate first and second portions of the inputs.
label: Label corresponding to the input
"""
def __init__(self, input_ids, attention_mask=None, token_type_ids=None, label=None):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.label = label
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8-sig") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
if sys.version_info[0] == 2:
line = list(unicode(cell, 'utf-8') for cell in line)
lines.append(line)
return lines
@classmethod
def _read_json(cls, input_file):
with open(input_file, "r", encoding="utf-8-sig") as f:
lines = json.loads(f.read())
return lines
@classmethod
def _read_jsonl(cls, input_file):
with open(input_file, "r", encoding="utf-8-sig") as f:
lines = f.readlines()
return lines
def glue_convert_examples_to_features(examples, tokenizer,
max_length=512,
task=None,
label_list=None,
output_mode=None,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
mask_padding_with_zero=True):
"""
Loads a data file into a list of ``InputFeatures``
Args:
examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
tokenizer: Instance of a tokenizer that will tokenize the examples
max_length: Maximum example length
task: GLUE task
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
output_mode: String indicating the output mode. Either ``regression`` or ``classification``
pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
pad_token: Padding token
pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
actual values)
Returns:
If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
containing the task-specific features. If the input is a list of ``InputExamples``, will return
a list of task-specific ``InputFeatures`` which can be fed to the model.
"""
is_tf_dataset = False
if task is not None:
processor = glue_processors[task]()
if label_list is None:
label_list = processor.get_labels()
logger.info("Using label list %s for task %s" % (label_list, task))
if output_mode is None:
output_mode = glue_output_modes[task]
logger.info("Using output mode %s for task %s" % (output_mode, task))
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d" % (ex_index))
if is_tf_dataset:
example = processor.get_example_from_tensor_dict(example)
example = processor.tfds_map(example)
inputs = tokenizer.encode_plus(
example.text_a,
example.text_b,
add_special_tokens=True,
max_length=max_length,
)
input_ids = inputs["input_ids"]
if "token_type_ids" in inputs:
token_type_ids = inputs["token_type_ids"]
else:
token_type_ids = []
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
if len(token_type_ids) == 0:
padding_length = max_length
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
if len(token_type_ids) == 0:
padding_length = max_length
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(len(attention_mask), max_length)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(len(token_type_ids), max_length)
if output_mode == "classification":
label = label_map[example.label]
elif output_mode == "regression":
label = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_tokens: %s" % " ".join(tokenizer.convert_ids_to_tokens(input_ids)))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(
InputFeatures(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=label))
return features
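# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# `_demo_convert_mrpc` is a hypothetical helper showing how the conversion above is
# typically wired to a processor and tokenizer. It assumes the public
# "bert-base-uncased" checkpoint and an MRPC-style data directory are available.
def _demo_convert_mrpc(data_dir="glue_data/MRPC", max_length=128):
    from transformers import BertTokenizer
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    processor = MrpcProcessor()  # defined later in this module; resolved at call time
    examples = processor.get_dev_examples(data_dir)
    # Pad on the right with the tokenizer's [PAD] id; MRPC uses classification labels.
    return glue_convert_examples_to_features(
        examples,
        tokenizer,
        max_length=max_length,
        label_list=processor.get_labels(),
        output_mode="classification",
        pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],
    )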
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence1'].numpy().decode('utf-8'),
tensor_dict['sentence2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[3]
text_b = line[4]
label = line[0]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['premise'].numpy().decode('utf-8'),
tensor_dict['hypothesis'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")),
"test_matched")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[8]
text_b = line[9]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MnliMismatchedProcessor(MnliProcessor):
"""Processor for the MultiNLI Mismatched data set (GLUE version)."""
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")),
"dev_mismatched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_mismatched.tsv")),
"test_mismatched")
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence'].numpy().decode('utf-8'),
None,
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text_a = line[3]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class Sst2Processor(DataProcessor):
"""Processor for the SST-2 data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence'].numpy().decode('utf-8'),
None,
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = line[0]
label = line[1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class StsbProcessor(DataProcessor):
"""Processor for the STS-B data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence1'].numpy().decode('utf-8'),
tensor_dict['sentence2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return [None]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QqpProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['question1'].numpy().decode('utf-8'),
tensor_dict['question2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
try:
text_a = line[3]
text_b = line[4]
label = line[5]
except IndexError:
continue
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QnliProcessor(DataProcessor):
"""Processor for the QNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['question'].numpy().decode('utf-8'),
tensor_dict['sentence'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev_matched")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class RteProcessor(DataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence1'].numpy().decode('utf-8'),
tensor_dict['sentence2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WnliProcessor(DataProcessor):
"""Processor for the WNLI data set (GLUE version)."""
def get_example_from_tensor_dict(self, tensor_dict):
"""See base class."""
return InputExample(tensor_dict['idx'].numpy(),
tensor_dict['sentence1'].numpy().decode('utf-8'),
tensor_dict['sentence2'].numpy().decode('utf-8'),
str(tensor_dict['label'].numpy()))
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
text_b = line[2]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ChemProcessor(DataProcessor):
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["false","CPR:3", "CPR:4", "CPR:5", "CPR:6", "CPR:9"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, line[0])
text_a = line[1]
label = line[-1]
examples.append(
InputExample(guid=guid, text_a=text_a, label=label))
return examples
class ARCProcessor(DataProcessor):
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_jsonl(os.path.join(data_dir, "train.jsonl")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_jsonl(os.path.join(data_dir, "dev.jsonl")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_jsonl(os.path.join(data_dir, "test.jsonl")), "test")
def get_labels(self):
"""See base class."""
return ["CompareOrContrast", "Background", "Uses", "Motivation", "Extends", "Future"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
line = json.loads(line)
guid = "%s-%s" % (set_type, i)
text_a = line["text"]
label = line["label"]
examples.append(
InputExample(guid=guid, text_a=text_a, label=label))
return examples
class SCIProcessor(DataProcessor):
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_jsonl(os.path.join(data_dir, "train.jsonl")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_jsonl(os.path.join(data_dir, "dev.jsonl")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_jsonl(os.path.join(data_dir, "test.jsonl")), "test")
def get_labels(self):
"""See base class."""
return ["COMPARE","CONJUNCTION","FEATURE-OF","HYPONYM-OF","USED-FOR","EVALUATE-FOR","PART-OF"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
line = json.loads(line)
guid = "%s-%s" % (set_type, i)
text_a = line["text"]
label = line["label"]
examples.append(
InputExample(guid=guid, text_a=text_a, label=label))
return examples
glue_tasks_num_labels = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
"chemprot": 6,
"arc": 6,
"sci": 7,
}
glue_processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mnli-mm": MnliMismatchedProcessor,
"mrpc": MrpcProcessor,
"sst-2": Sst2Processor,
"sts-b": StsbProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"rte": RteProcessor,
"wnli": WnliProcessor,
"chemprot": ChemProcessor,
"arc": ARCProcessor,
"sci": SCIProcessor,
}
glue_output_modes = {
"cola": "classification",
"mnli": "classification",
"mnli-mm": "classification",
"mrpc": "classification",
"sst-2": "classification",
"sts-b": "regression",
"qqp": "classification",
"qnli": "classification",
"rte": "classification",
"wnli": "classification",
"chemprot": "classification",
"arc": "classification",
"sci": "classification",
}
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def acc_and_macro_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds,average="macro")
return {
"f1": f1,
"acc": acc,
"acc_and_f1": (acc + f1) / 2,
}
def acc_and_micro_f1(preds, labels, label_list):
acc = simple_accuracy(preds, labels)
print(label_list)
label_list = [str(i+1) for i in range(len(label_list))]
print(label_list)
mlb = MultiLabelBinarizer(classes = label_list)
labels = labels.tolist()
labels = [str(i) for i in labels]
print(labels[:20])
labels = mlb.fit_transform(labels)
preds = preds.tolist()
preds = [str(i) for i in preds]
print(preds[:20])
preds = mlb.fit_transform(preds)
f1 = f1_score(y_true=labels, y_pred=preds,average="micro")
return {
"f1": f1,
"acc": acc,
"f1_macro": f1_score(y_true=labels, y_pred=preds,average="macro"),
"acc_and_f1": (acc + f1) / 2,
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def glue_compute_metrics(task_name, preds, labels, label_list):
assert len(preds) == len(labels)
if task_name == "cola":
return {"mcc": matthews_corrcoef(labels, preds)}
elif task_name == "sst-2":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mrpc":
return acc_and_f1(preds, labels)
elif task_name == "sts-b":
return pearson_and_spearman(preds, labels)
elif task_name == "qqp":
return acc_and_f1(preds, labels)
elif task_name == "mnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mnli-mm":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "qnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "rte":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "wnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "chemprot":
return acc_and_micro_f1(preds, labels, label_list)
elif task_name == "arc" or task_name == "sci":
return acc_and_macro_f1(preds, labels)
else:
raise KeyError(task_name)
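# Illustrative usage sketch (not part of the original utilities; the task name,
# data directory and prediction arrays below are placeholders):
#
# processor = glue_processors["arc"]()
# label_list = processor.get_labels()
# dev_examples = processor.get_dev_examples("/path/to/arc")
# # ... run a classifier over dev_examples to obtain `preds` and `labels` arrays ...
# metrics = glue_compute_metrics("arc", preds, labels, label_list)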
| EXA-1-master | exa/models/unilm-master/adalm/finetune/utils_for_glue.py |
from __future__ import absolute_import
from __future__ import division
from numpy.core.fromnumeric import argsort
from text_encoder import SubwordTextEncoder
import tokenizer
import tempfile
import argparse
from transformers import BertTokenizer
import random
import math
import numpy as np
def merge_output_file_with_bert_vocab(output_filename, bert_vocab, temp_path):
writer = open(output_filename, 'w', encoding='utf-8')
_set = set()
with open(bert_vocab, 'r', encoding='utf-8') as reader:
for line in reader:
writer.write(line)
_set.add(line.strip())
print(temp_path)
with open(temp_path, 'r', encoding='utf-8') as reader:
for line in reader:
if line.strip() not in _set:
writer.write(line)
writer.close()
def build_target_size_vocab(token_counts, reserved_tokens, target_size):
min_val = 1
max_val = len(token_counts) // (target_size ** 0.5)
encoder = SubwordTextEncoder.build_to_target_size(target_size,token_counts,min_val, max_val, num_iterations=5,
reserved_tokens=reserved_tokens, max_subtoken_length=None)
fd, temp_vocab = tempfile.mkstemp()
encoder.store_to_file(temp_vocab, add_single_quotes=False)
return encoder, temp_vocab
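# Helper notes (added for clarity): compute_language_model scores a corpus under
# a unigram language model induced by a vocabulary. It tokenizes every document
# with a BertTokenizer built on `vocab_file`, estimates each token's relative
# frequency over the whole corpus, and returns the mean of the per-document sums
# of token log-probabilities (used below to decide when to stop growing the vocab).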
def compute_language_model(documents, vocab_file):
all_tokens = 0
tokenized_documents = []
bert_tokenizer = BertTokenizer(vocab_file ,do_lower_case = True)
words = bert_tokenizer.vocab
for word in words.keys():
words[word] = 0
for doc in documents:
tokens = bert_tokenizer.tokenize(doc)
all_tokens += len(tokens)
for token in tokens:
words[token] +=1
tokenized_documents.append(tokens)
for word in words.keys():
words[word] /= all_tokens
probs = []
for doc in tokenized_documents:
p = 0.0
for token in doc:
p += math.log(words[token])
probs.append(p)
return np.mean(probs)
def vocab_extend(corpus, raw_vocab, output_filename, interval=10000 , threshold = 0.01):
"""
@description : The function to get the incremental vocabulary for
@param :
@Returns :
"""
documents = []
for line in open(corpus, "r",encoding='utf-8'):
line = line.replace('\n','')
if len(line) < 5:
continue
documents.append(line)
print("docunments: "+str(len(documents)))
token_counts = tokenizer.corpus_token_counts(
corpus, corpus_max_lines = 4400000,
split_on_newlines = True, additional_chars="", do_lower_case=True)
lines = open(raw_vocab, 'r', encoding='utf-8').readlines()
lines = [s.strip() for s in lines if len(s) > 0]
reserved_tokens = lines
random.shuffle(documents)
origin_size = (len(reserved_tokens) // interval) * interval
pre_lm = compute_language_model(documents, raw_vocab)
print("origin_size: " + str(origin_size))
print("pre_lm: "+ str(pre_lm))
target_size = origin_size
while True:
target_size = target_size + interval
_, temp_vocab = build_target_size_vocab(token_counts, reserved_tokens, target_size)
now_lm = compute_language_model(documents, temp_vocab)
print('now_lm: '+ str(now_lm))
delta = (pre_lm - now_lm)/pre_lm
print('delta: ' + str(delta))
if delta <= threshold:
merge_output_file_with_bert_vocab(output_filename, raw_vocab, temp_vocab)
break
pre_lm = now_lm
#vocab_extend('cs_data.txt', 'vocab.txt', 'cs.vocab')
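# Rough usage sketch (file names are placeholders): either call vocab_extend()
# directly as in the commented line above, or run this script from the command
# line, e.g.
#
# python vocab_extend.py \
# --corpus domain_corpus.txt \
# --raw_vocab bert_vocab.txt \
# --output_file extended_vocab.txt \
# --interval 10000 \
# --threshold 0.01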
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--corpus", default=None, type=str, required=True,
help="the file of the corpus to train the vocabulary.")
parser.add_argument("--raw_vocab", default=None, type=str, required=True,
help="the path to the file of the origin vocabulary")
parser.add_argument("--output_file", default=None, type=str, required=True,
help="the output file of the final vocabulary")
parser.add_argument('--interval', type=int, default=10000,
help="The interval of the vocabulary size.")
parser.add_argument('--threshold', type=float, default=0.01,
help="The final threshold of the relative increase in P(D); extension stops once the increase falls to this value or below.")
args = parser.parse_args()
return args
def main():
args = get_args()
vocab_extend(args.corpus, args.raw_vocab, args.output_file, args.interval, args.threshold)
if __name__ == '__main__':
main()
| EXA-1-master | exa/models/unilm-master/adalm/incr_bpe/vocab_extend.py |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Encoders for text data.
* TextEncoder: base class
* SubwordTextEncoder: invertible
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from itertools import chain
import re
import time
import logging
import six
from six.moves import range # pylint: disable=redefined-builtin
# from tensor2tensor.data_generators import tokenizer
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
# Reserved tokens for things like padding and EOS symbols.
PAD = "[PAD]"
EOS = "[EOS]"
UNK = "[UNK]"
CLS = "[CLS]"
SEP = "[SEP]"
MASK = "[MASK]"
RESERVED_TOKENS = [PAD, EOS, UNK, CLS, SEP, MASK]
NUM_RESERVED_TOKENS = len(RESERVED_TOKENS)
PAD_ID = RESERVED_TOKENS.index(PAD) # Normally 0
EOS_ID = RESERVED_TOKENS.index(EOS) # Normally 1
if six.PY2:
RESERVED_TOKENS_BYTES = RESERVED_TOKENS
else:
RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")]
# Regular expression for unescaping token strings.
# '\u' is converted to '_'
# '\\' is converted to '\'
# '\213;' is converted to unichr(213)
_UNESCAPE_REGEX = re.compile(r"\\u|\\\\|\\([0-9]+);")
_ESCAPE_CHARS = set(u"\\_u;0123456789")
_SPECIAL_CHARS = set(u"!\"\'#$%&*()`+,-./:;<=>?@[]^_{}~|")
# Unicode utility functions that work with Python 2 and 3
def native_to_unicode(s):
if is_unicode(s):
return s
try:
return to_unicode(s)
except UnicodeDecodeError:
res = to_unicode(s, ignore_errors=True)
logger.info("Ignoring Unicode error, outputting: %s" % res)
return res
def unicode_to_native(s):
if six.PY2:
return s.encode("utf-8") if is_unicode(s) else s
else:
return s
def is_unicode(s):
return isinstance(s, six.text_type)
def to_unicode(s, ignore_errors=False):
if is_unicode(s):
return s
error_mode = "ignore" if ignore_errors else "strict"
return s.decode("utf-8", errors=error_mode)
# def to_unicode_ignore_errors(s):
# return to_unicode(s, ignore_errors=True)
# def to_unicode_utf8(s):
# return unicode(s, "utf-8") if six.PY2 else s.decode("utf-8")
# def strip_ids(ids, ids_to_strip):
# """Strip ids_to_strip from the end ids."""
# ids = list(ids)
# while ids and ids[-1] in ids_to_strip:
# ids.pop()
# return ids
class TextEncoder(object):
"""Base class for converting from ints to/from human readable strings."""
def __init__(self, num_reserved_ids=NUM_RESERVED_TOKENS):
self._num_reserved_ids = num_reserved_ids
@property
def num_reserved_ids(self):
return self._num_reserved_ids
# def encode(self, s):
# """Transform a human-readable string into a sequence of int ids.
#
# The ids should be in the range [num_reserved_ids, vocab_size). Ids [0,
# num_reserved_ids) are reserved.
#
# EOS is not appended.
#
# Args:
# s: human-readable string to be converted.
#
# Returns:
# ids: list of integers
# """
# return [int(w) + self._num_reserved_ids for w in s.split()]
#
# def decode(self, ids, strip_extraneous=False):
# """Transform a sequence of int ids into a human-readable string.
#
# EOS is not expected in ids.
#
# Args:
# ids: list of integers to be converted.
# strip_extraneous: bool, whether to strip off extraneous tokens
# (EOS and PAD).
#
# Returns:
# s: human-readable string.
# """
# if strip_extraneous:
# ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
# return " ".join(self.decode_list(ids))
#
# def decode_list(self, ids):
# """Transform a sequence of int ids into a their string versions.
#
# This method supports transforming individual input/output ids to their
# string versions so that sequence to/from text conversions can be visualized
# in a human readable format.
#
# Args:
# ids: list of integers to be converted.
#
# Returns:
# strs: list of human-readable string.
# """
# decoded_ids = []
# for id_ in ids:
# if 0 <= id_ < self._num_reserved_ids:
# decoded_ids.append(RESERVED_TOKENS[int(id_)])
# else:
# decoded_ids.append(id_ - self._num_reserved_ids)
# return [str(d) for d in decoded_ids]
@property
def vocab_size(self):
raise NotImplementedError()
def _escape_token(token, alphabet):
"""Escape away underscores and OOV characters and append '_'.
This allows the token to be expressed as the concatenation of a list
of subtokens from the vocabulary. The underscore acts as a sentinel
which allows us to invertibly concatenate multiple such lists.
Args:
token: A unicode string to be escaped.
alphabet: A set of all characters in the vocabulary's alphabet.
Returns:
escaped_token: An escaped unicode string.
Raises:
ValueError: If the provided token is not unicode.
"""
if not isinstance(token, six.text_type):
raise ValueError("Expected string type for token, got %s" % type(token))
token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
return u"".join(ret) + "_"
def _my_escape_token(token, alphabet):
if not isinstance(token, six.text_type):
raise ValueError("Expected string type for token, got %s" % type(token))
token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
return "_" + u"".join(ret)
# def _unescape_token(escaped_token):
# """Inverse of _escape_token().
#
# Args:
# escaped_token: a unicode string
#
# Returns:
# token: a unicode string
# """
#
# def match(m):
# if m.group(1) is None:
# return u"_" if m.group(0) == u"\\u" else u"\\"
#
# try:
# return six.unichr(int(m.group(1)))
# except (ValueError, OverflowError) as _:
# return u"\u3013" # Unicode for undefined character.
#
# trimmed = escaped_token[:-1] if escaped_token.endswith("_") else escaped_token
# return _UNESCAPE_REGEX.sub(match, trimmed)
class SubwordTextEncoder(TextEncoder):
"""Class for invertibly encoding text using a limited vocabulary.
Invertibly encodes a native string as a sequence of subtokens from a limited
vocabulary.
A SubwordTextEncoder is built from a corpus (so it is tailored to the text in
the corpus), and stored to a file. See text_encoder_build_subword.py.
It can then be loaded and used to encode/decode any text.
Encoding has four phases:
1. Tokenize into a list of tokens. Each token is a unicode string of either
all alphanumeric characters or all non-alphanumeric characters. We drop
tokens consisting of a single space that are between two alphanumeric
tokens.
2. Escape each token. This escapes away special and out-of-vocabulary
characters, and makes sure that each token ends with an underscore, and
has no other underscores.
3. Represent each escaped token as the concatenation of a list of subtokens
from the limited vocabulary. Subtoken selection is done greedily from
beginning to end. That is, we construct the list in order, always picking
the longest subtoken in our vocabulary that matches a prefix of the
remaining portion of the encoded token.
4. Concatenate these lists. This concatenation is invertible due to the
fact that the trailing underscores indicate when one list is finished.
"""
def __init__(self, filename=None):
"""Initialize and read from a file, if provided.
Args:
filename: filename from which to read vocab. If None, do not load a
vocab
"""
self._alphabet = set()
# self.filename = filename
# if filename is not None:
# self._load_from_file(filename)
super(SubwordTextEncoder, self).__init__()
# def encode(self, s):
# """Converts a native string to a list of subtoken ids.
#
# Args:
# s: a native string.
# Returns:
# a list of integers in the range [0, vocab_size)
# """
# return self._tokens_to_subtoken_ids(
# tokenizer.encode(native_to_unicode(s)))
#
# def encode_without_tokenizing(self, token_text):
# """Converts string to list of subtoken ids without calling tokenizer.
#
# This treats `token_text` as a single token and directly converts it
# to subtoken ids. This may be useful when the default tokenizer doesn't
# do what we want (e.g., when encoding text with tokens composed of lots of
# nonalphanumeric characters). It is then up to the caller to make sure that
# raw text is consistently converted into tokens. Only use this if you are
# sure that `encode` doesn't suit your needs.
#
# Args:
# token_text: A native string representation of a single token.
# Returns:
# A list of subword token ids; i.e., integers in the range [0, vocab_size).
# """
# return self._tokens_to_subtoken_ids([native_to_unicode(token_text)])
# def decode(self, ids, strip_extraneous=False):
# """Converts a sequence of subtoken ids to a native string.
#
# Args:
# ids: a list of integers in the range [0, vocab_size)
# strip_extraneous: bool, whether to strip off extraneous tokens
# (EOS and PAD).
#
# Returns:
# a native string
# """
# if strip_extraneous:
# ids = strip_ids(ids, list(range(self._num_reserved_ids or 0)))
# return unicode_to_native(
# tokenizer.decode(self._subtoken_ids_to_tokens(ids)))
# def decode_list(self, ids):
# return [self._subtoken_id_to_subtoken_string(s) for s in ids]
@property
def vocab_size(self):
"""The subtoken vocabulary size."""
return len(self._all_subtoken_strings)
# def _tokens_to_subtoken_ids(self, tokens):
# """Converts a list of tokens to a list of subtoken ids.
#
# Args:
# tokens: a list of strings.
# Returns:
# a list of integers in the range [0, vocab_size)
# """
# ret = []
# for token in tokens:
# ret.extend(self._token_to_subtoken_ids(token))
# return ret
# def _token_to_subtoken_ids(self, token):
# """Converts token to a list of subtoken ids.
#
# Args:
# token: a string.
# Returns:
# a list of integers in the range [0, vocab_size)
# """
# cache_location = hash(token) % self._cache_size
# cache_key, cache_value = self._cache[cache_location]
# if cache_key == token:
# return cache_value
# ret = self._escaped_token_to_subtoken_ids(
# _escape_token(token, self._alphabet))
# self._cache[cache_location] = (token, ret)
# return ret
# def _subtoken_ids_to_tokens(self, subtokens):
# """Converts a list of subtoken ids to a list of tokens.
#
# Args:
# subtokens: a list of integers in the range [0, vocab_size)
# Returns:
# a list of strings.
# """
# concatenated = "".join(
# [self._subtoken_id_to_subtoken_string(s) for s in subtokens])
# split = concatenated.split("_")
# ret = []
# for t in split:
# if t:
# unescaped = _unescape_token(t + "_")
# if unescaped:
# ret.append(unescaped)
# return ret
# def _subtoken_id_to_subtoken_string(self, subtoken):
# """Converts a subtoken integer ID to a subtoken string."""
# if 0 <= subtoken < self.vocab_size:
# return self._all_subtoken_strings[subtoken]
# return u""
def _escaped_token_to_subtoken_strings(self, escaped_token):
"""Converts an escaped token string to a list of subtoken strings.
Args:
escaped_token: An escaped token as a unicode string.
Returns:
A list of subtokens as unicode strings.
"""
# NOTE: This algorithm is greedy; it won't necessarily produce the "best"
# list of subtokens.
ret = []
start = 0
token_len = len(escaped_token)
while start < token_len:
for end in range(
min(token_len, start + self._max_subtoken_len), start, -1):
subtoken = escaped_token[start:end]
if subtoken in self._subtoken_string_to_id:
ret.append(subtoken)
start = end
break
else: # Did not break
# If there is no possible encoding of the escaped token then one of the
# characters in the token is not in the alphabet. This should be
# impossible and would be indicative of a bug.
assert False, "Token substring not found in subtoken vocabulary."
return ret
# def _escaped_token_to_subtoken_ids(self, escaped_token):
# """Converts an escaped token string to a list of subtoken IDs.
#
# Args:
# escaped_token: An escaped token as a unicode string.
# Returns:
# A list of subtoken IDs as integers.
# """
# return [
# self._subtoken_string_to_id[subtoken]
# for subtoken in self._escaped_token_to_subtoken_strings(escaped_token)
# ]
# @classmethod
# def build_from_generator(cls,
# generator,
# target_size,
# max_subtoken_length=None,
# reserved_tokens=None):
# """Builds a SubwordTextEncoder from the generated text.
#
# Args:
# generator: yields text.
# target_size: int, approximate vocabulary size to create.
# max_subtoken_length: Maximum length of a subtoken. If this is not set,
# then the runtime and memory use of creating the vocab is quadratic in
# the length of the longest token. If this is set, then it is instead
# O(max_subtoken_length * length of longest token).
# reserved_tokens: List of reserved tokens. The global variable
# `RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
# argument is `None`, it will use `RESERVED_TOKENS`.
#
# Returns:
# SubwordTextEncoder with `vocab_size` approximately `target_size`.
# """
# token_counts = collections.defaultdict(int)
# for item in generator:
# for tok in tokenizer.encode(native_to_unicode(item)):
# token_counts[tok] += 1
# encoder = cls.build_to_target_size(
# target_size, token_counts, 1, 1e3,
# max_subtoken_length=max_subtoken_length,
# reserved_tokens=reserved_tokens)
# return encoder
#
@classmethod
def build_to_target_size(cls,
target_size,
token_counts,
min_val,
max_val,
max_subtoken_length=None,
reserved_tokens=None,
num_iterations=4):
"""Builds a SubwordTextEncoder that has `vocab_size` near `target_size`.
Uses simple recursive binary search to find a minimum token count that most
closely matches the `target_size`.
Args:
target_size: Desired vocab_size to approximate.
token_counts: A dictionary of token counts, mapping string to int.
min_val: An integer; lower bound for the minimum token count.
max_val: An integer; upper bound for the minimum token count.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
num_iterations: An integer; how many iterations of refinement.
Returns:
A SubwordTextEncoder instance.
Raises:
ValueError: If `min_val` is greater than `max_val`.
"""
if min_val > max_val:
raise ValueError("Lower bound for the minimum token count "
"is greater than the upper bound.")
if target_size < 1:
raise ValueError("Target size must be positive.")
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
def bisect(min_val, max_val):
"""Bisection to find the right size."""
present_count = (max_val + min_val) // 2
logger.info("Trying min_count %d" % present_count)
subtokenizer = cls()
subtokenizer.build_from_token_counts(
token_counts, present_count, num_iterations,
max_subtoken_length=max_subtoken_length,
reserved_tokens=reserved_tokens)
# Being within 1% of the target size is ok.
is_ok = abs(subtokenizer.vocab_size - target_size) * 100 < target_size
# If min_val == max_val, we can't do any better than this.
if is_ok or min_val >= max_val or present_count < 2:
return subtokenizer
if subtokenizer.vocab_size > target_size:
other_subtokenizer = bisect(present_count + 1, max_val)
else:
other_subtokenizer = bisect(min_val, present_count - 1)
if other_subtokenizer is None:
return subtokenizer
if (abs(other_subtokenizer.vocab_size - target_size) <
abs(subtokenizer.vocab_size - target_size)):
return other_subtokenizer
return subtokenizer
return bisect(min_val, max_val)
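# Usage sketch (mirrors how subword_builder.py and vocab_extend.py in this repo
# drive it; the token counts and sizes are illustrative):
#
# token_counts = {u"transformer": 120, u"adapter": 80}
# encoder = SubwordTextEncoder.build_to_target_size(
# 30000, token_counts, min_val=1, max_val=1000,
# num_iterations=5, reserved_tokens=None)
# encoder.store_to_file("vocab.txt", add_single_quotes=False)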
def build_from_token_counts(self,
token_counts,
min_count,
num_iterations=4,
reserved_tokens=None,
max_subtoken_length=None):
"""Train a SubwordTextEncoder based on a dictionary of word counts.
Args:
token_counts: a dictionary of Unicode strings to int.
min_count: an integer - discard subtokens with lower counts.
num_iterations: an integer. how many iterations of refinement.
reserved_tokens: List of reserved tokens. The global variable
`RESERVED_TOKENS` must be a prefix of `reserved_tokens`. If this
argument is `None`, it will use `RESERVED_TOKENS`.
max_subtoken_length: Maximum length of a subtoken. If this is not set,
then the runtime and memory use of creating the vocab is quadratic in
the length of the longest token. If this is set, then it is instead
O(max_subtoken_length * length of longest token).
Raises:
ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
is not clear what the space is being reserved for, or when it will be
filled in.
"""
# import pudb; pu.db
if reserved_tokens is None:
reserved_tokens = RESERVED_TOKENS
else:
# There is not complete freedom in replacing RESERVED_TOKENS.
new_reserved_tokens = RESERVED_TOKENS
for token in reserved_tokens:
if token in new_reserved_tokens:
continue
new_reserved_tokens.append(token)
reserved_tokens = new_reserved_tokens
for default, proposed in zip(RESERVED_TOKENS, reserved_tokens):
if default != proposed:
raise ValueError("RESERVED_TOKENS must be a prefix of "
"reserved_tokens.")
start_time = time.time()
#import pudb; pu.db
# Initialize the alphabet. Note, this must include reserved tokens or it can
# result in encoding failures. Remove RESERVED_TOKENS.
alphabet_tokens = chain(six.iterkeys(token_counts),
[native_to_unicode(t) for t in reserved_tokens[len(RESERVED_TOKENS):]])
# all alphabets in tokens
self._init_alphabet_from_tokens(alphabet_tokens)
# Bootstrap the initial list of subtokens with the characters from the
# alphabet plus the escaping characters.
self._init_subtokens_from_list(list(self._alphabet),
reserved_tokens=reserved_tokens)
# We build iteratively. On each iteration, we segment all the words,
# then count the resulting potential subtokens, keeping the ones
# with high enough counts for our new vocabulary.
if min_count < 1:
min_count = 1
for i in range(num_iterations):
#logger.info("Iteration {0}".format(i))
# Collect all substrings of the encoded token that break along current
# subtoken boundaries.
subtoken_counts = collections.defaultdict(int)
for token, count in six.iteritems(token_counts):
iter_start_time = time.time()
# escaped_token = _escape_token(token, self._alphabet) # added "_" at the end
escaped_token = _my_escape_token(token, self._alphabet)
subtokens = self._escaped_token_to_subtoken_strings(escaped_token)
# print(escaped_token)
# print(subtokens)
# excaped_token '_1234' -> subtoknes ['_12', '34'] (ex)
# '_1234':100 -> '_', '_1', '_12', '_123', '_1234','3', '34' :+= 100,
start = 0
for subtoken in subtokens:
last_position = len(escaped_token) + 1
if max_subtoken_length is not None:
last_position = min(last_position, start + max_subtoken_length)
for end in range(start + 1, last_position):
new_subtoken = escaped_token[start:end]
subtoken_counts[new_subtoken] += count
start += len(subtoken)
iter_time_secs = time.time() - iter_start_time
if iter_time_secs > 0.1:
logger.info(u"Processing token [{0}] took {1} seconds, consider "
"setting Text2TextProblem.max_subtoken_length to a "
"smaller value.".format(token, iter_time_secs))
# print(len(subtoken_counts))
# Array of sets of candidate subtoken strings, by length.
len_to_subtoken_strings = []
for subtoken_string, count in six.iteritems(subtoken_counts):
lsub = len(subtoken_string)
if count >= min_count:
while len(len_to_subtoken_strings) <= lsub:
len_to_subtoken_strings.append(set())
len_to_subtoken_strings[lsub].add(subtoken_string)
# Consider the candidates longest to shortest, so that if we accept
# a longer subtoken string, we can decrement the counts of its prefixes.
new_subtoken_strings_with_count = []
for lsub in range(len(len_to_subtoken_strings) - 1, 0, -1):
subtoken_strings = len_to_subtoken_strings[lsub]
for subtoken_string in subtoken_strings:
count = subtoken_counts[subtoken_string]
if count >= min_count:
# Exclude alphabet tokens here, as they must be included later,
# explicitly, regardless of count.
if subtoken_string not in self._alphabet:
new_subtoken_strings_with_count.append((count, subtoken_string))
for l in range(1, lsub):
subtoken_counts[subtoken_string[:l]] -= count
# Include the alphabet explicitly to guarantee all strings are encodable.
new_subtoken_strings_with_count.extend((subtoken_counts.get(a, 0), a)
for a in self._alphabet)
new_subtoken_strings_with_count.sort(reverse=True)
# Reinitialize to the candidate vocabulary.
new_subtoken_strings = [subtoken for _, subtoken in new_subtoken_strings_with_count]
if reserved_tokens:
# escaped_reserved_tokens = [
# _escape_token(native_to_unicode(t), self._alphabet)
# for t in reserved_tokens
# ]
# new_subtoken_strings = escaped_reserved_tokens + new_subtoken_strings
new_subtoken_strings = reserved_tokens + new_subtoken_strings
new_subtoken_strings = list(set(new_subtoken_strings))
self._init_subtokens_from_list(new_subtoken_strings)
#logger.info("vocab_size = %d" % self.vocab_size)
# print("vocab_size = %d" % self.vocab_size)
# print(self.vocab_size)
self.subtokens_with_counts = new_subtoken_strings_with_count
# Frequency of "_" is high.
# So remove from current position and add to the last.
new_subtoken_strings.remove("_")
new_subtoken_strings.insert(len(new_subtoken_strings), "_")
oov_list = []
for idx, subtoken in enumerate(new_subtoken_strings):
if subtoken.startswith("_") and subtoken != "_":
new_subtoken_strings[idx] = subtoken[1:]
elif subtoken[0] in self._alphabet and subtoken not in reserved_tokens:
new_subtoken_strings[idx] = "##" + subtoken
else:
oov_list.append(subtoken)
new_subtoken_strings.extend(char for char in self._alphabet
if char not in new_subtoken_strings)
# print(new_subtoken_strings)
# print(oov_list)
new_subtoken_strings = list(set(new_subtoken_strings))
self._init_subtokens_from_list(new_subtoken_strings)
#logger.info("vocab_size = %d" % self.vocab_size)
logger.info("total vocab size : {}, {} seconds elapsed ".format(self.vocab_size, time.time() - start_time))
# @property
# def all_subtoken_strings(self):
# return tuple(self._all_subtoken_strings)
#
# def dump(self):
# """Debugging dump of the current subtoken vocabulary."""
# subtoken_strings = [(i, s)
# for s, i in six.iteritems(self._subtoken_string_to_id)]
# print(u", ".join(u"{0} : '{1}'".format(i, s)
# for i, s in sorted(subtoken_strings)))
def _init_subtokens_from_list(self, subtoken_strings, reserved_tokens=None):
"""Initialize token information from a list of subtoken strings.
Args:
subtoken_strings: a list of subtokens
reserved_tokens: List of reserved tokens. We must have `reserved_tokens`
as None or the empty list, or else the global variable `RESERVED_TOKENS`
must be a prefix of `reserved_tokens`.
Raises:
ValueError: if reserved is not 0 or len(RESERVED_TOKENS). In this case, it
is not clear what the space is being reserved for, or when it will be
filled in.
"""
if reserved_tokens is None:
reserved_tokens = []
if reserved_tokens:
self._all_subtoken_strings = reserved_tokens + subtoken_strings
else:
self._all_subtoken_strings = subtoken_strings
# we remember the maximum length of any subtoken to avoid having to
# check arbitrarily long strings.
self._max_subtoken_len = max([len(s) for s in subtoken_strings])
self._subtoken_string_to_id = {
s: i + len(reserved_tokens)
for i, s in enumerate(subtoken_strings) if s
}
# Initialize the cache to empty.
self._cache_size = 2 ** 20
self._cache = [(None, None)] * self._cache_size
def _init_alphabet_from_tokens(self, tokens):
"""Initialize alphabet from an iterable of token or subtoken strings."""
# Include all characters from all tokens in the alphabet to guarantee that
# any token can be encoded. Additionally, include all escaping characters.
self._alphabet = {c for token in tokens for c in token}
self._alphabet |= _ESCAPE_CHARS
self._alphabet |= _SPECIAL_CHARS
# def _load_from_file_object(self, f):
# """Load from a file object.
#
# Args:
# f: File object to load vocabulary from
# """
# subtoken_strings = []
# for line in f:
# s = line.strip()
# # Some vocab files wrap words in single quotes, but others don't
# if ((s.startswith("'") and s.endswith("'")) or
# (s.startswith("\"") and s.endswith("\""))):
# s = s[1:-1]
# subtoken_strings.append(native_to_unicode(s))
# self._init_subtokens_from_list(subtoken_strings)
# self._init_alphabet_from_tokens(subtoken_strings)
#
# def _load_from_file(self, filename):
# """Load from a vocab file."""
# if not tf.gfile.Exists(filename):
# raise ValueError("File %s not found" % filename)
# with tf.gfile.Open(filename) as f:
# self._load_from_file_object(f)
def store_to_file(self, filename, add_single_quotes=True):
#with tf.gfile.Open(filename, "w") as f:
with open(filename, "w") as f:
for subtoken_string in self._all_subtoken_strings:
if add_single_quotes:
f.write("'" + unicode_to_native(subtoken_string) + "'\n")
else:
f.write(unicode_to_native(subtoken_string) + "\n")
def store_to_file_with_counts(self, filename):
# with tf.gfile.Open(filename, "w") as f:
with open(filename, "w") as f:
for subtoken_string, count in self.subtokens_with_counts:
f.write(unicode_to_native(subtoken_string + "\t" + str(count)) + "\n")
| EXA-1-master | exa/models/unilm-master/adalm/incr_bpe/text_encoder.py |
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple invertible tokenizer.
Converts from a unicode string to a list of tokens
(represented as Unicode strings).
This tokenizer has the following desirable properties:
- It is invertible.
- Alphanumeric characters are broken away from non-alphanumeric characters.
- A single space between words does not produce an extra token.
- The full Unicode punctuation and separator set is recognized.
The tokenization algorithm is as follows:
1. Split the text into a list of tokens, splitting at every boundary of an
alphanumeric character and a non-alphanumeric character. This produces
a list which alternates between "alphanumeric tokens"
(strings of alphanumeric characters) and "non-alphanumeric tokens"
(strings of non-alphanumeric characters).
2. Remove every token consisting of a single space, unless it is
the very first or very last token in the list. These tokens are now
implied by the fact that there are two adjacent alphanumeric tokens.
e.g. u"Dude - that's so cool."
-> [u"Dude", u" - ", u"that", u"'", u"s", u"so", u"cool", u"."]
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import sys
import unicodedata
import six
import logging
from six.moves import range # pylint: disable=redefined-builtin
# from tensor2tensor.utils import mlperf_log
import time
import glob
# Conversion between Unicode and UTF-8, if required (on Python2)
_native_to_unicode = (lambda s: s.decode("utf-8")) if six.PY2 else (lambda s: s)
logger = logging.getLogger(__name__)
# This set contains all letter and number characters.
_ALPHANUMERIC_CHAR_SET = set(
six.unichr(i) for i in range(sys.maxunicode)
if (unicodedata.category(six.unichr(i)).startswith("L") or
unicodedata.category(six.unichr(i)).startswith("N") or
unicodedata.category(six.unichr(i)).startswith("P")))
# unicodedata.category(six.unichr(i)).startswith("S")
def encode(text):
"""Encode a unicode string as a list of tokens.
Args:
text: a unicode string
Returns:
a list of tokens as Unicode strings
"""
if not text:
return []
ret = []
token_start = 0
# Classify each character in the input string
is_alnum = [c in _ALPHANUMERIC_CHAR_SET for c in text]
add_remaining = False
for pos in range(1, len(text)):
add_remaining = False
if is_alnum[pos] != is_alnum[pos - 1]:
if not is_alnum[pos]:
token = text[token_start:pos]
if token != u" " or token_start == 0:
add_remaining = False
ret.append(token)
else:
add_remaining = True
token_start = pos
final_token = text[token_start:] if text[-1] in _ALPHANUMERIC_CHAR_SET else text[token_start:-1]
if add_remaining:
ret.append(final_token)
# split on punctuation
final_tokens = []
for token in ret:
splitted_token = _run_split_on_punc(token)
final_tokens.extend(splitted_token)
return final_tokens
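# Worked example for this modified tokenizer (output shown is approximate and
# added for illustration): because punctuation is also split into standalone
# tokens here, encode(u"Dude - that's so cool.") yields roughly
# [u"Dude", u"-", u"that", u"'", u"s", u"so", u"cool", u"."].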
def _run_split_on_punc(text, never_split=None):
"""Splits punctuation on a piece of text."""
if never_split is not None and text in never_split:
return [text]
chars = list(text)
i = 0
start_new_word = True
output = []
while i < len(chars):
char = chars[i]
if _is_punctuation(char):
output.append([char])
start_new_word = True
else:
if start_new_word:
output.append([])
start_new_word = False
output[-1].append(char)
i += 1
return ["".join(x) for x in output]
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def decode(tokens):
"""Decode a list of tokens to a unicode string.
Args:
tokens: a list of Unicode strings
Returns:
a unicode string
"""
token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
ret = []
for i, token in enumerate(tokens):
if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
ret.append(u" ")
ret.append(token)
return "".join(ret)
def _read_filepattern(filepattern, max_lines=None, split_on_newlines=True, do_lower_case=False):
"""Reads files matching a wildcard pattern, yielding the contents.
Args:
filepattern: A wildcard pattern matching one or more files.
max_lines: If set, stop reading after reading this many lines.
split_on_newlines: A boolean. If true, then split files by lines and strip
leading and trailing whitespace from each line. Otherwise, treat each
file as a single string.
do_lower_case: A boolean. If true, lowercase the text before yielding it.
Yields:
The contents of the files as lines, if split_on_newlines is True, or
the entire contents of each file if False.
"""
filenames = sorted(glob.glob(filepattern))
print(filenames, 'do lower case:', do_lower_case)
lines_read = 0
for filename in filenames:
start = time.time()
with open(filename) as f:
if split_on_newlines:
for line in f:
if do_lower_case:
line = line.lower()
yield line.strip()
lines_read += 1
if max_lines and lines_read >= max_lines:
return
if lines_read % 100000 == 0:
print("read", lines_read, "lines,", time.time() - start, "secs elapsed")
else:
if max_lines:
doc = []
for line in f:
if do_lower_case:
line = line.lower()
doc.append(line)
lines_read += 1
if max_lines and lines_read >= max_lines:
yield "".join(doc)
return
yield "".join(doc)
else:
yield f.read()
print(time.time() - start, "for reading read file :", filename)
def corpus_token_counts(
text_filepattern, corpus_max_lines, split_on_newlines=True, additional_chars="", do_lower_case=False):
"""Read the corpus and compute a dictionary of token counts.
Args:
text_filepattern: A pattern matching one or more files.
corpus_max_lines: An integer; maximum total lines to read.
split_on_newlines: A boolean. If true, then split files by lines and strip
leading and trailing whitespace from each line. Otherwise, treat each
file as a single string.
additional_chars: A string. Each of its characters is treated as a normal
alphanumeric character so that it is included in the vocab.
do_lower_case: A boolean. If true, lowercase the corpus before counting.
Returns:
a dictionary mapping token to count.
"""
if additional_chars:
# add each extra character to the alphanumeric set (not the whole string as one entry)
_ALPHANUMERIC_CHAR_SET.update(additional_chars)
counts = collections.Counter()
for doc in _read_filepattern(
text_filepattern,
max_lines=corpus_max_lines,
split_on_newlines=split_on_newlines,
do_lower_case=do_lower_case):
counts.update(encode(_native_to_unicode(doc)))
print("read all files")
return counts
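# Usage sketch (the corpus path is a placeholder):
# counts = corpus_token_counts("domain_corpus.txt", corpus_max_lines=None,
# split_on_newlines=True, do_lower_case=True)
# returns a collections.Counter mapping token -> frequency, ready to feed into
# SubwordTextEncoder.build_to_target_size.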
def vocab_token_counts(text_filepattern, max_lines, do_lower_case=False):
"""Read a vocab file and return a dictionary of token counts.
Reads a two-column CSV file of tokens and their frequency in a dataset. The
tokens are presumed to be generated by encode() or the equivalent.
Args:
text_filepattern: A pattern matching one or more files.
max_lines: An integer; maximum total lines to read.
Returns:
a dictionary mapping token to count.
"""
ret = {}
for i, line in enumerate(
_read_filepattern(text_filepattern, max_lines=max_lines)):
if "," not in line:
logger.warning("Malformed vocab line #%d '%s'", i, line)
continue
if do_lower_case:
line = line.lower()
token, count = line.rsplit(",", 1)
ret[_native_to_unicode(token)] = int(count)
return ret
| EXA-1-master | exa/models/unilm-master/adalm/incr_bpe/tokenizer.py |
#-*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from text_encoder import SubwordTextEncoder
import tokenizer
import os
import tempfile
import tensorflow as tf
tf.flags.DEFINE_string('output_filename', '/tmp/my.subword_text_encoder',
'where to store the SubwordTextEncoder')
tf.flags.DEFINE_string('corpus_filepattern', '',
'Corpus of one or more text files')
tf.flags.DEFINE_string('vocab_filepattern', '', 'One or more vocabulary files '
'(one word per line as "word,count")')
tf.flags.DEFINE_integer('min_count', 5, 'Minimum subtoken count in corpus')
tf.flags.DEFINE_integer('vocab_size', 30000, 'The target vocab size. The produced vocab will have a size close to this value.')
tf.flags.DEFINE_integer('corpus_max_lines', None,
'How many lines of corpus to read')
tf.flags.DEFINE_integer('num_iterations', 5, 'Number of iterations')
tf.flags.DEFINE_bool('split_on_newlines', True, 'Break corpus into lines.')
tf.flags.DEFINE_string('additional_chars', "", 'Set special characters to be included in vocab. ex : "~", "/".')
tf.flags.DEFINE_integer('max_subtoken_length', None, 'Max subtoken length')
tf.flags.DEFINE_string('raw_vocab', None, 'Raw bert vovab file')
tf.flags.DEFINE_bool('do_lower_case', False, 'Whether or not to lowercase the input corpus')
FLAGS = tf.flags.FLAGS
def merge_output_file_with_bert_vocab(output_filename, bert_vocab, temp_path):
writer = open(output_filename, 'w', encoding='utf-8')
_set = set()
with open(bert_vocab, 'r', encoding='utf-8') as reader:
for line in reader:
writer.write(line)
_set.add(line.strip())
print(temp_path)
with open(temp_path, 'r', encoding='utf-8') as reader:
for line in reader:
if line.strip() not in _set:
writer.write(line)
writer.close()
# os.remove(temp_path)
def main(unused_argv):
if FLAGS.corpus_filepattern and FLAGS.vocab_filepattern:
raise ValueError(
'Must only provide one of --corpus_filepattern or --vocab_filepattern')
elif FLAGS.corpus_filepattern:
token_counts = tokenizer.corpus_token_counts(
FLAGS.corpus_filepattern,
FLAGS.corpus_max_lines,
split_on_newlines=FLAGS.split_on_newlines, additional_chars=FLAGS.additional_chars, do_lower_case=FLAGS.do_lower_case)
elif FLAGS.vocab_filepattern:
token_counts = tokenizer.vocab_token_counts(FLAGS.vocab_filepattern,
FLAGS.corpus_max_lines, FLAGS.do_lower_case)
else:
raise ValueError(
'Must provide one of --corpus_filepattern or --vocab_filepattern')
reserved_tokens = None
if FLAGS.raw_vocab:
lines = open(FLAGS.raw_vocab, 'r', encoding='utf-8').readlines()
lines = [s.strip() for s in lines if len(s) > 0]
reserved_tokens = lines
print(len(token_counts))
print(len(reserved_tokens))
target_size = FLAGS.vocab_size
if target_size <= len(reserved_tokens):
raise ValueError("The vocab_size must be larger than the origin vocab's size ")
if target_size >= len(token_counts):
raise ValueError("The vocab_size is too large. Please set it smaller or prepare more corpus.")
min_val = 1
max_val = len(token_counts) // (target_size ** 0.5)
fd, temp_path = tempfile.mkstemp()
encoder = SubwordTextEncoder.build_to_target_size(target_size,token_counts,min_val, max_val, num_iterations=FLAGS.num_iterations,
reserved_tokens=reserved_tokens, max_subtoken_length=FLAGS.max_subtoken_length)
# encoder = SubwordTextEncoder()
# encoder.build_from_token_counts(token_counts, FLAGS.min_count,
# FLAGS.num_iterations, reserved_tokens=reserved_tokens, max_subtoken_length=FLAGS.max_subtoken_length)
encoder.store_to_file(temp_path, add_single_quotes=False)
merge_output_file_with_bert_vocab(FLAGS.output_filename, FLAGS.raw_vocab, temp_path)
if __name__ == '__main__':
tf.app.run()
| EXA-1-master | exa/models/unilm-master/adalm/incr_bpe/subword_builder.py |
from setuptools import setup, find_packages
setup(
name = 'PaLM-rlhf-pytorch',
packages = find_packages(exclude=[]),
version = '0.2.1',
license='MIT',
description = 'PaLM + Reinforcement Learning with Human Feedback - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/PaLM-rlhf-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'reinforcement learning',
'human feedback'
],
install_requires=[
'accelerate',
'beartype',
'einops>=0.6',
'lion-pytorch',
'torch>=1.6',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| EXA-1-master | exa/models/PaLM-rlhf-pytorch-main 2/setup.py |
import gzip
import random
import tqdm
import numpy as np
import torch
from lion_pytorch import Lion
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from palm_rlhf_pytorch import PaLM
from accelerate import Accelerator
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
PRIME_LENGTH = 128
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 1024
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
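# enwik8 is byte-level data, so decoding maps each token id back to a character,
# clamping ids below 32 (control bytes) to a space.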
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
# accelerator
accelerator = Accelerator()
device = accelerator.device
# instantiate palm
model = PaLM(
num_tokens=256,
dim=512,
depth=8,
flash_attn=True
).to(device)
# prepare enwik8 data
with gzip.open("./data/enwik8.gz") as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
np_train, np_valid = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(np_train), torch.from_numpy(np_valid)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start : rand_start + self.seq_len + 1].long()
return full_seq.to(device)
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size=BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size=BATCH_SIZE))
# optimizer
optim = Lion(model.palm_parameters(), lr = LEARNING_RATE)
model, optim, train_loader, val_loader = accelerator.prepare(
model, optim, train_loader, val_loader
)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10.0, desc="training"):
model.train()
for _ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader), return_loss = True)
accelerator.backward(loss / GRADIENT_ACCUMULATE_EVERY)
accelerator.print(f"training loss: {loss.item()}")
accelerator.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader), return_loss = True)
accelerator.print(f"validation loss: {loss.item()}")
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:PRIME_LENGTH]
prime = decode_tokens(inp)
accelerator.print(f"%s \n\n %s", (prime, "*" * 100))
sample = model.generate(GENERATE_LENGTH, inp[None, ...])
output_str = decode_tokens(sample[0])
accelerator.print(output_str, "\n")
| EXA-1-master | exa/models/PaLM-rlhf-pytorch-main 2/train.py |
import torch
from torch import nn, einsum
import torch.nn.functional as F
from collections import namedtuple
from functools import wraps
from packaging import version
from einops import rearrange
# constants
Config = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attention(nn.Module):
def __init__(
self,
dropout = 0.,
causal = False,
use_flash_attn = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.register_buffer("mask", None, persistent=False)
self.use_flash_attn = use_flash_attn
assert not (use_flash_attn and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = Config(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not use_flash_attn:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = Config(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = Config(False, True, True)
def get_mask(self, n, device):
if exists(self.mask) and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.register_buffer("mask", mask, persistent=False)
return mask
def flash_attn(self, q, k, v, mask = None):
_, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
k = rearrange(k, 'b ... -> b 1 ...').expand_as(q)
v = rearrange(v, 'b ... -> b 1 ...').expand_as(q)
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = self.causal
)
return out
def forward(self, q, k, v, mask = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if self.use_flash_attn:
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum("b h i d, b j d -> b h i j", q, k) * scale
# key padding mask
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# causal mask
if self.causal:
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum("b h i j, b j d -> b h i d", attn, v)
return out
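# Minimal shape sketch (illustrative, not part of the original module): queries
# are multi-head while keys/values are single-head, matching the multi-query
# layout used by ParallelTransformerBlock in palm.py.
#
# attend = Attention(causal = True, dropout = 0.1, use_flash_attn = False)
# q = torch.randn(2, 8, 1024, 64) # (batch, heads, seq, dim_head)
# k = torch.randn(2, 1024, 64) # (batch, seq, dim_head)
# v = torch.randn(2, 1024, 64)
# out = attend(q, k, v) # -> (2, 8, 1024, 64)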
| EXA-1-master | exa/models/PaLM-rlhf-pytorch-main 2/palm_rlhf_pytorch/attention.py |
import math
import copy
from pathlib import Path
from collections import namedtuple
from functools import wraps
from itertools import zip_longest
from tqdm import tqdm
from beartype import beartype
from beartype.typing import Tuple, Optional
import torch
from torch import einsum, nn
import torch.nn.functional as F
from einops import rearrange, repeat, reduce, pack, unpack
from einops.layers.torch import Rearrange, Reduce
from palm_rlhf_pytorch.attention import Attention
from palm_rlhf_pytorch.utils import top_p, top_k, masked_mean, gumbel_sample, eval_decorator
from palm_rlhf_pytorch.lora import LoRA
# functions and decorators
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def identity(t, *args, **kwargs):
return t
def l2norm(t):
return F.normalize(t, dim = -1)
# normalization
# they use layernorm without bias, something that pytorch does not offer
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# residual
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
y = self.fn(x, **kwargs)
if not any([t.requires_grad for t in (x, y)]):
return x.add_(y)
return y + x
# rotary positional embedding w/ xpos
# https://arxiv.org/abs/2104.09864
# https://arxiv.org/abs/2212.10554v1
class RotaryEmbedding(nn.Module):
def __init__(self, dim, scale_base = 512, use_xpos = True):
super().__init__()
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
self.use_xpos = use_xpos
self.scale_base = scale_base
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.register_buffer('scale', scale)
def forward(self, seq_len, device):
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
if not self.use_xpos:
return freqs, torch.ones(1, device = device)
power = (t - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
return freqs, scale
def rotate_half(x):
x1, x2 = x.chunk(2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t, scale = 1.):
return (t * pos.cos() * scale) + (rotate_half(t) * pos.sin() * scale)
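# Note (added): RotaryEmbedding returns per-position angles plus an xpos decay
# scale; ParallelTransformerBlock.forward below applies the scale to queries and
# its inverse (scale ** -1) to keys for better length extrapolation.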
# classic Noam Shazeer paper, except here they use SwiGLU instead of the more popular GEGLU for gating the feedforward
# https://arxiv.org/abs/2002.05202
class SwiGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim=-1)
return F.silu(gate) * x
# parallel attention and feedforward with residual
# discovered by Wang et al + EleutherAI from GPT-J fame
class ParallelTransformerBlock(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
causal = True,
heads = 8,
qk_rmsnorm = False,
qk_scale = 8,
ff_mult = 4,
attn_dropout = 0.,
ff_dropout = 0.,
use_xpos = True,
xpos_scale_base = 512,
flash_attn = False,
):
super().__init__()
self.norm = LayerNorm(dim)
attn_inner_dim = dim_head * heads
ff_inner_dim = dim * ff_mult
self.fused_dims = (attn_inner_dim, dim_head, dim_head, (ff_inner_dim * 2))
self.qk_rmsnorm = qk_rmsnorm
if qk_rmsnorm:
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
self.attend = Attention(
causal = causal,
dropout = attn_dropout,
use_flash_attn = flash_attn
)
self.heads = heads
self.scale = (dim_head ** -0.5) if not qk_rmsnorm else qk_scale
self.causal = causal
self.rotary_emb = RotaryEmbedding(dim_head, scale_base = xpos_scale_base, use_xpos = use_xpos and causal)
self.fused_attn_ff_proj = nn.Linear(dim, sum(self.fused_dims), bias=False)
self.flash_attn = flash_attn
self.attn_out = nn.Linear(attn_inner_dim, dim, bias=False)
self.attn_dropout = nn.Dropout(attn_dropout)
self.flash_attn_dropout = attn_dropout
# parallel feedforward tail
self.ff_out = nn.Sequential(
SwiGLU(),
nn.Dropout(ff_dropout),
nn.Linear(ff_inner_dim, dim, bias=False)
)
# for caching causal mask and rotary embeddings
self.register_buffer("pos_emb", None, persistent=False)
self.register_buffer("pos_emb_scale", None, persistent=False)
def get_rotary_embedding(self, n, device):
if exists(self.pos_emb) and self.pos_emb.shape[-2] >= n:
return self.pos_emb[:n], self.pos_emb_scale[:n]
pos_emb, scale = self.rotary_emb(n, device=device)
self.register_buffer("pos_emb", pos_emb, persistent=False)
self.register_buffer("pos_emb_scale", scale, persistent=False)
return pos_emb, scale
def forward(
self,
x,
mask = None,
finetune_modules = None
):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device, h = x.shape[1], x.device, self.heads
# pre layernorm
x = self.norm(x)
# attention queries, keys, values, and feedforward inner
q, k, v, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1)
# finetune loras
lora_q = lora_k = lora_v = lora_o = None
if exists(finetune_modules):
lora_q, lora_k, lora_v, lora_o = finetune_modules
q = q + lora_q(x)
k = k + lora_k(x)
v = v + lora_v(x)
# split heads
# they use multi-query single-key-value attention, yet another Noam Shazeer paper
# they found no performance loss past a certain scale, and more efficient decoding obviously
# https://arxiv.org/abs/1911.02150
q = rearrange(q, "b n (h d) -> b h n d", h=h)
# qk rmsnorm
if self.qk_rmsnorm:
q, k = map(l2norm, (q, k))
q = q * self.q_scale
k = k * self.k_scale
# rotary embeddings with xpos decay for better length extrapolation
positions, scale = self.get_rotary_embedding(n, device)
q = apply_rotary_pos_emb(positions, q, scale)
k = apply_rotary_pos_emb(positions, k, scale ** -1)
# attention function, either regular or flash
out = self.attend(q, k, v, mask = mask)
# merge heads
out = rearrange(out, "b h n d -> b n (h d)")
attn_out = self.attn_out(out)
ff_out = self.ff_out(ff)
if exists(lora_o):
attn_out = attn_out + lora_o(out)
return attn_out + ff_out
# transformer
@beartype
class PaLM(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
depth,
causal = True,
dim_head = 64,
heads = 8,
ff_mult = 4,
attn_dropout = 0.,
ff_dropout = 0.,
qk_rmsnorm = False,
lora_r = 8,
rotary_xpos_scale_base = 512,
flash_attn = False,
finetune_scopes = tuple(),
cross_entropy_ignore_index = 0
):
super().__init__()
self.dim = dim
self.dim_head = dim_head
self.heads = heads
self.causal = causal
self.num_tokens = num_tokens
self.token_emb = nn.Embedding(num_tokens, dim)
self.layers = nn.ModuleList([])
for _ in range(depth):
block = Residual(ParallelTransformerBlock(
dim = dim,
causal = causal,
dim_head = dim_head,
heads = heads,
qk_rmsnorm = qk_rmsnorm,
ff_mult = ff_mult,
attn_dropout = attn_dropout,
ff_dropout = ff_dropout,
xpos_scale_base = rotary_xpos_scale_base,
flash_attn = flash_attn
))
self.layers.append(block)
self.norm = LayerNorm(dim)
self.to_logits = nn.Linear(dim, num_tokens, bias=False)
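        # tie the output projection weights to the token embedding (weight tying)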
self.to_logits.weight = self.token_emb.weight
nn.init.normal_(self.token_emb.weight, std=0.02)
# fine tuning related
self.lora_r = lora_r
self.finetune_modules = nn.ModuleDict({})
for scope in finetune_scopes:
self.add_finetune_params(scope)
# loss related
self.cross_entropy_ignore_index = cross_entropy_ignore_index
@property
def device(self):
return next(self.parameters()).device
def load(self, path):
path = Path(path)
assert path.exists()
self.load_state_dict(torch.load(str(path)))
def set_dropout(self, dropout):
for module in self.layers.modules():
if isinstance(module, nn.Dropout):
module.p = dropout
return self
def add_finetune_params(self, scope, lora_r = None):
assert scope not in self.finetune_modules, f'finetune scope {scope} already found'
dim, dim_head, heads, r, device = self.dim, self.dim_head, self.heads, default(lora_r, self.lora_r), self.device
q_inner_dim = heads * dim_head
kv_inner_dim = dim_head
lora_modules = nn.ModuleList([])
for _ in range(len(self.layers)):
lora_modules.append(nn.ModuleList([
LoRA(dim, q_inner_dim, r = r), # queries
LoRA(dim, kv_inner_dim, r = r), # keys
LoRA(dim, kv_inner_dim, r = r), # values
LoRA(q_inner_dim, dim, r = r) # wo
]))
self.finetune_modules[scope] = lora_modules.to(device)
def remove_finetune_params(self, scope):
assert scope in self.finetune_modules, f'finetune scope {scope} not found'
return self.finetune_modules.pop(scope)
@torch.no_grad()
def merge_finetune_params(self, scope):
""" in the case one wants to merge the fine-tuned actor LORA parameters and do multiple rounds of fine tuning off different reward models """
assert scope in self.finetune_modules, f'finetune scope {scope} not found'
lora_modules = self.finetune_modules.pop(scope)
for layer, (lora_q, lora_k, lora_v, lora_o) in zip(self.layers, lora_modules):
block = layer.fn
fused_attn_ff_weight = block.fused_attn_ff_proj.weight
attn_out_weight = block.attn_out.weight
fused_proj_out_dim = fused_attn_ff_weight.shape[0]
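            # pack the q / k / v lora deltas along the output dim, zero-pad up to the full fused
            # projection width (leaving the feedforward slice untouched), then add in-place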
lora_qkv_weight, _ = pack([lora_q.weight, lora_k.weight, lora_v.weight], 'i *')
lora_qkv_weight = F.pad(lora_qkv_weight, (0, fused_proj_out_dim - lora_qkv_weight.shape[1]))
lora_qkv_weight = rearrange(lora_qkv_weight, 'i o -> o i')
lora_o_weight = rearrange(lora_o.weight, 'i o -> o i')
fused_attn_ff_weight.add_(lora_qkv_weight)
attn_out_weight.add_(lora_o_weight)
    # researchers train the palm parameters first,
    # before any finetuning
def palm_parameters(self):
return set(self.parameters()) - set(self.finetune_modules.parameters())
def finetune_parameters(self, scope = 'default'):
assert scope in self.finetune_modules, f'finetune parameters of scope {scope} not found'
return self.finetune_modules[scope].parameters()
# generate function
@torch.no_grad()
@eval_decorator
def generate(
self,
seq_len,
prompt = None,
temperature = 1.,
filter_logits_fn = top_k,
filter_thres = 0.9,
pad_value = 0.,
eos_token = None,
return_seq_without_prompt = True,
use_tqdm = False,
**kwargs
):
if not exists(prompt):
prompt = torch.randint(0, self.num_tokens, (1, 1))
prompt = prompt.to(self.device)
return_seq_without_prompt = False
prompt, leading_dims = pack([prompt], '* n')
n, out = prompt.shape[-1], prompt.clone()
wrapper_fn = identity if not use_tqdm else tqdm
sample_num_times = max(1, seq_len - prompt.shape[-1])
for _ in wrapper_fn(range(sample_num_times)):
logits, embeds = self.forward(out, return_logits_with_embedding = True, **kwargs)
logits, embeds = logits[:, -1], embeds[:, -1]
if exists(filter_logits_fn):
logits = filter_logits_fn(logits, thres = filter_thres)
sample = gumbel_sample(logits, temperature = temperature, dim = -1)
out, _ = pack([out, sample], 'b *')
if exists(eos_token):
is_eos_tokens = (out == eos_token)
if is_eos_tokens.any(dim = -1).all():
# mask out everything after the eos tokens
shifted_is_eos_tokens = F.pad(is_eos_tokens, (1, -1))
mask = shifted_is_eos_tokens.float().cumsum(dim = -1) >= 1
out = out.masked_fill(mask, pad_value)
break
out, = unpack(out, leading_dims, '* n')
if not return_seq_without_prompt:
return out
return out[..., n:]
def forward(
self,
x,
return_loss = False,
disable_lora = False,
finetune_scope = None,
extra_embed = None,
return_only_embedding = False,
return_logits_with_embedding = False
):
if return_loss:
x, labels = x[:, :-1], x[:, 1:]
# mask if encoder
# treat any token ids that are negative as tokens to mask out - only needed if not autoregressive
if not self.causal:
mask = x >= 0
x = x.masked_fill(~mask, 0)
else:
mask = None
# get token embedding
x = self.token_emb(x)
if exists(extra_embed):
x = x + extra_embed
# finetune modules
finetune_modules = tuple()
if exists(finetune_scope) and not disable_lora:
assert finetune_scope in self.finetune_modules
finetune_modules = self.finetune_modules[finetune_scope]
# parallel attention / ff blocks, passing in finetuning loras
for layer, finetune_modules in zip_longest(self.layers, finetune_modules):
x = layer(x, mask = mask, finetune_modules = finetune_modules)
# final norm
embeds = self.norm(x)
if return_only_embedding:
return embeds
# to logits
logits = self.to_logits(embeds)
ret = (logits, embeds) if return_logits_with_embedding else logits
if not return_loss:
return ret
logits = rearrange(logits, 'b n c -> b c n')
return F.cross_entropy(logits, labels, ignore_index = self.cross_entropy_ignore_index) | EXA-1-master | exa/models/PaLM-rlhf-pytorch-main 2/palm_rlhf_pytorch/palm.py |
from palm_rlhf_pytorch.palm import PaLM
from palm_rlhf_pytorch.reward import RewardModel
from palm_rlhf_pytorch.ppo import RLHFTrainer, ActorCritic
| EXA-1-master | exa/models/PaLM-rlhf-pytorch-main 2/palm_rlhf_pytorch/__init__.py |
import math
import torch
from torch import einsum, nn
import torch.nn.functional as F
from einops import rearrange
def exists(val):
return val is not None
# decorators
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
# tensor helpers
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def masked_mean(seq, mask = None, dim = 1, keepdim = False):
if not exists(mask):
return seq.mean(dim = dim)
if seq.ndim == 3:
mask = rearrange(mask, 'b n -> b n 1')
masked_seq = seq.masked_fill(~mask, 0.)
numer = masked_seq.sum(dim = dim, keepdim = keepdim)
denom = mask.sum(dim = dim, keepdim = keepdim)
masked_mean = numer / denom.clamp(min = 1e-3)
masked_mean = masked_mean.masked_fill(denom == 0, 0.)
return masked_mean
# sampling helpers
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
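# gumbel-max trick: adding gumbel noise to the temperature-scaled logits and taking the argmax
# is equivalent to sampling from the softmax distribution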
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
def top_p(logits, thres = 0.9):
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres = 0.9):
k = math.ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
| EXA-1-master | exa/models/PaLM-rlhf-pytorch-main 2/palm_rlhf_pytorch/utils.py |
from torch.optim import AdamW, Adam
from lion_pytorch import Lion
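# parameters with fewer than 2 dims (biases, norm gains) are kept out of weight decay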
def separate_weight_decayable_params(params):
wd_params, no_wd_params = [], []
for param in params:
param_list = no_wd_params if param.ndim < 2 else wd_params
param_list.append(param)
return wd_params, no_wd_params
def get_optimizer(
params,
lr = 1e-4,
wd = 1e-2,
betas = (0.9, 0.99),
eps = 1e-8,
filter_by_requires_grad = False,
group_wd_params = True,
use_lion = True,
**kwargs
):
if filter_by_requires_grad:
params = list(filter(lambda t: t.requires_grad, params))
if group_wd_params and wd > 0:
wd_params, no_wd_params = separate_weight_decayable_params(params)
params = [
{'params': wd_params},
{'params': no_wd_params, 'weight_decay': 0},
]
if use_lion:
return Lion(params, lr = lr, betas = betas, weight_decay = wd)
if wd == 0:
return Adam(params, lr = lr, betas = betas, eps = eps)
return AdamW(params, lr = lr, weight_decay = wd, betas = betas, eps = eps)
| EXA-1-master | exa/models/PaLM-rlhf-pytorch-main 2/palm_rlhf_pytorch/optimizer.py |
import torch
from torch import nn
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# LoRA - https://arxiv.org/abs/2106.09685
class LoRA(nn.Module):
def __init__(
self,
dim,
dim_out,
r = 8,
alpha = None
):
super().__init__()
alpha = default(alpha, r)
self.scale = alpha / r
self.A = nn.Parameter(torch.randn(dim, r))
self.B = nn.Parameter(torch.zeros(r, dim_out))
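        # B is zero-initialized, so the low-rank update A @ B starts at zero and the adapter
        # initially leaves the pretrained output unchanged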
@property
def weight(self):
return (self.A @ self.B) * self.scale
def forward(self, x):
return x @ self.weight
| EXA-1-master | exa/models/PaLM-rlhf-pytorch-main 2/palm_rlhf_pytorch/lora.py |
import math
from pathlib import Path
import copy
from tqdm import tqdm
from functools import partial
from collections import deque, namedtuple
from random import randrange
from beartype import beartype
from beartype.typing import List, Optional, Callable, Deque
import torch
from torch import nn
import torch.nn.functional as F
from torch.optim import Adam
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from einops import rearrange, repeat, reduce
from einops.layers.torch import Rearrange
from palm_rlhf_pytorch.palm import PaLM
from palm_rlhf_pytorch.reward import RewardModel
from palm_rlhf_pytorch.optimizer import get_optimizer
from palm_rlhf_pytorch.utils import masked_mean, eval_decorator
from accelerate import Accelerator
# actor critic - PaLM with lora
PPOActionCriticReturn = namedtuple('PPOActionCriticReturn', [
'actions',
'sequence',
'mask',
'prompt_mask',
'action_logits',
'values'
])
@beartype
class ActorCritic(nn.Module):
def __init__(
self,
palm: PaLM,
critic_palm: Optional[PaLM] = None,
pooled_values = False,
actor_lora = True,
critic_lora = True,
actor_lora_r = 8,
critic_lora_r = 8,
actor_lora_scope = 'actor',
critic_lora_scope = 'critic',
actor_dropout = 0.,
critic_dropout = 0.
):
super().__init__()
self.actor_palm = palm
self.critic_palm = critic_palm
if not exists(self.critic_palm):
self.critic_palm = copy.deepcopy(palm)
self.actor_palm.set_dropout(actor_dropout)
self.critic_palm.set_dropout(critic_dropout)
self.actor_lora = actor_lora
self.critic_lora = critic_lora
self.actor_lora_scope = actor_lora_scope if actor_lora else None
self.critic_lora_scope = critic_lora_scope if critic_lora else None
if self.actor_lora:
self.actor_palm.add_finetune_params(actor_lora_scope, lora_r = actor_lora_r)
if self.critic_lora:
self.critic_palm.add_finetune_params(critic_lora_scope, lora_r = critic_lora_r)
self.pooled_values = pooled_values
self.value_head = nn.Sequential(
nn.Linear(palm.dim, 1),
Rearrange('... 1 -> ...')
)
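        # zeroed bias and orthogonal weight init (gain sqrt(2)) for the value head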
nn.init.zeros_(self.value_head[0].bias)
nn.init.orthogonal_(self.value_head[0].weight, gain = math.sqrt(2))
def actor_parameters(self):
if not self.actor_lora:
return self.actor_palm.parameters()
return [
*self.actor_palm.finetune_parameters(self.actor_lora_scope)
]
def critic_parameters(self):
        if not self.critic_lora:
return [*self.critic_palm.parameters(), *self.value_head.parameters()]
return [
*self.critic_palm.finetune_parameters(self.critic_lora_scope),
*self.value_head.parameters()
]
@torch.no_grad()
@eval_decorator
def generate(
self,
state,
max_seq_len,
eos_token = None,
return_values = False,
**kwargs
):
actions = self.actor_palm.generate(
max_seq_len,
prompt = state,
eos_token = eos_token,
finetune_scope = self.actor_lora_scope,
use_tqdm = True,
**kwargs
)
sequence = torch.cat((state, actions), dim = -1)
action_len = actions.shape[-1]
state_len = state.shape[-1]
prompt_mask = torch.arange(sequence.shape[-1], device = state.device) < state_len
prompt_mask = repeat(prompt_mask, 'n -> b n', b = sequence.shape[0])
action_mask = ~prompt_mask
mask = None
if exists(eos_token):
mask = ((sequence == eos_token).cumsum(dim = -1) == 0)
mask = F.pad(mask, (1, -1), value = True) # include eos token
action_mask &= mask
action_logits, value = self.forward(
sequence,
mask = action_mask,
return_values = return_values
)
return PPOActionCriticReturn(
actions,
sequence,
mask,
prompt_mask,
action_logits,
value
)
def forward(
self,
x,
mask = None,
return_values = True
):
action_logits = self.actor_palm(
x,
finetune_scope = self.actor_lora_scope
)
if not return_values:
return action_logits, None
critic_embeds = self.critic_palm(
x,
return_only_embedding = True,
finetune_scope = self.critic_lora_scope
)
if self.pooled_values:
critic_embeds = shift(critic_embeds, shift = 1, dim = -2)
critic_embeds = masked_mean(critic_embeds, mask, dim = 1)
values = self.value_head(critic_embeds)
return action_logits, values
# data
Memory = namedtuple('Memory', [
'sequence',
'prompt_mask',
'mask',
'action_prob',
'action_log_prob',
'reward',
'value'
])
@beartype
class ExperienceDataset(Dataset):
def __init__(
self,
data: List[torch.Tensor],
device = None
):
super().__init__()
self.data = data
self.device = device
def __len__(self):
return self.data[0].shape[0]
def __getitem__(self, ind):
return tuple(map(lambda t: t[ind].to(self.device), self.data))
def create_dataloader(data, batch_size, shuffle = True, device = None, **kwargs):
ds = ExperienceDataset(data, device = device)
return DataLoader(ds, batch_size = batch_size, shuffle = shuffle, **kwargs)
# helper functions
def exists(val):
return val is not None
def default(val, d):
if exists(val):
return val
return d() if callable(d) else d
def masked_normalize(t, eps = 1e-5, mask = None, dim = None):
dim = default(dim, tuple(range(t.ndim)))
kwargs = dict(dim = dim, keepdim = True)
mean = masked_mean(t, mask = mask, **kwargs)
mean_centered = t - mean
var = masked_mean(mean_centered ** 2, mask = mask, **kwargs)
return mean_centered * var.clamp(min = eps).rsqrt()
def pad_sequence_fixed(sequences, *args, **kwargs):
first_el = sequences[0]
has_no_dimension = first_el.ndim == 0
# if no dimensions, add a single dimension
if has_no_dimension:
sequences = tuple(map(lambda t: t[None], sequences))
out = pad_sequence(sequences, *args, **kwargs)
if has_no_dimension:
out = rearrange(out, '... 1 -> ...')
return out
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def log_prob(prob, indices):
assert prob.shape[:2] == indices.shape, f'preceding shapes of prob {prob.shape[:2]} and indices {indices.shape} must match'
return log(prob.gather(-1, indices[..., None])).squeeze(-1)
def shift(t, value = 0, shift = 1, dim = -1):
zeros = (0, 0) * (-dim - 1)
return F.pad(t, (*zeros, shift, -shift), value = value)
def masked_entropy(prob, dim = -1, mask = None):
entropies = (prob * log(prob)).sum(dim = -1)
return masked_mean(entropies, mask = mask).mean()
def masked_kl_div(prob1, prob2, mask = None, reduce_batch = False):
"""
need to account for variable sequence lengths, therefore not using the built-in functional version
"""
kl_divs = (prob1 * (log(prob1) - log(prob2))).sum(dim = -1)
loss = masked_mean(kl_divs, mask)
if reduce_batch:
return loss.mean()
return loss
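# ppo-style value clipping: take the worse (max) of the clipped and unclipped squared errors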
def clipped_value_loss(values, rewards, old_values, clip):
value_clipped = old_values + (values - old_values).clamp(-clip, clip)
value_loss_1 = (value_clipped.flatten() - rewards) ** 2
value_loss_2 = (values.flatten() - rewards) ** 2
return torch.mean(torch.max(value_loss_1, value_loss_2))
# rlhf trainer
@beartype
class RLHFTrainer(nn.Module):
def __init__(
self,
*,
prompts: Optional[List[str]] = None,
prompts_path: Optional[str] = None,
prompt_token_ids: Optional[torch.Tensor] = None,
tokenizer: Callable = None,
palm: PaLM,
reward_model: RewardModel,
critic_palm: Optional[PaLM] = None,
actor_critic: Optional[ActorCritic] = None,
actor_lr = 1e-4,
critic_lr = 1e-4,
actor_wd = 0.,
critic_wd = 0.,
actor_adam_eps = 1e-7,
critic_adam_eps = 1e-7,
actor_lora = True,
critic_lora = True,
actor_lora_r = 8,
critic_lora_r = 8,
critic_pooled_values = True,
actor_dropout = 0.,
critic_dropout = 0.,
betas = (0.9, 0.999),
max_norm = None,
eps_clip = 0.2,
value_clip = 0.4,
beta_s = .01,
pad_value = 0.,
minibatch_size = 16,
epochs = 1,
kl_div_loss_weight = 0.1, # between old action probs and new action probs - not sure what the right value is
accelerate_kwargs: dict = {},
use_lion = False
):
super().__init__()
self.accelerate = Accelerator(**accelerate_kwargs)
# take care of prompts -> token ids
assert (exists(prompts) + exists(prompts_path) + exists(prompt_token_ids)) == 1
if exists(prompts_path):
path = Path(prompts_path)
prompts = path.read_text().split('\n')
if exists(prompts):
assert len(prompts) > 0, 'no prompts'
assert exists(tokenizer), 'tokenizer must be passed in if raw text prompts are given'
prompt_token_ids = tokenizer(prompts)
self.pad_value = pad_value # token pad value
self.num_prompts = prompt_token_ids.shape[0]
self.register_buffer('prompt_token_ids', prompt_token_ids)
# models
self.palm = palm
if not exists(actor_critic):
actor_critic = ActorCritic(
palm = palm,
critic_palm = critic_palm,
actor_lora = actor_lora,
critic_lora = critic_lora,
actor_lora_r = actor_lora_r,
critic_lora_r = critic_lora_r,
pooled_values = critic_pooled_values,
actor_dropout = actor_dropout,
critic_dropout = critic_dropout
).to(palm.device)
self.actor_critic = actor_critic
self.reward_model = reward_model.eval()
# train hyperparameters
self.epochs = epochs
self.minibatch_size = minibatch_size
self.max_norm = max_norm
self.kl_div_loss_weight = kl_div_loss_weight
# optimizers
self.actor_optim = get_optimizer(actor_critic.actor_parameters(), lr = actor_lr, wd = actor_wd, betas = betas, eps = actor_adam_eps, use_lion = use_lion)
self.critic_optim = get_optimizer(actor_critic.critic_parameters(), lr = critic_lr, wd = critic_wd, betas = betas, eps = critic_adam_eps, use_lion = use_lion)
# ppo hyperparams
self.eps_clip = eps_clip
self.value_clip = value_clip
self.beta_s = beta_s
# prepare with accelerator
(
self.actor_critic,
self.reward_model,
self.actor_optim,
self.critic_optim
) = self.accelerate.prepare(
self.actor_critic,
self.reward_model,
self.actor_optim,
self.critic_optim
)
def print(self, msg):
return self.accelerate.print(msg)
def save(self, filepath = './checkpoint.pt'):
torch.save(self.actor_critic.state_dict(), filepath)
def load(self, filepath = './checkpoint.pt'):
state_dict = torch.load(filepath)
self.actor_critic.load_state_dict(state_dict)
@property
def device(self):
return self.accelerate.device
@torch.no_grad()
def generate(
self,
max_seq_len,
*args,
prompt,
num_samples = 4, # sample 4 per prompt and select the one with highest reward
**kwargs
):
assert prompt.ndim == 1, 'only one prompt allowed at a time for now'
prompt = repeat(prompt, 'n -> b n', b = num_samples)
actor_critic = self.accelerate.unwrap_model(self.actor_critic)
reward_model = self.accelerate.unwrap_model(self.reward_model)
actor_critic.eval()
(
actions,
sequences,
mask,
prompt_mask,
action_logits,
_
) = actor_critic.generate(
prompt,
*args,
max_seq_len = max_seq_len,
return_values = False,
**kwargs
)
rewards = reward_model(
sequences,
prompt_mask = prompt_mask,
mask = mask,
sample = True
)
best_sequence_index = rewards.topk(1, dim = -1).indices
best_sequence = sequences[best_sequence_index]
best_sequence = rearrange(best_sequence, '1 ... -> ...')
return best_sequence
def learn(
self,
memories: Deque[Memory]
):
# stack all data stored in the memories
all_memories_stacked_and_padded = list(map(partial(pad_sequence_fixed, batch_first = True), zip(*memories)))
# prepare dataloader for policy phase training
dl = create_dataloader(all_memories_stacked_and_padded, self.minibatch_size, device = self.device)
self.actor_critic.train()
# PPO training
for _ in range(self.epochs):
for (
sequences,
prompt_masks,
masks,
old_action_probs,
old_log_probs,
rewards,
old_values
) in dl:
action_masks = ~prompt_masks & masks
action_logits, values = self.actor_critic(
sequences,
mask = action_masks
)
action_logits = shift(action_logits, shift = 1, dim = -2) # need to shift along sequence dimension by 1, since actions start from the last prompt (state) token
action_len = old_log_probs.shape[-1]
action_probs = action_logits.softmax(dim = -1)
action_log_probs = log_prob(action_probs, sequences)
action_log_probs = action_log_probs[:, -action_len:]
# calculate entropies, taking into account which part of the sequence is actually an action
entropies = masked_entropy(action_probs, mask = action_masks)
# calculate kl div between old action probs and new ones, taking into account which part of the sequence is action or not
kl_penalty = 0.
if self.kl_div_loss_weight > 0:
kl_penalty = masked_kl_div(old_action_probs, action_probs, mask = action_masks) * self.kl_div_loss_weight
# subtract the kl penalty from the rewards
rewards = rewards - kl_penalty
# handle non-pooled values
normalize_kwargs = dict()
if old_values.ndim == 2:
old_values, values = map(lambda t: shift(t, shift = 1, dim = -2), (old_values, values))
old_values = old_values[:, -action_len:]
values = values[:, -action_len:]
rewards = rearrange(rewards, 'b -> b 1')
normalize_kwargs = dict(dim = -1, mask = action_masks[:, -action_len:])
if values.ndim < rewards.ndim:
values = rearrange(values, '... -> ... 1')
# calculate clipped surrogate objective, classic PPO loss
ratios = (action_log_probs - old_log_probs).exp()
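                # advantages are simply (reward - old value baseline), normalized; no GAE / bootstrapping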
advantages = masked_normalize(rewards - old_values, **normalize_kwargs)
if advantages.ndim == 1:
advantages = rearrange(advantages, 'b -> b 1')
surr1 = ratios * advantages
surr2 = ratios.clamp(1 - self.eps_clip, 1 + self.eps_clip) * advantages
policy_loss = - torch.min(surr1, surr2) - self.beta_s * entropies
# combine losses
loss = policy_loss.mean()
# update actor
self.accelerate.backward(loss)
self.print(f'policy_loss: {loss.item():.3f}')
if exists(self.max_norm):
                    self.accelerate.clip_grad_norm_(self.actor_critic.actor_parameters(), self.max_norm)
self.actor_optim.step()
self.actor_optim.zero_grad()
# calculate value loss and update value network separate from policy network
value_loss = clipped_value_loss(values, rewards.detach(), old_values, self.value_clip)
value_loss = value_loss.mean()
self.print(f'critic_loss: {value_loss.item():.3f}')
self.accelerate.backward(value_loss)
if exists(self.max_norm):
                    self.accelerate.clip_grad_norm_(self.actor_critic.critic_parameters(), self.max_norm)
self.critic_optim.step()
self.critic_optim.zero_grad()
def train(
self,
num_episodes = 50000,
max_timesteps = 500,
update_timesteps = 5000,
max_batch_size = 16,
max_seq_len = 2048,
eos_token = None,
temperature = 1.
):
device = self.device
time = 0
memories = deque([])
for eps in tqdm(range(num_episodes), desc = 'episodes'):
for timestep in range(max_timesteps):
time += 1
# select a bunch of random states (prompts)
# and get the action (sampled sequence from palm as well as the action probs)
# also calculate the reward using reward model and store
rand_prompt_index = randrange(0, self.num_prompts)
state = self.prompt_token_ids[rand_prompt_index]
# remove padding from state
state_mask = state != self.pad_value
state = state[state_mask]
# get predicted sequence
(
actions,
sequence,
mask,
prompt_mask,
action_logits,
value
) = self.actor_critic.generate(
rearrange(state, 'n -> 1 n'),
max_seq_len = max_seq_len,
eos_token = eos_token,
temperature = temperature,
return_values = True
)
action_logits = shift(action_logits, shift = 1, dim = -2) # need to shift along sequence dimension by 1, since actions start from the last prompt (state) token
action_prob = action_logits.softmax(dim = -1)
action_len = actions.shape[-1]
action_log_prob = log_prob(action_prob, sequence)
action_log_prob = action_log_prob[:, -action_len:]
actions = rearrange(actions, '1 ... -> ...')
# get reward as given by supervised trained reward model
sequence = torch.cat((state, actions), dim = 0)
prompt_length = len(state)
prompt_mask = torch.arange(sequence.shape[-1], device = device) < prompt_length
sequence = rearrange(sequence, 'n -> 1 n')
prompt_mask = rearrange(prompt_mask, 'n -> 1 n')
mask = default(mask, lambda: torch.ones(sequence.shape, dtype = torch.bool, device = device))
reward = self.reward_model(
sequence,
prompt_mask = prompt_mask,
mask = mask,
sample = True
)
detach_to_cpu_ = lambda t: rearrange(t.detach().cpu(), '1 ... -> ...')
# store memory for learning
memories.append(Memory(*map(detach_to_cpu_, (
sequence,
prompt_mask,
mask,
action_prob,
action_log_prob,
reward,
value
))))
# learn from the stored memories
if time % update_timesteps == 0:
self.learn(memories)
memories.clear()
print('rlhf training complete')
| EXA-1-master | exa/models/PaLM-rlhf-pytorch-main 2/palm_rlhf_pytorch/ppo.py |
import copy
from pathlib import Path
from tqdm import tqdm
from beartype import beartype
from beartype.typing import Tuple, Optional
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat, reduce, pack, unpack
from einops.layers.torch import Rearrange, Reduce
from palm_rlhf_pytorch.utils import masked_mean, gumbel_sample
from palm_rlhf_pytorch.palm import PaLM
# helper functions
def exists(val):
return val is not None
# Reward Model - PaLM with a scalar head
@beartype
class RewardModel(nn.Module):
def __init__(
self,
palm: PaLM,
dropout = 0.1,
num_binned_output = 0.,
use_lora = True,
lora_r = 8,
reward_lora_scope = 'reward',
):
super().__init__()
self.palm = copy.deepcopy(palm)
self.palm.set_dropout(dropout)
self.reward_lora_scope = reward_lora_scope if use_lora else None
if exists(self.reward_lora_scope):
self.palm.add_finetune_params(reward_lora_scope, lora_r = lora_r)
dim = palm.dim
self.binned_output = num_binned_output > 1
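        # learned embeddings added onto the token embeddings to mark prompt vs response positions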
self.prompt_embed = nn.Parameter(torch.zeros(1, 1, dim))
self.response_embed = nn.Parameter(torch.zeros(1, 1, dim))
if self.binned_output:
self.to_pred = nn.Linear(dim, num_binned_output)
else:
self.to_pred = nn.Sequential(
nn.Linear(dim, 1, bias = False),
Rearrange('... 1 -> ...')
)
def load(self, path):
path = Path(path)
assert path.exists()
self.load_state_dict(torch.load(str(path)))
def finetune_parameters(self):
return [
*self.to_pred.parameters(),
*(self.palm.finetune_parameters(self.reward_lora_scope) if exists(self.reward_lora_scope) else self.palm.parameters())
]
def forward(
self,
x,
mask = None,
prompt_mask = None,
prompt_lengths = None,
labels = None,
sample = False,
sample_temperature = 1.,
disable_lora = False
):
assert not (exists(prompt_mask) and exists(prompt_lengths))
# derive prompt mask from prompt lengths
if exists(prompt_lengths):
batch, seq_len = x.shape
arange = torch.arange(seq_len, device = x.device)
prompt_mask = repeat(arange, 'n -> b n', b = batch) < rearrange(prompt_lengths, 'b -> b 1')
# reward model should have an understanding of which section is prompt, and which section is response
extra_embed = None
if exists(prompt_mask):
extra_embed = torch.where(
rearrange(prompt_mask, 'b n -> b n 1'),
self.prompt_embed,
self.response_embed
)
# get embeddings from palm
embeds = self.palm(
x,
extra_embed = extra_embed,
return_only_embedding = True,
disable_lora = disable_lora,
finetune_scope = self.reward_lora_scope
)
pooled = masked_mean(embeds, mask, dim = 1)
pred = self.to_pred(pooled)
if sample and self.binned_output:
assert not exists(labels)
pred = gumbel_sample(pred, temperature = sample_temperature, dim = -1)
if not exists(labels):
return pred
if not self.binned_output:
return F.mse_loss(pred, labels)
return F.cross_entropy(pred, labels)
| EXA-1-master | exa/models/PaLM-rlhf-pytorch-main 2/palm_rlhf_pytorch/reward.py |
from setuptools import setup, find_packages
setup(
name = 'MaMMUT-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.2',
license='MIT',
description = 'MaMMUT - Pytorch',
author = 'Phil Wang',
author_email = '[email protected]',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/MaMMUT-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'multimodal',
'attention mechanism',
'contrastive learning'
],
install_requires=[
'einops>=0.6.1',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| EXA-1-master | exa/models/MaMMUT-pytorch/setup.py |
import torch
from torch import einsum, nn
import torch.nn.functional as F
from einops import rearrange, repeat
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(numer, denom):
return (numer % denom) == 0
# normalization
# they use layernorm without bias, something that pytorch does not offer
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# residual
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
# to latents
class EmbedToLatents(nn.Module):
def __init__(self, dim, dim_latents):
super().__init__()
self.to_latents = nn.Linear(dim, dim_latents, bias=False)
def forward(self, x):
latents = self.to_latents(x)
return F.normalize(latents, dim=-1)
# rotary positional embedding
# https://arxiv.org/abs/2104.09864
class RotaryEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
def forward(self, max_seq_len, *, device):
seq = torch.arange(max_seq_len, device=device, dtype=self.inv_freq.dtype)
freqs = einsum("i , j -> i j", seq, self.inv_freq)
return torch.cat((freqs, freqs), dim=-1)
def rotate_half(x):
x = rearrange(x, "... (j d) -> ... j d", j=2)
x1, x2 = x.unbind(dim=-2)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t):
return (t * pos.cos()) + (rotate_half(t) * pos.sin())
# classic Noam Shazeer paper, except here they use SwiGLU instead of the more popular GEGLU for gating the feedforward
# https://arxiv.org/abs/2002.05202
class SwiGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim=-1)
return F.silu(gate) * x
# parallel attention and feedforward with residual
# discovered by Wang et al + EleutherAI from GPT-J fame
class ParallelTransformerBlock(nn.Module):
def __init__(self, dim, dim_head=64, heads=8, ff_mult=4):
super().__init__()
self.norm = LayerNorm(dim)
attn_inner_dim = dim_head * heads
ff_inner_dim = dim * ff_mult
self.fused_dims = (attn_inner_dim, dim_head, dim_head, (ff_inner_dim * 2))
self.heads = heads
self.scale = dim_head**-0.5
self.rotary_emb = RotaryEmbedding(dim_head)
self.fused_attn_ff_proj = nn.Linear(dim, sum(self.fused_dims), bias=False)
self.attn_out = nn.Linear(attn_inner_dim, dim, bias=False)
self.ff_out = nn.Sequential(
SwiGLU(),
nn.Linear(ff_inner_dim, dim, bias=False)
)
# for caching causal mask and rotary embeddings
self.register_buffer("mask", None, persistent=False)
self.register_buffer("pos_emb", None, persistent=False)
def get_mask(self, n, device):
if self.mask is not None and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.register_buffer("mask", mask, persistent=False)
return mask
def get_rotary_embedding(self, n, device):
if self.pos_emb is not None and self.pos_emb.shape[-2] >= n:
return self.pos_emb[:n]
pos_emb = self.rotary_emb(n, device=device)
self.register_buffer("pos_emb", pos_emb, persistent=False)
return pos_emb
def forward(self, x, attn_mask=None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device, h = x.shape[1], x.device, self.heads
# pre layernorm
x = self.norm(x)
# attention queries, keys, values, and feedforward inner
q, k, v, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1)
# split heads
# they use multi-query single-key-value attention, yet another Noam Shazeer paper
# they found no performance loss past a certain scale, and more efficient decoding obviously
# https://arxiv.org/abs/1911.02150
q = rearrange(q, "b n (h d) -> b h n d", h=h)
# rotary embeddings
positions = self.get_rotary_embedding(n, device)
q, k = map(lambda t: apply_rotary_pos_emb(positions, t), (q, k))
# scale
q = q * self.scale
# similarity
sim = einsum("b h i d, b j d -> b h i j", q, k)
# causal mask
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# extra attention mask - for masking out attention from text CLS token to padding
if exists(attn_mask):
attn_mask = rearrange(attn_mask, 'b i j -> b 1 i j')
sim = sim.masked_fill(~attn_mask, -torch.finfo(sim.dtype).max)
# attention
sim = sim - sim.amax(dim=-1, keepdim=True).detach()
attn = sim.softmax(dim=-1)
# aggregate values
out = einsum("b h i j, b j d -> b h i d", attn, v)
# merge heads
out = rearrange(out, "b h n d -> b n (h d)")
return self.attn_out(out) + self.ff_out(ff)
# cross attention - using multi-query + one-headed key / values as in PaLM w/ optional parallel feedforward
class CrossAttention(nn.Module):
def __init__(
self,
dim,
*,
context_dim=None,
dim_head=64,
heads=8,
parallel_ff=False,
ff_mult=4,
norm_context=False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = heads * dim_head
context_dim = default(context_dim, dim)
self.norm = LayerNorm(dim)
self.context_norm = LayerNorm(context_dim) if norm_context else nn.Identity()
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_kv = nn.Linear(context_dim, dim_head * 2, bias=False)
self.to_out = nn.Linear(inner_dim, dim, bias=False)
# whether to have parallel feedforward
ff_inner_dim = ff_mult * dim
self.ff = nn.Sequential(
nn.Linear(dim, ff_inner_dim * 2, bias=False),
SwiGLU(),
nn.Linear(ff_inner_dim, dim, bias=False)
) if parallel_ff else None
def forward(self, x, context):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
# pre-layernorm, for queries and context
x = self.norm(x)
context = self.context_norm(context)
# get queries
q = self.to_q(x)
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
# scale
q = q * self.scale
# get key / values
k, v = self.to_kv(context).chunk(2, dim=-1)
# query / key similarity
sim = einsum('b h i d, b j d -> b h i j', q, k)
# attention
sim = sim - sim.amax(dim=-1, keepdim=True)
attn = sim.softmax(dim=-1)
# aggregate
out = einsum('b h i j, b j d -> b h i d', attn, v)
# merge and combine heads
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
# add parallel feedforward (for multimodal layers)
if exists(self.ff):
out = out + self.ff(x)
return out
# transformer
class MaMMUT(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
depth,
cross_attend_every=1,
cross_attend_layers=None,
dim_latents=None,
image_dim=None,
num_img_queries=256,
dim_head=64,
heads=8,
ff_mult=4,
img_encoder=None,
caption_loss_weight=1.,
contrastive_loss_weight=1.,
pad_id=0
):
super().__init__()
self.dim = dim
self.pad_id = pad_id
self.caption_loss_weight = caption_loss_weight
self.contrastive_loss_weight = contrastive_loss_weight
# token embeddings
self.token_emb = nn.Embedding(num_tokens, dim)
self.text_cls_token = nn.Parameter(torch.randn(dim))
# image encoder
self.img_encoder = img_encoder
# attention pooling for image tokens
self.img_queries = nn.Parameter(torch.randn(num_img_queries + 1, dim)) # num image queries for multimodal, but 1 extra CLS for contrastive learning
self.img_attn_pool = CrossAttention(dim=dim, context_dim=image_dim, dim_head=dim_head, heads=heads, norm_context=True)
self.img_attn_pool_norm = LayerNorm(dim)
self.text_cls_norm = LayerNorm(dim)
# to latents
dim_latents = default(dim_latents, dim)
self.img_to_latents = EmbedToLatents(dim, dim_latents)
self.text_to_latents = EmbedToLatents(dim, dim_latents)
# contrastive learning temperature
self.temperature = nn.Parameter(torch.Tensor([1.]))
# layers
self.layers = nn.ModuleList([])
for ind in range(depth):
layer = ind + 1
has_cross_attn = divisible_by(layer, cross_attend_every)
if exists(cross_attend_layers):
assert isinstance(cross_attend_layers, tuple)
has_cross_attn = layer in cross_attend_layers
self.layers.append(nn.ModuleList([
Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)),
Residual(CrossAttention(dim=dim, dim_head=dim_head, heads=heads, parallel_ff=True, ff_mult=ff_mult)) if has_cross_attn else None
]))
# to logits
self.to_logits = nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, num_tokens, bias=False)
)
# they used embedding weight tied projection out to logits, not common, but works
self.to_logits[-1].weight = self.token_emb.weight
nn.init.normal_(self.token_emb.weight, std=0.02)
def embed_text(self, text):
batch, device = text.shape[0], text.device
seq = text.shape[1]
text_tokens = self.token_emb(text)
# append text cls tokens
text_cls_tokens = repeat(self.text_cls_token, 'd -> b 1 d', b=batch)
text_tokens = torch.cat((text_tokens, text_cls_tokens), dim=-2)
# create specific mask for text cls token at the end
# to prevent it from attending to padding
cls_mask = rearrange(text!=self.pad_id, 'b j -> b 1 j')
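        # pad to a (seq + 1) x (seq + 1) mask: ordinary token rows stay unrestricted, and only the
        # appended cls row is prevented from attending to padding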
attn_mask = F.pad(cls_mask, (0, 1, seq, 0), value=True)
# go through layers, but do not cross attend
for attn_ff, _ in self.layers:
text_tokens = attn_ff(text_tokens, attn_mask=attn_mask)
# get text cls token
text_tokens, text_cls_tokens = text_tokens[:, :-1], text_tokens[:, -1]
text_embeds = self.text_cls_norm(text_cls_tokens)
return text_embeds, text_tokens
def embed_image(self, images=None, image_tokens=None):
# encode images into embeddings
# with the img_encoder passed in at init
# it can also accept precomputed image tokens
assert not (exists(images) and exists(image_tokens))
if exists(images):
assert exists(self.img_encoder), 'img_encoder must be passed in for automatic image encoding'
image_tokens = self.img_encoder(images)
# attention pool image tokens
img_queries = repeat(self.img_queries, 'n d -> b n d', b=image_tokens.shape[0])
img_queries = self.img_attn_pool(img_queries, image_tokens)
img_queries = self.img_attn_pool_norm(img_queries)
return img_queries[:, 0], img_queries[:, 1:]
def forward(
self,
text,
text_mask = None,
images=None,
image_tokens=None,
labels=None,
return_loss=False,
return_embeddings=False
):
batch, device = text.shape[0], text.device
if return_loss and not exists(labels):
text, labels = text[:, :-1], text[:, 1:]
text_embeds, text_tokens = self.embed_text(text)
image_embeds, image_tokens = self.embed_image(images=images, image_tokens=image_tokens)
# return embeddings if that is what the researcher wants
if return_embeddings:
return text_embeds, image_embeds
# go through layers
for attn_ff, cross_attn in self.layers:
text_tokens = attn_ff(text_tokens)
if exists(cross_attn):
text_tokens = cross_attn(text_tokens, image_tokens)
logits = self.to_logits(text_tokens)
if not return_loss:
return logits
# shorthand
ce = F.cross_entropy
# calculate caption loss (cross entropy loss)
logits = rearrange(logits, 'b n c -> b c n')
caption_loss = ce(logits, labels, ignore_index=self.pad_id)
caption_loss = caption_loss * self.caption_loss_weight
# embedding to latents
text_latents = self.text_to_latents(text_embeds)
image_latents = self.img_to_latents(image_embeds)
# calculate contrastive loss
sim = einsum('i d, j d -> i j', text_latents, image_latents)
sim = sim * self.temperature.exp()
contrastive_labels = torch.arange(batch, device=device)
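        # symmetric cross entropy over the similarity matrix - matched text / image pairs lie on the diagonal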
contrastive_loss = (ce(sim, contrastive_labels) + ce(sim.t(), contrastive_labels)) * 0.5
contrastive_loss = contrastive_loss * self.contrastive_loss_weight
return caption_loss + contrastive_loss
| EXA-1-master | exa/models/MaMMUT-pytorch/mammut_pytorch/mammut_pytorch.py |
from mammut_pytorch.mammut_pytorch import MaMMUT
| EXA-1-master | exa/models/MaMMUT-pytorch/mammut_pytorch/__init__.py |
# coding=utf-8
import os
import sys
from pathlib import Path
from subprocess import DEVNULL, PIPE, run
from setuptools import find_packages, setup
project_root = Path(__file__).parent
# modified from https://github.com/lhotse-speech/lhotse/blob/master/setup.py
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #
# NOTE: REMEMBER TO UPDATE THE FALLBACK VERSION IN valle/__init__.py WHEN RELEASING #
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! #
MAJOR_VERSION = 1
MINOR_VERSION = 0
PATCH_VERSION = 0
IS_DEV_VERSION = True # False = public release, True = otherwise
if sys.version_info < (3,):
# fmt: off
print(
"Python 2 has reached end-of-life and is no longer supported by valle."
)
# fmt: on
sys.exit(-1)
if sys.version_info < (3, 7):
print(
"Python 3.6 has reached end-of-life on December 31st, 2021 "
"and is no longer supported by valle."
)
sys.exit(-1)
def discover_valle_version() -> str:
"""
Scans Valle source code to determine the current version.
When development version is detected, it queries git for the commit hash
to append it as a local version identifier.
Ideally this function would have been imported from valle.version and
re-used when valle is imported to set the version, but it introduces
a circular dependency. To avoid this, we write the determined version
into project_root / 'valle' / 'version.py' during setup and read it
from there later. If it's not detected, the version will be 0.0.0.dev.
"""
version = f"{MAJOR_VERSION}.{MINOR_VERSION}.{PATCH_VERSION}"
if not IS_DEV_VERSION:
# This is a PyPI public release -- return a clean version string.
return version
version = version + ".dev"
# This is not a PyPI release -- try to read the git commit
try:
git_commit = (
run(
["git", "rev-parse", "--short", "HEAD"],
check=True,
stdout=PIPE,
stderr=DEVNULL,
)
.stdout.decode()
.rstrip("\n")
.strip()
)
dirty_commit = (
len(
run(
["git", "diff", "--shortstat"],
check=True,
stdout=PIPE,
stderr=DEVNULL,
)
.stdout.decode()
.rstrip("\n")
.strip()
)
> 0
)
git_commit = (
git_commit + ".dirty" if dirty_commit else git_commit + ".clean"
)
source_version = f"+git.{git_commit}"
except Exception:
source_version = ".unknownsource"
# See the format:
# https://packaging.python.org/guides/distributing-packages-using-setuptools/#local-version-identifiers
version = version + source_version
return version
def mark_valle_version(version: str) -> None:
(project_root / "valle" / "version.py").write_text(
f'__version__ = "{version}"'
)
VALLE_VERSION = discover_valle_version()
mark_valle_version(VALLE_VERSION)
install_requires = [
"encodec",
"phonemizer",
]
try:
# If the user already installed PyTorch, make sure he has torchaudio too.
# Otherwise, we'll just install the latest versions from PyPI for the user.
import torch
try:
import torchaudio
except ImportError:
raise ValueError(
"We detected that you have already installed PyTorch, but haven't installed torchaudio. "
"Unfortunately we can't detect the compatible torchaudio version for you; "
"you will have to install it manually. "
"For instructions, please refer either to https://pytorch.org/get-started/locally/ "
"or https://github.com/pytorch/audio#dependencies"
)
except ImportError:
install_requires.extend(["torch", "torchaudio"])
docs_require = (
(project_root / "docs" / "requirements.txt").read_text().splitlines()
)
tests_require = [
# "pytest==7.1.3",
# "pytest-forked==1.4.0",
# "pytest-xdist==2.5.0",
# "pytest-cov==4.0.0",
]
workflow_requires = [""]
dev_requires = sorted(
docs_require
+ tests_require
+ workflow_requires
+ ["jupyterlab", "matplotlib"]
)
all_requires = sorted(dev_requires)
if os.environ.get("READTHEDOCS", False):
# When building documentation, omit torchaudio installation and mock it instead.
# This works around the inability to install libsoundfile1 in read-the-docs env,
# which caused the documentation builds to silently crash.
install_requires = [
req
for req in install_requires
if not any(req.startswith(dep) for dep in ["torchaudio", "SoundFile"])
]
setup(
name="valle",
version=VALLE_VERSION,
python_requires=">=3.7.0",
description="Neural Codec Language Models are Zero-Shot Text to Speech Synthesizers",
author="The Valle Development Team",
author_email="[email protected]",
long_description=(project_root / "README.md").read_text(encoding="utf-8"),
long_description_content_type="text/markdown",
license="Apache-2.0 License",
packages=find_packages(exclude=["test", "test.*"]),
include_package_data=True,
entry_points={},
install_requires=install_requires,
extras_require={
"docs": docs_require,
"tests": tests_require,
"dev": dev_requires,
"all": all_requires,
},
classifiers=[
"Development Status :: 1 - Beta",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Intended Audience :: Science/Research",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
"License :: OSI Approved :: Apache Software License",
"Topic :: Multimedia :: Sound/Audio :: Speech",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries :: Python Modules",
"Typing :: Typed",
],
)
| EXA-1-master | exa/models/valle/vall-e-main/setup.py |
from . import data, models, modules, utils
| EXA-1-master | exa/models/valle/vall-e-main/valle/__init__.py |
 | EXA-1-master | exa/models/valle/vall-e-main/valle/bin/__init__.py |
#!/usr/bin/env python3
# Copyright 2023 (authors: Feiteng Li)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Phonemize Text and EnCodec Audio.
Usage example:
python3 bin/tokenizer.py \
--src_dir ./data/manifests --output_dir ./data/tokenized
"""
import argparse
import logging
import os
from pathlib import Path
import torch
import torch.multiprocessing
from icefall.utils import get_executor
from lhotse import CutSet, NumpyHdf5Writer
from lhotse.recipes.utils import read_manifests_if_cached
from tqdm.auto import tqdm
from valle.data import (
AudioTokenConfig,
AudioTokenExtractor,
TextTokenizer,
tokenize_text,
)
from valle.data.fbank import get_fbank_extractor
from valle.utils import SymbolTable
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
# Torch's multithreaded behavior needs to be disabled or
# it wastes a lot of CPU and slow things down.
# Do this outside of main() in case it needs to take effect
# even when we are not invoking the main (e.g. when spawning subprocesses).
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
torch.multiprocessing.set_sharing_strategy("file_system")
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--src-dir",
type=Path,
default=Path("data/manifests"),
help="Path to the manifest files",
)
parser.add_argument(
"--output-dir",
type=Path,
default=Path("data/tokenized"),
help="Path to the tokenized files",
)
parser.add_argument(
"--text-extractor",
type=str,
default="espeak",
help="espeak or pypinyin or pypinyin_initials_finals",
)
parser.add_argument(
"--audio-extractor",
type=str,
default="Encodec",
help="Encodec or Fbank",
)
parser.add_argument(
"--dataset-parts",
type=str,
default="dev-clean test-clean",
help="Space separated dataset parts",
)
parser.add_argument(
"--prefix",
type=str,
default="libritts",
help="prefix of the manifest file",
)
parser.add_argument(
"--suffix",
type=str,
default="jsonl.gz",
help="suffix of the manifest file",
)
parser.add_argument(
"--batch-duration",
type=float,
default=400.0,
help="The maximum number of audio seconds in a batch."
"Determines batch size dynamically.",
)
return parser.parse_args()
def main():
args = get_args()
dataset_parts = args.dataset_parts.replace("--dataset-parts", "").strip()
if dataset_parts == "all":
dataset_parts = (
"dev-clean",
"dev-other",
"test-clean",
"test-other",
"train-clean-100",
"train-clean-360",
"train-other-500",
)
else:
dataset_parts = dataset_parts.replace("-p", "").split(" ")
manifests = read_manifests_if_cached(
dataset_parts=dataset_parts,
output_dir=args.src_dir,
prefix=args.prefix,
suffix=args.suffix,
)
text_tokenizer = TextTokenizer(backend=args.text_extractor)
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
unique_symbols = set()
if args.audio_extractor == "Encodec":
extractor = AudioTokenExtractor(AudioTokenConfig())
else:
assert args.audio_extractor == "Fbank"
extractor = get_fbank_extractor()
num_jobs = min(32, os.cpu_count())
with get_executor() as ex:
for partition, m in manifests.items():
logging.info(
f"Processing partition: {partition} CUDA: {torch.cuda.is_available()}"
)
cut_set = CutSet.from_manifests(
recordings=m["recordings"],
supervisions=m["supervisions"],
)
# Tokenize Audio
if args.audio_extractor == "Encodec":
storage_path = (
f"{args.output_dir}/{args.prefix}_encodec_{partition}"
)
else:
storage_path = (
f"{args.output_dir}/{args.prefix}_fbank_{partition}"
)
if args.prefix == "ljspeech" or args.prefix == "aishell":
if args.prefix == "aishell":
# NOTE: the loudness of aishell audio files is around -33
# The best way is datamodule --on-the-fly-feats --enable-audio-aug
cut_set = cut_set.normalize_loudness(
target=-20.0, affix_id=True
)
cut_set = cut_set.resample(24000)
with torch.no_grad():
if (
torch.cuda.is_available()
and args.audio_extractor == "Encodec"
):
cut_set = cut_set.compute_and_store_features_batch(
extractor=extractor,
storage_path=storage_path,
num_workers=num_jobs,
batch_duration=args.batch_duration,
collate=False,
overwrite=True,
storage_type=NumpyHdf5Writer,
)
else:
cut_set = cut_set.compute_and_store_features(
extractor=extractor,
storage_path=storage_path,
num_jobs=num_jobs if ex is None else 64,
executor=ex,
storage_type=NumpyHdf5Writer,
)
# Tokenize Text
for c in tqdm(cut_set):
if args.prefix == "ljspeech":
text = c.supervisions[0].custom["normalized_text"]
text = text.replace("”", '"').replace("“", '"')
phonemes = tokenize_text(text_tokenizer, text=text)
elif args.prefix == "aishell":
phonemes = tokenize_text(
text_tokenizer, text=c.supervisions[0].text
)
c.supervisions[0].custom = {}
else:
assert args.prefix == "libritts"
phonemes = tokenize_text(
text_tokenizer, text=c.supervisions[0].text
)
c.supervisions[0].custom["tokens"] = {"text": phonemes}
unique_symbols.update(phonemes)
cuts_filename = f"{args.prefix}_cuts_{partition}.{args.suffix}"
cut_set.to_file(f"{args.output_dir}/{cuts_filename}")
unique_phonemes = SymbolTable()
for s in sorted(list(unique_symbols)):
unique_phonemes.add(s)
logging.info(f"unique phonemes: {unique_symbols}")
unique_phonemes_file = f"{args.output_dir}/unique_text_tokens.k2symbols"
unique_phonemes.to_file(unique_phonemes_file)
if __name__ == "__main__":
formatter = (
"%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
)
logging.basicConfig(format=formatter, level=logging.INFO)
main()
| EXA-1-master | exa/models/valle/vall-e-main/valle/bin/tokenizer.py |
#!/usr/bin/env python3
# Copyright 2023 (authors: Feiteng Li)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Phonemize Text and EnCodec Audio.
Usage example:
python3 bin/infer.py \
--decoder-dim 128 --nhead 4 --num-decoder-layers 4 --model-name valle \
--text-prompts "Go to her." \
--audio-prompts ./prompts/61_70970_000007_000001.wav \
--output-dir infer/demo_valle_epoch20 \
--checkpoint exp/valle_nano_v2/epoch-20.pt
"""
import argparse
import logging
import os
from pathlib import Path
os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"
import torch
import torchaudio
from icefall.utils import str2bool
from valle.data import (
AudioTokenizer,
TextTokenizer,
tokenize_audio,
tokenize_text,
)
from valle.data.collation import get_text_token_collater
from valle.models import add_model_arguments, get_model
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--text-prompts",
type=str,
default="",
help="Text prompts which are separated by |.",
)
parser.add_argument(
"--audio-prompts",
type=str,
default="",
help="Audio prompts which are separated by | and should be aligned with --text-prompts.",
)
parser.add_argument(
"--text",
type=str,
default="To get up and running quickly just follow the steps below.",
help="Text to be synthesized.",
)
# model
add_model_arguments(parser)
parser.add_argument(
"--text-tokens",
type=str,
default="data/tokenized/unique_text_tokens.k2symbols",
help="Path to the unique text tokens file.",
)
parser.add_argument(
"--text-extractor",
type=str,
default="espeak",
help="espeak or pypinyin or pypinyin_initials_finals",
)
parser.add_argument(
"--checkpoint",
type=str,
default="exp/vallf_nano_full/checkpoint-100000.pt",
help="Path to the saved checkpoint.",
)
parser.add_argument(
"--output-dir",
type=Path,
default=Path("infer/demo"),
help="Path to the tokenized files.",
)
parser.add_argument(
"--top-k",
type=int,
default=-100,
help="Whether AR Decoder do top_k(if > 0) sampling.",
)
parser.add_argument(
"--temperature",
type=float,
default=1.0,
help="The temperature of AR Decoder top_k sampling.",
)
parser.add_argument(
"--continual",
type=str2bool,
default=False,
help="Do continual task.",
)
return parser.parse_args()
@torch.no_grad()
def main():
args = get_args()
text_tokenizer = TextTokenizer(backend=args.text_extractor)
text_collater = get_text_token_collater(args.text_tokens)
audio_tokenizer = AudioTokenizer()
device = torch.device("cpu")
if torch.cuda.is_available():
device = torch.device("cuda", 0)
model = get_model(args)
if args.checkpoint:
checkpoint = torch.load(args.checkpoint, map_location=device)
missing_keys, unexpected_keys = model.load_state_dict(
checkpoint["model"], strict=True
)
assert not missing_keys
# from icefall.checkpoint import save_checkpoint
# save_checkpoint(f"{args.checkpoint}", model=model)
model.to(device)
model.eval()
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
text_prompts = " ".join(args.text_prompts.split("|"))
audio_prompts = []
if args.audio_prompts:
for n, audio_file in enumerate(args.audio_prompts.split("|")):
encoded_frames = tokenize_audio(audio_tokenizer, audio_file)
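            # disabled debug path: decode the prompt tokens back to audio and save them for inspection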
if False:
samples = audio_tokenizer.decode(encoded_frames)
torchaudio.save(
f"{args.output_dir}/p{n}.wav", samples[0], 24000
)
audio_prompts.append(encoded_frames[0][0])
assert len(args.text_prompts.split("|")) == len(audio_prompts)
audio_prompts = torch.concat(audio_prompts, dim=-1).transpose(2, 1)
audio_prompts = audio_prompts.to(device)
if os.path.isfile(args.text): # for demos
# https://github.com/lifeiteng/lifeiteng.github.com/blob/main/valle/prepare.py
with open(args.text) as f:
for line in f:
fields = line.strip().split("\t")
assert len(fields) == 4
prompt_text, prompt_audio, text, audio_path = fields
logging.info(f"synthesize text: {text}")
text_tokens, text_tokens_lens = text_collater(
[
tokenize_text(
text_tokenizer, text=f"{prompt_text} {text}".strip()
)
]
)
_, enroll_x_lens = text_collater(
[
tokenize_text(
text_tokenizer, text=f"{prompt_text}".strip()
)
]
)
audio_prompts = tokenize_audio(audio_tokenizer, prompt_audio)
audio_prompts = audio_prompts[0][0].transpose(2, 1).to(device)
# synthesis
encoded_frames = model.inference(
text_tokens.to(device),
text_tokens_lens.to(device),
audio_prompts,
enroll_x_lens=enroll_x_lens,
top_k=args.top_k,
temperature=args.temperature,
)
samples = audio_tokenizer.decode(
[(encoded_frames.transpose(2, 1), None)]
)
# store
torchaudio.save(audio_path, samples[0].cpu(), 24000)
return
for n, text in enumerate(args.text.split("|")):
logging.info(f"synthesize text: {text}")
text_tokens, text_tokens_lens = text_collater(
[
tokenize_text(
text_tokenizer, text=f"{text_prompts} {text}".strip()
)
]
)
# synthesis
if args.continual:
assert text == ""
encoded_frames = model.continual(
text_tokens.to(device),
text_tokens_lens.to(device),
audio_prompts,
)
else:
enroll_x_lens = None
if text_prompts:
_, enroll_x_lens = text_collater(
[
tokenize_text(
text_tokenizer, text=f"{text_prompts}".strip()
)
]
)
encoded_frames = model.inference(
text_tokens.to(device),
text_tokens_lens.to(device),
audio_prompts,
enroll_x_lens=enroll_x_lens,
top_k=args.top_k,
temperature=args.temperature,
)
if audio_prompts != []:
samples = audio_tokenizer.decode(
[(encoded_frames.transpose(2, 1), None)]
)
# store
torchaudio.save(
f"{args.output_dir}/{n}.wav", samples[0].cpu(), 24000
)
else: # Transformer
model.visualize(encoded_frames, args.output_dir)
torch.set_num_threads(1)
torch.set_num_interop_threads(1)
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
torch._C._set_graph_executor_optimize(False)
if __name__ == "__main__":
formatter = (
"%(asctime)s %(levelname)s [%(filename)s:%(lineno)d] %(message)s"
)
logging.basicConfig(format=formatter, level=logging.INFO)
main()
| EXA-1-master | exa/models/valle/vall-e-main/valle/bin/infer.py |
#!/usr/bin/env python3
# Copyright 2021 Xiaomi Corp. (authors: Fangjun Kuang)
# Copyright 2023 (authors: Feiteng Li)
#
# See ../../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file displays duration statistics of utterances in the manifests.
You can use the displayed value to choose minimum/maximum duration
to remove short and long utterances during the training.
"""
from lhotse import load_manifest_lazy
def main():
for part in ["train", "dev", "test"]:
print(f"## {part}")
cuts = load_manifest_lazy(f"./data/tokenized/cuts_{part}.jsonl.gz")
cuts.describe()
print("\n")
if __name__ == "__main__":
main()
| EXA-1-master | exa/models/valle/vall-e-main/valle/bin/display_manifest_statistics.py |